| code (stringlengths 22 to 1.05M) | apis (sequencelengths 1 to 3.31k) | extract_api (stringlengths 75 to 3.25M) |
|---|---|---|
from io import BytesIO
from io import StringIO
import json
from bson.dbref import DBRef
import datetime
from bson import json_util
import logging
import base64
jsonCode ={
"building":{
"Essae Vaishnavi Solitaire": {
"id": "B1",
"division": {
"SS": {
"id": "D1",
"dept":{
"Semicon":{
"id":"DEP1",
"floor":{"0":"0",
"1":"1",
"2":"2",
"3":"3",
"4":"4",
"5":"5",
"6":"6"
}
},
"RND":{
"id":"DEP2",
"floor":{"0":"0",
"1":"1",
"2":"2",
"3":"3",
"4":"4",
"5":"5",
"6":"6"
}
},
"Mobile":{
"id":"DEP3",
"floor":{"0":"0",
"1":"1",
"2":"2",
"3":"3",
"4":"4",
"5":"5",
"6":"6"
}
}
}
},
"TTEC": {
"id": "D2",
"dept":{
"TTEC-AL":{
"id":"DEP1",
"floor":{"0":"0",
"1":"1",
"2":"2",
"3":"3",
"4":"4",
"5":"5",
"6":"6"
}
},
"TTEC-SL":{
"id":"DEP2",
"floor":{"0":"0",
"1":"1",
"2":"2",
"3":"3",
"4":"4",
"5":"5",
"6":"6"
}
},
"TTEC-DL":{
"id":"DEP3",
"floor":{"0":"0",
"1":"1",
"2":"2",
"3":"3",
"4":"4",
"5":"5",
"6":"6"
}
},
"TTEC-CI":{
"id":"DEP4",
"floor":{"0":"0",
"1":"1",
"2":"2",
"3":"3",
"4":"4",
"5":"5",
"6":"6"
}
}
}
}
}
},
"Fortune Summit": {
"id": "B2",
"division": {
"TMSC": {
"id": "D1",
"dept":{
"Medical":{
"id":"DEP1",
"floor":{"0":"0",
"1":"1",
"2":"2",
"3":"3",
"4":"4",
"5":"5",
"6":"6"
}
},
"RND":{
"id":"DEP2",
"floor":{"0":"0",
"1":"1",
"2":"2",
"3":"3",
"4":"4",
"5":"5",
"6":"6"
}
},
"Imaging":{
"id":"DEP3",
"floor":{"0":"0",
"1":"1",
"2":"2",
"3":"3",
"4":"4",
"5":"5",
"6":"6"
}
}
}
},
"tmc": {
"id": "D2",
"dept":{
"tmc-1":{
"id":"DEP1",
"floor":{"0":"0",
"1":"1",
"2":"2",
"3":"3",
"4":"4",
"5":"5",
"6":"6"
}
},
"tmc-2":{
"id":"DEP2",
"floor":{"0":"0",
"1":"1",
"2":"2",
"3":"3",
"4":"4",
"5":"5",
"6":"6"
}
},
"tmc-3":{
"id":"DEP3",
"floor":{"0":"0",
"1":"1",
"2":"2",
"3":"3",
"4":"4",
"5":"5",
"6":"6"
}
}
}
}
}
}
}
}
#Create and configure logger
logging.basicConfig(filename="server.log",
format='%(asctime)s %(message)s',
filemode='a')
#Creating an object
logger=logging.getLogger()
#Setting the threshold of logger to DEBUG
logger.setLevel(logging.DEBUG)
import pymongo
uri = "mongodb://218ffa09-0ee0-4-231-b9ee:zTV4cwDG0vM49J2GFsw72JzwOD79Bv3dPU8fbVLb5pbh3p0CmTBYcvhrFKTjtl1s7hgYSfRbMOrsVve6hfvhag==@218ffa09-0ee0-4-231-b9ee.documents.azure.com:10255/?ssl=true&replicaSet=globaldb"
client = pymongo.MongoClient(uri)
print("Obtained the client")
mydb = client.test
def sortingReq(item):
new_thrash_date = datetime.datetime.strptime(item["scan_date"], '%d-%m-%Y').date()
return new_thrash_date
def checkIfAutoThrashed(jsonData, tags):
    """Auto-thrash heuristic: True when at least 10 previously trashed mails of
    this user share 3 or more tags with the incoming mail."""
    if len(tags) < 3:
        return False
    a = mydb.userInfo.find_one({"name": jsonData["name"]})
    newDbref = DBRef("mydb.userInfo", a["_id"])
    # Query once and reuse the result instead of running the same query twice.
    foundMailsList = list(mydb.mltable.find({"otherdbref": newDbref, "status": "trash"}))
    if len(foundMailsList) < 10:
        return False
    thrashcount = 0
    for item in foundMailsList:
        # Count matching tags per trashed mail, resetting the counter for every mail.
        tagcount = 0
        for tag in tags:
            if tag in item["tags"]:
                tagcount += 1
        if tagcount >= 3:
            thrashcount += 1
    return thrashcount >= 10
def generateqrcode(jsonData,filenameJPG,tags,fromMFP):
logger.debug("Received data for generating color code = ")
logger.debug(jsonData)
ilocation=1
today = datetime.datetime.now()
date = str(today.day)
time = str(today.hour) + ":" + str(today.minute) + ":" + str(today.second)+":"+str(today.microsecond)
dateTimeNow = date+':'+time
logger.debug("Current Datetime - "+dateTimeNow)
dateTimeNow = str(today.day)+str(today.hour)+str(today.minute)+str(today.second)+(str(today.microsecond)[:2])
logger.debug("Unique Code - "+dateTimeNow)
    cubicle = int(jsonData["cubicle"])
    # Bucket the cubicle number by 25s; ilocation is currently not used further below.
    if cubicle <= 25:
        ilocation = 1
    elif cubicle <= 50:
        ilocation = 2
    elif cubicle <= 75:
        ilocation = 3
    else:
        ilocation = 4
logger.debug(jsonData["building"])
colorCode=jsonCode["building"][jsonData["building"]]["id"]+':'+jsonCode["building"][jsonData["building"]]["division"][jsonData["division"]]["id"]+':'+dateTimeNow
logger.debug("ColorCode - "+colorCode)
logger.debug("generateColorCode:: ColorCode value ="+colorCode)
import qrcode
img = qrcode.make(colorCode)
logger.debug(type(img))
autoThrashed = checkIfAutoThrashed(jsonData,tags)
logger.debug("Auto thrashed value is %d" % autoThrashed)
logger.debug("Tags are %s" % tags)
import sendEmail as se
se.execute(str(jsonData["email"]),filenameJPG,str(colorCode),img,autoThrashed,fromMFP)
#img = qrcode.make(colorCode)
#img.save(colorCode+".png")
newjsonData = {"name":jsonData["name"],"code":colorCode,"email":jsonData["email"],"division":jsonData["division"],"department":jsonData["department"],"floor":jsonData["floor"],"cubicle":jsonData["cubicle"],"building":jsonData["building"]}
if(fromMFP):
newjsonData["source"] = "MFP"
else:
newjsonData["source"] = "Mobile"
return addEntry(newjsonData,tags,autoThrashed);
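# Illustrative sketch (values are made up): given the jsonCode lookup table above,
# a payload such as
#   {"building": "Fortune Summit", "division": "TMSC", "department": "Medical",
#    "floor": "2", "cubicle": "12", "name": "jdoe", "email": "jdoe@example.com"}
# produces a colorCode of the form "B2:D1:<unique-time-suffix>", i.e.
# building id + ":" + division id + ":" + digits derived from the current time,
# which is then encoded into the QR image and emailed via sendEmail.execute.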
def addEntry(jsonData,tags,autoThrashed):
a = mydb.userInfo.find_one({"name":jsonData["name"]})
newDbref = DBRef("mydb.userInfo",a["_id"])
scan_date = datetime.datetime.today()
scan_date = scan_date + datetime.timedelta(hours=9)
end_date = scan_date + datetime.timedelta(days=10)
scan_date = str(scan_date.day) +"-"+ str(scan_date.month)+"-" + str(scan_date.year)
end_date = str(end_date.day) +"-" +str(end_date.month)+"-" + str(end_date.year)
if(autoThrashed):
end_date = scan_date
if( not autoThrashed and len(tags) >= 3):
#mydb.mltable.insert({"code":jsonData["code"],"tags": tags,"status":"Keep","user_id":1,"otherdbref":newDbref}) Actual Code
mydb.mltable.insert({"code":jsonData["code"],"tags": tags,"status":"Keep","user_id":1,"otherdbref":newDbref})#Test code to be removed
#end_date = scan_date
mydb.userMailInfo.insert({"code":jsonData["code"],"scan_date":scan_date,"end_date":end_date,"otherdbref":newDbref,"userDeleted":False,"user_id":1,"source":jsonData["source"]})
jsonData["autoThrashed"] = autoThrashed
return json.dumps(jsonData)
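# For reference (shape only, values illustrative): each userMailInfo document
# inserted above looks roughly like
#   {"code": "B2:D1:1212300512", "scan_date": "21-3-2019", "end_date": "31-3-2019",
#    "otherdbref": DBRef("mydb.userInfo", <user _id>), "userDeleted": False,
#    "user_id": 1, "source": "MFP"}
# with end_date forced equal to scan_date when the mail was auto-thrashed.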
def read_fromDB():
new_list = list()
for item in mydb.userMailInfo.find({},{"_id":0,"user_id":0}):
print(item)
otherdbref = item["otherdbref"]
newjson = mydb.userInfo.find_one({"_id":otherdbref.id},{"_id":0,"user_id":0})
dall = {}
item.pop("otherdbref")
dall.update(item)
dall.update(newjson)
print(dall)
new_list.append(dall)
new_list.reverse()
return json.dumps(new_list,default=json_util.default)
def getspecificDate(jsonData):
logger.debug(jsonData)
num = int(jsonData['page'])
skips = 10 * (num - 1)
if(jsonData["action"] == "all"):
all_list = list(mydb.userMailInfo.find({"userDeleted":False},{'_id' : 0,'user_id':0}))
all_list.reverse()
totalsize = len(all_list)
all_list = all_list[skips:]
all_list = all_list[:10]
new_list_new = list()
for item in all_list:
otherdbref = item["otherdbref"]
newjson = mydb.userInfo.find_one({"_id":otherdbref.id},{"_id":0,"user_id":0})
dall = {}
item.pop("otherdbref")
dall.update(item)
dall.update(newjson)
print(dall)
new_list_new.append(dall)
new_list_new.append({"totalsize":totalsize})
logger.debug(new_list_new)
#new_list_new.sort(key = lambda x : x["name"])
return json.dumps(new_list_new, default=json_util.default)
elif(jsonData["action"] == "today"):
all_list = list(mydb.userMailInfo.find({"userDeleted":False},{'_id' : 0,'user_id':0}))
thrash_date = datetime.datetime.today()
thrash_date = thrash_date + datetime.timedelta(hours=9)
thrash_date = str(thrash_date.day) + "-" +str(thrash_date.month)+"-" + str(thrash_date.year)
thrash_date = datetime.datetime.strptime(thrash_date, '%d-%m-%Y').date()
new_list = list()
for item in all_list:
if(item['end_date'] == "DONT TRASH"):
continue
db_date = datetime.datetime.strptime(item['end_date'],'%d-%m-%Y').date()
if(db_date <= thrash_date):
new_list.append(item)
new_list.reverse()
totalsize = len(new_list)
new_list = new_list[skips:]
new_list = new_list[:10]
new_list_new = list()
for item in new_list:
otherdbref = item["otherdbref"]
newjson = mydb.userInfo.find_one({"_id":otherdbref.id},{"_id":0,"user_id":0})
dall = {}
item.pop("otherdbref")
dall.update(item)
dall.update(newjson)
print(dall)
new_list_new.append(dall)
new_list_new.append({"totalsize":totalsize})
logger.debug(new_list_new)
#new_list_new.sort(key = lambda x : x["name"])
return json.dumps(new_list_new, default=json_util.default)
else:
all_list = list(mydb.userMailInfo.find({"userDeleted":False},{'_id' : 0,'user_id':0}))
thrash_date = datetime.datetime.today()
thrash_date = thrash_date + datetime.timedelta(hours=9)
thrash_date = str(thrash_date.day) + "-" +str(thrash_date.month)+"-" + str(thrash_date.year)
thrash_date = datetime.datetime.strptime(thrash_date, '%d-%m-%Y').date()
new_list = list()
for item in all_list:
db_date = datetime.datetime.strptime(item['scan_date'],'%d-%m-%Y').date()
if(db_date == thrash_date):
new_list.append(item)
new_list.reverse()
totalsize = len(new_list)
new_list = new_list[skips:]
new_list = new_list[:10]
new_list_new = list()
for item in new_list:
otherdbref = item["otherdbref"]
newjson = mydb.userInfo.find_one({"_id":otherdbref.id},{"_id":0,"user_id":0})
dall = {}
item.pop("otherdbref")
dall.update(item)
dall.update(newjson)
print(dall)
new_list_new.append(dall)
new_list_new.append({"totalsize":totalsize})
logger.debug(new_list_new)
return json.dumps(new_list_new, default=json_util.default)
def update_DB(jsonData):
logger.debug("DBUMI::Update_db() entry")
logger.debug(jsonData["code"])
logger.debug(jsonData["end_date"])
foundmail = mydb.userMailInfo.find_one({"code":jsonData["code"]},{"_id":1})
logger.debug(foundmail)
foundMl = mydb.mltable.find_one({"code":jsonData["code"]},{"_id":1})
logger.debug(foundMl)
mydb.userMailInfo.update_many({"_id":foundmail["_id"],"user_id":1},{"$set":{'end_date':str(jsonData['end_date'])}})
if(not jsonData['end_date'] == "DONT TRASH"):
mydb.mltable.update_many({"_id":foundMl["_id"],"user_id":1},{"$set":{"status":"trash"}})
return json.dumps({"status": "Success","statusreason": "updateSucess"})
#Clear DB only for testing
def delete_entry(jsonData):
logger.debug("DBUMI::delete_entry() entry")
logger.debug(jsonData["code"])
mydb.userMailInfo.delete_one({"code":jsonData["code"],"user_id":1})
return json.dumps({"status": "Success","statusreason": "updateSucess"})
def clear_db():
mydb.userMailInfo.remove({})
| [
"logging.basicConfig",
"bson.dbref.DBRef",
"logging.getLogger",
"datetime.datetime.strptime",
"json.dumps",
"datetime.datetime.now",
"qrcode.make",
"datetime.datetime.today",
"pymongo.MongoClient",
"datetime.timedelta"
] | [((7101, 7195), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""server.log"""', 'format': '"""%(asctime)s %(message)s"""', 'filemode': '"""a"""'}), "(filename='server.log', format='%(asctime)s %(message)s',\n filemode='a')\n", (7120, 7195), False, 'import logging\n'), ((7282, 7301), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (7299, 7301), False, 'import logging\n'), ((7618, 7642), 'pymongo.MongoClient', 'pymongo.MongoClient', (['uri'], {}), '(uri)\n', (7637, 7642), False, 'import pymongo\n'), ((7984, 8016), 'bson.dbref.DBRef', 'DBRef', (['"""mydb.userInfo"""', "a['_id']"], {}), "('mydb.userInfo', a['_id'])\n", (7989, 8016), False, 'from bson.dbref import DBRef\n'), ((8680, 8703), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8701, 8703), False, 'import datetime\n'), ((9637, 9659), 'qrcode.make', 'qrcode.make', (['colorCode'], {}), '(colorCode)\n', (9648, 9659), False, 'import qrcode\n'), ((10546, 10578), 'bson.dbref.DBRef', 'DBRef', (['"""mydb.userInfo"""', "a['_id']"], {}), "('mydb.userInfo', a['_id'])\n", (10551, 10578), False, 'from bson.dbref import DBRef\n'), ((10594, 10619), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (10617, 10619), False, 'import datetime\n'), ((11539, 11559), 'json.dumps', 'json.dumps', (['jsonData'], {}), '(jsonData)\n', (11549, 11559), False, 'import json\n'), ((12001, 12048), 'json.dumps', 'json.dumps', (['new_list'], {'default': 'json_util.default'}), '(new_list, default=json_util.default)\n', (12011, 12048), False, 'import json\n'), ((16407, 16472), 'json.dumps', 'json.dumps', (["{'status': 'Success', 'statusreason': 'updateSucess'}"], {}), "({'status': 'Success', 'statusreason': 'updateSucess'})\n", (16417, 16472), False, 'import json\n'), ((16693, 16758), 'json.dumps', 'json.dumps', (["{'status': 'Success', 'statusreason': 'updateSucess'}"], {}), "({'status': 'Success', 'statusreason': 'updateSucess'})\n", (16703, 16758), False, 'import json\n'), ((10648, 10675), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(9)'}), '(hours=9)\n', (10666, 10675), False, 'import datetime\n'), ((10703, 10730), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (10721, 10730), False, 'import datetime\n'), ((12971, 13022), 'json.dumps', 'json.dumps', (['new_list_new'], {'default': 'json_util.default'}), '(new_list_new, default=json_util.default)\n', (12981, 13022), False, 'import json\n'), ((7735, 7792), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["item['scan_date']", '"""%d-%m-%Y"""'], {}), "(item['scan_date'], '%d-%m-%Y')\n", (7761, 7792), False, 'import datetime\n'), ((13190, 13215), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (13213, 13215), False, 'import datetime\n'), ((14438, 14489), 'json.dumps', 'json.dumps', (['new_list_new'], {'default': 'json_util.default'}), '(new_list_new, default=json_util.default)\n', (14448, 14489), False, 'import json\n'), ((14617, 14642), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (14640, 14642), False, 'import datetime\n'), ((15726, 15777), 'json.dumps', 'json.dumps', (['new_list_new'], {'default': 'json_util.default'}), '(new_list_new, default=json_util.default)\n', (15736, 15777), False, 'import json\n'), ((13252, 13279), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(9)'}), '(hours=9)\n', (13270, 13279), False, 'import datetime\n'), ((14679, 14706), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(9)'}), 
'(hours=9)\n', (14697, 14706), False, 'import datetime\n'), ((13411, 13462), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['thrash_date', '"""%d-%m-%Y"""'], {}), "(thrash_date, '%d-%m-%Y')\n", (13437, 13462), False, 'import datetime\n'), ((14838, 14889), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['thrash_date', '"""%d-%m-%Y"""'], {}), "(thrash_date, '%d-%m-%Y')\n", (14864, 14889), False, 'import datetime\n'), ((13623, 13679), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["item['end_date']", '"""%d-%m-%Y"""'], {}), "(item['end_date'], '%d-%m-%Y')\n", (13649, 13679), False, 'import datetime\n'), ((14975, 15032), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["item['scan_date']", '"""%d-%m-%Y"""'], {}), "(item['scan_date'], '%d-%m-%Y')\n", (15001, 15032), False, 'import datetime\n')] |
# python3
# coding=utf-8
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dags.bq_to_cm_dag."""
import unittest
from airflow.contrib.hooks import bigquery_hook
from airflow.models import baseoperator
from airflow.models import dag
from airflow.models import variable
import mock
from gps_building_blocks.cloud.utils import cloud_auth
from dags import bq_to_cm_dag
from plugins.pipeline_plugins.hooks import monitoring_hook
_DAG_NAME = bq_to_cm_dag._DAG_NAME
AIRFLOW_VARIABLES = {
'dag_name': _DAG_NAME,
f'{_DAG_NAME}_schedule': '@once',
f'{_DAG_NAME}_retries': 0,
f'{_DAG_NAME}_retry_delay': 3,
f'{_DAG_NAME}_is_retry': True,
f'{_DAG_NAME}_is_run': True,
f'{_DAG_NAME}_enable_run_report': False,
f'{_DAG_NAME}_enable_monitoring': True,
f'{_DAG_NAME}_enable_monitoring_cleanup': False,
'monitoring_data_days_to_live': 50,
'monitoring_dataset': 'test_monitoring_dataset',
'monitoring_table': 'test_monitoring_table',
'monitoring_bq_conn_id': 'test_monitoring_conn',
'bq_dataset_id': 'test_dataset',
'bq_table_id': 'test_table',
'cm_profile_id': 'cm_profile_id',
'cm_service_account': 'cm_service_account'
}
class BQToCMDAGTest(unittest.TestCase):
def setUp(self):
super(BQToCMDAGTest, self).setUp()
self.addCleanup(mock.patch.stopall)
self.build_impersonated_client_mock = mock.patch.object(
cloud_auth, 'build_impersonated_client', autospec=True)
self.build_impersonated_client_mock.return_value = mock.Mock()
self.build_impersonated_client_mock.start()
self.mock_variable = mock.patch.object(
variable, 'Variable', autospec=True).start()
    # `side_effect` is assigned a lambda so that every call to
    # self.mock_variable.get() dynamically returns the matching AIRFLOW_VARIABLES entry.
self.mock_variable.get.side_effect = (
lambda key, value: AIRFLOW_VARIABLES[key])
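    # e.g. (illustrative): variable.Variable.get('dag_name', None) now resolves to
    # AIRFLOW_VARIABLES['dag_name'] without touching a real Airflow metadata database.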
self.original_bigquery_hook_init = bigquery_hook.BigQueryHook.__init__
bigquery_hook.BigQueryHook.__init__ = mock.MagicMock()
self.original_monitoring_hook = monitoring_hook.MonitoringHook
monitoring_hook.MonitoringHook = mock.MagicMock()
def tearDown(self):
super().tearDown()
bigquery_hook.BigQueryHook.__init__ = self.original_bigquery_hook_init
monitoring_hook.MonitoringHook = self.original_monitoring_hook
def test_create_dag(self):
"""Tests that returned DAG contains correct DAG and tasks."""
expected_task_ids = ['bq_to_cm_retry_task', 'bq_to_cm_task']
test_dag = bq_to_cm_dag.BigQueryToCMDag(
AIRFLOW_VARIABLES['dag_name']).create_dag()
self.assertIsInstance(test_dag, dag.DAG)
self.assertEqual(len(test_dag.tasks), len(expected_task_ids))
for task in test_dag.tasks:
self.assertIsInstance(task, baseoperator.BaseOperator)
actual_task_ids = [t.task_id for t in test_dag.tasks]
self.assertListEqual(actual_task_ids, expected_task_ids)
if __name__ == '__main__':
unittest.main()
| [
"mock.Mock",
"mock.patch.object",
"dags.bq_to_cm_dag.BigQueryToCMDag",
"unittest.main",
"mock.MagicMock"
] | [((3473, 3488), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3486, 3488), False, 'import unittest\n'), ((1901, 1974), 'mock.patch.object', 'mock.patch.object', (['cloud_auth', '"""build_impersonated_client"""'], {'autospec': '(True)'}), "(cloud_auth, 'build_impersonated_client', autospec=True)\n", (1918, 1974), False, 'import mock\n'), ((2039, 2050), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (2048, 2050), False, 'import mock\n'), ((2532, 2548), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (2546, 2548), False, 'import mock\n'), ((2654, 2670), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (2668, 2670), False, 'import mock\n'), ((2124, 2178), 'mock.patch.object', 'mock.patch.object', (['variable', '"""Variable"""'], {'autospec': '(True)'}), "(variable, 'Variable', autospec=True)\n", (2141, 2178), False, 'import mock\n'), ((3036, 3095), 'dags.bq_to_cm_dag.BigQueryToCMDag', 'bq_to_cm_dag.BigQueryToCMDag', (["AIRFLOW_VARIABLES['dag_name']"], {}), "(AIRFLOW_VARIABLES['dag_name'])\n", (3064, 3095), False, 'from dags import bq_to_cm_dag\n')] |
from django.core.management.base import BaseCommand
import logging
import re
from talentmap_api.common.xml_helpers import XMLloader, strip_extra_spaces, parse_boolean, parse_date, get_nested_tag
from talentmap_api.language.models import Language, Proficiency
from talentmap_api.position.models import Grade, Skill, Position, CapsuleDescription, SkillCone
from talentmap_api.organization.models import Organization, Post, TourOfDuty, Location, Country
class Command(BaseCommand):
    help = 'Loads an XML file into a supported model'
logger = logging.getLogger(__name__)
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
self.modes = {
'languages': mode_languages,
'proficiencies': mode_proficiencies,
'grades': mode_grades,
'skills': mode_skills,
'organizations': mode_organizations,
'positions': mode_positions,
'tours_of_duty': mode_tour_of_duty,
'posts': mode_post,
'countries': mode_country,
'locations': mode_location,
'capsule_descriptions': mode_capsule_description,
'skill_cone': mode_skill_cone
}
def add_arguments(self, parser):
parser.add_argument('file', nargs=1, type=str, help="The XML file to load")
parser.add_argument('type', nargs=1, type=str, choices=self.modes.keys(), help="The type of data in the XML")
parser.add_argument('--delete', dest='delete', action='store_true', help='Delete collisions')
parser.add_argument('--update', dest='update', action='store_true', help='Update collisions')
parser.add_argument('--skippost', dest='skip_post', action='store_true', help='Skip post load functions')
def handle(self, *args, **options):
model, instance_tag, tag_map, collision_field, post_load_function = self.modes[options['type'][0]]()
# Set / update the collision behavior
collision_behavior = None
if options['delete']:
collision_behavior = "delete"
elif options['update']:
collision_behavior = "update"
else:
collision_behavior = "skip"
loader = XMLloader(model, instance_tag, tag_map, collision_behavior, collision_field)
new_ids, updated_ids = loader.create_models_from_xml(options['file'][0])
# Run the post load function, if it exists
if callable(post_load_function) and not options['skip_post']:
post_load_function(new_ids, updated_ids)
self.logger.info(f"XML Load Report\n\tNew: {len(new_ids)}\n\tUpdated: {len(updated_ids)}\t\t")
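# Hypothetical invocation (the real command name is this module's file name, which
# is not shown here):
#   python manage.py <command_name> organizations.xml organizations --update
# i.e. load every DATA_RECORD element from organizations.xml into Organization,
# updating rows whose collision_field ("code") already exists.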
def mode_languages():
model = Language
instance_tag = "LANGUAGES:LANGUAGE"
collision_field = "code"
tag_map = {
"LANGUAGES:LANG_CODE": "code",
"LANGUAGES:LANG_LONG_DESC": "long_description",
"LANGUAGES:LANG_SHORT_DESC": "short_description",
"LANGUAGES:LANG_EFFECTIVE_DATE": parse_date("effective_date")
}
return (model, instance_tag, tag_map, collision_field, None)
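# Illustrative sketch of the XML this mode expects (element names inferred from the
# tag_map above; the real feed may differ):
#   <LANGUAGES:LANGUAGE>
#     <LANGUAGES:LANG_CODE>FR</LANGUAGES:LANG_CODE>
#     <LANGUAGES:LANG_LONG_DESC>French</LANGUAGES:LANG_LONG_DESC>
#     <LANGUAGES:LANG_SHORT_DESC>French</LANGUAGES:LANG_SHORT_DESC>
#     <LANGUAGES:LANG_EFFECTIVE_DATE>2017-09-01</LANGUAGES:LANG_EFFECTIVE_DATE>
#   </LANGUAGES:LANGUAGE>
# Each LANGUAGES:LANGUAGE element becomes one Language row, with "code" used to
# detect collisions with existing records.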
def mode_proficiencies():
model = Proficiency
instance_tag = "LANGUAGE_PROFICIENCY:LANGUAGE_PROFICIENCY"
collision_field = "code"
tag_map = {
"LANGUAGE_PROFICIENCY:LP_CODE": "code",
"LANGUAGE_PROFICIENCY:LP_DESC": "description"
}
return (model, instance_tag, tag_map, collision_field, None)
def mode_grades():
model = Grade
instance_tag = "GRADES:GRADE"
collision_field = "code"
tag_map = {
"GRADES:GRD_GRADE_CODE": "code"
}
def post_load_function(new_ids, updated_ids):
for pos in Grade.objects.filter(id__in=new_ids + updated_ids):
pos.update_relationships()
return (model, instance_tag, tag_map, collision_field, post_load_function)
def mode_skills():
model = Skill
instance_tag = "SKILLS:SKILL"
collision_field = "code"
tag_map = {
"SKILLS:SKILL_CODE": "code",
"SKILLS:SKILL_DESCRIPTION": "description"
}
return (model, instance_tag, tag_map, collision_field, None)
def mode_organizations():
model = Organization
instance_tag = "DATA_RECORD"
collision_field = "code"
tag_map = {
"ORG_CODE": "code",
"ORG_SHORT_DESC": "short_description",
"ORG_LONG_DESC": strip_extra_spaces("long_description"),
"ORG_PARENT_ORG_CODE": "_parent_organization_code",
"ORG_BUREAU_ORG_CODE": "_parent_bureau_code",
"ORG_LOCATION_CODE": "_location_code"
}
# Update relationships
def post_load_function(new_ids, updated_ids):
for org in Organization.objects.filter(id__in=new_ids + updated_ids):
org.update_relationships()
# Regional code setting is done automatically by DOS Webservices, so
# we now only need this logic when loading from our sample XML files
# Array of regional codes
regional_codes = [
"110000",
"120000",
"130000",
"140000",
"146000",
"150000",
"160000"
]
if org.code in regional_codes:
org.is_regional = True
else:
org.is_regional = False
if org.code == org._parent_bureau_code:
org.is_bureau = True
org.save()
return (model, instance_tag, tag_map, collision_field, post_load_function)
def mode_positions():
model = Position
instance_tag = "POSITIONS:POSITION"
collision_field = "_seq_num"
tag_map = {
"POSITIONS:POS_SEQ_NUM": "_seq_num",
"POSITIONS:POS_NUM_TEXT": "position_number",
"POSITIONS:POS_TITLE_CODE": "_title_code",
"POSITIONS:POS_TITLE_DESC": "title",
"POSITIONS:POS_ORG_CODE": "_org_code",
"POSITIONS:POS_BUREAU_CODE": "_bureau_code",
"POSITIONS:POS_SKILL_CODE": "_skill_code",
"POSITIONS:POS_STAFF_PTRN_SKILL_CODE": "_staff_ptrn_skill_code",
"POSITIONS:POS_OVERSEAS_IND": parse_boolean("is_overseas", ['O']),
"POSITIONS:POS_PAY_PLAN_CODE": "_pay_plan_code",
"POSITIONS:POS_STATUS_CODE": "_status_code",
"POSITIONS:POS_SERVICE_TYPE_CODE": "_service_type_code",
"POSITIONS:POS_GRADE_CODE": "_grade_code",
"POSITIONS:POS_POST_CODE": "_post_code",
"POSITIONS:POS_LANGUAGE_1_CODE": "_language_1_code",
"POSITIONS:POS_LANGUAGE_2_CODE": "_language_2_code",
"POSITIONS:POS_LOCATION_CODE": "_location_code",
"POSITIONS:POS_LANG_REQ_1_CODE": "_language_req_1_code",
"POSITIONS:POS_LANG_REQ_2_CODE": "_language_req_2_code",
"POSITIONS:POS_SPEAK_PROFICIENCY_1_CODE": "_language_1_spoken_proficiency_code",
"POSITIONS:POS_READ_PROFICIENCY_1_CODE": "_language_1_reading_proficiency_code",
"POSITIONS:POS_SPEAK_PROFICIENCY_2_CODE": "_language_2_spoken_proficiency_code",
"POSITIONS:POS_READ_PROFICIENCY_2_CODE": "_language_2_reading_proficiency_code",
"POSITIONS:POS_CREATE_ID": "_create_id",
"POSITIONS:POS_CREATE_DATE": parse_date("create_date"),
"POSITIONS:POS_UPDATE_ID": "_update_id",
"POSITIONS:POS_UPDATE_DATE": parse_date("update_date"),
"POSITIONS:POS_EFFECTIVE_DATE": parse_date("effective_date"),
"POSITIONS:POS_JOBCODE_CODE": "_jobcode_code",
"POSITIONS:POS_OCC_SERIES_CODE": "_occ_series_code",
}
def post_load_function(new_ids, updated_ids):
for pos in Position.objects.filter(id__in=new_ids + updated_ids):
pos.update_relationships()
return (model, instance_tag, tag_map, collision_field, post_load_function)
def mode_tour_of_duty():
model = TourOfDuty
instance_tag = "TOUR_OF_DUTIES:TOUR_OF_DUTY"
collision_field = "code"
tag_map = {
"TOUR_OF_DUTIES:TOD_CODE": "code",
"TOUR_OF_DUTIES:TOD_SHORT_DESC": "short_description",
"TOUR_OF_DUTIES:TOD_DESC_TEXT": lambda instance, item: setattr(instance, "long_description", re.sub('&', '&', item.text).strip()),
"TOUR_OF_DUTIES:TOD_MONTHS_NUM": "months"
}
return (model, instance_tag, tag_map, collision_field, None)
def mode_post():
model = Post
instance_tag = "BIDPOSTS:BIDDING_TOOL"
collision_field = "_location_code"
tag_map = {
"BIDPOSTS:DSC_CD": "_location_code",
"BIDPOSTS:TOD_CODE": "_tod_code",
"BIDPOSTS:BT_COST_OF_LIVING_ADJUST_NUM": "cost_of_living_adjustment",
"BIDPOSTS:BT_DIFFERENTIAL_RATE_NUM": "differential_rate",
"BIDPOSTS:BT_REST_RELAXATION_POINT_TEXT": strip_extra_spaces("rest_relaxation_point"),
"BIDPOSTS:BT_DANGER_PAY_NUM": "danger_pay",
"BIDPOSTS:BT_CONSUMABLE_ALLOWANCE_FLG": parse_boolean("has_consumable_allowance"),
"BIDPOSTS:BT_SERVICE_NEEDS_DIFF_FLG": parse_boolean("has_service_needs_differential"),
}
def post_load_function(new_ids, updated_ids):
for loc in Post.objects.filter(id__in=new_ids + updated_ids):
loc.update_relationships()
return (model, instance_tag, tag_map, collision_field, post_load_function)
def mode_country():
model = Country
instance_tag = "DATA_RECORD"
collision_field = "code"
tag_map = {
"COUNTRY_CODE": "code",
"FULL_NAME": "name",
"SHORT_NAME": "short_name",
"COUNTRY_CODE_2": "short_code",
"LOCATION_PREFIX": "location_prefix"
}
return (model, instance_tag, tag_map, collision_field, None)
def mode_location():
model = Location
instance_tag = "location"
collision_field = "code"
tag_map = {
"code": "code",
"city": strip_extra_spaces("city"),
"state": strip_extra_spaces("state"),
"country": "_country"
}
def post_load_function(new_ids, updated_ids):
# Connect new locations to applicable posts
for loc in Location.objects.filter(id__in=new_ids + updated_ids):
Post.objects.filter(_location_code=loc.code).update(location=loc)
return (model, instance_tag, tag_map, collision_field, post_load_function)
def mode_capsule_description():
model = CapsuleDescription
instance_tag = "position"
collision_field = "_pos_seq_num"
tag_map = {
"POS_SEQ_NUM": "_pos_seq_num",
"capsuleDescription": "content",
}
return (model, instance_tag, tag_map, collision_field, None)
def mode_skill_cone():
model = SkillCone
instance_tag = "jobCategorySkill"
collision_field = None
tag_map = {
"id": "_id",
"name": strip_extra_spaces("name"),
"skill": get_nested_tag("_skill_codes", "code"),
}
return (model, instance_tag, tag_map, collision_field, None)
| [
"logging.getLogger",
"talentmap_api.common.xml_helpers.XMLloader",
"talentmap_api.common.xml_helpers.strip_extra_spaces",
"talentmap_api.position.models.Position.objects.filter",
"talentmap_api.organization.models.Location.objects.filter",
"talentmap_api.organization.models.Post.objects.filter",
"talentmap_api.organization.models.Organization.objects.filter",
"talentmap_api.common.xml_helpers.parse_boolean",
"talentmap_api.common.xml_helpers.parse_date",
"talentmap_api.common.xml_helpers.get_nested_tag",
"talentmap_api.position.models.Grade.objects.filter",
"re.sub"
] | [((544, 571), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (561, 571), False, 'import logging\n'), ((2223, 2299), 'talentmap_api.common.xml_helpers.XMLloader', 'XMLloader', (['model', 'instance_tag', 'tag_map', 'collision_behavior', 'collision_field'], {}), '(model, instance_tag, tag_map, collision_behavior, collision_field)\n', (2232, 2299), False, 'from talentmap_api.common.xml_helpers import XMLloader, strip_extra_spaces, parse_boolean, parse_date, get_nested_tag\n'), ((2984, 3012), 'talentmap_api.common.xml_helpers.parse_date', 'parse_date', (['"""effective_date"""'], {}), "('effective_date')\n", (2994, 3012), False, 'from talentmap_api.common.xml_helpers import XMLloader, strip_extra_spaces, parse_boolean, parse_date, get_nested_tag\n'), ((3653, 3703), 'talentmap_api.position.models.Grade.objects.filter', 'Grade.objects.filter', ([], {'id__in': '(new_ids + updated_ids)'}), '(id__in=new_ids + updated_ids)\n', (3673, 3703), False, 'from talentmap_api.position.models import Grade, Skill, Position, CapsuleDescription, SkillCone\n'), ((4332, 4370), 'talentmap_api.common.xml_helpers.strip_extra_spaces', 'strip_extra_spaces', (['"""long_description"""'], {}), "('long_description')\n", (4350, 4370), False, 'from talentmap_api.common.xml_helpers import XMLloader, strip_extra_spaces, parse_boolean, parse_date, get_nested_tag\n'), ((4635, 4692), 'talentmap_api.organization.models.Organization.objects.filter', 'Organization.objects.filter', ([], {'id__in': '(new_ids + updated_ids)'}), '(id__in=new_ids + updated_ids)\n', (4662, 4692), False, 'from talentmap_api.organization.models import Organization, Post, TourOfDuty, Location, Country\n'), ((6084, 6119), 'talentmap_api.common.xml_helpers.parse_boolean', 'parse_boolean', (['"""is_overseas"""', "['O']"], {}), "('is_overseas', ['O'])\n", (6097, 6119), False, 'from talentmap_api.common.xml_helpers import XMLloader, strip_extra_spaces, parse_boolean, parse_date, get_nested_tag\n'), ((7147, 7172), 'talentmap_api.common.xml_helpers.parse_date', 'parse_date', (['"""create_date"""'], {}), "('create_date')\n", (7157, 7172), False, 'from talentmap_api.common.xml_helpers import XMLloader, strip_extra_spaces, parse_boolean, parse_date, get_nested_tag\n'), ((7260, 7285), 'talentmap_api.common.xml_helpers.parse_date', 'parse_date', (['"""update_date"""'], {}), "('update_date')\n", (7270, 7285), False, 'from talentmap_api.common.xml_helpers import XMLloader, strip_extra_spaces, parse_boolean, parse_date, get_nested_tag\n'), ((7327, 7355), 'talentmap_api.common.xml_helpers.parse_date', 'parse_date', (['"""effective_date"""'], {}), "('effective_date')\n", (7337, 7355), False, 'from talentmap_api.common.xml_helpers import XMLloader, strip_extra_spaces, parse_boolean, parse_date, get_nested_tag\n'), ((7549, 7602), 'talentmap_api.position.models.Position.objects.filter', 'Position.objects.filter', ([], {'id__in': '(new_ids + updated_ids)'}), '(id__in=new_ids + updated_ids)\n', (7572, 7602), False, 'from talentmap_api.position.models import Grade, Skill, Position, CapsuleDescription, SkillCone\n'), ((8652, 8695), 'talentmap_api.common.xml_helpers.strip_extra_spaces', 'strip_extra_spaces', (['"""rest_relaxation_point"""'], {}), "('rest_relaxation_point')\n", (8670, 8695), False, 'from talentmap_api.common.xml_helpers import XMLloader, strip_extra_spaces, parse_boolean, parse_date, get_nested_tag\n'), ((8797, 8838), 'talentmap_api.common.xml_helpers.parse_boolean', 'parse_boolean', (['"""has_consumable_allowance"""'], {}), 
"('has_consumable_allowance')\n", (8810, 8838), False, 'from talentmap_api.common.xml_helpers import XMLloader, strip_extra_spaces, parse_boolean, parse_date, get_nested_tag\n'), ((8886, 8933), 'talentmap_api.common.xml_helpers.parse_boolean', 'parse_boolean', (['"""has_service_needs_differential"""'], {}), "('has_service_needs_differential')\n", (8899, 8933), False, 'from talentmap_api.common.xml_helpers import XMLloader, strip_extra_spaces, parse_boolean, parse_date, get_nested_tag\n'), ((9011, 9060), 'talentmap_api.organization.models.Post.objects.filter', 'Post.objects.filter', ([], {'id__in': '(new_ids + updated_ids)'}), '(id__in=new_ids + updated_ids)\n', (9030, 9060), False, 'from talentmap_api.organization.models import Organization, Post, TourOfDuty, Location, Country\n'), ((9714, 9740), 'talentmap_api.common.xml_helpers.strip_extra_spaces', 'strip_extra_spaces', (['"""city"""'], {}), "('city')\n", (9732, 9740), False, 'from talentmap_api.common.xml_helpers import XMLloader, strip_extra_spaces, parse_boolean, parse_date, get_nested_tag\n'), ((9759, 9786), 'talentmap_api.common.xml_helpers.strip_extra_spaces', 'strip_extra_spaces', (['"""state"""'], {}), "('state')\n", (9777, 9786), False, 'from talentmap_api.common.xml_helpers import XMLloader, strip_extra_spaces, parse_boolean, parse_date, get_nested_tag\n'), ((9946, 9999), 'talentmap_api.organization.models.Location.objects.filter', 'Location.objects.filter', ([], {'id__in': '(new_ids + updated_ids)'}), '(id__in=new_ids + updated_ids)\n', (9969, 9999), False, 'from talentmap_api.organization.models import Organization, Post, TourOfDuty, Location, Country\n'), ((10624, 10650), 'talentmap_api.common.xml_helpers.strip_extra_spaces', 'strip_extra_spaces', (['"""name"""'], {}), "('name')\n", (10642, 10650), False, 'from talentmap_api.common.xml_helpers import XMLloader, strip_extra_spaces, parse_boolean, parse_date, get_nested_tag\n'), ((10669, 10707), 'talentmap_api.common.xml_helpers.get_nested_tag', 'get_nested_tag', (['"""_skill_codes"""', '"""code"""'], {}), "('_skill_codes', 'code')\n", (10683, 10707), False, 'from talentmap_api.common.xml_helpers import XMLloader, strip_extra_spaces, parse_boolean, parse_date, get_nested_tag\n'), ((10013, 10057), 'talentmap_api.organization.models.Post.objects.filter', 'Post.objects.filter', ([], {'_location_code': 'loc.code'}), '(_location_code=loc.code)\n', (10032, 10057), False, 'from talentmap_api.organization.models import Organization, Post, TourOfDuty, Location, Country\n'), ((8073, 8104), 're.sub', 're.sub', (['"""&"""', '"""&"""', 'item.text'], {}), "('&', '&', item.text)\n", (8079, 8104), False, 'import re\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Unit tests for gluon.recfile
"""
import unittest
import os
import shutil
import uuid
from .fix_path import fix_sys_path
fix_sys_path(__file__)
from gluon import recfile
class TestRecfile(unittest.TestCase):
def setUp(self):
os.mkdir('tests')
def tearDown(self):
shutil.rmtree('tests')
def test_generation(self):
for k in range(10):
teststring = 'test%s' % k
filename = os.path.join('tests', str(uuid.uuid4()) + '.test')
with recfile.open(filename, "w") as g:
g.write(teststring)
self.assertEqual(recfile.open(filename, "r").read(), teststring)
is_there = recfile.exists(filename)
self.assertTrue(is_there)
recfile.remove(filename)
is_there = recfile.exists(filename)
self.assertFalse(is_there)
for k in range(10):
teststring = 'test%s' % k
filename = str(uuid.uuid4()) + '.test'
with recfile.open(filename, "w", path='tests') as g:
g.write(teststring)
self.assertEqual(recfile.open(filename, "r", path='tests').read(), teststring)
is_there = recfile.exists(filename, path='tests')
self.assertTrue(is_there)
recfile.remove(filename, path='tests')
is_there = recfile.exists(filename, path='tests')
self.assertFalse(is_there)
for k in range(10):
teststring = 'test%s' % k
filename = os.path.join('tests', str(uuid.uuid4()), str(uuid.uuid4()) + '.test')
with recfile.open(filename, "w") as g:
g.write(teststring)
self.assertEqual(recfile.open(filename, "r").read(), teststring)
is_there = recfile.exists(filename)
self.assertTrue(is_there)
recfile.remove(filename)
is_there = recfile.exists(filename)
self.assertFalse(is_there)
def test_existing(self):
filename = os.path.join('tests', str(uuid.uuid4()) + '.test')
with open(filename, 'w') as g:
g.write('this file exists')
self.assertTrue(recfile.exists(filename))
self.assertTrue(hasattr(recfile.open(filename, "r"), 'read'))
recfile.remove(filename, path='tests')
self.assertFalse(recfile.exists(filename))
self.assertRaises(IOError, recfile.remove, filename)
self.assertRaises(IOError, recfile.open, filename, "r")
if __name__ == '__main__':
unittest.main()
| [
"gluon.recfile.exists",
"uuid.uuid4",
"os.mkdir",
"shutil.rmtree",
"unittest.main",
"gluon.recfile.remove",
"gluon.recfile.open"
] | [((2565, 2580), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2578, 2580), False, 'import unittest\n'), ((296, 313), 'os.mkdir', 'os.mkdir', (['"""tests"""'], {}), "('tests')\n", (304, 313), False, 'import os\n'), ((347, 369), 'shutil.rmtree', 'shutil.rmtree', (['"""tests"""'], {}), "('tests')\n", (360, 369), False, 'import shutil\n'), ((2317, 2355), 'gluon.recfile.remove', 'recfile.remove', (['filename'], {'path': '"""tests"""'}), "(filename, path='tests')\n", (2331, 2355), False, 'from gluon import recfile\n'), ((729, 753), 'gluon.recfile.exists', 'recfile.exists', (['filename'], {}), '(filename)\n', (743, 753), False, 'from gluon import recfile\n'), ((804, 828), 'gluon.recfile.remove', 'recfile.remove', (['filename'], {}), '(filename)\n', (818, 828), False, 'from gluon import recfile\n'), ((852, 876), 'gluon.recfile.exists', 'recfile.exists', (['filename'], {}), '(filename)\n', (866, 876), False, 'from gluon import recfile\n'), ((1248, 1286), 'gluon.recfile.exists', 'recfile.exists', (['filename'], {'path': '"""tests"""'}), "(filename, path='tests')\n", (1262, 1286), False, 'from gluon import recfile\n'), ((1337, 1375), 'gluon.recfile.remove', 'recfile.remove', (['filename'], {'path': '"""tests"""'}), "(filename, path='tests')\n", (1351, 1375), False, 'from gluon import recfile\n'), ((1399, 1437), 'gluon.recfile.exists', 'recfile.exists', (['filename'], {'path': '"""tests"""'}), "(filename, path='tests')\n", (1413, 1437), False, 'from gluon import recfile\n'), ((1823, 1847), 'gluon.recfile.exists', 'recfile.exists', (['filename'], {}), '(filename)\n', (1837, 1847), False, 'from gluon import recfile\n'), ((1898, 1922), 'gluon.recfile.remove', 'recfile.remove', (['filename'], {}), '(filename)\n', (1912, 1922), False, 'from gluon import recfile\n'), ((1946, 1970), 'gluon.recfile.exists', 'recfile.exists', (['filename'], {}), '(filename)\n', (1960, 1970), False, 'from gluon import recfile\n'), ((2213, 2237), 'gluon.recfile.exists', 'recfile.exists', (['filename'], {}), '(filename)\n', (2227, 2237), False, 'from gluon import recfile\n'), ((2381, 2405), 'gluon.recfile.exists', 'recfile.exists', (['filename'], {}), '(filename)\n', (2395, 2405), False, 'from gluon import recfile\n'), ((559, 586), 'gluon.recfile.open', 'recfile.open', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (571, 586), False, 'from gluon import recfile\n'), ((1050, 1091), 'gluon.recfile.open', 'recfile.open', (['filename', '"""w"""'], {'path': '"""tests"""'}), "(filename, 'w', path='tests')\n", (1062, 1091), False, 'from gluon import recfile\n'), ((1653, 1680), 'gluon.recfile.open', 'recfile.open', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (1665, 1680), False, 'from gluon import recfile\n'), ((2271, 2298), 'gluon.recfile.open', 'recfile.open', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (2283, 2298), False, 'from gluon import recfile\n'), ((1009, 1021), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1019, 1021), False, 'import uuid\n'), ((1592, 1604), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1602, 1604), False, 'import uuid\n'), ((2085, 2097), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2095, 2097), False, 'import uuid\n'), ((517, 529), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (527, 529), False, 'import uuid\n'), ((658, 685), 'gluon.recfile.open', 'recfile.open', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (670, 685), False, 'from gluon import recfile\n'), ((1163, 1204), 'gluon.recfile.open', 'recfile.open', (['filename', '"""r"""'], {'path': '"""tests"""'}), 
"(filename, 'r', path='tests')\n", (1175, 1204), False, 'from gluon import recfile\n'), ((1611, 1623), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1621, 1623), False, 'import uuid\n'), ((1752, 1779), 'gluon.recfile.open', 'recfile.open', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (1764, 1779), False, 'from gluon import recfile\n')] |
"""Miscellaneous utility functions."""
from functools import reduce
from PIL import Image
import numpy as np
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
# cv2 (OpenCV) and random are required by the cv2-based get_random_data2 below.
import cv2
import random
def compose(*funcs):
"""Compose arbitrarily many functions, evaluated left to right.
Reference: https://mathieularose.com/function-composition-in-python/
"""
# return lambda x: reduce(lambda v, f: f(v), funcs, x)
if funcs:
return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
else:
raise ValueError('Composition of empty sequence not supported.')
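# Quick illustration (not from the original source): composition is evaluated left
# to right, so the left-most function is applied first.
#   >>> double = lambda x: 2 * x
#   >>> increment = lambda x: x + 1
#   >>> compose(double, increment)(10)   # increment(double(10))
#   21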
def letterbox_image(image, size):
'''resize image with unchanged aspect ratio using padding'''
iw, ih = image.size
w, h = size
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
image = image.resize((nw,nh), Image.BICUBIC)
new_image = Image.new('RGB', size, (128,128,128))
new_image.paste(image, ((w-nw)//2, (h-nh)//2))
return new_image
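# Worked example (illustrative): letterboxing a 640x480 image into (416, 416) gives
# scale = min(416/640, 416/480) = 0.65, so the image is resized to 416x312 and
# pasted at ((416-416)//2, (416-312)//2) = (0, 52) on the gray canvas.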
def rand(a=0, b=1):
return np.random.rand()*(b-a) + a
def get_random_data(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True):
'''random preprocessing for real-time data augmentation'''
line = annotation_line.split()
image = Image.open(line[0])
iw, ih = image.size
h, w = input_shape
box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])
if not random:
# resize image
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
dx = (w-nw)//2
dy = (h-nh)//2
image_data=0
if proc_img:
image = image.resize((nw,nh), Image.BICUBIC)
new_image = Image.new('RGB', (w,h), (128,128,128))
new_image.paste(image, (dx, dy))
image_data = np.array(new_image)/255.
# correct boxes
box_data = np.zeros((max_boxes,5))
if len(box)>0:
np.random.shuffle(box)
if len(box)>max_boxes: box = box[:max_boxes]
box[:, [0,2]] = box[:, [0,2]]*scale + dx
box[:, [1,3]] = box[:, [1,3]]*scale + dy
box_data[:len(box)] = box
return image_data, box_data
# resize image
new_ar = w/h * rand(1-jitter,1+jitter)/rand(1-jitter,1+jitter)
scale = rand(.25, 2)
if new_ar < 1:
nh = int(scale*h)
nw = int(nh*new_ar)
else:
nw = int(scale*w)
nh = int(nw/new_ar)
image = image.resize((nw,nh), Image.BICUBIC)
# place image
dx = int(rand(0, w-nw))
dy = int(rand(0, h-nh))
new_image = Image.new('RGB', (w,h), (128,128,128))
new_image.paste(image, (dx, dy))
image = new_image
# flip image or not
flip = rand()<.5
if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)
# distort image
hue = rand(-hue, hue)
sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat)
val = rand(1, val) if rand()<.5 else 1/rand(1, val)
x = rgb_to_hsv(np.array(image)/255.)
x[..., 0] += hue
x[..., 0][x[..., 0]>1] -= 1
x[..., 0][x[..., 0]<0] += 1
x[..., 1] *= sat
x[..., 2] *= val
x[x>1] = 1
x[x<0] = 0
image_data = hsv_to_rgb(x) # numpy array, 0 to 1
# correct boxes
box_data = np.zeros((max_boxes,5))
if len(box)>0:
np.random.shuffle(box)
box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx
box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy
if flip: box[:, [0,2]] = w - box[:, [2,0]]
box[:, 0:2][box[:, 0:2]<0] = 0
box[:, 2][box[:, 2]>w] = w
box[:, 3][box[:, 3]>h] = h
box_w = box[:, 2] - box[:, 0]
box_h = box[:, 3] - box[:, 1]
box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box
if len(box)>max_boxes: box = box[:max_boxes]
box_data[:len(box)] = box
return image_data, box_data
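# Illustrative sketch of the expected annotation format (inferred from the parsing
# above; the path and numbers are made up):
#   line = "images/0001.jpg 120,30,200,150,0 40,10,90,80,2"
#   image_data, box_data = get_random_data(line, (416, 416), random=False)
# i.e. an image path followed by space-separated boxes, each "xmin,ymin,xmax,ymax,class".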
def get_random_data2(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True):
'''random preprocessing for real-time data augmentation'''
line = annotation_line.split()
image = Image.open(line[0])
w, h = image.size #13 14
dx, dy = input_shape
box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])
x_min = w
x_max = 0
y_min = h
y_max = 0
for bbox in box:
x_min = min(x_min, bbox[0])
y_min = min(y_min, bbox[1])
x_max = max(x_max, bbox[2])
y_max = max(y_max, bbox[3])
name = bbox[4]
    # distance from the smallest box containing all targets to each image edge
d_to_left = x_min
d_to_right = w - x_max
d_to_top = y_min
d_to_bottom = h - y_max
    # randomly expand that minimal region
crop_x_min = int(x_min - rand(0, d_to_left))
crop_y_min = int(y_min - rand(0, d_to_top))
crop_x_max = int(x_max + rand(0, d_to_right))
crop_y_max = int(y_max + rand(0, d_to_bottom))
    # make sure the crop stays inside the image
crop_x_min = max(0, crop_x_min)
crop_y_min = max(0, crop_y_min)
crop_x_max = min(w, crop_x_max)
crop_y_max = min(h, crop_y_max)
cropped = image.crop((crop_x_min, crop_y_min, crop_x_max, crop_y_max)) # (left, upper, right, lower)
new_image = Image.new('RGB', (w,h), (128,128,128))
new_image.paste(cropped, (dx, dy))
image_data = np.array(new_image)/255.
box_data = np.zeros((max_boxes,5))
if len(box)>0:
np.random.shuffle(box)
if len(box)>max_boxes: box = box[:max_boxes]
        box[:,0] = box[:,0]-crop_x_min
box[:,1] = box[:,1]-crop_y_min
box[:,2] = box[:,2]-crop_x_min
box[:,3] = box[:,3]-crop_y_min
box_data[:len(box)] = box
return image_data, box_data
# NOTE: this second definition reuses the name get_random_data2, so it shadows the
# cropping variant defined above once the module is fully loaded.
def get_random_data2(annotation_line, input_shape, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True):
line = annotation_line.split()
img = cv2.imread(line[0])
h_img, w_img, _ = img.shape
w, h = input_shape
box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])
    max_bbox = np.concatenate([np.min(box[:, 0:2], axis=0), np.max(box[:, 2:4], axis=0)], axis=-1)  # bounding box enclosing every bbox
    # distance from that enclosing box to each image edge
max_l_trans = max_bbox[0]
max_u_trans = max_bbox[1]
max_r_trans = w_img - max_bbox[2]
max_d_trans = h_img - max_bbox[3]
    # randomly expand the enclosing box as far as possible
crop_xmin = max(0, int(max_bbox[0] - random.uniform(0, max_l_trans)*2))
crop_ymin = max(0, int(max_bbox[1] - random.uniform(0, max_u_trans)*2))
crop_xmax = max(w_img, int(max_bbox[2] + random.uniform(0, max_r_trans)*2))
crop_ymax = max(h_img, int(max_bbox[3] + random.uniform(0, max_d_trans)*2))
    img = img[crop_ymin:crop_ymax, crop_xmin:crop_xmax]  # crop to the expanded region
    image = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))  # img is a cv2 (BGR) array; convert to PIL so paste() can be used
    new_image = Image.new('RGB', (w, h), (128, 128, 128))  # gray (w, h) canvas, e.g. 416x416
    new_image.paste(image, (0, 0))  # paste the PIL image onto the gray canvas
    img2 = cv2.cvtColor(np.asarray(new_image), cv2.COLOR_RGB2BGR)  # convert back to a cv2 (BGR) array
    box_data = np.zeros((max_boxes, 5))  # at most max_boxes boxes, i.e. shape (20, 5)
    # shift the boxes by the crop offset so they match the cropped image, and drop invalid boxes
if len(box)>0:
np.random.shuffle(box)
if len(box)>max_boxes: box = box[:max_boxes]
box[:, [0, 2]] = box[:, [0, 2]] - crop_xmin
box[:, [1, 3]] = box[:, [1, 3]] - crop_ymin
box[:, 2][box[:, 2]>w] = w
box[:, 3][box[:, 3]>h] = h
box_w = box[:, 2] - box[:, 0]
box_h = box[:, 3] - box[:, 1]
box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box
if len(box)>max_boxes: box = box[:max_boxes]
box_data[:len(box)] = box
    # draw the boxes for debugging
    # light_blue = (255,200,100)
    # for boxs in box:
    # cv2.rectangle(img2,(boxs[0],boxs[1]),(boxs[2],boxs[3]),light_blue,2)
    # writename=os.path.basename(line[0])  # get the file name
# cv2.imshow('My Image', img2)
# cv2.waitKey(0)
return img2, box_data
| [
"PIL.Image.open",
"numpy.random.rand",
"numpy.logical_and",
"PIL.Image.new",
"numpy.asarray",
"numpy.max",
"numpy.array",
"numpy.zeros",
"matplotlib.colors.hsv_to_rgb",
"numpy.min",
"numpy.random.shuffle"
] | [((847, 886), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'size', '(128, 128, 128)'], {}), "('RGB', size, (128, 128, 128))\n", (856, 886), False, 'from PIL import Image\n'), ((1257, 1276), 'PIL.Image.open', 'Image.open', (['line[0]'], {}), '(line[0])\n', (1267, 1276), False, 'from PIL import Image\n'), ((2591, 2632), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(w, h)', '(128, 128, 128)'], {}), "('RGB', (w, h), (128, 128, 128))\n", (2600, 2632), False, 'from PIL import Image\n'), ((3169, 3182), 'matplotlib.colors.hsv_to_rgb', 'hsv_to_rgb', (['x'], {}), '(x)\n', (3179, 3182), False, 'from matplotlib.colors import rgb_to_hsv, hsv_to_rgb\n'), ((3241, 3265), 'numpy.zeros', 'np.zeros', (['(max_boxes, 5)'], {}), '((max_boxes, 5))\n', (3249, 3265), True, 'import numpy as np\n'), ((4090, 4109), 'PIL.Image.open', 'Image.open', (['line[0]'], {}), '(line[0])\n', (4100, 4109), False, 'from PIL import Image\n'), ((5109, 5150), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(w, h)', '(128, 128, 128)'], {}), "('RGB', (w, h), (128, 128, 128))\n", (5118, 5150), False, 'from PIL import Image\n'), ((5245, 5269), 'numpy.zeros', 'np.zeros', (['(max_boxes, 5)'], {}), '((max_boxes, 5))\n', (5253, 5269), True, 'import numpy as np\n'), ((6714, 6755), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(w, h)', '(128, 128, 128)'], {}), "('RGB', (w, h), (128, 128, 128))\n", (6723, 6755), False, 'from PIL import Image\n'), ((6921, 6945), 'numpy.zeros', 'np.zeros', (['(max_boxes, 5)'], {}), '((max_boxes, 5))\n', (6929, 6945), True, 'import numpy as np\n'), ((1882, 1906), 'numpy.zeros', 'np.zeros', (['(max_boxes, 5)'], {}), '((max_boxes, 5))\n', (1890, 1906), True, 'import numpy as np\n'), ((3292, 3314), 'numpy.random.shuffle', 'np.random.shuffle', (['box'], {}), '(box)\n', (3309, 3314), True, 'import numpy as np\n'), ((5204, 5223), 'numpy.array', 'np.array', (['new_image'], {}), '(new_image)\n', (5212, 5223), True, 'import numpy as np\n'), ((5296, 5318), 'numpy.random.shuffle', 'np.random.shuffle', (['box'], {}), '(box)\n', (5313, 5318), True, 'import numpy as np\n'), ((6853, 6874), 'numpy.asarray', 'np.asarray', (['new_image'], {}), '(new_image)\n', (6863, 6874), True, 'import numpy as np\n'), ((7052, 7074), 'numpy.random.shuffle', 'np.random.shuffle', (['box'], {}), '(box)\n', (7069, 7074), True, 'import numpy as np\n'), ((989, 1005), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1003, 1005), True, 'import numpy as np\n'), ((1704, 1745), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(w, h)', '(128, 128, 128)'], {}), "('RGB', (w, h), (128, 128, 128))\n", (1713, 1745), False, 'from PIL import Image\n'), ((1941, 1963), 'numpy.random.shuffle', 'np.random.shuffle', (['box'], {}), '(box)\n', (1958, 1963), True, 'import numpy as np\n'), ((2973, 2988), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2981, 2988), True, 'import numpy as np\n'), ((3667, 3703), 'numpy.logical_and', 'np.logical_and', (['(box_w > 1)', '(box_h > 1)'], {}), '(box_w > 1, box_h > 1)\n', (3681, 3703), True, 'import numpy as np\n'), ((5954, 5981), 'numpy.min', 'np.min', (['box[:, 0:2]'], {'axis': '(0)'}), '(box[:, 0:2], axis=0)\n', (5960, 5981), True, 'import numpy as np\n'), ((5983, 6010), 'numpy.max', 'np.max', (['box[:, 2:4]'], {'axis': '(0)'}), '(box[:, 2:4], axis=0)\n', (5989, 6010), True, 'import numpy as np\n'), ((7397, 7433), 'numpy.logical_and', 'np.logical_and', (['(box_w > 1)', '(box_h > 1)'], {}), '(box_w > 1, box_h > 1)\n', (7411, 7433), True, 'import numpy as np\n'), ((1813, 1832), 'numpy.array', 'np.array', 
(['new_image'], {}), '(new_image)\n', (1821, 1832), True, 'import numpy as np\n')] |
import re
from pyquery import PyQuery as pq
from .. import utils
from .constants import RANKINGS_SCHEME, RANKINGS_URL
from six.moves.urllib.error import HTTPError
class Rankings:
"""
Get all Associated Press (AP) rankings on a week-by-week basis.
Grab a list of the rankings published by the Associated Press to easily
query the hierarchy of teams each week. The results expose the current and
previous rankings as well as the movement for each team in the list.
Parameters
----------
year : string (optional)
A string of the requested year to pull rankings from. Defaults to the
most recent season.
"""
def __init__(self, year=None):
self._rankings = {}
self._find_rankings(year)
def _pull_rankings_page(self, year):
"""
Download the rankings page.
Download the rankings page for the requested year and create a PyQuery
object.
Parameters
----------
year : string
A string of the requested year to pull rankings from.
Returns
-------
PyQuery object
Returns a PyQuery object of the rankings HTML page.
"""
try:
return pq(RANKINGS_URL % year)
except HTTPError:
return None
def _get_team(self, team):
"""
Retrieve team's name and abbreviation.
The team's name and abbreviation are embedded within the 'school_name'
tag and, in the case of the abbreviation, require special parsing as it
is located in the middle of a URI. The name and abbreviation are
returned for the requested school.
Parameters
----------
team : PyQuery object
A PyQuery object representing a single row in a table on the
rankings page.
Returns
-------
tuple (string, string)
Returns a tuple of two strings where the first string is the team's
abbreviation, such as 'PURDUE' and the second string is the team's
name, such as 'Purdue'.
"""
name_tag = team('td[data-stat="school_name"]')
abbreviation = re.sub(r'.*/cfb/schools/', '', str(name_tag('a')))
abbreviation = re.sub(r'/.*', '', abbreviation)
name = team('td[data-stat="school_name"] a').text()
return abbreviation, name
def _find_rankings(self, year):
"""
Retrieve the rankings for each week.
Find and retrieve all AP rankings for the requested year and combine
them on a per-week basis. Each week contains information about the
name, abbreviation, rank, movement, and previous rank for each team
as well as the date and week number the results were published on.
Parameters
----------
year : string
A string of the requested year to pull rankings from.
"""
if not year:
year = utils._find_year_for_season('ncaaf')
page = self._pull_rankings_page(year)
if not page:
output = ("Can't pull rankings page. Ensure the following URL "
"exists: %s" % RANKINGS_URL)
raise ValueError(output)
rankings = page('table#ap tbody tr').items()
weekly_rankings = []
week = 0
for team in rankings:
if 'class="thead"' in str(team):
self._rankings[int(week)] = weekly_rankings
weekly_rankings = []
continue
abbreviation, name = self._get_team(team)
rank = utils._parse_field(RANKINGS_SCHEME, team, 'rank')
week = utils._parse_field(RANKINGS_SCHEME, team, 'week')
date = utils._parse_field(RANKINGS_SCHEME, team, 'date')
previous = utils._parse_field(RANKINGS_SCHEME, team, 'previous')
change = utils._parse_field(RANKINGS_SCHEME, team, 'change')
if 'decrease' in str(team(RANKINGS_SCHEME['change'])):
change = int(change) * -1
elif 'increase' in str(team(RANKINGS_SCHEME['change'])):
try:
change = int(change)
except ValueError:
change = 0
else:
change = 0
rank_details = {
'abbreviation': abbreviation,
'name': name,
'rank': int(rank),
'week': int(week),
'date': date,
'previous': previous,
'change': change
}
weekly_rankings.append(rank_details)
# Add the final rankings which is not terminated with another header
# row and hence will not hit the first if statement in the loop above.
self._rankings[int(week)] = weekly_rankings
@property
def current_extended(self):
"""
Returns a ``list`` of ``dictionaries`` of the most recent AP rankings.
The list is ordered in terms of the ranking so the #1 team will be in
the first element and the #25 team will be the last element. Each
dictionary has the following structure::
{
'abbreviation': Team's abbreviation, such as 'PURDUE' (str),
'name': Team's full name, such as 'Purdue' (str),
'rank': Team's rank for the current week (int),
'week': Week number for the results, such as 19 (int),
'date': Date the rankings were released, such as '2017-03-01'.
Can also be 'Final' for the final rankings or
'Preseason' for preseason rankings (str),
'previous': The team's previous rank, if applicable (str),
'change': The amount the team moved up or down the rankings.
Moves up the ladder have a positive number while
drops yield a negative number and teams that didn't
move have 0 (int)
}
"""
latest_week = max(self._rankings.keys())
ordered_dict = sorted(self._rankings[latest_week],
key=lambda k: k['rank'])
return ordered_dict
@property
def current(self):
"""
Returns a ``dictionary`` of the most recent rankings from the
Associated Press where each key is a ``string`` of the team's
abbreviation and each value is an ``int`` of the team's rank for the
current week.
"""
rankings_dict = {}
for team in self.current_extended:
rankings_dict[team['abbreviation']] = team['rank']
return rankings_dict
@property
def complete(self):
"""
Returns a ``dictionary`` where each key is a week number as an ``int``
and each value is a ``list`` of ``dictionaries`` containing the AP
rankings for each week. Within each list is a dictionary of team
information such as name, abbreviation, rank, and more. Note that the
list might not necessarily be in the same order as the rankings.
The overall dictionary has the following structure::
{
week number, ie 16 (int): [
{
'abbreviation': Team's abbreviation, such as 'PURDUE'
(str),
'name': Team's full name, such as 'Purdue' (str),
'rank': Team's rank for the current week (int),
'week': Week number for the results, such as 16 (int),
'date': Date the rankings were released, such as
'2017-12-03'. Can also be 'Final' for the final
rankings or 'Preseason' for preseason rankings
(str),
'previous': The team's previous rank, if applicable
(str),
'change': The amount the team moved up or down the
rankings. Moves up the ladder have a positive
number while drops yield a negative number
and teams that didn't move have 0 (int)
},
...
],
...
}
"""
return self._rankings
| [
"re.sub",
"pyquery.PyQuery"
] | [((2268, 2299), 're.sub', 're.sub', (['"""/.*"""', '""""""', 'abbreviation'], {}), "('/.*', '', abbreviation)\n", (2274, 2299), False, 'import re\n'), ((1235, 1258), 'pyquery.PyQuery', 'pq', (['(RANKINGS_URL % year)'], {}), '(RANKINGS_URL % year)\n', (1237, 1258), True, 'from pyquery import PyQuery as pq\n')] |
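# Hedged stand-alone sketch of the documented structures above: the full class and
# its page-scraping dependencies are not reproduced here, so this only mirrors how
# `current_extended` (sorted by 'rank') and `current` (abbreviation -> rank) relate
# to the per-week `rank_details` dictionaries built in _find_rankings().
sample_week = [
    {'abbreviation': 'GEORGIA', 'name': 'Georgia', 'rank': 2,
     'week': 16, 'date': 'Final', 'previous': '3', 'change': 1},
    {'abbreviation': 'PURDUE', 'name': 'Purdue', 'rank': 1,
     'week': 16, 'date': 'Final', 'previous': '1', 'change': 0},
]
current_extended = sorted(sample_week, key=lambda k: k['rank'])
current = {team['abbreviation']: team['rank'] for team in current_extended}
print(current)  # {'PURDUE': 1, 'GEORGIA': 2}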
import re
from zlib import crc32
from ..utils import snake_to_camel_case
CORE_TYPES = (
0xbc799737, # boolFalse#bc799737 = Bool;
0x997275b5, # boolTrue#997275b5 = Bool;
0x3fedd339, # true#3fedd339 = True;
0x1cb5c415, # vector#1cb5c415 {t:Type} # [ t ] = Vector t;
)
# https://github.com/telegramdesktop/tdesktop/blob/4bf66cb6e93f3965b40084771b595e93d0b11bcd/Telegram/SourceFiles/codegen/scheme/codegen_scheme.py#L57-L62
WHITELISTED_MISMATCHING_IDS = {
# 0 represents any layer
0: {'ipPortSecret', 'accessPointRule', 'help.configSimple'},
77: {'channel'},
78: {'channel'}
}
class TLObject:
def __init__(self, fullname, object_id, args, result, is_function, layer):
"""
Initializes a new TLObject, given its properties.
:param fullname: The fullname of the TL object (namespace.name)
The namespace can be omitted.
:param object_id: The hexadecimal string representing the object ID
:param args: The arguments, if any, of the TL object
:param result: The result type of the TL object
:param is_function: Is the object a function or a type?
:param layer: The layer this TLObject belongs to.
"""
# The name can or not have a namespace
self.fullname = fullname
if '.' in fullname:
self.namespace, self.name = fullname.split('.', maxsplit=1)
else:
self.namespace, self.name = None, fullname
self.args = args
self.result = result
self.is_function = is_function
self.id = None
if object_id is None:
self.id = self.infer_id()
else:
self.id = int(object_id, base=16)
whitelist = WHITELISTED_MISMATCHING_IDS[0] |\
WHITELISTED_MISMATCHING_IDS.get(layer, set())
if self.fullname not in whitelist:
assert self.id == self.infer_id(),\
'Invalid inferred ID for ' + repr(self)
self.class_name = snake_to_camel_case(
self.name, suffix='Request' if self.is_function else '')
self.real_args = list(a for a in self.sorted_args() if not
(a.flag_indicator or a.generic_definition))
def sorted_args(self):
"""Returns the arguments properly sorted and ready to plug-in
into a Python's method header (i.e., flags and those which
can be inferred will go last so they can default =None)
"""
return sorted(self.args,
key=lambda x: x.is_flag or x.can_be_inferred)
def __repr__(self, ignore_id=False):
if self.id is None or ignore_id:
hex_id = ''
else:
hex_id = '#{:08x}'.format(self.id)
if self.args:
args = ' ' + ' '.join([repr(arg) for arg in self.args])
else:
args = ''
return '{}{}{} = {}'.format(self.fullname, hex_id, args, self.result)
def infer_id(self):
representation = self.__repr__(ignore_id=True)
representation = representation\
.replace(':bytes ', ':string ')\
.replace('?bytes ', '?string ')\
.replace('<', ' ').replace('>', '')\
.replace('{', '').replace('}', '')
representation = re.sub(
r' \w+:flags\.\d+\?true',
r'',
representation
)
return crc32(representation.encode('ascii'))
class TLArg:
def __init__(self, name, arg_type, generic_definition):
"""
Initializes a new .tl argument
:param name: The name of the .tl argument
:param arg_type: The type of the .tl argument
:param generic_definition: Is the argument a generic definition?
(i.e. {X:Type})
"""
self.name = 'is_self' if name == 'self' else name
# Default values
self.is_vector = False
self.is_flag = False
self.skip_constructor_id = False
self.flag_index = -1
# Special case: some types can be inferred, which makes it
# less annoying to type. Currently the only type that can
# be inferred is if the name is 'random_id', to which a
# random ID will be assigned if left as None (the default)
self.can_be_inferred = name == 'random_id'
# The type can be an indicator that other arguments will be flags
if arg_type == '#':
self.flag_indicator = True
self.type = None
self.is_generic = False
else:
self.flag_indicator = False
self.is_generic = arg_type.startswith('!')
# Strip the exclamation mark always to have only the name
self.type = arg_type.lstrip('!')
# The type may be a flag (flags.IDX?REAL_TYPE)
# Note that 'flags' is NOT the flags name; this
# is determined by a previous argument
# However, we assume that the argument will always be called 'flags'
flag_match = re.match(r'flags.(\d+)\?([\w<>.]+)', self.type)
if flag_match:
self.is_flag = True
self.flag_index = int(flag_match.group(1))
# Update the type to match the exact type, not the "flagged" one
self.type = flag_match.group(2)
# Then check if the type is a Vector<REAL_TYPE>
vector_match = re.match(r'[Vv]ector<([\w\d.]+)>', self.type)
if vector_match:
self.is_vector = True
# If the type's first letter is not uppercase, then
# it is a constructor and we use (read/write) its ID
# as pinpointed on issue #81.
self.use_vector_id = self.type[0] == 'V'
# Update the type to match the one inside the vector
self.type = vector_match.group(1)
# See use_vector_id. An example of such case is ipPort in
# help.configSpecial
if self.type.split('.')[-1][0].islower():
self.skip_constructor_id = True
# The name may contain "date" in it, if this is the case and the type is "int",
# we can safely assume that this should be treated as a "date" object.
# Note that this is not a valid Telegram object, but it's easier to work with
if self.type == 'int' and (
re.search(r'(\b|_)date\b', name) or
name in ('expires', 'expires_at', 'was_online')):
self.type = 'date'
self.generic_definition = generic_definition
def type_hint(self):
type = self.type
if '.' in type:
type = type.split('.')[1]
result = {
'int': 'int',
'long': 'int',
'int128': 'int',
'int256': 'int',
'string': 'str',
'date': 'Optional[datetime]', # None date = 0 timestamp
'bytes': 'bytes',
'true': 'bool',
}.get(type, "Type{}".format(type))
if self.is_vector:
result = 'List[{}]'.format(result)
if self.is_flag and type != 'date':
result = 'Optional[{}]'.format(result)
return result
def __str__(self):
# Find the real type representation by updating it as required
real_type = self.type
if self.flag_indicator:
real_type = '#'
if self.is_vector:
if self.use_vector_id:
real_type = 'Vector<{}>'.format(real_type)
else:
real_type = 'vector<{}>'.format(real_type)
if self.is_generic:
real_type = '!{}'.format(real_type)
if self.is_flag:
real_type = 'flags.{}?{}'.format(self.flag_index, real_type)
if self.generic_definition:
return '{{{}:{}}}'.format(self.name, real_type)
else:
return '{}:{}'.format(self.name, real_type)
def __repr__(self):
return str(self).replace(':date', ':int').replace('?date', '?int')
def _from_line(line, is_function, layer):
match = re.match(
r'^([\w.]+)' # 'name'
r'(?:#([0-9a-fA-F]+))?' # '#optionalcode'
r'(?:\s{?\w+:[\w\d<>#.?!]+}?)*' # '{args:.0?type}'
r'\s=\s' # ' = '
r'([\w\d<>#.?]+);$', # '<result.type>;'
line
)
if match is None:
# Probably "vector#1cb5c415 {t:Type} # [ t ] = Vector t;"
raise ValueError('Cannot parse TLObject {}'.format(line))
args_match = re.findall(
r'({)?'
r'(\w+)'
r':'
r'([\w\d<>#.?!]+)'
r'}?',
line
)
return TLObject(
fullname=match.group(1),
object_id=match.group(2),
result=match.group(3),
is_function=is_function,
layer=layer,
args=[TLArg(name, arg_type, brace != '')
for brace, name, arg_type in args_match]
)
def parse_tl(file_path, layer, ignore_core=False):
"""This method yields TLObjects from a given .tl file."""
with open(file_path, encoding='utf-8') as file:
is_function = False
for line in file:
comment_index = line.find('//')
if comment_index != -1:
line = line[:comment_index]
line = line.strip()
if not line:
continue
match = re.match('---(\w+)---', line)
if match:
following_types = match.group(1)
is_function = following_types == 'functions'
continue
try:
result = _from_line(line, is_function, layer=layer)
if not ignore_core or result.id not in CORE_TYPES:
yield result
except ValueError as e:
if 'vector#1cb5c415' not in str(e):
raise
def find_layer(file_path):
"""Finds the layer used on the specified scheme.tl file."""
layer_regex = re.compile(r'^//\s*LAYER\s*(\d+)$')
with open(file_path, encoding='utf-8') as file:
for line in file:
match = layer_regex.match(line)
if match:
return int(match.group(1))
| [
"re.compile",
"re.match",
"re.sub",
"re.findall",
"re.search"
] | [((8192, 8307), 're.match', 're.match', (['"""^([\\\\w.]+)(?:#([0-9a-fA-F]+))?(?:\\\\s{?\\\\w+:[\\\\w\\\\d<>#.?!]+}?)*\\\\s=\\\\s([\\\\w\\\\d<>#.?]+);$"""', 'line'], {}), "(\n '^([\\\\w.]+)(?:#([0-9a-fA-F]+))?(?:\\\\s{?\\\\w+:[\\\\w\\\\d<>#.?!]+}?)*\\\\s=\\\\s([\\\\w\\\\d<>#.?]+);$'\n , line)\n", (8200, 8307), False, 'import re\n'), ((8671, 8721), 're.findall', 're.findall', (['"""({)?(\\\\w+):([\\\\w\\\\d<>#.?!]+)}?"""', 'line'], {}), "('({)?(\\\\w+):([\\\\w\\\\d<>#.?!]+)}?', line)\n", (8681, 8721), False, 'import re\n'), ((10120, 10157), 're.compile', 're.compile', (['"""^//\\\\s*LAYER\\\\s*(\\\\d+)$"""'], {}), "('^//\\\\s*LAYER\\\\s*(\\\\d+)$')\n", (10130, 10157), False, 'import re\n'), ((3322, 3377), 're.sub', 're.sub', (['""" \\\\w+:flags\\\\.\\\\d+\\\\?true"""', '""""""', 'representation'], {}), "(' \\\\w+:flags\\\\.\\\\d+\\\\?true', '', representation)\n", (3328, 3377), False, 'import re\n'), ((5079, 5128), 're.match', 're.match', (['"""flags.(\\\\d+)\\\\?([\\\\w<>.]+)"""', 'self.type'], {}), "('flags.(\\\\d+)\\\\?([\\\\w<>.]+)', self.type)\n", (5087, 5128), False, 'import re\n'), ((5466, 5512), 're.match', 're.match', (['"""[Vv]ector<([\\\\w\\\\d.]+)>"""', 'self.type'], {}), "('[Vv]ector<([\\\\w\\\\d.]+)>', self.type)\n", (5474, 5512), False, 'import re\n'), ((9522, 9552), 're.match', 're.match', (['"""---(\\\\w+)---"""', 'line'], {}), "('---(\\\\w+)---', line)\n", (9530, 9552), False, 'import re\n'), ((6476, 6509), 're.search', 're.search', (['"""(\\\\b|_)date\\\\b"""', 'name'], {}), "('(\\\\b|_)date\\\\b', name)\n", (6485, 6509), False, 'import re\n')] |
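# Hedged usage sketch for the parser above; 'scheme.tl' is an assumed path to a
# Telegram-style schema file and is not shipped with this snippet.
if __name__ == '__main__':
    layer = find_layer('scheme.tl')
    for tlobject in parse_tl('scheme.tl', layer=layer, ignore_core=True):
        print('{} -> {}'.format(tlobject.class_name, tlobject.result))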
# Owner(s): ["oncall: jit"]
import torch
import os
import sys
from torch.testing._internal.jit_utils import JitTestCase
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestModules(JitTestCase):
def test_script_module_with_constants_list(self):
"""
Test that a module that has __constants__ set to something
that is not a set can be scripted.
"""
# torch.nn.Linear has a __constants__ attribute defined
        # and initialized to a list.
class Net(torch.nn.Linear):
x: torch.jit.Final[int]
def __init__(self):
super().__init__(5, 10)
self.x = 0
self.checkModule(Net(), (torch.randn(5),))
| [
"os.path.realpath",
"sys.path.append",
"torch.randn"
] | [((246, 279), 'sys.path.append', 'sys.path.append', (['pytorch_test_dir'], {}), '(pytorch_test_dir)\n', (261, 279), False, 'import sys\n'), ((217, 243), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (233, 243), False, 'import os\n'), ((1017, 1031), 'torch.randn', 'torch.randn', (['(5)'], {}), '(5)\n', (1028, 1031), False, 'import torch\n')] |
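# Hedged stand-alone sketch of what checkModule() exercises above: a module whose
# __constants__ comes from torch.nn.Linear (a list) plus a Final[int] attribute can
# be scripted directly. TinyNet is a renamed copy for illustration, not part of the
# original test file.
class TinyNet(torch.nn.Linear):
    x: torch.jit.Final[int]

    def __init__(self):
        super().__init__(5, 10)
        self.x = 0

scripted = torch.jit.script(TinyNet())
print(scripted(torch.randn(5)).shape)  # torch.Size([10])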
#! /usr/bin/env python
import arc
import sys
import os
def retrieve(uc, endpoints):
# The ComputingServiceRetriever needs the UserConfig to know which credentials
# to use in case of HTTPS connections
retriever = arc.ComputingServiceRetriever(uc, endpoints)
# the constructor of the ComputingServiceRetriever returns immediately
sys.stdout.write('\n')
sys.stdout.write("ComputingServiceRetriever created with the following endpoints:\n")
for endpoint in endpoints:
sys.stdout.write("- %s\n"%endpoint.str())
# here we want to wait until all the results arrive
sys.stdout.write("Waiting for the results...\n")
retriever.wait()
return retriever
def example():
# Creating a UserConfig object with the user's proxy
# and the path of the trusted CA certificates
uc = arc.UserConfig()
uc.ProxyPath("/tmp/x509up_u%s" % os.getuid())
uc.CACertificatesDirectory("/etc/grid-security/certificates")
# Query two registries (index servers) for Computing Services
registries = [
# for the index1, we specify that it is an EGIIS service
arc.Endpoint("index1.nordugrid.org:2135/Mds-Vo-name=NorduGrid,o=grid", arc.Endpoint.REGISTRY, "org.nordugrid.ldapegiis"),
# for the arc-emi.grid.upjs.sk, we don't specify the type (the InterfaceName)
        # we let the system try all possibilities
arc.Endpoint("arc-emi.grid.upjs.sk/O=Grid/Mds-Vo-Name=ARC-EMI", arc.Endpoint.REGISTRY)
]
retriever = retrieve(uc, registries)
# The retriever acts as a list containing all the discovered ComputingServices:
sys.stdout.write("Discovered ComputingServices: %s\n"%(", ".join([service.Name for service in retriever])))
# Get all the ExecutionTargets on these ComputingServices
targets = retriever.GetExecutionTargets()
sys.stdout.write("Number of ExecutionTargets on these ComputingServices: %d\n"%len(targets))
# Query the local infosys (COMPUTINGINFO) of computing elements
computing_elements = [
# for piff, we specify that we want to query the LDAP GLUE2 tree
arc.Endpoint("piff.hep.lu.se", arc.Endpoint.COMPUTINGINFO, "org.nordugrid.ldapglue2"),
# for pgs03, we don't specify the interface, we let the system try all possibilities
arc.Endpoint("pgs03.grid.upjs.sk", arc.Endpoint.COMPUTINGINFO)
]
retriever2 = retrieve(uc, computing_elements)
# Get all the ExecutionTargets on these ComputingServices
targets2 = retriever2.GetExecutionTargets()
sys.stdout.write("The discovered ExecutionTargets:\n")
for target in targets2:
sys.stdout.write("%s\n"%str(target))
# Query both registries and computing elements at the same time:
endpoints = [
arc.Endpoint("arc-emi.grid.upjs.sk/O=Grid/Mds-Vo-Name=ARC-EMI", arc.Endpoint.REGISTRY),
arc.Endpoint("piff.hep.lu.se", arc.Endpoint.COMPUTINGINFO, "org.nordugrid.ldapglue2")
]
retriever3 = retrieve(uc, endpoints)
sys.stdout.write("Discovered ComputingServices: %s\n"%(", ".join([service.Name for service in retriever3])))
# wait for all the background threads to finish before we destroy the objects they may use
import atexit
@atexit.register
def wait_exit():
arc.ThreadInitializer().waitExit()
# arc.Logger.getRootLogger().addDestination(arc.LogStream(sys.stderr))
# arc.Logger.getRootLogger().setThreshold(arc.DEBUG)
# run the example
example()
| [
"os.getuid",
"arc.ThreadInitializer",
"arc.ComputingServiceRetriever",
"arc.UserConfig",
"arc.Endpoint",
"sys.stdout.write"
] | [((226, 270), 'arc.ComputingServiceRetriever', 'arc.ComputingServiceRetriever', (['uc', 'endpoints'], {}), '(uc, endpoints)\n', (255, 270), False, 'import arc\n'), ((350, 372), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (366, 372), False, 'import sys\n'), ((377, 467), 'sys.stdout.write', 'sys.stdout.write', (['"""ComputingServiceRetriever created with the following endpoints:\n"""'], {}), "(\n 'ComputingServiceRetriever created with the following endpoints:\\n')\n", (393, 467), False, 'import sys\n'), ((604, 652), 'sys.stdout.write', 'sys.stdout.write', (['"""Waiting for the results...\n"""'], {}), "('Waiting for the results...\\n')\n", (620, 652), False, 'import sys\n'), ((827, 843), 'arc.UserConfig', 'arc.UserConfig', ([], {}), '()\n', (841, 843), False, 'import arc\n'), ((2527, 2581), 'sys.stdout.write', 'sys.stdout.write', (['"""The discovered ExecutionTargets:\n"""'], {}), "('The discovered ExecutionTargets:\\n')\n", (2543, 2581), False, 'import sys\n'), ((1119, 1244), 'arc.Endpoint', 'arc.Endpoint', (['"""index1.nordugrid.org:2135/Mds-Vo-name=NorduGrid,o=grid"""', 'arc.Endpoint.REGISTRY', '"""org.nordugrid.ldapegiis"""'], {}), "('index1.nordugrid.org:2135/Mds-Vo-name=NorduGrid,o=grid', arc.\n Endpoint.REGISTRY, 'org.nordugrid.ldapegiis')\n", (1131, 1244), False, 'import arc\n'), ((1388, 1479), 'arc.Endpoint', 'arc.Endpoint', (['"""arc-emi.grid.upjs.sk/O=Grid/Mds-Vo-Name=ARC-EMI"""', 'arc.Endpoint.REGISTRY'], {}), "('arc-emi.grid.upjs.sk/O=Grid/Mds-Vo-Name=ARC-EMI', arc.\n Endpoint.REGISTRY)\n", (1400, 1479), False, 'import arc\n'), ((2103, 2192), 'arc.Endpoint', 'arc.Endpoint', (['"""piff.hep.lu.se"""', 'arc.Endpoint.COMPUTINGINFO', '"""org.nordugrid.ldapglue2"""'], {}), "('piff.hep.lu.se', arc.Endpoint.COMPUTINGINFO,\n 'org.nordugrid.ldapglue2')\n", (2115, 2192), False, 'import arc\n'), ((2291, 2353), 'arc.Endpoint', 'arc.Endpoint', (['"""pgs03.grid.upjs.sk"""', 'arc.Endpoint.COMPUTINGINFO'], {}), "('pgs03.grid.upjs.sk', arc.Endpoint.COMPUTINGINFO)\n", (2303, 2353), False, 'import arc\n'), ((2752, 2843), 'arc.Endpoint', 'arc.Endpoint', (['"""arc-emi.grid.upjs.sk/O=Grid/Mds-Vo-Name=ARC-EMI"""', 'arc.Endpoint.REGISTRY'], {}), "('arc-emi.grid.upjs.sk/O=Grid/Mds-Vo-Name=ARC-EMI', arc.\n Endpoint.REGISTRY)\n", (2764, 2843), False, 'import arc\n'), ((2848, 2937), 'arc.Endpoint', 'arc.Endpoint', (['"""piff.hep.lu.se"""', 'arc.Endpoint.COMPUTINGINFO', '"""org.nordugrid.ldapglue2"""'], {}), "('piff.hep.lu.se', arc.Endpoint.COMPUTINGINFO,\n 'org.nordugrid.ldapglue2')\n", (2860, 2937), False, 'import arc\n'), ((881, 892), 'os.getuid', 'os.getuid', ([], {}), '()\n', (890, 892), False, 'import os\n'), ((3241, 3264), 'arc.ThreadInitializer', 'arc.ThreadInitializer', ([], {}), '()\n', (3262, 3264), False, 'import arc\n')] |
from flask import Blueprint
auth = Blueprint('auth', __name__)
from . import views, forms
"flask.Blueprint"
] | [((33, 60), 'flask.Blueprint', 'Blueprint', (['"""auth"""', '__name__'], {}), "('auth', __name__)\n", (42, 60), False, 'from flask import Blueprint\n')] |
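# Hedged sketch (not part of the original package): how a blueprint like the one
# above is typically registered; the factory name and url_prefix are assumptions.
from flask import Flask

def create_app():
    app = Flask(__name__)
    app.register_blueprint(auth, url_prefix='/auth')
    return app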
import pandas as pd
import numpy as np
def estimate_volatility(prices, l):
"""Create an exponential moving average model of the volatility of a stock
price, and return the most recent (last) volatility estimate.
Parameters
----------
prices : pandas.Series
A series of adjusted closing prices for a stock.
l : float
The 'lambda' parameter of the exponential moving average model. Making
this value smaller will cause the model to weight older terms less
relative to more recent terms.
Returns
-------
last_vol : float
The last element of your exponential moving averge volatility model series.
"""
    # Volatility is modeled as an exponentially weighted moving average of squared
    # log returns; the square root of the last value is the most recent estimate.
    log_returns = np.log(prices).diff()
    return np.sqrt((log_returns ** 2).ewm(alpha=1 - l).mean()).iloc[-1]
def test_run(filename='data.csv'):
"""Test run get_most_volatile() with stock prices from a file."""
prices = pd.read_csv(filename, parse_dates=[
'date'], index_col='date', squeeze=True)
print("Most recent volatility estimate: {:.6f}".format(estimate_volatility(prices, 0.7)))
# print(estimate_volatility(prices, 0.7))
if __name__ == '__main__':
test_run()
| [
"pandas.read_csv"
] | [((973, 1048), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'parse_dates': "['date']", 'index_col': '"""date"""', 'squeeze': '(True)'}), "(filename, parse_dates=['date'], index_col='date', squeeze=True)\n", (984, 1048), True, 'import pandas as pd\n')] |
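# Hedged usage sketch: a synthetic geometric-random-walk price series stands in for
# data.csv, which is not included here.
def demo_estimate():
    rng = np.random.default_rng(0)
    dates = pd.date_range('2020-01-01', periods=500, freq='D')
    prices = pd.Series(100 * np.exp(np.cumsum(rng.normal(0, 0.01, 500))), index=dates)
    print("Synthetic volatility estimate: {:.6f}".format(estimate_volatility(prices, 0.7)))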
import sys
n, q = map(int, sys.stdin.readline().split())
s = '$' + sys.stdin.readline().rstrip()
lr = zip(*[map(int, sys.stdin.read().split())] * 2)
def main():
res = [None] * (n + 1); res[0] = 0
prev = '$'
for i in range(1, n+1):
res[i] = res[i-1]
res[i] += (prev == 'A' and s[i] == 'C') & 1
prev = s[i]
for l, r in lr:
yield res[r] - res[l]
if __name__ == '__main__':
ans = main()
print(*ans, sep='\n')
| [
"sys.stdin.readline",
"sys.stdin.read"
] | [((30, 50), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (48, 50), False, 'import sys\n'), ((71, 91), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (89, 91), False, 'import sys\n'), ((122, 138), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (136, 138), False, 'import sys\n')] |
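# Hedged illustration of the prefix-sum idea used above: res[i] counts how many "AC"
# pairs end at or before position i, so each query (l, r) is answered as res[r] - res[l].
def demo_prefix_sum():
    s_demo = '$' + 'ACACAC'
    res_demo = [0] * len(s_demo)
    for i in range(1, len(s_demo)):
        res_demo[i] = res_demo[i - 1] + (s_demo[i - 1] == 'A' and s_demo[i] == 'C')
    print(res_demo[6] - res_demo[1])  # 3 "AC" pairs inside positions 1..6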
"""
A class hierarchy relating to fields of all kinds.
"""
from __future__ import print_function, division
import numpy as np
from ciabatta.meta import make_repr_str
from fealty import lattice, field_numerics, walled_field_numerics
class Space(object):
def __init__(self, L, dim):
self.L = L
self.dim = dim
@property
def L_half(self):
return self.L / 2.0
@property
def A(self):
return self.L ** self.dim
def iterate(self, *args, **kwargs):
pass
def __repr__(self):
fs = [('L', self.L), ('dim', self.dim)]
return make_repr_str(self, fs)
class Field(Space):
def __init__(self, L, dim, dx):
Space.__init__(self, L, dim)
self.M = int(round(self.L / dx))
@property
def dx(self):
return self.L / self.M
@property
def A_i(self):
return self.M ** self.dim
@property
def dA(self):
return self.dx ** self.dim
def density_field(self, r):
return density(r, self.L, self.dx)
def r_to_i(self, r):
return lattice.r_to_i(r, self.L, self.dx)
def i_to_r(self, i):
return lattice.i_to_r(i, self.L, self.dx)
def __repr__(self):
fs = [('L', self.L), ('dim', self.dim), ('dx', self.dx)]
return make_repr_str(self, fs)
class Scalar(Field):
def __init__(self, L, dim, dx, a_0=0.0):
Field.__init__(self, L, dim, dx)
self.a = np.ones(self.dim * (self.M,), dtype=np.float) * a_0
def grad(self):
return _grad(self.a, self.dx)
def grad_i(self, r):
return _grad_i(self.a, self.r_to_i(r), self.dx)
def laplacian(self):
return _laplace(self.a, self.dx)
def __repr__(self):
fs = [('L', self.L), ('dim', self.dim), ('dx', self.dx),
('a_0', self.a_0)]
return make_repr_str(self, fs)
class Diffusing(Scalar):
def __init__(self, L, dim, dx, D, dt, a_0=0.0):
Scalar.__init__(self, L, dim, dx, a_0=a_0)
self.D = D
self.dt = dt
if self.D > self.dx ** 2 / (2.0 * self.dim * self.dt):
raise Exception('Unstable diffusion constant')
def iterate(self):
self.a += self.D * self.laplacian() * self.dt
def __repr__(self):
fs = [('L', self.L), ('dim', self.dim), ('dx', self.dx),
('D', self.D), ('dt', self.dt), ('a_0', self.a_0)]
return make_repr_str(self, fs)
class WalledScalar(Scalar):
def __init__(self, L, dim, dx, walls, a_0=0.0):
Scalar.__init__(self, L, dim, dx, a_0=a_0)
self.walls = walls
# Make field zero-valued where obstructed
self.a *= np.logical_not(self.walls)
def grad(self):
return _walled_grad(self.a, self.dx, self.walls)
def grad_i(self, r):
return _walled_grad_i(self.a, self.r_to_i(r), self.dx,
self.walls)
def laplacian(self):
return _walled_laplace(self.a, self.dx, self.walls)
def __repr__(self):
fs = [('L', self.L), ('dim', self.dim), ('dx', self.dx),
('walls', self.walls), ('a_0', self.a_0)]
return make_repr_str(self, fs)
# Note, inheritance order matters to get walled grad & laplacian call
# (see diamond problem on wikipedia and how python handles it)
class WalledDiffusing(WalledScalar, Diffusing):
def __init__(self, L, dim, dx, walls, D, dt, a_0=0.0):
Diffusing.__init__(self, L, dim, dx, D, dt, a_0=a_0)
WalledScalar.__init__(self, L, dim, dx, walls, a_0=a_0)
def __repr__(self):
fs = [('L', self.L), ('dim', self.dim), ('dx', self.dx),
('walls', self.walls), ('D', self.D), ('dt', self.dt),
('a_0', self.a_0)]
return make_repr_str(self, fs)
def density(r, L, dx):
assert r.ndim == 2
M = int(round(L / dx))
dx = L / M
inds = lattice.r_to_i(r, L, dx)
f = np.zeros(r.shape[1] * (M,), dtype=np.int)
if f.ndim == 1:
field_numerics.density_1d(inds, f)
elif f.ndim == 2:
field_numerics.density_2d(inds, f)
elif f.ndim == 3:
field_numerics.density_3d(inds, f)
else:
raise Exception('Density calc not implemented in this dimension')
return f / dx ** r.shape[1]
def _laplace(field, dx):
assert dx > 0.0
laplace = np.empty_like(field)
if field.ndim == 1:
field_numerics.laplace_1d(field, laplace, dx)
elif field.ndim == 2:
field_numerics.laplace_2d(field, laplace, dx)
elif field.ndim == 3:
field_numerics.laplace_3d(field, laplace, dx)
else:
raise Exception('Laplacian not implemented in this dimension')
return laplace
def _grad_i(field, inds, dx):
assert dx > 0.0
assert inds.ndim == 2
assert field.ndim == inds.shape[1]
grad_i = np.empty(inds.shape, dtype=field.dtype)
if field.ndim == 1:
field_numerics.grad_i_1d(field, inds, grad_i, dx)
elif field.ndim == 2:
field_numerics.grad_i_2d(field, inds, grad_i, dx)
elif field.ndim == 3:
field_numerics.grad_i_3d(field, grad_i, dx)
else:
raise Exception("Grad_i not implemented in this dimension")
return grad_i
def _grad(field, dx):
assert dx > 0.0
grad = np.empty(field.shape + (field.ndim,), dtype=field.dtype)
if field.ndim == 1:
field_numerics.grad_1d(field, grad, dx)
elif field.ndim == 2:
field_numerics.grad_2d(field, grad, dx)
elif field.ndim == 3:
field_numerics.grad_3d(field, grad, dx)
else:
raise Exception('Grad not implemented in this dimension')
return grad
def _div(field, dx):
assert dx > 0.0
div = np.empty(field.shape[:-1], dtype=field.dtype)
if field.ndim == 2:
field_numerics.div_1d(field, div, dx)
elif field.ndim == 3:
field_numerics.div_2d(field, div, dx)
elif field.ndim == 4:
field_numerics.div_3d(field, div, dx)
else:
raise Exception('Divergence not implemented in this dimension')
return div
def _walled_grad(field, dx, walls):
assert field.shape == walls.shape
assert dx > 0.0
grad = np.empty(field.shape + (field.ndim,), dtype=field.dtype)
if field.ndim == 1:
walled_field_numerics.grad_1d(field, grad, dx, walls)
elif field.ndim == 2:
walled_field_numerics.grad_2d(field, grad, dx, walls)
elif field.ndim == 3:
walled_field_numerics.grad_3d(field, grad, dx, walls)
else:
raise Exception("Walled grad not implemented in this dimension")
return grad
def _walled_grad_i(field, inds, dx, walls):
assert field.shape == walls.shape
assert dx > 0.0
assert inds.ndim == 2
assert field.ndim == inds.shape[1]
grad_i = np.empty(inds.shape, dtype=field.dtype)
if field.ndim == 1:
walled_field_numerics.grad_i_1d(field, inds, grad_i, dx, walls)
elif field.ndim == 2:
walled_field_numerics.grad_i_2d(field, inds, grad_i, dx, walls)
elif field.ndim == 3:
walled_field_numerics.grad_i_3d(field, inds, grad_i, dx, walls)
else:
raise Exception("Walled Grad_i not implemented in this dimension")
return grad_i
def _walled_laplace(field, dx, walls):
assert field.shape == walls.shape
assert dx > 0.0
laplace = np.empty_like(field)
if field.ndim == 1:
walled_field_numerics.laplace_1d(field, laplace, dx, walls)
elif field.ndim == 2:
walled_field_numerics.laplace_2d(field, laplace, dx, walls)
elif field.ndim == 3:
walled_field_numerics.laplace_3d(field, laplace, dx, walls)
else:
raise Exception('Laplacian not implemented in this dimension')
return laplace
| [
"numpy.logical_not",
"fealty.field_numerics.density_3d",
"fealty.walled_field_numerics.grad_3d",
"fealty.field_numerics.div_1d",
"fealty.lattice.i_to_r",
"ciabatta.meta.make_repr_str",
"fealty.walled_field_numerics.grad_i_1d",
"fealty.field_numerics.grad_i_1d",
"numpy.empty",
"fealty.walled_field_numerics.laplace_2d",
"fealty.field_numerics.laplace_2d",
"fealty.field_numerics.div_2d",
"fealty.field_numerics.grad_3d",
"numpy.ones",
"fealty.walled_field_numerics.grad_i_2d",
"fealty.field_numerics.laplace_1d",
"fealty.field_numerics.density_2d",
"fealty.field_numerics.div_3d",
"fealty.walled_field_numerics.grad_2d",
"fealty.field_numerics.grad_2d",
"fealty.lattice.r_to_i",
"fealty.walled_field_numerics.grad_1d",
"fealty.field_numerics.grad_i_3d",
"fealty.field_numerics.density_1d",
"fealty.walled_field_numerics.grad_i_3d",
"fealty.walled_field_numerics.laplace_3d",
"fealty.field_numerics.grad_1d",
"fealty.field_numerics.grad_i_2d",
"numpy.zeros",
"numpy.empty_like",
"fealty.field_numerics.laplace_3d",
"fealty.walled_field_numerics.laplace_1d"
] | [((3872, 3896), 'fealty.lattice.r_to_i', 'lattice.r_to_i', (['r', 'L', 'dx'], {}), '(r, L, dx)\n', (3886, 3896), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((3905, 3946), 'numpy.zeros', 'np.zeros', (['(r.shape[1] * (M,))'], {'dtype': 'np.int'}), '(r.shape[1] * (M,), dtype=np.int)\n', (3913, 3946), True, 'import numpy as np\n'), ((4317, 4337), 'numpy.empty_like', 'np.empty_like', (['field'], {}), '(field)\n', (4330, 4337), True, 'import numpy as np\n'), ((4806, 4845), 'numpy.empty', 'np.empty', (['inds.shape'], {'dtype': 'field.dtype'}), '(inds.shape, dtype=field.dtype)\n', (4814, 4845), True, 'import numpy as np\n'), ((5241, 5297), 'numpy.empty', 'np.empty', (['(field.shape + (field.ndim,))'], {'dtype': 'field.dtype'}), '(field.shape + (field.ndim,), dtype=field.dtype)\n', (5249, 5297), True, 'import numpy as np\n'), ((5663, 5708), 'numpy.empty', 'np.empty', (['field.shape[:-1]'], {'dtype': 'field.dtype'}), '(field.shape[:-1], dtype=field.dtype)\n', (5671, 5708), True, 'import numpy as np\n'), ((6127, 6183), 'numpy.empty', 'np.empty', (['(field.shape + (field.ndim,))'], {'dtype': 'field.dtype'}), '(field.shape + (field.ndim,), dtype=field.dtype)\n', (6135, 6183), True, 'import numpy as np\n'), ((6727, 6766), 'numpy.empty', 'np.empty', (['inds.shape'], {'dtype': 'field.dtype'}), '(inds.shape, dtype=field.dtype)\n', (6735, 6766), True, 'import numpy as np\n'), ((7275, 7295), 'numpy.empty_like', 'np.empty_like', (['field'], {}), '(field)\n', (7288, 7295), True, 'import numpy as np\n'), ((603, 626), 'ciabatta.meta.make_repr_str', 'make_repr_str', (['self', 'fs'], {}), '(self, fs)\n', (616, 626), False, 'from ciabatta.meta import make_repr_str\n'), ((1081, 1115), 'fealty.lattice.r_to_i', 'lattice.r_to_i', (['r', 'self.L', 'self.dx'], {}), '(r, self.L, self.dx)\n', (1095, 1115), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((1157, 1191), 'fealty.lattice.i_to_r', 'lattice.i_to_r', (['i', 'self.L', 'self.dx'], {}), '(i, self.L, self.dx)\n', (1171, 1191), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((1297, 1320), 'ciabatta.meta.make_repr_str', 'make_repr_str', (['self', 'fs'], {}), '(self, fs)\n', (1310, 1320), False, 'from ciabatta.meta import make_repr_str\n'), ((1846, 1869), 'ciabatta.meta.make_repr_str', 'make_repr_str', (['self', 'fs'], {}), '(self, fs)\n', (1859, 1869), False, 'from ciabatta.meta import make_repr_str\n'), ((2412, 2435), 'ciabatta.meta.make_repr_str', 'make_repr_str', (['self', 'fs'], {}), '(self, fs)\n', (2425, 2435), False, 'from ciabatta.meta import make_repr_str\n'), ((2665, 2691), 'numpy.logical_not', 'np.logical_not', (['self.walls'], {}), '(self.walls)\n', (2679, 2691), True, 'import numpy as np\n'), ((3148, 3171), 'ciabatta.meta.make_repr_str', 'make_repr_str', (['self', 'fs'], {}), '(self, fs)\n', (3161, 3171), False, 'from ciabatta.meta import make_repr_str\n'), ((3747, 3770), 'ciabatta.meta.make_repr_str', 'make_repr_str', (['self', 'fs'], {}), '(self, fs)\n', (3760, 3770), False, 'from ciabatta.meta import make_repr_str\n'), ((3975, 4009), 'fealty.field_numerics.density_1d', 'field_numerics.density_1d', (['inds', 'f'], {}), '(inds, f)\n', (4000, 4009), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((4370, 4415), 'fealty.field_numerics.laplace_1d', 'field_numerics.laplace_1d', (['field', 'laplace', 'dx'], {}), '(field, laplace, dx)\n', (4395, 4415), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), 
((4878, 4927), 'fealty.field_numerics.grad_i_1d', 'field_numerics.grad_i_1d', (['field', 'inds', 'grad_i', 'dx'], {}), '(field, inds, grad_i, dx)\n', (4902, 4927), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((5330, 5369), 'fealty.field_numerics.grad_1d', 'field_numerics.grad_1d', (['field', 'grad', 'dx'], {}), '(field, grad, dx)\n', (5352, 5369), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((5741, 5778), 'fealty.field_numerics.div_1d', 'field_numerics.div_1d', (['field', 'div', 'dx'], {}), '(field, div, dx)\n', (5762, 5778), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((6216, 6269), 'fealty.walled_field_numerics.grad_1d', 'walled_field_numerics.grad_1d', (['field', 'grad', 'dx', 'walls'], {}), '(field, grad, dx, walls)\n', (6245, 6269), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((6799, 6862), 'fealty.walled_field_numerics.grad_i_1d', 'walled_field_numerics.grad_i_1d', (['field', 'inds', 'grad_i', 'dx', 'walls'], {}), '(field, inds, grad_i, dx, walls)\n', (6830, 6862), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((7328, 7387), 'fealty.walled_field_numerics.laplace_1d', 'walled_field_numerics.laplace_1d', (['field', 'laplace', 'dx', 'walls'], {}), '(field, laplace, dx, walls)\n', (7360, 7387), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((1448, 1493), 'numpy.ones', 'np.ones', (['(self.dim * (self.M,))'], {'dtype': 'np.float'}), '(self.dim * (self.M,), dtype=np.float)\n', (1455, 1493), True, 'import numpy as np\n'), ((4040, 4074), 'fealty.field_numerics.density_2d', 'field_numerics.density_2d', (['inds', 'f'], {}), '(inds, f)\n', (4065, 4074), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((4450, 4495), 'fealty.field_numerics.laplace_2d', 'field_numerics.laplace_2d', (['field', 'laplace', 'dx'], {}), '(field, laplace, dx)\n', (4475, 4495), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((4962, 5011), 'fealty.field_numerics.grad_i_2d', 'field_numerics.grad_i_2d', (['field', 'inds', 'grad_i', 'dx'], {}), '(field, inds, grad_i, dx)\n', (4986, 5011), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((5404, 5443), 'fealty.field_numerics.grad_2d', 'field_numerics.grad_2d', (['field', 'grad', 'dx'], {}), '(field, grad, dx)\n', (5426, 5443), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((5813, 5850), 'fealty.field_numerics.div_2d', 'field_numerics.div_2d', (['field', 'div', 'dx'], {}), '(field, div, dx)\n', (5834, 5850), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((6304, 6357), 'fealty.walled_field_numerics.grad_2d', 'walled_field_numerics.grad_2d', (['field', 'grad', 'dx', 'walls'], {}), '(field, grad, dx, walls)\n', (6333, 6357), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((6897, 6960), 'fealty.walled_field_numerics.grad_i_2d', 'walled_field_numerics.grad_i_2d', (['field', 'inds', 'grad_i', 'dx', 'walls'], {}), '(field, inds, grad_i, dx, walls)\n', (6928, 6960), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((7422, 7481), 'fealty.walled_field_numerics.laplace_2d', 'walled_field_numerics.laplace_2d', (['field', 'laplace', 'dx', 'walls'], {}), '(field, laplace, dx, walls)\n', (7454, 7481), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((4105, 4139), 'fealty.field_numerics.density_3d', 'field_numerics.density_3d', (['inds', 'f'], {}), '(inds, f)\n', (4130, 4139), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((4530, 4575), 'fealty.field_numerics.laplace_3d', 'field_numerics.laplace_3d', (['field', 'laplace', 'dx'], {}), '(field, laplace, dx)\n', (4555, 4575), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((5046, 5089), 'fealty.field_numerics.grad_i_3d', 'field_numerics.grad_i_3d', (['field', 'grad_i', 'dx'], {}), '(field, grad_i, dx)\n', (5070, 5089), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((5478, 5517), 'fealty.field_numerics.grad_3d', 'field_numerics.grad_3d', (['field', 'grad', 'dx'], {}), '(field, grad, dx)\n', (5500, 5517), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((5885, 5922), 'fealty.field_numerics.div_3d', 'field_numerics.div_3d', (['field', 'div', 'dx'], {}), '(field, div, dx)\n', (5906, 5922), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((6392, 6445), 'fealty.walled_field_numerics.grad_3d', 'walled_field_numerics.grad_3d', (['field', 'grad', 'dx', 'walls'], {}), '(field, grad, dx, walls)\n', (6421, 6445), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((6995, 7058), 'fealty.walled_field_numerics.grad_i_3d', 'walled_field_numerics.grad_i_3d', (['field', 'inds', 'grad_i', 'dx', 'walls'], {}), '(field, inds, grad_i, dx, walls)\n', (7026, 7058), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((7516, 7575), 'fealty.walled_field_numerics.laplace_3d', 'walled_field_numerics.laplace_3d', (['field', 'laplace', 'dx', 'walls'], {}), '(field, laplace, dx, walls)\n', (7548, 7575), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n')]
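# Hedged usage sketch for the field classes above (it needs the same ciabatta and
# fealty packages that fields.py imports); the grid size and coefficients are assumptions.
def demo_walled_diffusion():
    walls = np.zeros(2 * (20,), dtype=bool)
    c = WalledDiffusing(L=10.0, dim=2, dx=0.5, walls=walls, D=0.1, dt=0.1, a_0=1.0)
    for _ in range(10):
        c.iterate()  # explicit diffusion step
    print(c.a.shape, float(c.a.mean()))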
"""
ASGI config for example_django project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example_django.settings")
application = get_asgi_application()
os.system("/usr/bin/python3 /opt/code/manage.py migrate")
os.system("/usr/bin/python3 /opt/code/manage.py "
"loaddata /opt/code/blog/fixtures/default_articles.json")
| [
"os.environ.setdefault",
"os.system",
"django.core.asgi.get_asgi_application"
] | [((292, 366), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""example_django.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'example_django.settings')\n", (313, 366), False, 'import os\n'), ((382, 404), 'django.core.asgi.get_asgi_application', 'get_asgi_application', ([], {}), '()\n', (402, 404), False, 'from django.core.asgi import get_asgi_application\n'), ((406, 463), 'os.system', 'os.system', (['"""/usr/bin/python3 /opt/code/manage.py migrate"""'], {}), "('/usr/bin/python3 /opt/code/manage.py migrate')\n", (415, 463), False, 'import os\n'), ((465, 579), 'os.system', 'os.system', (['"""/usr/bin/python3 /opt/code/manage.py loaddata /opt/code/blog/fixtures/default_articles.json"""'], {}), "(\n '/usr/bin/python3 /opt/code/manage.py loaddata /opt/code/blog/fixtures/default_articles.json'\n )\n", (474, 579), False, 'import os\n')] |
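# Hedged sketch (not part of the original project): one way to serve the ASGI callable
# above during development. Requires uvicorn; the host and port values are assumptions.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run("example_django.asgi:application", host="127.0.0.1", port=8000)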
# +
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import warnings
from bigdl.chronos.model.prophet import ProphetBuilder, ProphetModel
from bigdl.chronos.autots.utils import recalculate_n_sampling
# -
class AutoProphet:
def __init__(self,
changepoint_prior_scale=None,
seasonality_prior_scale=None,
holidays_prior_scale=None,
seasonality_mode=None,
changepoint_range=None,
metric='mse',
logs_dir="/tmp/auto_prophet_logs",
cpus_per_trial=1,
name="auto_prophet",
remote_dir=None,
load_dir=None,
**prophet_config
):
"""
Create an automated Prophet Model.
        The user needs to specify either the exact value or the search space of the
Prophet model hyperparameters. For details of the Prophet model hyperparameters, refer to
https://facebook.github.io/prophet/docs/diagnostics.html#hyperparameter-tuning.
:param changepoint_prior_scale: Int or hp sampling function from an integer space
for hyperparameter changepoint_prior_scale for the Prophet model.
For hp sampling, see bigdl.chronos.orca.automl.hp for more details.
e.g. hp.loguniform(0.001, 0.5).
:param seasonality_prior_scale: hyperparameter seasonality_prior_scale for the
Prophet model.
e.g. hp.loguniform(0.01, 10).
:param holidays_prior_scale: hyperparameter holidays_prior_scale for the
Prophet model.
e.g. hp.loguniform(0.01, 10).
:param seasonality_mode: hyperparameter seasonality_mode for the
Prophet model.
e.g. hp.choice(['additive', 'multiplicative']).
:param changepoint_range: hyperparameter changepoint_range for the
Prophet model.
e.g. hp.uniform(0.8, 0.95).
:param metric: String. The evaluation metric name to optimize. e.g. "mse"
:param logs_dir: Local directory to save logs and results. It defaults to
"/tmp/auto_prophet_logs"
:param cpus_per_trial: Int. Number of cpus for each trial. It defaults to 1.
:param name: name of the AutoProphet. It defaults to "auto_prophet"
:param remote_dir: String. Remote directory to sync training results and checkpoints. It
            defaults to None and doesn't take effect when running locally. When running in a
            cluster, it defaults to "hdfs:///tmp/{name}".
:param load_dir: Load the ckpt from load_dir. The value defaults to None.
:param prophet_config: Other Prophet hyperparameters.
"""
if load_dir:
self.best_model = ProphetModel()
self.best_model.restore(load_dir)
try:
from bigdl.orca.automl.auto_estimator import AutoEstimator
import bigdl.orca.automl.hp as hp
self.search_space = {
"changepoint_prior_scale": hp.grid_search([0.005, 0.05, 0.1, 0.5])
if changepoint_prior_scale is None
else changepoint_prior_scale,
"seasonality_prior_scale": hp.grid_search([0.01, 0.1, 1.0, 10.0])
if seasonality_prior_scale is None
else seasonality_prior_scale,
"holidays_prior_scale": hp.loguniform(0.01, 10)
if holidays_prior_scale is None
else holidays_prior_scale,
"seasonality_mode": hp.choice(['additive', 'multiplicative'])
if seasonality_mode is None
else seasonality_mode,
"changepoint_range": hp.uniform(0.8, 0.95)
if changepoint_range is None
else changepoint_range
}
self.search_space.update(prophet_config) # update other configs
self.metric = metric
model_builder = ProphetBuilder()
self.auto_est = AutoEstimator(model_builder=model_builder,
logs_dir=logs_dir,
resources_per_trial={"cpu": cpus_per_trial},
remote_dir=remote_dir,
name=name)
except ImportError:
warnings.warn("You need to install `bigdl-orca[automl]` to use `fit` function.")
def fit(self,
data,
cross_validation=True,
expect_horizon=None,
freq=None,
metric_threshold=None,
n_sampling=16,
search_alg=None,
search_alg_params=None,
scheduler=None,
scheduler_params=None,
):
"""
Automatically fit the model and search for the best hyperparameters.
:param data: training data, a pandas dataframe with Td rows,
and 2 columns, with column 'ds' indicating date and column 'y' indicating value
and Td is the time dimension
:param cross_validation: bool, if the eval result comes from cross_validation.
            The value is set to True by default. Set this option to False to
speed up the process.
        :param expect_horizon: int, validation data will be automatically split from training
data, and expect_horizon is the horizon you may need to use once the mode is fitted.
The value defaults to None, where 10% of training data will be taken
as the validation data.
:param freq: the freqency of the training dataframe. the frequency can be anything from the
pandas list of frequency strings here:
            https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases
            Defaulted to None, where an unreliable frequency will be inferred implicitly.
:param metric_threshold: a trial will be terminated when metric threshold is met
:param n_sampling: Number of trials to evaluate in total. Defaults to 16.
If hp.grid_search is in search_space, the grid will be run n_sampling of trials
and round up n_sampling according to hp.grid_search.
If this is -1, (virtually) infinite samples are generated
until a stopping condition is met.
:param search_alg: str, all supported searcher provided by ray tune
(i.e."variant_generator", "random", "ax", "dragonfly", "skopt",
"hyperopt", "bayesopt", "bohb", "nevergrad", "optuna", "zoopt" and
"sigopt")
:param search_alg_params: extra parameters for searcher algorithm besides search_space,
metric and searcher mode
:param scheduler: str, all supported scheduler provided by ray tune
:param scheduler_params: parameters for scheduler
"""
if expect_horizon is None:
expect_horizon = int(0.1*len(data))
if freq is None:
            assert len(data) >= 2, "The training dataframe should contain at least 2 records."
assert pd.api.types.is_datetime64_any_dtype(data["ds"].dtypes), \
"The 'ds' col should be in datetime 64 type, or you need to set `freq` in fit."
self._freq = data["ds"].iloc[1] - data["ds"].iloc[0]
else:
self._freq = pd.Timedelta(freq)
expect_horizon_str = str(self._freq * expect_horizon)
self.search_space.update({"expect_horizon": expect_horizon_str,
"cross_validation": cross_validation})
train_data = data if cross_validation else data[:len(data)-expect_horizon]
validation_data = None if cross_validation else data[len(data)-expect_horizon:]
n_sampling = recalculate_n_sampling(self.search_space,
n_sampling) if n_sampling != -1 else -1
self.auto_est.fit(data=train_data,
validation_data=validation_data,
metric=self.metric,
metric_threshold=metric_threshold,
n_sampling=n_sampling,
search_space=self.search_space,
search_alg=search_alg,
search_alg_params=search_alg_params,
scheduler=scheduler,
scheduler_params=scheduler_params
)
# use the best config to fit a new prophet model on whole data
self.best_model = ProphetBuilder().build(self.auto_est.get_best_config())
self.best_model.model.fit(data)
def predict(self, horizon=1, freq="D", ds_data=None):
"""
Predict using the best model after HPO.
:param horizon: the number of steps forward to predict
:param freq: the freqency of the predicted dataframe, defaulted to day("D"),
the frequency can be anything from the pandas list of frequency strings here:
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases
:param ds_data: a dataframe that has 1 column 'ds' indicating date.
"""
if self.best_model.model is None:
raise RuntimeError(
"You must call fit or restore first before calling predict!")
return self.best_model.predict(horizon=horizon, freq=freq, ds_data=ds_data)
def evaluate(self, data, metrics=['mse']):
"""
Evaluate using the best model after HPO.
:param data: evaluation data, a pandas dataframe with Td rows,
and 2 columns, with column 'ds' indicating date and column 'y' indicating value
and Td is the time dimension
:param metrics: A list contains metrics for test/valid data.
"""
if data is None:
raise ValueError("Input invalid data of None")
if self.best_model.model is None:
raise RuntimeError(
"You must call fit or restore first before calling evaluate!")
return self.best_model.evaluate(target=data,
metrics=metrics)
def save(self, checkpoint_file):
"""
Save the best model after HPO.
:param checkpoint_file: The location you want to save the best model, should be a json file
"""
if self.best_model.model is None:
raise RuntimeError(
"You must call fit or restore first before calling save!")
self.best_model.save(checkpoint_file)
def restore(self, checkpoint_file):
"""
Restore the best model after HPO.
:param checkpoint_file: The checkpoint file location you want to load the best model.
"""
self.best_model.restore(checkpoint_file)
def get_best_model(self):
"""
Get the best Prophet model.
"""
return self.best_model.model
| [
"bigdl.chronos.model.prophet.ProphetModel",
"bigdl.orca.automl.hp.uniform",
"bigdl.orca.automl.hp.loguniform",
"pandas.Timedelta",
"bigdl.orca.automl.hp.choice",
"bigdl.chronos.model.prophet.ProphetBuilder",
"bigdl.chronos.autots.utils.recalculate_n_sampling",
"warnings.warn",
"bigdl.orca.automl.auto_estimator.AutoEstimator",
"bigdl.orca.automl.hp.grid_search",
"pandas.api.types.is_datetime64_any_dtype"
] | [((3348, 3362), 'bigdl.chronos.model.prophet.ProphetModel', 'ProphetModel', ([], {}), '()\n', (3360, 3362), False, 'from bigdl.chronos.model.prophet import ProphetBuilder, ProphetModel\n'), ((4543, 4559), 'bigdl.chronos.model.prophet.ProphetBuilder', 'ProphetBuilder', ([], {}), '()\n', (4557, 4559), False, 'from bigdl.chronos.model.prophet import ProphetBuilder, ProphetModel\n'), ((4588, 4736), 'bigdl.orca.automl.auto_estimator.AutoEstimator', 'AutoEstimator', ([], {'model_builder': 'model_builder', 'logs_dir': 'logs_dir', 'resources_per_trial': "{'cpu': cpus_per_trial}", 'remote_dir': 'remote_dir', 'name': 'name'}), "(model_builder=model_builder, logs_dir=logs_dir,\n resources_per_trial={'cpu': cpus_per_trial}, remote_dir=remote_dir,\n name=name)\n", (4601, 4736), False, 'from bigdl.orca.automl.auto_estimator import AutoEstimator\n'), ((7757, 7812), 'pandas.api.types.is_datetime64_any_dtype', 'pd.api.types.is_datetime64_any_dtype', (["data['ds'].dtypes"], {}), "(data['ds'].dtypes)\n", (7793, 7812), True, 'import pandas as pd\n'), ((8016, 8034), 'pandas.Timedelta', 'pd.Timedelta', (['freq'], {}), '(freq)\n', (8028, 8034), True, 'import pandas as pd\n'), ((8434, 8487), 'bigdl.chronos.autots.utils.recalculate_n_sampling', 'recalculate_n_sampling', (['self.search_space', 'n_sampling'], {}), '(self.search_space, n_sampling)\n', (8456, 8487), False, 'from bigdl.chronos.autots.utils import recalculate_n_sampling\n'), ((4937, 5022), 'warnings.warn', 'warnings.warn', (['"""You need to install `bigdl-orca[automl]` to use `fit` function."""'], {}), "('You need to install `bigdl-orca[automl]` to use `fit` function.'\n )\n", (4950, 5022), False, 'import warnings\n'), ((9220, 9236), 'bigdl.chronos.model.prophet.ProphetBuilder', 'ProphetBuilder', ([], {}), '()\n', (9234, 9236), False, 'from bigdl.chronos.model.prophet import ProphetBuilder, ProphetModel\n'), ((3616, 3655), 'bigdl.orca.automl.hp.grid_search', 'hp.grid_search', (['[0.005, 0.05, 0.1, 0.5]'], {}), '([0.005, 0.05, 0.1, 0.5])\n', (3630, 3655), True, 'import bigdl.orca.automl.hp as hp\n'), ((3796, 3834), 'bigdl.orca.automl.hp.grid_search', 'hp.grid_search', (['[0.01, 0.1, 1.0, 10.0]'], {}), '([0.01, 0.1, 1.0, 10.0])\n', (3810, 3834), True, 'import bigdl.orca.automl.hp as hp\n'), ((3972, 3995), 'bigdl.orca.automl.hp.loguniform', 'hp.loguniform', (['(0.01)', '(10)'], {}), '(0.01, 10)\n', (3985, 3995), True, 'import bigdl.orca.automl.hp as hp\n'), ((4123, 4164), 'bigdl.orca.automl.hp.choice', 'hp.choice', (["['additive', 'multiplicative']"], {}), "(['additive', 'multiplicative'])\n", (4132, 4164), True, 'import bigdl.orca.automl.hp as hp\n'), ((4285, 4306), 'bigdl.orca.automl.hp.uniform', 'hp.uniform', (['(0.8)', '(0.95)'], {}), '(0.8, 0.95)\n', (4295, 4306), True, 'import bigdl.orca.automl.hp as hp\n')] |
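# Hedged usage sketch for AutoProphet above; it assumes bigdl-orca[automl] and prophet
# are installed, and the synthetic dataframe plus the search settings are made up.
def demo_auto_prophet():
    import numpy as np
    import pandas as pd
    df = pd.DataFrame({"ds": pd.date_range("2020-01-01", periods=365, freq="D"),
                   "y": np.random.rand(365)})
    auto_prophet = AutoProphet(metric="mse", cpus_per_trial=1)
    auto_prophet.fit(df, expect_horizon=30, n_sampling=4)
    forecast = auto_prophet.predict(horizon=30, freq="D")
    auto_prophet.save("best_prophet.json")
    return forecast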
# module pyparsing.py
#
# Copyright (c) 2003-2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from pyparsing import (
Literal,
Word,
Group,
Forward,
alphas,
alphanums,
Regex,
CaselessKeyword,
Suppress,
delimitedList,
)
import math
import operator
# map operator symbols to corresponding arithmetic operations
epsilon = 1e-12
opn = {
"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"/": operator.truediv,
"^": operator.pow,
}
fn = {
"sin": math.sin,
"cos": math.cos,
"tan": math.tan,
"exp": math.exp,
"abs": abs,
"trunc": lambda a: int(a),
"round": round,
"sgn": lambda a: -1 if a < -epsilon else 1 if a > epsilon else 0,
}
exprStack = []
def push_first(toks):
exprStack.append(toks[0])
def push_unary_minus(toks):
for t in toks:
if t == "-":
exprStack.append("unary -")
else:
break
def BNF():
"""
expop :: '^'
multop :: '*' | '/'
addop :: '+' | '-'
integer :: ['+' | '-'] '0'..'9'+
atom :: PI | E | real | fn '(' expr ')' | '(' expr ')'
factor :: atom [ expop factor ]*
term :: factor [ multop factor ]*
expr :: term [ addop term ]*
"""
# use CaselessKeyword for e and pi, to avoid accidentally matching
# functions that start with 'e' or 'pi' (such as 'exp'); Keyword
# and CaselessKeyword only match whole words
e = CaselessKeyword("E")
pi = CaselessKeyword("PI")
# fnumber = Combine(Word("+-"+nums, nums) +
# Optional("." + Optional(Word(nums))) +
# Optional(e + Word("+-"+nums, nums)))
# or use provided pyparsing_common.number, but convert back to str:
# fnumber = ppc.number().addParseAction(lambda t: str(t[0]))
fnumber = Regex(r"[+-]?\d+(?:\.\d*)?(?:[eE][+-]?\d+)?")
ident = Word(alphas, alphanums + "_$")
plus, minus, mult, div = map(Literal, "+-*/")
lpar, rpar = map(Suppress, "()")
addop = plus | minus
multop = mult | div
expop = Literal("^")
expr = Forward()
expr_list = delimitedList(Group(expr))
# add parse action that replaces the function identifier with a (name, number of args) tuple
fn_call = (ident + lpar - Group(expr_list) + rpar).setParseAction(
lambda t: t.insert(0, (t.pop(0), len(t[0])))
)
atom = (
addop[...]
+ (
(fn_call | pi | e | fnumber | ident).setParseAction(push_first)
| Group(lpar + expr + rpar)
)
).setParseAction(push_unary_minus)
# by defining exponentiation as "atom [ ^ factor ]..." instead of "atom [ ^ atom ]...", we get right-to-left
    # exponents, instead of left-to-right; that is, 2^3^2 = 2^(3^2), not (2^3)^2.
factor = Forward()
factor <<= atom + (expop + factor).setParseAction(push_first)[...]
term = factor + (multop + factor).setParseAction(push_first)[...]
expr <<= term + (addop + term).setParseAction(push_first)[...]
bnf = expr
return bnf
def evaluate_stack(s, stats):
op, num_args = s.pop(), 0
if isinstance(op, tuple):
op, num_args = op
if op == "unary -":
return -evaluate_stack(s, stats)
if op in "+-*/^":
# note: operands are pushed onto the stack in reverse order
op2 = evaluate_stack(s, stats)
op1 = evaluate_stack(s, stats)
return opn[op](op1, op2)
elif op == "PI":
return math.pi # 3.1415926535
elif op == "E":
return math.e # 2.718281828
elif op == "mean":
return stats['mean']
elif op == "min":
return stats['min']
elif op == "max":
return stats['max']
elif op == "std":
return stats['std']
elif op in fn:
# note: args are pushed onto the stack in reverse order
args = reversed([evaluate_stack(s, stats) for _ in range(num_args)])
return fn[op](*args)
elif op[0].isalpha():
raise Exception("invalid identifier '%s'" % op)
else:
return float(op)
def eval_fx(fx, stats):
"""Given fx and stats ('min', 'max', 'mean', 'std') return the result"""
_ = BNF().parseString(fx, parseAll=True)
val = evaluate_stack(exprStack[:], stats)
return val
| [
"pyparsing.CaselessKeyword",
"pyparsing.Regex",
"pyparsing.Forward",
"pyparsing.Group",
"pyparsing.Word",
"pyparsing.Literal"
] | [((2472, 2492), 'pyparsing.CaselessKeyword', 'CaselessKeyword', (['"""E"""'], {}), "('E')\n", (2487, 2492), False, 'from pyparsing import Literal, Word, Group, Forward, alphas, alphanums, Regex, CaselessKeyword, Suppress, delimitedList\n'), ((2502, 2523), 'pyparsing.CaselessKeyword', 'CaselessKeyword', (['"""PI"""'], {}), "('PI')\n", (2517, 2523), False, 'from pyparsing import Literal, Word, Group, Forward, alphas, alphanums, Regex, CaselessKeyword, Suppress, delimitedList\n'), ((2849, 2897), 'pyparsing.Regex', 'Regex', (['"""[+-]?\\\\d+(?:\\\\.\\\\d*)?(?:[eE][+-]?\\\\d+)?"""'], {}), "('[+-]?\\\\d+(?:\\\\.\\\\d*)?(?:[eE][+-]?\\\\d+)?')\n", (2854, 2897), False, 'from pyparsing import Literal, Word, Group, Forward, alphas, alphanums, Regex, CaselessKeyword, Suppress, delimitedList\n'), ((2907, 2937), 'pyparsing.Word', 'Word', (['alphas', "(alphanums + '_$')"], {}), "(alphas, alphanums + '_$')\n", (2911, 2937), False, 'from pyparsing import Literal, Word, Group, Forward, alphas, alphanums, Regex, CaselessKeyword, Suppress, delimitedList\n'), ((3087, 3099), 'pyparsing.Literal', 'Literal', (['"""^"""'], {}), "('^')\n", (3094, 3099), False, 'from pyparsing import Literal, Word, Group, Forward, alphas, alphanums, Regex, CaselessKeyword, Suppress, delimitedList\n'), ((3112, 3121), 'pyparsing.Forward', 'Forward', ([], {}), '()\n', (3119, 3121), False, 'from pyparsing import Literal, Word, Group, Forward, alphas, alphanums, Regex, CaselessKeyword, Suppress, delimitedList\n'), ((3809, 3818), 'pyparsing.Forward', 'Forward', ([], {}), '()\n', (3816, 3818), False, 'from pyparsing import Literal, Word, Group, Forward, alphas, alphanums, Regex, CaselessKeyword, Suppress, delimitedList\n'), ((3152, 3163), 'pyparsing.Group', 'Group', (['expr'], {}), '(expr)\n', (3157, 3163), False, 'from pyparsing import Literal, Word, Group, Forward, alphas, alphanums, Regex, CaselessKeyword, Suppress, delimitedList\n'), ((3292, 3308), 'pyparsing.Group', 'Group', (['expr_list'], {}), '(expr_list)\n', (3297, 3308), False, 'from pyparsing import Literal, Word, Group, Forward, alphas, alphanums, Regex, CaselessKeyword, Suppress, delimitedList\n'), ((3526, 3551), 'pyparsing.Group', 'Group', (['(lpar + expr + rpar)'], {}), '(lpar + expr + rpar)\n', (3531, 3551), False, 'from pyparsing import Literal, Word, Group, Forward, alphas, alphanums, Regex, CaselessKeyword, Suppress, delimitedList\n')] |
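# Hedged usage sketch for eval_fx above: the stats dictionary supplies the 'mean',
# 'min', 'max' and 'std' identifiers that evaluate_stack() recognises.
if __name__ == "__main__":
    demo_stats = {'mean': 10.0, 'min': 2.0, 'max': 20.0, 'std': 1.5}
    print(eval_fx("mean + 2*std", demo_stats))         # 13.0
    print(eval_fx("round(max / min) ^ 2", demo_stats))  # 100
    print(eval_fx("sin(PI/2) + abs(-3)", demo_stats))   # 4.0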
from itertools import combinations
import copy
def get_reverse(n):
if n == 1:
return 0
else:
return 1
def get_edge_info(e):
v = [0 for i in range(2)]
n = [0 for i in range(2)]
t = 0
for x in e:
v[t], n[t] = x
t += 1
return v, n
def sort_e_by_domain(val):
return val[0][1]
def sort_by_strand(val):
return val[0][0]
def check_edge_in_tuplelist(edge, tpl):
for i in tpl:
if edge in i:
return True
return False
def compare(a, b):
return (a > b) - (a < b)
def flip(i):
if i == 0:
i = 1
elif i == 1:
i = 0
return i
def get_free_domains(limits, blocks, bound):
limits = sorted(limits)
interval = limits[1] - limits[0]
for i in blocks:
if limits[1] > i > limits[0]:
tmp = abs(bound - i)
if tmp < interval:
interval = tmp
return interval
def get_combinations(oldlen, newlen, cursor, indexlist):
combold = list(combinations(indexlist[cursor:oldlen], 2))
combself = [(i, i) for i in range(0, oldlen)]
combnew = []
if oldlen != newlen:
for i in range(0, oldlen):
for j in range(oldlen, newlen):
combnew.append((i, j))
return combold + combnew + combself
def get_migrate_nodes(edges, indices, startstrand):
d = []
for i in indices:
vi, ni = get_edge_info(edges[i][0])
if vi[0] == startstrand:
d.append(ni[0])
else:
d.append(ni[1])
d.sort()
return d
def check_following_migration(edges, p=0):
"""
:param edges:
:return:
"""
e = copy.copy(edges)
visited = [False for _ in e]
miggroup = []
cnt = -1
for i in range(0, len(e)):
if visited[i]:
continue
e[i] = list(e[i])
e[i][p] = list(e[i][p])
t1 = sorted(e[i][p], key=lambda tup: tup[0])
if not visited[i]:
visited[i] = True
miggroup.append([i])
cnt += 1
for j in range(0, len(e)):
if j != i and not visited[j]:
e[j] = list(e[j])
e[j][p] = list(e[j][p])
t2 = sorted(e[j][p], key=lambda tup: tup[0])
if (t2[0][0] != t1[0][0]) or (t2[1][0] != t1[1][0]):
continue
for num in range(0, len(miggroup[cnt])):
t1 = sorted(e[miggroup[cnt][num]][p], key=lambda tup: tup[0])
if (t1[0][1] + 1 == t2[0][1] and t1[1][1] - 1 == t2[1][1]) \
or (t1[0][1] - 1 == t2[0][1] and t1[1][1] + 1 == t2[1][1]):
visited[j] = True
miggroup[cnt].append(j)
break
return miggroup
def get_absdist(domain1, domain2):
"""
:param domain1:
:param domain2:
:return:
"""
return abs(domain1[1] - domain2[1])
def get_closet_domain_to_target(target, domains):
"""
:param target:
:param domains:
:return:
"""
closet = 10000
closetd = ()
for i in domains:
dist = get_absdist(i, target)
if dist < closet:
closet = dist
closetd = i
return closetd
def get_domains_on_2sides(target1, target2, domains1, domains2):
"""
:param target1:
:param target2:
:param domains1:
:param domains2:
:return:
"""
if target1[0] == domains1[0][0]:
closetd1 = get_closet_domain_to_target(target1, domains1)
elif target2[0] == domains1[0][0]:
closetd1 = get_closet_domain_to_target(target2, domains1)
if target1[0] == domains2[0][0]:
closetd2 = get_closet_domain_to_target(target1, domains2)
elif target2[0] == domains2[0][0]:
closetd2 = get_closet_domain_to_target(target2, domains2)
return closetd1, closetd2
def get_closest_target(domains, targets):
"""
:return:
"""
domains = sorted(domains, key=lambda tup: tup[1])
mindist = 10000
mint = None
for t in targets:
dist = min(get_absdist(t, domains[0]), get_absdist(t, domains[len(domains) - 1]))
if dist < mindist:
mint = t
return mint
def check_continuity(a, b):
for i in a:
for j in b:
if i + 1 == j or i - 1 == j:
return i, j
return None
def check_bond_existence(d1, d2, l1, l2):
for i in range(len(l1)):
if d1 == l1[i] and d2 == l2[i]:
return True
return False
| [
"itertools.combinations",
"copy.copy"
] | [((1671, 1687), 'copy.copy', 'copy.copy', (['edges'], {}), '(edges)\n', (1680, 1687), False, 'import copy\n'), ((1016, 1057), 'itertools.combinations', 'combinations', (['indexlist[cursor:oldlen]', '(2)'], {}), '(indexlist[cursor:oldlen], 2)\n', (1028, 1057), False, 'from itertools import combinations\n')] |
# coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sparse Fine-tuning the library models for question answering.
"""
# You can also adapt this script on your own question answering task. Pointers for this are left as comments.
from nn_pruning.sparse_trainer import SparseTrainer
from .qa_train import QATrainer
# SparseTrainer should appear first in the base classes, as its functions must override QATrainer and its base classes (Trainer)
class QASparseTrainer(SparseTrainer, QATrainer):
def __init__(self, sparse_args, *args, **kwargs):
QATrainer.__init__(self, *args, **kwargs)
SparseTrainer.__init__(self, sparse_args)
| [
"nn_pruning.sparse_trainer.SparseTrainer.__init__"
] | [((1176, 1217), 'nn_pruning.sparse_trainer.SparseTrainer.__init__', 'SparseTrainer.__init__', (['self', 'sparse_args'], {}), '(self, sparse_args)\n', (1198, 1217), False, 'from nn_pruning.sparse_trainer import SparseTrainer\n')] |
# -*- coding: future_fstrings -*-
#
# Copyright 2019 <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# This file is part of acados.
#
# The 2-Clause BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.;
#
import sys, os, json
import numpy as np
from ctypes import *
from casadi import CasadiMeta, Function, SX
from copy import deepcopy
from .generate_c_code_explicit_ode import generate_c_code_explicit_ode
from .generate_c_code_implicit_ode import generate_c_code_implicit_ode
from .generate_c_code_gnsf import generate_c_code_gnsf
from .generate_c_code_constraint import generate_c_code_constraint
from .generate_c_code_nls_cost import generate_c_code_nls_cost
from .generate_c_code_external_cost import generate_c_code_external_cost
from .acados_ocp import AcadosOcp
from .acados_model import acados_model_strip_casadi_symbolics
from .utils import is_column, is_empty, casadi_length, render_template, acados_class2dict,\
format_class_dict, ocp_check_against_layout, np_array_to_list, make_model_consistent,\
set_up_imported_gnsf_model
def make_ocp_dims_consistent(acados_ocp):
dims = acados_ocp.dims
cost = acados_ocp.cost
constraints = acados_ocp.constraints
model = acados_ocp.model
opts = acados_ocp.solver_options
# nx
if is_column(model.x):
dims.nx = casadi_length(model.x)
else:
raise Exception('model.x should be column vector!')
# nu
if is_empty(model.u):
dims.nu = 0
else:
dims.nu = casadi_length(model.u)
# nz
if is_empty(model.z):
dims.nz = 0
else:
dims.nz = casadi_length(model.z)
# np
if is_empty(model.p):
dims.np = 0
else:
dims.np = casadi_length(model.p)
if acados_ocp.parameter_values.shape[0] != dims.np:
raise Exception('inconsistent dimension np, regarding model.p and parameter_values.')
## cost
# path
if cost.cost_type == 'LINEAR_LS':
ny = cost.W.shape[0]
if cost.Vx.shape[0] != ny or cost.Vu.shape[0] != ny:
raise Exception('inconsistent dimension ny, regarding W, Vx, Vu.' + \
f'\nGot W[{cost.W.shape}], Vx[{cost.Vx.shape}], Vu[{cost.Vu.shape}]\n')
if dims.nz != 0 and cost.Vz.shape[0] != ny:
raise Exception('inconsistent dimension ny, regarding W, Vx, Vu, Vz.' + \
f'\nGot W[{cost.W.shape}], Vx[{cost.Vx.shape}], Vu[{cost.Vu.shape}], Vz[{cost.Vz.shape}]\n')
if cost.Vx.shape[1] != dims.nx and ny != 0:
raise Exception('inconsistent dimension: Vx should have nx columns.')
if cost.Vu.shape[1] != dims.nu and ny != 0:
raise Exception('inconsistent dimension: Vu should have nu columns.')
if cost.yref.shape[0] != ny:
raise Exception('inconsistent dimension: regarding W, yref.' + \
f'\nGot W[{cost.W.shape}], yref[{cost.yref.shape}]\n')
dims.ny = ny
elif cost.cost_type == 'NONLINEAR_LS':
ny = cost.W.shape[0]
if is_empty(model.cost_y_expr) and ny != 0:
raise Exception('inconsistent dimension ny: regarding W, cost_y_expr.')
elif casadi_length(model.cost_y_expr) != ny:
raise Exception('inconsistent dimension ny: regarding W, cost_y_expr.')
if cost.yref.shape[0] != ny:
raise Exception('inconsistent dimension: regarding W, yref.' + \
f'\nGot W[{cost.W.shape}], yref[{cost.yref.shape}]\n')
dims.ny = ny
# terminal
if cost.cost_type_e == 'LINEAR_LS':
ny_e = cost.W_e.shape[0]
if cost.Vx_e.shape[0] != ny_e:
raise Exception('inconsistent dimension ny_e: regarding W_e, cost_y_expr_e.' + \
f'\nGot W_e[{cost.W_e.shape}], Vx_e[{cost.Vx_e.shape}]')
if cost.Vx_e.shape[1] != dims.nx and ny_e != 0:
raise Exception('inconsistent dimension: Vx_e should have nx columns.')
if cost.yref_e.shape[0] != ny_e:
raise Exception('inconsistent dimension: regarding W_e, yref_e.')
dims.ny_e = ny_e
elif cost.cost_type_e == 'NONLINEAR_LS':
ny_e = cost.W_e.shape[0]
if is_empty(model.cost_y_expr_e) and ny_e != 0:
raise Exception('inconsistent dimension ny_e: regarding W_e, cost_y_expr_e.')
elif casadi_length(model.cost_y_expr_e) != ny_e:
raise Exception('inconsistent dimension ny_e: regarding W_e, cost_y_expr_e.')
if cost.yref_e.shape[0] != ny_e:
raise Exception('inconsistent dimension: regarding W_e, yref_e.')
dims.ny_e = ny_e
## constraints
# initial
if (constraints.lbx_0 == [] and constraints.ubx_0 == []):
dims.nbx_0 = 0
else:
this_shape = constraints.lbx_0.shape
other_shape = constraints.ubx_0.shape
if not this_shape == other_shape:
raise Exception('lbx_0, ubx_0 have different shapes!')
if not is_column(constraints.lbx_0):
raise Exception('lbx_0, ubx_0 must be column vectors!')
dims.nbx_0 = constraints.lbx_0.size
if all(constraints.lbx_0 == constraints.ubx_0):
dims.nbxe_0 = dims.nbx_0
# path
nbx = constraints.idxbx.shape[0]
if constraints.ubx.shape[0] != nbx or constraints.lbx.shape[0] != nbx:
raise Exception('inconsistent dimension nbx, regarding idxbx, ubx, lbx.')
else:
dims.nbx = nbx
nbu = constraints.idxbu.shape[0]
if constraints.ubu.shape[0] != nbu or constraints.lbu.shape[0] != nbu:
raise Exception('inconsistent dimension nbu, regarding idxbu, ubu, lbu.')
else:
dims.nbu = nbu
ng = constraints.lg.shape[0]
if constraints.ug.shape[0] != ng or constraints.C.shape[0] != ng \
or constraints.D.shape[0] != ng:
raise Exception('inconsistent dimension ng, regarding lg, ug, C, D.')
else:
dims.ng = ng
if not is_empty(model.con_h_expr):
nh = casadi_length(model.con_h_expr)
else:
nh = 0
if constraints.uh.shape[0] != nh or constraints.lh.shape[0] != nh:
raise Exception('inconsistent dimension nh, regarding lh, uh, con_h_expr.')
else:
dims.nh = nh
if is_empty(model.con_phi_expr):
dims.nphi = 0
dims.nr = 0
else:
dims.nphi = casadi_length(model.con_phi_expr)
if is_empty(model.con_r_expr):
raise Exception('convex over nonlinear constraints: con_r_expr but con_phi_expr is nonempty')
else:
dims.nr = casadi_length(model.con_r_expr)
# terminal
nbx_e = constraints.idxbx_e.shape[0]
if constraints.ubx_e.shape[0] != nbx_e or constraints.lbx_e.shape[0] != nbx_e:
raise Exception('inconsistent dimension nbx_e, regarding idxbx_e, ubx_e, lbx_e.')
else:
dims.nbx_e = nbx_e
ng_e = constraints.lg_e.shape[0]
if constraints.ug_e.shape[0] != ng_e or constraints.C_e.shape[0] != ng_e:
raise Exception('inconsistent dimension ng_e, regarding_e lg_e, ug_e, C_e.')
else:
dims.ng_e = ng_e
if not is_empty(model.con_h_expr_e):
nh_e = casadi_length(model.con_h_expr_e)
else:
nh_e = 0
if constraints.uh_e.shape[0] != nh_e or constraints.lh_e.shape[0] != nh_e:
raise Exception('inconsistent dimension nh_e, regarding lh_e, uh_e, con_h_expr_e.')
else:
dims.nh_e = nh_e
if is_empty(model.con_phi_expr_e):
dims.nphi_e = 0
dims.nr_e = 0
else:
dims.nphi_e = casadi_length(model.con_phi_expr_e)
if is_empty(model.con_r_expr_e):
raise Exception('convex over nonlinear constraints: con_r_expr_e but con_phi_expr_e is nonempty')
else:
dims.nr_e = casadi_length(model.con_r_expr_e)
# Slack dimensions
nsbx = constraints.idxsbx.shape[0]
if is_empty(constraints.lsbx):
constraints.lsbx = np.zeros((nsbx,))
elif constraints.lsbx.shape[0] != nsbx:
raise Exception('inconsistent dimension nsbx, regarding idxsbx, lsbx.')
if is_empty(constraints.usbx):
constraints.usbx = np.zeros((nsbx,))
elif constraints.usbx.shape[0] != nsbx:
raise Exception('inconsistent dimension nsbx, regarding idxsbx, usbx.')
dims.nsbx = nsbx
nsbu = constraints.idxsbu.shape[0]
if is_empty(constraints.lsbu):
constraints.lsbu = np.zeros((nsbu,))
elif constraints.lsbu.shape[0] != nsbu:
raise Exception('inconsistent dimension nsbu, regarding idxsbu, lsbu.')
if is_empty(constraints.usbu):
constraints.usbu = np.zeros((nsbu,))
elif constraints.usbu.shape[0] != nsbu:
raise Exception('inconsistent dimension nsbu, regarding idxsbu, usbu.')
dims.nsbu = nsbu
nsh = constraints.idxsh.shape[0]
if is_empty(constraints.lsh):
constraints.lsh = np.zeros((nsh,))
elif constraints.lsh.shape[0] != nsh:
raise Exception('inconsistent dimension nsh, regarding idxsh, lsh.')
if is_empty(constraints.ush):
constraints.ush = np.zeros((nsh,))
elif constraints.ush.shape[0] != nsh:
raise Exception('inconsistent dimension nsh, regarding idxsh, ush.')
dims.nsh = nsh
nsphi = constraints.idxsphi.shape[0]
if is_empty(constraints.lsphi):
constraints.lsphi = np.zeros((nsphi,))
elif constraints.lsphi.shape[0] != nsphi:
raise Exception('inconsistent dimension nsphi, regarding idxsphi, lsphi.')
if is_empty(constraints.usphi):
constraints.usphi = np.zeros((nsphi,))
elif constraints.usphi.shape[0] != nsphi:
raise Exception('inconsistent dimension nsphi, regarding idxsphi, usphi.')
dims.nsphi = nsphi
nsg = constraints.idxsg.shape[0]
if is_empty(constraints.lsg):
constraints.lsg = np.zeros((nsg,))
elif constraints.lsg.shape[0] != nsg:
raise Exception('inconsistent dimension nsg, regarding idxsg, lsg.')
if is_empty(constraints.usg):
constraints.usg = np.zeros((nsg,))
elif constraints.usg.shape[0] != nsg:
raise Exception('inconsistent dimension nsg, regarding idxsg, usg.')
dims.nsg = nsg
ns = nsbx + nsbu + nsh + nsg + nsphi
wrong_field = ""
if cost.Zl.shape[0] != ns:
wrong_field = "Zl"
dim = cost.Zl.shape[0]
elif cost.Zu.shape[0] != ns:
wrong_field = "Zu"
dim = cost.Zu.shape[0]
elif cost.zl.shape[0] != ns:
wrong_field = "zl"
dim = cost.zl.shape[0]
elif cost.zu.shape[0] != ns:
wrong_field = "zu"
dim = cost.zu.shape[0]
if wrong_field != "":
raise Exception(f'Inconsistent size for field {wrong_field}, with dimension {dim}, \n\t'\
+ f'Detected ns = {ns} = nsbx + nsbu + nsg + nsh + nsphi.\n\t'\
+ f'With nsbx = {nsbx}, nsbu = {nsbu}, nsg = {nsg}, nsh = {nsh}, nsphi = {nsphi}')
dims.ns = ns
nsbx_e = constraints.idxsbx_e.shape[0]
if is_empty(constraints.lsbx_e):
constraints.lsbx_e = np.zeros((nsbx_e,))
elif constraints.lsbx_e.shape[0] != nsbx_e:
raise Exception('inconsistent dimension nsbx_e, regarding idxsbx_e, lsbx_e.')
if is_empty(constraints.usbx_e):
constraints.usbx_e = np.zeros((nsbx_e,))
elif constraints.usbx_e.shape[0] != nsbx_e:
raise Exception('inconsistent dimension nsbx_e, regarding idxsbx_e, usbx_e.')
dims.nsbx_e = nsbx_e
nsh_e = constraints.idxsh_e.shape[0]
if is_empty(constraints.lsh_e):
constraints.lsh_e = np.zeros((nsh_e,))
elif constraints.lsh_e.shape[0] != nsh_e:
raise Exception('inconsistent dimension nsh_e, regarding idxsh_e, lsh_e.')
if is_empty(constraints.ush_e):
constraints.ush_e = np.zeros((nsh_e,))
elif constraints.ush_e.shape[0] != nsh_e:
raise Exception('inconsistent dimension nsh_e, regarding idxsh_e, ush_e.')
dims.nsh_e = nsh_e
nsg_e = constraints.idxsg_e.shape[0]
if is_empty(constraints.lsg_e):
constraints.lsg_e = np.zeros((nsg_e,))
elif constraints.lsg_e.shape[0] != nsg_e:
raise Exception('inconsistent dimension nsg_e, regarding idxsg_e, lsg_e.')
if is_empty(constraints.usg_e):
constraints.usg_e = np.zeros((nsg_e,))
elif constraints.usg_e.shape[0] != nsg_e:
raise Exception('inconsistent dimension nsg_e, regarding idxsg_e, usg_e.')
dims.nsg_e = nsg_e
nsphi_e = constraints.idxsphi_e.shape[0]
if is_empty(constraints.lsphi_e):
constraints.lsphi_e = np.zeros((nsphi_e,))
elif constraints.lsphi_e.shape[0] != nsphi_e:
raise Exception('inconsistent dimension nsphi_e, regarding idxsphi_e, lsphi_e.')
if is_empty(constraints.usphi_e):
constraints.usphi_e = np.zeros((nsphi_e,))
elif constraints.usphi_e.shape[0] != nsphi_e:
raise Exception('inconsistent dimension nsphi_e, regarding idxsphi_e, usphi_e.')
dims.nsphi_e = nsphi_e
# terminal
ns_e = nsbx_e + nsh_e + nsg_e + nsphi_e
wrong_field = ""
if cost.Zl_e.shape[0] != ns_e:
wrong_field = "Zl_e"
dim = cost.Zl_e.shape[0]
elif cost.Zu_e.shape[0] != ns_e:
wrong_field = "Zu_e"
dim = cost.Zu_e.shape[0]
elif cost.zl_e.shape[0] != ns_e:
wrong_field = "zl_e"
dim = cost.zl_e.shape[0]
elif cost.zu_e.shape[0] != ns_e:
wrong_field = "zu_e"
dim = cost.zu_e.shape[0]
if wrong_field != "":
raise Exception(f'Inconsistent size for field {wrong_field}, with dimension {dim}, \n\t'\
+ f'Detected ns_e = {ns_e} = nsbx_e + nsg_e + nsh_e + nsphi_e.\n\t'\
+ f'With nsbx_e = {nsbx_e}, nsg_e = {nsg_e}, nsh_e = {nsh_e}, nsphi_e = {nsphi_e}')
dims.ns_e = ns_e
# discretization
if is_empty(opts.time_steps) and is_empty(opts.shooting_nodes):
# uniform discretization
opts.time_steps = opts.tf / dims.N * np.ones((dims.N,))
elif not is_empty(opts.shooting_nodes):
if np.shape(opts.shooting_nodes)[0] != dims.N+1:
raise Exception('inconsistent dimension N, regarding shooting_nodes.')
time_steps = np.zeros((dims.N,))
for i in range(dims.N):
time_steps[i] = opts.shooting_nodes[i+1] - opts.shooting_nodes[i]
opts.time_steps = time_steps
elif (not is_empty(opts.time_steps)) and (not is_empty(opts.shooting_nodes)):
Exception('Please provide either time_steps or shooting_nodes for nonuniform discretization')
tf = np.sum(opts.time_steps)
if (tf - opts.tf) / tf > 1e-15:
raise Exception(f'Inconsistent discretization: {opts.tf}'\
f' = tf != sum(opts.time_steps) = {tf}.')
def get_ocp_nlp_layout():
current_module = sys.modules[__name__]
acados_path = os.path.dirname(current_module.__file__)
with open(acados_path + '/acados_layout.json', 'r') as f:
ocp_nlp_layout = json.load(f)
return ocp_nlp_layout
def ocp_formulation_json_dump(acados_ocp, json_file='acados_ocp_nlp.json'):
# Load acados_ocp_nlp structure description
ocp_layout = get_ocp_nlp_layout()
# Copy input ocp object dictionary
ocp_nlp_dict = dict(deepcopy(acados_ocp).__dict__)
# TODO: maybe make one funciton with formatting
for acados_struct, v in ocp_layout.items():
# skip non dict attributes
if not isinstance(v, dict): continue
# setattr(ocp_nlp, acados_struct, dict(getattr(acados_ocp, acados_struct).__dict__))
# Copy ocp object attributes dictionaries
ocp_nlp_dict[acados_struct]=dict(getattr(acados_ocp, acados_struct).__dict__)
ocp_nlp_dict = format_class_dict(ocp_nlp_dict)
# strip symbolics
ocp_nlp_dict['model'] = acados_model_strip_casadi_symbolics(ocp_nlp_dict['model'])
# strip shooting_nodes
ocp_nlp_dict['solver_options'].pop('shooting_nodes', None)
dims_dict = acados_class2dict(acados_ocp.dims)
ocp_check_against_layout(ocp_nlp_dict, dims_dict)
with open(json_file, 'w') as f:
json.dump(ocp_nlp_dict, f, default=np_array_to_list, indent=4, sort_keys=True)
def ocp_formulation_json_load(json_file='acados_ocp_nlp.json'):
# Load acados_ocp_nlp structure description
ocp_layout = get_ocp_nlp_layout()
with open(json_file, 'r') as f:
ocp_nlp_json = json.load(f)
ocp_nlp_dict = json2dict(ocp_nlp_json, ocp_nlp_json['dims'])
# Instantiate AcadosOcp object
acados_ocp = AcadosOcp()
# load class dict
acados_ocp.__dict__ = ocp_nlp_dict
# laod class attributes dict, dims, constraints, etc
for acados_struct, v in ocp_layout.items():
# skip non dict attributes
if not isinstance(v, dict): continue
acados_attribute = getattr(acados_ocp, acados_struct)
acados_attribute.__dict__ = ocp_nlp_dict[acados_struct]
setattr(acados_ocp, acados_struct, acados_attribute)
return acados_ocp
def ocp_generate_external_functions(acados_ocp, model):
model = make_model_consistent(model)
if acados_ocp.solver_options.integrator_type == 'ERK':
# explicit model -- generate C code
generate_c_code_explicit_ode(model)
elif acados_ocp.solver_options.integrator_type == 'IRK':
# implicit model -- generate C code
opts = dict(generate_hess=1)
generate_c_code_implicit_ode(model, opts)
elif acados_ocp.solver_options.integrator_type == 'GNSF':
generate_c_code_gnsf(model)
else:
raise Exception("ocp_generate_external_functions: unknown integrator type.")
if acados_ocp.solver_options.hessian_approx == 'EXACT':
opts = dict(generate_hess=1)
else:
opts = dict(generate_hess=0)
if acados_ocp.dims.nphi > 0 or acados_ocp.dims.nh > 0:
generate_c_code_constraint(model, model.name, False, opts)
if acados_ocp.dims.nphi_e > 0 or acados_ocp.dims.nh_e > 0:
generate_c_code_constraint(model, model.name, True, opts)
# dummy matrices
if not acados_ocp.cost.cost_type == 'LINEAR_LS':
acados_ocp.cost.Vx = np.zeros((acados_ocp.dims.ny, acados_ocp.dims.nx))
acados_ocp.cost.Vu = np.zeros((acados_ocp.dims.ny, acados_ocp.dims.nu))
if not acados_ocp.cost.cost_type_e == 'LINEAR_LS':
acados_ocp.cost.Vx_e = np.zeros((acados_ocp.dims.ny_e, acados_ocp.dims.nx))
if acados_ocp.cost.cost_type == 'NONLINEAR_LS':
generate_c_code_nls_cost(model, model.name, False)
elif acados_ocp.cost.cost_type == 'EXTERNAL':
generate_c_code_external_cost(model, False)
if acados_ocp.cost.cost_type_e == 'NONLINEAR_LS':
generate_c_code_nls_cost(model, model.name, True)
elif acados_ocp.cost.cost_type_e == 'EXTERNAL':
generate_c_code_external_cost(model, True)
def ocp_render_templates(acados_ocp, json_file):
name = acados_ocp.model.name
# setting up loader and environment
json_path = '{cwd}/{json_file}'.format(
cwd=os.getcwd(),
json_file=json_file)
if not os.path.exists(json_path):
raise Exception('{} not found!'.format(json_path))
template_dir = 'c_generated_code/'
## Render templates
in_file = 'main.in.c'
out_file = 'main_{}.c'.format(name)
render_template(in_file, out_file, template_dir, json_path)
in_file = 'acados_solver.in.c'
out_file = 'acados_solver_{}.c'.format(name)
render_template(in_file, out_file, template_dir, json_path)
in_file = 'acados_solver.in.h'
out_file = 'acados_solver_{}.h'.format(name)
render_template(in_file, out_file, template_dir, json_path)
in_file = 'Makefile.in'
out_file = 'Makefile'
render_template(in_file, out_file, template_dir, json_path)
in_file = 'acados_solver_sfun.in.c'
out_file = 'acados_solver_sfunction_{}.c'.format(name)
render_template(in_file, out_file, template_dir, json_path)
in_file = 'make_sfun.in.m'
out_file = 'make_sfun.m'
render_template(in_file, out_file, template_dir, json_path)
in_file = 'acados_sim_solver.in.c'
out_file = 'acados_sim_solver_{}.c'.format(name)
render_template(in_file, out_file, template_dir, json_path)
in_file = 'acados_sim_solver.in.h'
out_file = 'acados_sim_solver_{}.h'.format(name)
render_template(in_file, out_file, template_dir, json_path)
## folder model
template_dir = 'c_generated_code/{}_model/'.format(name)
in_file = 'model.in.h'
out_file = '{}_model.h'.format(name)
render_template(in_file, out_file, template_dir, json_path)
# constraints on convex over nonlinear function
if acados_ocp.constraints.constr_type == 'BGP' and acados_ocp.dims.nphi > 0:
# constraints on outer function
template_dir = 'c_generated_code/{}_constraints/'.format(name)
in_file = 'phi_constraint.in.h'
out_file = '{}_phi_constraint.h'.format(name)
render_template(in_file, out_file, template_dir, json_path)
# terminal constraints on convex over nonlinear function
if acados_ocp.constraints.constr_type_e == 'BGP' and acados_ocp.dims.nphi_e > 0:
# terminal constraints on outer function
template_dir = 'c_generated_code/{}_constraints/'.format(name)
in_file = 'phi_e_constraint.in.h'
out_file = '{}_phi_e_constraint.h'.format(name)
render_template(in_file, out_file, template_dir, json_path)
# nonlinear constraints
if acados_ocp.constraints.constr_type == 'BGH' and acados_ocp.dims.nh > 0:
template_dir = 'c_generated_code/{}_constraints/'.format(name)
in_file = 'h_constraint.in.h'
out_file = '{}_h_constraint.h'.format(name)
render_template(in_file, out_file, template_dir, json_path)
# terminal nonlinear constraints
if acados_ocp.constraints.constr_type_e == 'BGH' and acados_ocp.dims.nh_e > 0:
template_dir = 'c_generated_code/{}_constraints/'.format(name)
in_file = 'h_e_constraint.in.h'
out_file = '{}_h_e_constraint.h'.format(name)
render_template(in_file, out_file, template_dir, json_path)
# nonlinear cost function
if acados_ocp.cost.cost_type == 'NONLINEAR_LS':
template_dir = 'c_generated_code/{}_cost/'.format(name)
in_file = 'cost_y_fun.in.h'
out_file = '{}_cost_y_fun.h'.format(name)
render_template(in_file, out_file, template_dir, json_path)
# terminal nonlinear cost function
if acados_ocp.cost.cost_type_e == 'NONLINEAR_LS':
template_dir = 'c_generated_code/{}_cost/'.format(name)
in_file = 'cost_y_e_fun.in.h'
out_file = '{}_cost_y_e_fun.h'.format(name)
render_template(in_file, out_file, template_dir, json_path)
# external cost
if acados_ocp.cost.cost_type == 'EXTERNAL':
template_dir = 'c_generated_code/{}_cost/'.format(name)
in_file = 'external_cost.in.h'
out_file = '{}_external_cost.h'.format(name)
render_template(in_file, out_file, template_dir, json_path)
# external cost - terminal
if acados_ocp.cost.cost_type_e == 'EXTERNAL':
template_dir = 'c_generated_code/{}_cost/'.format(name)
in_file = 'external_cost_e.in.h'
out_file = '{}_external_cost_e.h'.format(name)
render_template(in_file, out_file, template_dir, json_path)
class AcadosOcpSolver:
"""
class to interact with the acados ocp solver C object
"""
def __init__(self, acados_ocp, json_file='acados_ocp_nlp.json'):
self.solver_created = False
model = acados_ocp.model
# make dims consistent
make_ocp_dims_consistent(acados_ocp)
if acados_ocp.solver_options.integrator_type == 'GNSF':
set_up_imported_gnsf_model(acados_ocp)
# set integrator time automatically
acados_ocp.solver_options.Tsim = acados_ocp.solver_options.time_steps[0]
# generate external functions
ocp_generate_external_functions(acados_ocp, model)
# dump to json
ocp_formulation_json_dump(acados_ocp, json_file)
# render templates
ocp_render_templates(acados_ocp, json_file)
## Compile solver
os.chdir('c_generated_code')
os.system('make clean_ocp_shared_lib')
os.system('make ocp_shared_lib')
os.chdir('..')
self.shared_lib_name = 'c_generated_code/libacados_ocp_solver_' + model.name + '.so'
# get
self.shared_lib = CDLL(self.shared_lib_name)
self.shared_lib.acados_create()
self.solver_created = True
self.shared_lib.acados_get_nlp_opts.restype = c_void_p
self.nlp_opts = self.shared_lib.acados_get_nlp_opts()
self.shared_lib.acados_get_nlp_dims.restype = c_void_p
self.nlp_dims = self.shared_lib.acados_get_nlp_dims()
self.shared_lib.acados_get_nlp_config.restype = c_void_p
self.nlp_config = self.shared_lib.acados_get_nlp_config()
self.shared_lib.acados_get_nlp_out.restype = c_void_p
self.nlp_out = self.shared_lib.acados_get_nlp_out()
self.shared_lib.acados_get_nlp_in.restype = c_void_p
self.nlp_in = self.shared_lib.acados_get_nlp_in()
self.shared_lib.acados_get_nlp_solver.restype = c_void_p
self.nlp_solver = self.shared_lib.acados_get_nlp_solver()
self.acados_ocp = acados_ocp
def solve(self):
"""
solve the ocp with current input
"""
status = self.shared_lib.acados_solve()
return status
def get(self, stage_, field_):
"""
get the last solution of the solver:
:param stage: integer corresponding to shooting node
:param field_: string in ['x', 'u', 'z', 'pi', 'lam', 't', 'sl', 'su',]
.. note:: regarding lam, t: \n
the inequalities are internally organized in the following order: \n
[ lbu lbx lg lh lphi ubu ubx ug uh uphi; \n
lsbu lsbx lsg lsh lsphi usbu usbx usg ush usphi]
.. note:: pi: multipliers for dynamics equality constraints \n
lam: multipliers for inequalities \n
t: slack variables corresponding to evaluation of all inequalities (at the solution) \n
sl: slack variables of soft lower inequality constraints \n
su: slack variables of soft upper inequality constraints \n
"""
out_fields = ['x', 'u', 'z', 'pi', 'lam', 't']
mem_fields = ['sl', 'su']
field = field_
field = field.encode('utf-8')
if (field_ not in out_fields + mem_fields):
raise Exception('AcadosOcpSolver.get(): {} is an invalid argument.\
\n Possible values are {}. Exiting.'.format(field_, out_fields + mem_fields))
self.shared_lib.ocp_nlp_dims_get_from_attr.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p]
self.shared_lib.ocp_nlp_dims_get_from_attr.restype = c_int
dims = self.shared_lib.ocp_nlp_dims_get_from_attr(self.nlp_config, \
self.nlp_dims, self.nlp_out, stage_, field)
out = np.ascontiguousarray(np.zeros((dims,)), dtype=np.float64)
out_data = cast(out.ctypes.data, POINTER(c_double))
if (field_ in out_fields):
self.shared_lib.ocp_nlp_out_get.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_out_get(self.nlp_config, \
self.nlp_dims, self.nlp_out, stage_, field, out_data)
elif field_ in mem_fields:
self.shared_lib.ocp_nlp_get_at_stage.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_get_at_stage(self.nlp_config, \
self.nlp_dims, self.nlp_solver, stage_, field, out_data)
return out
def print_statistics(self):
stat = self.get_stats("statistics")
if self.acados_ocp.solver_options.nlp_solver_type == 'SQP':
print('\niter\tres_stat\tres_eq\t\tres_ineq\tres_comp\tqp_stat\tqp_iter')
if stat.shape[0]>7:
print('\tqp_res_stat\tqp_res_eq\tqp_res_ineq\tqp_res_comp')
for jj in range(stat.shape[1]):
print('{:d}\t{:e}\t{:e}\t{:e}\t{:e}\t{:d}\t{:d}'.format( \
int(stat[0][jj]), stat[1][jj], stat[2][jj], \
stat[3][jj], stat[4][jj], int(stat[5][jj]), int(stat[6][jj])))
if stat.shape[0]>7:
print('\t{:e}\t{:e}\t{:e}\t{:e}'.format( \
stat[7][jj], stat[8][jj], stat[9][jj], stat[10][jj]))
print('\n')
elif self.acados_ocp.solver_options.nlp_solver_type == 'SQP_RTI':
print('\niter\tqp_stat\tqp_iter')
if stat.shape[0]>3:
print('\tqp_res_stat\tqp_res_eq\tqp_res_ineq\tqp_res_comp')
for jj in range(stat.shape[1]):
print('{:d}\t{:d}\t{:d}'.format( int(stat[0][jj]), int(stat[1][jj]), int(stat[2][jj])))
if stat.shape[0]>3:
print('\t{:e}\t{:e}\t{:e}\t{:e}'.format( \
stat[3][jj], stat[4][jj], stat[5][jj], stat[6][jj]))
print('\n')
return
def get_stats(self, field_):
"""
get the information of the last solver call:
:param field_: string in ['statistics', 'time_tot', 'time_lin', 'time_sim', 'time_sim_ad', 'time_sim_la', 'time_qp', 'time_qp_solver_call', 'time_reg', 'sqp_iter']
"""
fields = ['time_tot', # total cpu time previous call
'time_lin', # cpu time for linearization
'time_sim', # cpu time for integrator
'time_sim_ad', # cpu time for integrator contribution of external function calls
'time_sim_la', # cpu time for integrator contribution of linear algebra
'time_qp', # cpu time qp solution
'time_qp_solver_call', # cpu time inside qp solver (without converting the QP)
'time_qp_xcond',
'time_reg', # cpu time regularization
'sqp_iter', # number of SQP iterations
'statistics', # table with info about last iteration
'stat_m',
'stat_n',
]
field = field_
field = field.encode('utf-8')
if (field_ not in fields):
raise Exception('AcadosOcpSolver.get_stats(): {} is not a valid argument.\
\n Possible values are {}. Exiting.'.format(fields, fields))
if field_ in ['sqp_iter', 'stat_m', 'stat_n']:
out = np.ascontiguousarray(np.zeros((1,)), dtype=np.int64)
out_data = cast(out.ctypes.data, POINTER(c_int64))
elif field_ == 'statistics':
sqp_iter = self.get_stats("sqp_iter")
stat_m = self.get_stats("stat_m")
stat_n = self.get_stats("stat_n")
min_size = min([stat_m, sqp_iter+1])
out = np.ascontiguousarray(
np.zeros( (stat_n[0]+1, min_size[0]) ), dtype=np.float64)
out_data = cast(out.ctypes.data, POINTER(c_double))
else:
out = np.ascontiguousarray(np.zeros((1,)), dtype=np.float64)
out_data = cast(out.ctypes.data, POINTER(c_double))
self.shared_lib.ocp_nlp_get.argtypes = [c_void_p, c_void_p, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_get(self.nlp_config, self.nlp_solver, field, out_data)
return out
# Note: this function should not be used anymore, better use cost_set, constraints_set
def set(self, stage_, field_, value_):
cost_fields = ['y_ref', 'yref']
constraints_fields = ['lbx', 'ubx', 'lbu', 'ubu']
out_fields = ['x', 'u', 'pi', 'lam', 't']
# cast value_ to avoid conversion issues
value_ = value_.astype(float)
field = field_
field = field.encode('utf-8')
stage = c_int(stage_)
# treat parameters separately
if field_ is 'p':
self.shared_lib.acados_update_params.argtypes = [c_int, POINTER(c_double)]
self.shared_lib.acados_update_params.restype = c_int
value_data = cast(value_.ctypes.data, POINTER(c_double))
self.shared_lib.acados_update_params(stage, value_data, value_.shape[0])
else:
if field_ not in constraints_fields + cost_fields + out_fields:
raise Exception("AcadosOcpSolver.set(): {} is not a valid argument.\
\nPossible values are {}. Exiting.".format(field, \
constraints_fields + cost_fields + out_fields + ['p']))
self.shared_lib.ocp_nlp_dims_get_from_attr.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p]
self.shared_lib.ocp_nlp_dims_get_from_attr.restype = c_int
dims = self.shared_lib.ocp_nlp_dims_get_from_attr(self.nlp_config, \
self.nlp_dims, self.nlp_out, stage_, field)
if value_.shape[0] != dims:
msg = 'AcadosOcpSolver.set(): mismatching dimension for field "{}" '.format(field_)
msg += 'with dimension {} (you have {})'.format(dims, value_.shape[0])
raise Exception(msg)
value_data = cast(value_.ctypes.data, POINTER(c_double))
value_data_p = cast((value_data), c_void_p)
if field_ in constraints_fields:
self.shared_lib.ocp_nlp_constraints_model_set.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_constraints_model_set(self.nlp_config, \
self.nlp_dims, self.nlp_in, stage, field, value_data_p)
elif field_ in cost_fields:
self.shared_lib.ocp_nlp_cost_model_set.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_cost_model_set(self.nlp_config, \
self.nlp_dims, self.nlp_in, stage, field, value_data_p)
elif field_ in out_fields:
self.shared_lib.ocp_nlp_out_set.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_out_set(self.nlp_config, \
self.nlp_dims, self.nlp_out, stage, field, value_data_p)
return
def cost_set(self, stage_, field_, value_):
"""
set numerical data in the cost module of the solver:
:param stage_: integer corresponding to shooting node
:param field_: string, e.g. 'yref', 'W', 'ext_cost_num_hess'
:param value_: of appropriate size
"""
# cast value_ to avoid conversion issues
value_ = value_.astype(float)
field = field_
field = field.encode('utf-8')
stage = c_int(stage_)
self.shared_lib.ocp_nlp_cost_dims_get_from_attr.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, POINTER(c_int)]
self.shared_lib.ocp_nlp_cost_dims_get_from_attr.restype = c_int
dims = np.ascontiguousarray(np.zeros((2,)), dtype=np.intc)
dims_data = cast(dims.ctypes.data, POINTER(c_int))
self.shared_lib.ocp_nlp_cost_dims_get_from_attr(self.nlp_config, \
self.nlp_dims, self.nlp_out, stage_, field, dims_data)
value_shape = value_.shape
if len(value_shape) == 1:
value_shape = (value_shape[0], 0)
if value_shape != tuple(dims):
raise Exception('AcadosOcpSolver.cost_set(): mismatching dimension', \
' for field "{}" with dimension {} (you have {})'.format( \
field_, tuple(dims), value_shape))
value_data = cast(value_.ctypes.data, POINTER(c_double))
value_data_p = cast((value_data), c_void_p)
self.shared_lib.ocp_nlp_cost_model_set.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_cost_model_set(self.nlp_config, \
self.nlp_dims, self.nlp_in, stage, field, value_data_p)
return
def constraints_set(self, stage_, field_, value_):
"""
set numerical data in the constraint module of the solver:
Parameters:
:param stage_: integer corresponding to shooting node
:param field_: string, e.g. 'lbx'
:param value_: of appropriate size
"""
# cast value_ to avoid conversion issues
value_ = value_.astype(float)
field = field_
field = field.encode('utf-8')
stage = c_int(stage_)
self.shared_lib.ocp_nlp_constraint_dims_get_from_attr.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, POINTER(c_int)]
self.shared_lib.ocp_nlp_constraint_dims_get_from_attr.restype = c_int
dims = np.ascontiguousarray(np.zeros((2,)), dtype=np.intc)
dims_data = cast(dims.ctypes.data, POINTER(c_int))
self.shared_lib.ocp_nlp_constraint_dims_get_from_attr(self.nlp_config, \
self.nlp_dims, self.nlp_out, stage_, field, dims_data)
value_shape = value_.shape
if len(value_shape) == 1:
value_shape = (value_shape[0], 0)
if value_shape != tuple(dims):
raise Exception('AcadosOcpSolver.constraints_set(): mismatching dimension' \
' for field "{}" with dimension {} (you have {})'.format(field_, tuple(dims), value_shape))
value_data = cast(value_.ctypes.data, POINTER(c_double))
value_data_p = cast((value_data), c_void_p)
self.shared_lib.ocp_nlp_constraints_model_set.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_constraints_model_set(self.nlp_config, \
self.nlp_dims, self.nlp_in, stage, field, value_data_p)
return
def options_set(self, field_, value_):
"""
set options of the solver:
Parameters:
:param field_: string, e.g. 'print_level', 'rti_phase', 'initialize_t_slacks', 'step_length'
:param value_: of type int, float
"""
int_fields = ['print_level', 'rti_phase', 'initialize_t_slacks']
double_fields = ['step_length']
string_fields = ['globalization']
if field_ in int_fields:
if not isinstance(value_, int):
raise Exception('solver option {} must be of type int. You have {}.'.format(field_, type(value_)))
else:
value_ctypes = c_int(value_)
elif field_ in double_fields:
if not isinstance(value_, float):
raise Exception('solver option {} must be of type float. You have {}.'.format(field_, type(value_)))
else:
value_ctypes = c_double(value_)
elif field_ in string_fields:
if not isinstance(value_, str):
raise Exception('solver option {} must be of type str. You have {}.'.format(field_, type(value_)))
else:
value_ctypes = value_.encode('utf-8')
if field_ == 'rti_phase':
if value_ < 0 or value_ > 2:
raise Exception('AcadosOcpSolver.solve(): argument \'rti_phase\' can '
'take only values 0, 1, 2 for SQP-RTI-type solvers')
if self.acados_ocp.solver_options.nlp_solver_type != 'SQP_RTI' and value_ > 0:
raise Exception('AcadosOcpSolver.solve(): argument \'rti_phase\' can '
'take only value 0 for SQP-type solvers')
field = field_
field = field.encode('utf-8')
if field_ in string_fields:
self.shared_lib.ocp_nlp_solver_opts_set.argtypes = \
[c_void_p, c_void_p, c_char_p, c_char_p]
self.shared_lib.ocp_nlp_solver_opts_set(self.nlp_config, \
self.nlp_opts, field, value_ctypes)
else:
self.shared_lib.ocp_nlp_solver_opts_set.argtypes = \
[c_void_p, c_void_p, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_solver_opts_set(self.nlp_config, \
self.nlp_opts, field, byref(value_ctypes))
return
def __del__(self):
if self.solver_created:
self.shared_lib.acados_free()
del self.shared_lib
# NOTE: DLL cannot be easily unloaded!!!
# see https://stackoverflow.com/questions/359498/how-can-i-unload-a-dll-using-ctypes-in-python
# while isLoaded(self.shared_lib_name):
# dlclose(handle)
| [
"os.path.exists",
"numpy.ones",
"os.getcwd",
"os.chdir",
"os.path.dirname",
"numpy.sum",
"numpy.zeros",
"copy.deepcopy",
"json.load",
"os.system",
"numpy.shape",
"json.dump"
] | [((15748, 15771), 'numpy.sum', 'np.sum', (['opts.time_steps'], {}), '(opts.time_steps)\n', (15754, 15771), True, 'import numpy as np\n'), ((16019, 16059), 'os.path.dirname', 'os.path.dirname', (['current_module.__file__'], {}), '(current_module.__file__)\n', (16034, 16059), False, 'import sys, os, json\n'), ((9215, 9232), 'numpy.zeros', 'np.zeros', (['(nsbx,)'], {}), '((nsbx,))\n', (9223, 9232), True, 'import numpy as np\n'), ((9419, 9436), 'numpy.zeros', 'np.zeros', (['(nsbx,)'], {}), '((nsbx,))\n', (9427, 9436), True, 'import numpy as np\n'), ((9684, 9701), 'numpy.zeros', 'np.zeros', (['(nsbu,)'], {}), '((nsbu,))\n', (9692, 9701), True, 'import numpy as np\n'), ((9888, 9905), 'numpy.zeros', 'np.zeros', (['(nsbu,)'], {}), '((nsbu,))\n', (9896, 9905), True, 'import numpy as np\n'), ((10149, 10165), 'numpy.zeros', 'np.zeros', (['(nsh,)'], {}), '((nsh,))\n', (10157, 10165), True, 'import numpy as np\n'), ((10345, 10361), 'numpy.zeros', 'np.zeros', (['(nsh,)'], {}), '((nsh,))\n', (10353, 10361), True, 'import numpy as np\n'), ((10606, 10624), 'numpy.zeros', 'np.zeros', (['(nsphi,)'], {}), '((nsphi,))\n', (10614, 10624), True, 'import numpy as np\n'), ((10818, 10836), 'numpy.zeros', 'np.zeros', (['(nsphi,)'], {}), '((nsphi,))\n', (10826, 10836), True, 'import numpy as np\n'), ((11087, 11103), 'numpy.zeros', 'np.zeros', (['(nsg,)'], {}), '((nsg,))\n', (11095, 11103), True, 'import numpy as np\n'), ((11283, 11299), 'numpy.zeros', 'np.zeros', (['(nsg,)'], {}), '((nsg,))\n', (11291, 11299), True, 'import numpy as np\n'), ((12287, 12306), 'numpy.zeros', 'np.zeros', (['(nsbx_e,)'], {}), '((nsbx_e,))\n', (12295, 12306), True, 'import numpy as np\n'), ((12507, 12526), 'numpy.zeros', 'np.zeros', (['(nsbx_e,)'], {}), '((nsbx_e,))\n', (12515, 12526), True, 'import numpy as np\n'), ((12792, 12810), 'numpy.zeros', 'np.zeros', (['(nsh_e,)'], {}), '((nsh_e,))\n', (12800, 12810), True, 'import numpy as np\n'), ((13004, 13022), 'numpy.zeros', 'np.zeros', (['(nsh_e,)'], {}), '((nsh_e,))\n', (13012, 13022), True, 'import numpy as np\n'), ((13281, 13299), 'numpy.zeros', 'np.zeros', (['(nsg_e,)'], {}), '((nsg_e,))\n', (13289, 13299), True, 'import numpy as np\n'), ((13493, 13511), 'numpy.zeros', 'np.zeros', (['(nsg_e,)'], {}), '((nsg_e,))\n', (13501, 13511), True, 'import numpy as np\n'), ((13778, 13798), 'numpy.zeros', 'np.zeros', (['(nsphi_e,)'], {}), '((nsphi_e,))\n', (13786, 13798), True, 'import numpy as np\n'), ((14006, 14026), 'numpy.zeros', 'np.zeros', (['(nsphi_e,)'], {}), '((nsphi_e,))\n', (14014, 14026), True, 'import numpy as np\n'), ((16147, 16159), 'json.load', 'json.load', (['f'], {}), '(f)\n', (16156, 16159), False, 'import sys, os, json\n'), ((17261, 17339), 'json.dump', 'json.dump', (['ocp_nlp_dict', 'f'], {'default': 'np_array_to_list', 'indent': '(4)', 'sort_keys': '(True)'}), '(ocp_nlp_dict, f, default=np_array_to_list, indent=4, sort_keys=True)\n', (17270, 17339), False, 'import sys, os, json\n'), ((17553, 17565), 'json.load', 'json.load', (['f'], {}), '(f)\n', (17562, 17565), False, 'import sys, os, json\n'), ((19295, 19345), 'numpy.zeros', 'np.zeros', (['(acados_ocp.dims.ny, acados_ocp.dims.nx)'], {}), '((acados_ocp.dims.ny, acados_ocp.dims.nx))\n', (19303, 19345), True, 'import numpy as np\n'), ((19375, 19425), 'numpy.zeros', 'np.zeros', (['(acados_ocp.dims.ny, acados_ocp.dims.nu)'], {}), '((acados_ocp.dims.ny, acados_ocp.dims.nu))\n', (19383, 19425), True, 'import numpy as np\n'), ((19512, 19564), 'numpy.zeros', 'np.zeros', (['(acados_ocp.dims.ny_e, acados_ocp.dims.nx)'], {}), 
'((acados_ocp.dims.ny_e, acados_ocp.dims.nx))\n', (19520, 19564), True, 'import numpy as np\n'), ((20232, 20257), 'os.path.exists', 'os.path.exists', (['json_path'], {}), '(json_path)\n', (20246, 20257), False, 'import sys, os, json\n'), ((25354, 25382), 'os.chdir', 'os.chdir', (['"""c_generated_code"""'], {}), "('c_generated_code')\n", (25362, 25382), False, 'import sys, os, json\n'), ((25391, 25429), 'os.system', 'os.system', (['"""make clean_ocp_shared_lib"""'], {}), "('make clean_ocp_shared_lib')\n", (25400, 25429), False, 'import sys, os, json\n'), ((25438, 25470), 'os.system', 'os.system', (['"""make ocp_shared_lib"""'], {}), "('make ocp_shared_lib')\n", (25447, 25470), False, 'import sys, os, json\n'), ((25479, 25493), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (25487, 25493), False, 'import sys, os, json\n'), ((15160, 15178), 'numpy.ones', 'np.ones', (['(dims.N,)'], {}), '((dims.N,))\n', (15167, 15178), True, 'import numpy as np\n'), ((15386, 15405), 'numpy.zeros', 'np.zeros', (['(dims.N,)'], {}), '((dims.N,))\n', (15394, 15405), True, 'import numpy as np\n'), ((16414, 16434), 'copy.deepcopy', 'deepcopy', (['acados_ocp'], {}), '(acados_ocp)\n', (16422, 16434), False, 'from copy import deepcopy\n'), ((20178, 20189), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (20187, 20189), False, 'import sys, os, json\n'), ((28362, 28379), 'numpy.zeros', 'np.zeros', (['(dims,)'], {}), '((dims,))\n', (28370, 28379), True, 'import numpy as np\n'), ((36504, 36518), 'numpy.zeros', 'np.zeros', (['(2,)'], {}), '((2,))\n', (36512, 36518), True, 'import numpy as np\n'), ((38275, 38289), 'numpy.zeros', 'np.zeros', (['(2,)'], {}), '((2,))\n', (38283, 38289), True, 'import numpy as np\n'), ((31963, 31977), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (31971, 31977), True, 'import numpy as np\n'), ((15235, 15264), 'numpy.shape', 'np.shape', (['opts.shooting_nodes'], {}), '(opts.shooting_nodes)\n', (15243, 15264), True, 'import numpy as np\n'), ((32353, 32391), 'numpy.zeros', 'np.zeros', (['(stat_n[0] + 1, min_size[0])'], {}), '((stat_n[0] + 1, min_size[0]))\n', (32361, 32391), True, 'import numpy as np\n'), ((32529, 32543), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (32537, 32543), True, 'import numpy as np\n')] |
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__ = "biotite.application"
__author__ = "<NAME>"
__all__ = ["Application", "AppStateError", "TimeoutError", "VersionError",
"AppState", "requires_state"]
import abc
import time
from functools import wraps
from enum import Flag, auto
class AppState(Flag):
"""
This enum type represents the app states of an application.
"""
CREATED = auto()
RUNNING = auto()
FINISHED = auto()
JOINED = auto()
CANCELLED = auto()
def requires_state(app_state):
"""
A decorator for methods of :class:`Application` subclasses that
raises an :class:`AppStateError` in case the method is called, when
the :class:`Application` is not in the specified :class:`AppState`
`app_state`.
Parameters
----------
app_state : AppState
The required app state.
Examples
--------
Raises :class:`AppStateError` when `function` is called,
if :class:`Application` is not in one of the specified states:
>>> @requires_state(AppState.RUNNING | AppState.FINISHED)
... def function(self):
... pass
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
# First parameter of method is always 'self'
instance = args[0]
if not instance._state & app_state:
raise AppStateError(
f"The application is in {instance.get_app_state()} state, "
f"but {app_state} state is required"
)
return func(*args, **kwargs)
return wrapper
return decorator
class Application(metaclass=abc.ABCMeta):
"""
This class is a wrapper around an external piece of runnable
software in any sense. Subclasses of this abstract base class
specify the respective kind of software and the way of interacting
with it.
Every :class:`Application` runs through a different app states
(instances of enum :class:`AppState`) from its creation until its
termination:
Directly after its instantiation the app is in the *CREATED* state.
In this state further parameters can be set for the application run.
After the user calls the :func:`start()` method, the app state is
set to *RUNNING* and the :class:`Application` type specific
:func:`run()` method is called.
When the application finishes the AppState changes to *FINISHED*.
This is checked via the :class:`Application` type specific
:func:`is_finished()` method.
The user can now call the :func:`join()` method, concluding the
application in the *JOINED* state and making the results of the
application accessible by executing the :class:`Application`
type specific :func:`evaluate()` method.
Furthermore this executes the :class:`Application` type specific
:func:`clean_up()` method.
:func:`join()` can even be called in the *RUNNING* state:
This will constantly check :func:`is_finished()` and will directly
go into the *JOINED* state as soon as the application reaches the
*FINISHED* state.
Calling the :func:`cancel()` method while the application is
*RUNNING* or *FINISHED* leaves the application in the *CANCELLED*
state.
This triggers the :func:`clean_up()` method, too, but there are no
accessible results.
If a method is called in an unsuitable app state, an
:class:`AppStateError` is called.
The application run behaves like an additional thread: Between the
call of :func:`start()` and :func:`join()` other Python code can be
executed, while the application runs in the background.
"""
def __init__(self):
self._state = AppState.CREATED
@requires_state(AppState.CREATED)
def start(self):
"""
Start the application run and set its state to *RUNNING*.
This can only be done from the *CREATED* state.
"""
self.run()
self._start_time = time.time()
self._state = AppState.RUNNING
@requires_state(AppState.RUNNING | AppState.FINISHED)
def join(self, timeout=None):
"""
Conclude the application run and set its state to *JOINED*.
This can only be done from the *RUNNING* or *FINISHED* state.
If the application is *FINISHED* the joining process happens
immediately, if otherwise the application is *RUNNING*, this
method waits until the application is *FINISHED*.
Parameters
----------
timeout : float, optional
If this parameter is specified, the :class:`Application`
only waits for finishing until this value (in seconds) runs
out.
After this time is exceeded a :class:`TimeoutError` is
raised and the application is cancelled.
Raises
------
TimeoutError
If the joining process exceeds the `timeout` value.
"""
time.sleep(self.wait_interval())
while self.get_app_state() != AppState.FINISHED:
if timeout is not None and time.time()-self._start_time > timeout:
self.cancel()
raise TimeoutError(
f"The application expired its timeout "
f"({timeout:.1f} s)"
)
else:
time.sleep(self.wait_interval())
time.sleep(self.wait_interval())
try:
self.evaluate()
except AppStateError:
raise
except:
self._state = AppState.CANCELLED
raise
else:
self._state = AppState.JOINED
self.clean_up()
@requires_state(AppState.RUNNING | AppState.FINISHED)
def cancel(self):
"""
Cancel the application when in *RUNNING* or *FINISHED* state.
"""
self._state = AppState.CANCELLED
self.clean_up()
def get_app_state(self):
"""
Get the current app state.
Returns
-------
app_state : AppState
The current app state.
"""
if self._state == AppState.RUNNING:
if self.is_finished():
self._state = AppState.FINISHED
return self._state
@abc.abstractmethod
def run(self):
"""
Commence the application run. Called in :func:`start()`.
PROTECTED: Override when inheriting.
"""
pass
@abc.abstractmethod
def is_finished(self):
"""
Check if the application has finished.
PROTECTED: Override when inheriting.
Returns
-------
finished : bool
True of the application has finished, false otherwise
"""
pass
@abc.abstractmethod
def wait_interval(self):
"""
The time interval of :func:`is_finished()` calls in the joining
process.
PROTECTED: Override when inheriting.
Returns
-------
interval : float
Time (in seconds) between calls of :func:`is_finished()` in
:func:`join()`
"""
pass
@abc.abstractmethod
def evaluate(self):
"""
Evaluate application results. Called in :func:`join()`.
PROTECTED: Override when inheriting.
"""
pass
def clean_up(self):
"""
Do clean up work after the application terminates.
PROTECTED: Optionally override when inheriting.
"""
pass
class AppStateError(Exception):
"""
Indicate that the application lifecycle was violated.
"""
pass
class TimeoutError(Exception):
"""
Indicate that the application's timeout expired.
"""
pass
class VersionError(Exception):
"""
Indicate that the application's version is invalid.
"""
pass | [
"enum.auto",
"functools.wraps",
"time.time"
] | [((526, 532), 'enum.auto', 'auto', ([], {}), '()\n', (530, 532), False, 'from enum import Flag, auto\n'), ((547, 553), 'enum.auto', 'auto', ([], {}), '()\n', (551, 553), False, 'from enum import Flag, auto\n'), ((569, 575), 'enum.auto', 'auto', ([], {}), '()\n', (573, 575), False, 'from enum import Flag, auto\n'), ((589, 595), 'enum.auto', 'auto', ([], {}), '()\n', (593, 595), False, 'from enum import Flag, auto\n'), ((612, 618), 'enum.auto', 'auto', ([], {}), '()\n', (616, 618), False, 'from enum import Flag, auto\n'), ((1293, 1304), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (1298, 1304), False, 'from functools import wraps\n'), ((4111, 4122), 'time.time', 'time.time', ([], {}), '()\n', (4120, 4122), False, 'import time\n'), ((5246, 5257), 'time.time', 'time.time', ([], {}), '()\n', (5255, 5257), False, 'import time\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from ..framework import core
from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import check_variable_and_dtype
# TODO: define functions to get tensor attributes
from ..fluid.layers import rank # noqa: F401
from ..fluid.layers import shape # noqa: F401
import paddle
from paddle import _C_ops
from paddle.static import Variable
from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
__all__ = []
def _complex_to_real_dtype(dtype):
if dtype == core.VarDesc.VarType.COMPLEX64:
return core.VarDesc.VarType.FP32
elif dtype == core.VarDesc.VarType.COMPLEX128:
return core.VarDesc.VarType.FP64
else:
return dtype
def _real_to_complex_dtype(dtype):
if dtype == core.VarDesc.VarType.FP32:
return core.VarDesc.VarType.COMPLEX64
elif dtype == core.VarDesc.VarType.FP64:
return core.VarDesc.VarType.COMPLEX128
else:
return dtype
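# Note (explanatory comment, not part of the original module): the two private
# helpers above are inverse mappings over the supported dtype pairs
# (COMPLEX64 <-> FP32, COMPLEX128 <-> FP64); any other dtype is passed through
# unchanged. real() and imag() below use _complex_to_real_dtype to choose the
# dtype of the output variable they create in static-graph mode. For example:
#
#     _complex_to_real_dtype(core.VarDesc.VarType.COMPLEX128)  # -> VarType.FP64
#     _real_to_complex_dtype(core.VarDesc.VarType.FP32)        # -> VarType.COMPLEX64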
def is_complex(x):
    """Return whether x is a tensor of complex data type (complex64 or complex128).
Args:
x (Tensor): The input tensor.
Returns:
bool: True if the data type of the input is complex data type, otherwise false.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1 + 2j, 3 + 4j])
print(paddle.is_complex(x))
# True
x = paddle.to_tensor([1.1, 1.2])
print(paddle.is_complex(x))
# False
x = paddle.to_tensor([1, 2, 3])
print(paddle.is_complex(x))
# False
"""
if not isinstance(x, (paddle.Tensor, paddle.static.Variable)):
raise TypeError("Expected Tensor, but received type of x: {}".format(
type(x)))
dtype = x.dtype
is_complex_dtype = (dtype == core.VarDesc.VarType.COMPLEX64 or
dtype == core.VarDesc.VarType.COMPLEX128)
return is_complex_dtype
def is_floating_point(x):
"""
    Returns whether the dtype of `x` is one of paddle.float64, paddle.float32, paddle.float16, or paddle.bfloat16.
Args:
x (Tensor): The input tensor.
Returns:
        bool: True if the dtype of `x` is a floating-point type, otherwise false.
Examples:
.. code-block:: python
import paddle
x = paddle.arange(1., 5., dtype='float32')
y = paddle.arange(1, 5, dtype='int32')
print(paddle.is_floating_point(x))
# True
print(paddle.is_floating_point(y))
# False
"""
if not isinstance(x, (paddle.Tensor, paddle.static.Variable)):
raise TypeError("Expected Tensor, but received type of x: {}".format(
type(x)))
dtype = x.dtype
is_fp_dtype = (dtype == core.VarDesc.VarType.FP32 or
dtype == core.VarDesc.VarType.FP64 or
dtype == core.VarDesc.VarType.FP16 or
dtype == core.VarDesc.VarType.BF16)
return is_fp_dtype
def is_integer(x):
"""Return whether x is a tensor of integeral data type.
Args:
x (Tensor): The input tensor.
Returns:
bool: True if the data type of the input is integer data type, otherwise false.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1 + 2j, 3 + 4j])
print(paddle.is_integer(x))
# False
x = paddle.to_tensor([1.1, 1.2])
print(paddle.is_integer(x))
# False
x = paddle.to_tensor([1, 2, 3])
print(paddle.is_integer(x))
# True
"""
if not isinstance(x, (paddle.Tensor, paddle.static.Variable)):
raise TypeError("Expected Tensor, but received type of x: {}".format(
type(x)))
dtype = x.dtype
is_int_dtype = (dtype == core.VarDesc.VarType.UINT8 or
dtype == core.VarDesc.VarType.INT8 or
dtype == core.VarDesc.VarType.INT16 or
dtype == core.VarDesc.VarType.INT32 or
dtype == core.VarDesc.VarType.INT64)
return is_int_dtype
def real(x, name=None):
"""
Returns a new tensor containing real values of the input tensor.
Args:
x (Tensor): the input tensor, its data type could be complex64 or complex128.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Tensor: a tensor containing real values of the input tensor.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor(
[[1 + 6j, 2 + 5j, 3 + 4j], [4 + 3j, 5 + 2j, 6 + 1j]])
# Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True,
# [[(1+6j), (2+5j), (3+4j)],
# [(4+3j), (5+2j), (6+1j)]])
real_res = paddle.real(x)
# Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[1., 2., 3.],
# [4., 5., 6.]])
real_t = x.real()
# Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[1., 2., 3.],
# [4., 5., 6.]])
"""
if in_dygraph_mode():
return _C_ops.final_state_real(x)
if _in_legacy_dygraph():
return _C_ops.real(x)
check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'real')
helper = LayerHelper('real', **locals())
out = helper.create_variable_for_type_inference(
dtype=_complex_to_real_dtype(helper.input_dtype()))
helper.append_op(type='real', inputs={'X': x}, outputs={'Out': out})
return out
def imag(x, name=None):
"""
    Returns a new tensor containing imaginary values of the input tensor.
Args:
x (Tensor): the input tensor, its data type could be complex64 or complex128.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Tensor: a tensor containing imaginary values of the input tensor.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor(
[[1 + 6j, 2 + 5j, 3 + 4j], [4 + 3j, 5 + 2j, 6 + 1j]])
# Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True,
# [[(1+6j), (2+5j), (3+4j)],
# [(4+3j), (5+2j), (6+1j)]])
imag_res = paddle.imag(x)
# Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[6., 5., 4.],
# [3., 2., 1.]])
imag_t = x.imag()
# Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[6., 5., 4.],
# [3., 2., 1.]])
"""
if in_dygraph_mode():
return _C_ops.final_state_imag(x)
if _in_legacy_dygraph():
return _C_ops.imag(x)
check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag')
helper = LayerHelper('imag', **locals())
out = helper.create_variable_for_type_inference(
dtype=_complex_to_real_dtype(helper.input_dtype()))
helper.append_op(type='imag', inputs={'X': x}, outputs={'Out': out})
return out
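# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a small helper showing
# how the attribute checks and the real/imag extraction above fit together in
# eager (dygraph) mode. It only relies on functions defined in this file plus
# ``paddle.to_tensor``; it is never called on import.
def _example_attribute_usage():
    z = paddle.to_tensor([1 + 2j, 3 + 4j])          # complex64 tensor
    return {
        'is_complex': is_complex(z),                # True
        'is_floating_point': is_floating_point(z),  # False
        'real': real(z),                            # float32 real parts
        'imag': imag(z),                            # float32 imaginary parts
    }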
| [
"paddle._C_ops.real",
"paddle._C_ops.final_state_real",
"paddle._C_ops.imag",
"paddle._C_ops.final_state_imag"
] | [((6059, 6085), 'paddle._C_ops.final_state_real', '_C_ops.final_state_real', (['x'], {}), '(x)\n', (6082, 6085), False, 'from paddle import _C_ops\n'), ((6130, 6144), 'paddle._C_ops.real', '_C_ops.real', (['x'], {}), '(x)\n', (6141, 6144), False, 'from paddle import _C_ops\n'), ((7755, 7781), 'paddle._C_ops.final_state_imag', '_C_ops.final_state_imag', (['x'], {}), '(x)\n', (7778, 7781), False, 'from paddle import _C_ops\n'), ((7826, 7840), 'paddle._C_ops.imag', '_C_ops.imag', (['x'], {}), '(x)\n', (7837, 7840), False, 'from paddle import _C_ops\n')] |
# Barcode Example
#
# This example shows off how easy it is to detect bar codes using the
# OpenMV Cam M7. Barcode detection does not work on the M4 Camera.
import sensor, image, time, math
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.VGA) # High Res!
sensor.set_windowing((640, 80)) # V Res of 80 == less work (40 for 2X the speed).
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False) # must turn this off to prevent image washout...
sensor.set_auto_whitebal(False) # must turn this off to prevent image washout...
clock = time.clock()
# Barcode detection can run at the full 640x480 resolution of your OpenMV Cam's
# OV7725 camera module. Barcode detection will also work in RGB565 mode but at
# a lower resolution. That said, barcode detection requires a higher resolution
# to work well so it should always be run at 640x480 in grayscale...
def barcode_name(code):
if(code.type() == image.EAN2):
return "EAN2"
if(code.type() == image.EAN5):
return "EAN5"
if(code.type() == image.EAN8):
return "EAN8"
if(code.type() == image.UPCE):
return "UPCE"
if(code.type() == image.ISBN10):
return "ISBN10"
if(code.type() == image.UPCA):
return "UPCA"
if(code.type() == image.EAN13):
return "EAN13"
if(code.type() == image.ISBN13):
return "ISBN13"
if(code.type() == image.I25):
return "I25"
if(code.type() == image.DATABAR):
return "DATABAR"
if(code.type() == image.DATABAR_EXP):
return "DATABAR_EXP"
if(code.type() == image.CODABAR):
return "CODABAR"
if(code.type() == image.CODE39):
return "CODE39"
if(code.type() == image.PDF417):
return "PDF417"
if(code.type() == image.CODE93):
return "CODE93"
if(code.type() == image.CODE128):
return "CODE128"
while(True):
clock.tick()
img = sensor.snapshot()
codes = img.find_barcodes()
for code in codes:
img.draw_rectangle(code.rect())
print_args = (barcode_name(code), code.payload(), (180 * code.rotation()) / math.pi, code.quality(), clock.fps())
print("Barcode %s, Payload \"%s\", rotation %f (degrees), quality %d, FPS %f" % print_args)
if not codes:
print("FPS %f" % clock.fps())
| [
"sensor.set_windowing",
"sensor.skip_frames",
"sensor.set_auto_gain",
"time.clock",
"sensor.set_pixformat",
"sensor.set_auto_whitebal",
"sensor.set_framesize",
"sensor.reset",
"sensor.snapshot"
] | [((192, 206), 'sensor.reset', 'sensor.reset', ([], {}), '()\n', (204, 206), False, 'import sensor, image, time, math\n'), ((207, 245), 'sensor.set_pixformat', 'sensor.set_pixformat', (['sensor.GRAYSCALE'], {}), '(sensor.GRAYSCALE)\n', (227, 245), False, 'import sensor, image, time, math\n'), ((246, 278), 'sensor.set_framesize', 'sensor.set_framesize', (['sensor.VGA'], {}), '(sensor.VGA)\n', (266, 278), False, 'import sensor, image, time, math\n'), ((291, 322), 'sensor.set_windowing', 'sensor.set_windowing', (['(640, 80)'], {}), '((640, 80))\n', (311, 322), False, 'import sensor, image, time, math\n'), ((373, 402), 'sensor.skip_frames', 'sensor.skip_frames', ([], {'time': '(2000)'}), '(time=2000)\n', (391, 402), False, 'import sensor, image, time, math\n'), ((405, 432), 'sensor.set_auto_gain', 'sensor.set_auto_gain', (['(False)'], {}), '(False)\n', (425, 432), False, 'import sensor, image, time, math\n'), ((483, 514), 'sensor.set_auto_whitebal', 'sensor.set_auto_whitebal', (['(False)'], {}), '(False)\n', (507, 514), False, 'import sensor, image, time, math\n'), ((573, 585), 'time.clock', 'time.clock', ([], {}), '()\n', (583, 585), False, 'import sensor, image, time, math\n'), ((1925, 1942), 'sensor.snapshot', 'sensor.snapshot', ([], {}), '()\n', (1940, 1942), False, 'import sensor, image, time, math\n')] |
# coding: utf-8
"""
Marketplace Insights API
<a href=\"https://developer.ebay.com/api-docs/static/versioning.html#limited\" target=\"_blank\"> <img src=\"/cms/img/docs/partners-api.svg\" class=\"legend-icon partners-icon\" title=\"Limited Release\" alt=\"Limited Release\" />(Limited Release)</a> The Marketplace Insights API provides the ability to search for sold items on eBay by keyword, GTIN, category, and product and returns the of sales history of those items. # noqa: E501
OpenAPI spec version: v1_beta.2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ItemLocation(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'address_line1': 'str',
'address_line2': 'str',
'city': 'str',
'country': 'str',
'county': 'str',
'postal_code': 'str',
'state_or_province': 'str'
}
attribute_map = {
'address_line1': 'addressLine1',
'address_line2': 'addressLine2',
'city': 'city',
'country': 'country',
'county': 'county',
'postal_code': 'postalCode',
'state_or_province': 'stateOrProvince'
}
def __init__(self, address_line1=None, address_line2=None, city=None, country=None, county=None, postal_code=None, state_or_province=None): # noqa: E501
"""ItemLocation - a model defined in Swagger""" # noqa: E501
self._address_line1 = None
self._address_line2 = None
self._city = None
self._country = None
self._county = None
self._postal_code = None
self._state_or_province = None
self.discriminator = None
if address_line1 is not None:
self.address_line1 = address_line1
if address_line2 is not None:
self.address_line2 = address_line2
if city is not None:
self.city = city
if country is not None:
self.country = country
if county is not None:
self.county = county
if postal_code is not None:
self.postal_code = postal_code
if state_or_province is not None:
self.state_or_province = state_or_province
@property
def address_line1(self):
"""Gets the address_line1 of this ItemLocation. # noqa: E501
The first line of the street address. # noqa: E501
:return: The address_line1 of this ItemLocation. # noqa: E501
:rtype: str
"""
return self._address_line1
@address_line1.setter
def address_line1(self, address_line1):
"""Sets the address_line1 of this ItemLocation.
The first line of the street address. # noqa: E501
:param address_line1: The address_line1 of this ItemLocation. # noqa: E501
:type: str
"""
self._address_line1 = address_line1
@property
def address_line2(self):
"""Gets the address_line2 of this ItemLocation. # noqa: E501
The second line of the street address. This field may contain such values as an apartment or suite number. # noqa: E501
:return: The address_line2 of this ItemLocation. # noqa: E501
:rtype: str
"""
return self._address_line2
@address_line2.setter
def address_line2(self, address_line2):
"""Sets the address_line2 of this ItemLocation.
The second line of the street address. This field may contain such values as an apartment or suite number. # noqa: E501
:param address_line2: The address_line2 of this ItemLocation. # noqa: E501
:type: str
"""
self._address_line2 = address_line2
@property
def city(self):
"""Gets the city of this ItemLocation. # noqa: E501
The city in which the item is located. # noqa: E501
:return: The city of this ItemLocation. # noqa: E501
:rtype: str
"""
return self._city
@city.setter
def city(self, city):
"""Sets the city of this ItemLocation.
The city in which the item is located. # noqa: E501
:param city: The city of this ItemLocation. # noqa: E501
:type: str
"""
self._city = city
@property
def country(self):
"""Gets the country of this ItemLocation. # noqa: E501
The two-letter <a href=\"https://www.iso.org/iso-3166-country-codes.html\">ISO 3166</a> standard code that indicates the country in which the item is located. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/buy/marketplace_insights/types/ba:CountryCodeEnum'>eBay API documentation</a> # noqa: E501
:return: The country of this ItemLocation. # noqa: E501
:rtype: str
"""
return self._country
@country.setter
def country(self, country):
"""Sets the country of this ItemLocation.
The two-letter <a href=\"https://www.iso.org/iso-3166-country-codes.html\">ISO 3166</a> standard code that indicates the country in which the item is located. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/buy/marketplace_insights/types/ba:CountryCodeEnum'>eBay API documentation</a> # noqa: E501
:param country: The country of this ItemLocation. # noqa: E501
:type: str
"""
self._country = country
@property
def county(self):
"""Gets the county of this ItemLocation. # noqa: E501
The county in which the item is located. # noqa: E501
:return: The county of this ItemLocation. # noqa: E501
:rtype: str
"""
return self._county
@county.setter
def county(self, county):
"""Sets the county of this ItemLocation.
The county in which the item is located. # noqa: E501
:param county: The county of this ItemLocation. # noqa: E501
:type: str
"""
self._county = county
@property
def postal_code(self):
"""Gets the postal_code of this ItemLocation. # noqa: E501
The postal code (or zip code in US) where the item is located.<br /> <br /><span class=\"tablenote\"> <b> Note: </b>Beginning in late January 2020, the displayed postal code will be masked to all users. Different countries will mask postal/zip codes in slightly different ways, but an example would be <code>951**</code>.</span> # noqa: E501
:return: The postal_code of this ItemLocation. # noqa: E501
:rtype: str
"""
return self._postal_code
@postal_code.setter
def postal_code(self, postal_code):
"""Sets the postal_code of this ItemLocation.
The postal code (or zip code in US) where the item is located.<br /> <br /><span class=\"tablenote\"> <b> Note: </b>Beginning in late January 2020, the displayed postal code will be masked to all users. Different countries will mask postal/zip codes in slightly different ways, but an example would be <code>951**</code>.</span> # noqa: E501
:param postal_code: The postal_code of this ItemLocation. # noqa: E501
:type: str
"""
self._postal_code = postal_code
@property
def state_or_province(self):
"""Gets the state_or_province of this ItemLocation. # noqa: E501
The state or province in which the item is located. # noqa: E501
:return: The state_or_province of this ItemLocation. # noqa: E501
:rtype: str
"""
return self._state_or_province
@state_or_province.setter
def state_or_province(self, state_or_province):
"""Sets the state_or_province of this ItemLocation.
The state or province in which the item is located. # noqa: E501
:param state_or_province: The state_or_province of this ItemLocation. # noqa: E501
:type: str
"""
self._state_or_province = state_or_province
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ItemLocation, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ItemLocation):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
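# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated client): a minimal example of
# building an ItemLocation and serializing it with the helpers defined above,
# guarded so it never runs on import.
if __name__ == "__main__":
    loc = ItemLocation(city='San Jose', country='US',
                       postal_code='951**', state_or_province='CA')
    print(loc.to_dict())  # dict keyed by the attribute names in swagger_types
    print(loc)            # pretty-printed via to_str()/__repr__()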
| [
"six.iteritems"
] | [((8458, 8491), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (8471, 8491), False, 'import six\n')] |
#!/usr/bin/env python
# -*- python -*-
#BEGIN_LEGAL
#
#Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
from __future__ import print_function
import re
import genutil
import codegen
def _emit_function(fe, isa_sets, name):
fo = codegen.function_object_t('xed_classify_{}'.format(name))
fo.add_arg('const xed_decoded_inst_t* d')
fo.add_code_eol(' const xed_isa_set_enum_t isa_set = xed_decoded_inst_get_isa_set(d)')
# FIXME: 2017-07-14 optimization: could use a static array for faster checking, smaller code
switch = codegen.c_switch_generator_t('isa_set', fo)
isa_sets_sorted = sorted(isa_sets)
for c in isa_sets_sorted:
switch.add_case('XED_ISA_SET_{}'.format(c.upper()),[],do_break=False)
if len(isa_sets) > 0:
switch.add('return 1;')
switch.add_default(['return 0;'], do_break=False)
switch.finish()
fo.emit_file_emitter(fe)
def work(agi):
sse_isa_sets = set([])
avx_isa_sets = set([])
avx512_isa_sets = set([])
avx512_kmask_op = set([])
for generator in agi.generator_list:
for ii in generator.parser_output.instructions:
if genutil.field_check(ii, 'iclass'):
if re.search('AVX512',ii.isa_set):
avx512_isa_sets.add(ii.isa_set)
if re.search('KOP',ii.isa_set):
avx512_kmask_op.add(ii.isa_set)
elif re.search('AVX',ii.isa_set) or ii.isa_set in ['F16C', 'FMA']:
avx_isa_sets.add(ii.isa_set)
elif re.search('SSE',ii.isa_set) or ii.isa_set in ['AES','PCLMULQDQ']:
# Exclude MMX instructions that come in with SSE2 &
# SSSE3. The several purely MMX instr in SSE are
# "SSE-opcodes" with memop operands. One can look for
# those with SSE2MMX and SSSE3MMX xed isa_sets.
#
# Also exclude the SSE_PREFETCH operations; Those are
# just memops.
if (not re.search('MMX',ii.isa_set) and not re.search('PREFETCH',ii.isa_set)
and not re.search('X87',ii.isa_set) and not re.search('MWAIT',ii.isa_set)):
sse_isa_sets.add(ii.isa_set)
fe = agi.open_file('xed-classifiers.c') # xed_file_emitter_t
_emit_function(fe, avx512_isa_sets, 'avx512')
_emit_function(fe, avx512_kmask_op, 'avx512_maskop')
_emit_function(fe, avx_isa_sets, 'avx')
_emit_function(fe, sse_isa_sets, 'sse')
fe.close()
return
| [
"codegen.c_switch_generator_t",
"genutil.field_check",
"re.search"
] | [((1102, 1145), 'codegen.c_switch_generator_t', 'codegen.c_switch_generator_t', (['"""isa_set"""', 'fo'], {}), "('isa_set', fo)\n", (1130, 1145), False, 'import codegen\n'), ((1697, 1730), 'genutil.field_check', 'genutil.field_check', (['ii', '"""iclass"""'], {}), "(ii, 'iclass')\n", (1716, 1730), False, 'import genutil\n'), ((1748, 1779), 're.search', 're.search', (['"""AVX512"""', 'ii.isa_set'], {}), "('AVX512', ii.isa_set)\n", (1757, 1779), False, 'import re\n'), ((1849, 1877), 're.search', 're.search', (['"""KOP"""', 'ii.isa_set'], {}), "('KOP', ii.isa_set)\n", (1858, 1877), False, 'import re\n'), ((1949, 1977), 're.search', 're.search', (['"""AVX"""', 'ii.isa_set'], {}), "('AVX', ii.isa_set)\n", (1958, 1977), False, 'import re\n'), ((2075, 2103), 're.search', 're.search', (['"""SSE"""', 'ii.isa_set'], {}), "('SSE', ii.isa_set)\n", (2084, 2103), False, 'import re\n'), ((2559, 2587), 're.search', 're.search', (['"""MMX"""', 'ii.isa_set'], {}), "('MMX', ii.isa_set)\n", (2568, 2587), False, 'import re\n'), ((2595, 2628), 're.search', 're.search', (['"""PREFETCH"""', 'ii.isa_set'], {}), "('PREFETCH', ii.isa_set)\n", (2604, 2628), False, 'import re\n'), ((2657, 2685), 're.search', 're.search', (['"""X87"""', 'ii.isa_set'], {}), "('X87', ii.isa_set)\n", (2666, 2685), False, 'import re\n'), ((2693, 2723), 're.search', 're.search', (['"""MWAIT"""', 'ii.isa_set'], {}), "('MWAIT', ii.isa_set)\n", (2702, 2723), False, 'import re\n')] |
"""
Module containing all the spectrogram classes
"""
# 0.2.0
import torch
import torch.nn as nn
from torch.nn.functional import conv1d, conv2d, fold
import numpy as np
from time import time
from nnAudio.librosa_functions import *
from nnAudio.utils import *
sz_float = 4 # size of a float
epsilon = 10e-8 # fudge factor for normalization
### --------------------------- Spectrogram Classes ---------------------------###
class STFT(torch.nn.Module):
"""This function is to calculate the short-time Fourier transform (STFT) of the input signal.
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
The correct shape will be inferred automatically if the input follows these 3 shapes.
Most of the arguments follow the convention from librosa.
    This class inherits from ``torch.nn.Module``, therefore, the usage is the same as ``torch.nn.Module``.
Parameters
----------
n_fft : int
The window size. Default value is 2048.
freq_bins : int
Number of frequency bins. Default is ``None``, which means ``n_fft//2+1`` bins.
hop_length : int
The hop (or stride) size. Default value is ``None`` which is equivalent to ``n_fft//4``.
window : str
The windowing function for STFT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'.
freq_scale : 'linear', 'log', or 'no'
Determine the spacing between each frequency bin. When `linear` or `log` is used,
the bin spacing can be controlled by ``fmin`` and ``fmax``. If 'no' is used, the bin will
start at 0Hz and end at Nyquist frequency with linear spacing.
center : bool
        Putting the STFT kernel at the center of the time-step or not. If ``False``, the time
        index is the beginning of the STFT kernel; if ``True``, the time index is the center of
        the STFT kernel. Default value is ``True``.
pad_mode : str
The padding method. Default value is 'reflect'.
    iSTFT : bool
        To activate the iSTFT module or not. By default, it is ``False`` to save GPU memory.
fmin : int
The starting frequency for the lowest frequency bin. If freq_scale is ``no``, this argument
does nothing.
fmax : int
The ending frequency for the highest frequency bin. If freq_scale is ``no``, this argument
does nothing.
sr : int
        The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
trainable : bool
        Determine if the STFT kernels are trainable or not. If ``True``, the gradients for STFT
        kernels will also be calculated and the STFT kernels will be updated during model training.
        Default value is ``False``.
output_format : str
Control the spectrogram output type, either ``Magnitude``, ``Complex``, or ``Phase``.
The output_format can also be changed during the ``forward`` method.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints
device : str
Choose which device to initialize this layer. Default value is 'cpu'
Returns
-------
spectrogram : torch.tensor
It returns a tensor of spectrograms.
``shape = (num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;
``shape = (num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;
Examples
--------
>>> spec_layer = Spectrogram.STFT()
>>> specs = spec_layer(x)
"""
def __init__(self, n_fft=2048, win_length=None, freq_bins=None, hop_length=None, window='hann',
freq_scale='no', center=True, pad_mode='reflect', iSTFT=False,
fmin=50, fmax=6000, sr=22050, trainable=False,
output_format="Complex", verbose=True):
super().__init__()
# Trying to make the default setting same as librosa
if win_length==None: win_length = n_fft
if hop_length==None: hop_length = int(win_length // 4)
self.output_format = output_format
self.trainable = trainable
self.stride = hop_length
self.center = center
self.pad_mode = pad_mode
self.n_fft = n_fft
self.freq_bins = freq_bins
self.trainable = trainable
self.pad_amount = self.n_fft // 2
self.window = window
self.win_length = win_length
self.iSTFT = iSTFT
self.trainable = trainable
start = time()
# Create filter windows for stft
kernel_sin, kernel_cos, self.bins2freq, self.bin_list, window_mask = create_fourier_kernels(n_fft,
win_length=win_length,
freq_bins=freq_bins,
window=window,
freq_scale=freq_scale,
fmin=fmin,
fmax=fmax,
sr=sr,
verbose=verbose)
kernel_sin = torch.tensor(kernel_sin, dtype=torch.float)
kernel_cos = torch.tensor(kernel_cos, dtype=torch.float)
# In this way, the inverse kernel and the forward kernel do not share the same memory...
kernel_sin_inv = torch.cat((kernel_sin, -kernel_sin[1:-1].flip(0)), 0)
kernel_cos_inv = torch.cat((kernel_cos, kernel_cos[1:-1].flip(0)), 0)
if iSTFT:
self.register_buffer('kernel_sin_inv', kernel_sin_inv.unsqueeze(-1))
self.register_buffer('kernel_cos_inv', kernel_cos_inv.unsqueeze(-1))
# Making all these variables nn.Parameter, so that the model can be used with nn.Parallel
# self.kernel_sin = torch.nn.Parameter(self.kernel_sin, requires_grad=self.trainable)
# self.kernel_cos = torch.nn.Parameter(self.kernel_cos, requires_grad=self.trainable)
# Applying window functions to the Fourier kernels
window_mask = torch.tensor(window_mask)
wsin = kernel_sin * window_mask
wcos = kernel_cos * window_mask
if self.trainable==False:
self.register_buffer('wsin', wsin)
self.register_buffer('wcos', wcos)
if self.trainable==True:
wsin = torch.nn.Parameter(wsin, requires_grad=self.trainable)
wcos = torch.nn.Parameter(wcos, requires_grad=self.trainable)
self.register_parameter('wsin', wsin)
self.register_parameter('wcos', wcos)
# Prepare the shape of window mask so that it can be used later in inverse
self.register_buffer('window_mask', window_mask.unsqueeze(0).unsqueeze(-1))
if verbose==True:
print("STFT kernels created, time used = {:.4f} seconds".format(time()-start))
else:
pass
def forward(self, x, output_format=None):
"""
Convert a batch of waveforms to spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
output_format : str
            Control the type of spectrogram to be returned. Can be either ``Magnitude``, ``Complex``, or ``Phase``.
Default value is ``Complex``.
"""
output_format = output_format or self.output_format
self.num_samples = x.shape[-1]
x = broadcast_dim(x)
if self.center:
if self.pad_mode == 'constant':
padding = nn.ConstantPad1d(self.pad_amount, 0)
elif self.pad_mode == 'reflect':
if self.num_samples < self.pad_amount:
raise AssertionError("Signal length shorter than reflect padding length (n_fft // 2).")
padding = nn.ReflectionPad1d(self.pad_amount)
x = padding(x)
spec_imag = conv1d(x, self.wsin, stride=self.stride)
spec_real = conv1d(x, self.wcos, stride=self.stride) # Doing STFT by using conv1d
# remove redundant parts
spec_real = spec_real[:, :self.freq_bins, :]
spec_imag = spec_imag[:, :self.freq_bins, :]
if output_format=='Magnitude':
spec = spec_real.pow(2) + spec_imag.pow(2)
if self.trainable==True:
return torch.sqrt(spec+1e-8) # prevent Nan gradient when sqrt(0) due to output=0
else:
return torch.sqrt(spec)
elif output_format=='Complex':
return torch.stack((spec_real,-spec_imag), -1) # Remember the minus sign for imaginary part
elif output_format=='Phase':
return torch.atan2(-spec_imag+0.0,spec_real) # +0.0 removes -0.0 elements, which leads to error in calculating phase
def inverse(self, X, onesided=True, length=None, refresh_win=True):
"""
This function is same as the :func:`~nnAudio.Spectrogram.iSTFT` class,
which is to convert spectrograms back to waveforms.
It only works for the complex value spectrograms. If you have the magnitude spectrograms,
please use :func:`~nnAudio.Spectrogram.Griffin_Lim`.
Parameters
----------
onesided : bool
If your spectrograms only have ``n_fft//2+1`` frequency bins, please use ``onesided=True``,
else use ``onesided=False``
length : int
To make sure the inverse STFT has the same output length of the original waveform, please
set `length` as your intended waveform length. By default, ``length=None``,
which will remove ``n_fft//2`` samples from the start and the end of the output.
refresh_win : bool
Recalculating the window sum square. If you have an input with fixed number of timesteps,
you can increase the speed by setting ``refresh_win=False``. Else please keep ``refresh_win=True``
"""
if (hasattr(self, 'kernel_sin_inv') != True) or (hasattr(self, 'kernel_cos_inv') != True):
raise NameError("Please activate the iSTFT module by setting `iSTFT=True` if you want to use `inverse`")
assert X.dim()==4 , "Inverse iSTFT only works for complex number," \
"make sure our tensor is in the shape of (batch, freq_bins, timesteps, 2)."\
"\nIf you have a magnitude spectrogram, please consider using Griffin-Lim."
if onesided:
X = extend_fbins(X) # extend freq
X_real, X_imag = X[:, :, :, 0], X[:, :, :, 1]
# broadcast dimensions to support 2D convolution
X_real_bc = X_real.unsqueeze(1)
X_imag_bc = X_imag.unsqueeze(1)
a1 = conv2d(X_real_bc, self.kernel_cos_inv, stride=(1,1))
b2 = conv2d(X_imag_bc, self.kernel_sin_inv, stride=(1,1))
# compute real and imag part. signal lies in the real part
real = a1 - b2
real = real.squeeze(-2)*self.window_mask
# Normalize the amplitude with n_fft
real /= (self.n_fft)
# Overlap and Add algorithm to connect all the frames
real = overlap_add(real, self.stride)
# Prepare the window sumsqure for division
# Only need to create this window once to save time
# Unless the input spectrograms have different time steps
if hasattr(self, 'w_sum')==False or refresh_win==True:
self.w_sum = torch_window_sumsquare(self.window_mask.flatten(), X.shape[2], self.stride, self.n_fft).flatten()
self.nonzero_indices = (self.w_sum>1e-10)
else:
pass
real[:, self.nonzero_indices] = real[:,self.nonzero_indices].div(self.w_sum[self.nonzero_indices])
# Remove padding
if length is None:
if self.center:
real = real[:, self.pad_amount:-self.pad_amount]
else:
if self.center:
real = real[:, self.pad_amount:self.pad_amount + length]
else:
real = real[:, :length]
return real
def extra_repr(self) -> str:
return 'n_fft={}, Fourier Kernel size={}, iSTFT={}, trainable={}'.format(
self.n_fft, (*self.wsin.shape,), self.iSTFT, self.trainable
)
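# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original library code): a minimal STFT
# analysis/synthesis round trip on a synthetic 440 Hz sine. ``iSTFT=True`` is
# required so that the inverse kernels are registered; how closely x_hat
# matches x depends on the window and hop size chosen here.
def _example_stft_roundtrip(sr=22050):
    t = torch.arange(sr, dtype=torch.float32) / sr          # 1 second of audio
    x = torch.sin(2 * np.pi * 440 * t)
    stft_layer = STFT(n_fft=2048, hop_length=512, sr=sr, iSTFT=True,
                      output_format='Complex', verbose=False)
    spec = stft_layer(x)                                     # (1, freq_bins, time_steps, 2)
    x_hat = stft_layer.inverse(spec, length=x.shape[-1])     # (1, len_audio)
    return x, x_hat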
class MelSpectrogram(torch.nn.Module):
"""This function is to calculate the Melspectrogram of the input signal.
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
The correct shape will be inferred automatically if the input follows these 3 shapes.
Most of the arguments follow the convention from librosa.
    This class inherits from ``torch.nn.Module``, therefore, the usage is the same as ``torch.nn.Module``.
Parameters
----------
sr : int
The sampling rate for the input audio.
It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
n_fft : int
The window size for the STFT. Default value is 2048
n_mels : int
The number of Mel filter banks. The filter banks maps the n_fft to mel bins.
Default value is 128.
hop_length : int
The hop (or stride) size. Default value is 512.
window : str
The windowing function for STFT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'.
center : bool
        Putting the STFT kernel at the center of the time-step or not. If ``False``,
        the time index is the beginning of the STFT kernel; if ``True``, the time index is the
        center of the STFT kernel. Default value is ``True``.
pad_mode : str
The padding method. Default value is 'reflect'.
htk : bool
When ``False`` is used, the Mel scale is quasi-logarithmic. When ``True`` is used, the
Mel scale is logarithmic. The default value is ``False``.
fmin : int
The starting frequency for the lowest Mel filter bank.
fmax : int
The ending frequency for the highest Mel filter bank.
trainable_mel : bool
Determine if the Mel filter banks are trainable or not. If ``True``, the gradients for Mel
filter banks will also be calculated and the Mel filter banks will be updated during model
training. Default value is ``False``.
trainable_STFT : bool
        Determine if the STFT kernels are trainable or not. If ``True``, the gradients for STFT
        kernels will also be calculated and the STFT kernels will be updated during model training.
        Default value is ``False``.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints.
device : str
Choose which device to initialize this layer. Default value is 'cpu'.
Returns
-------
spectrogram : torch.tensor
It returns a tensor of spectrograms. shape = ``(num_samples, freq_bins,time_steps)``.
Examples
--------
>>> spec_layer = Spectrogram.MelSpectrogram()
>>> specs = spec_layer(x)
"""
def __init__(self, sr=22050, n_fft=2048, n_mels=128, hop_length=512,
window='hann', center=True, pad_mode='reflect', power=2.0, htk=False,
fmin=0.0, fmax=None, norm=1, trainable_mel=False, trainable_STFT=False,
verbose=True, **kwargs):
super().__init__()
self.stride = hop_length
self.center = center
self.pad_mode = pad_mode
self.n_fft = n_fft
self.power = power
self.trainable_mel = trainable_mel
self.trainable_STFT = trainable_STFT
self.verbose = verbose
# Preparing for the stft layer. No need for center
self.stft = STFT(n_fft=n_fft, freq_bins=None, hop_length=hop_length, window=window,
freq_scale='no', center=center, pad_mode=pad_mode, sr=sr, trainable=trainable_STFT,
output_format="Magnitude", verbose=verbose, **kwargs)
# Create filter windows for stft
start = time()
# Creating kernel for mel spectrogram
start = time()
mel_basis = mel(sr, n_fft, n_mels, fmin, fmax, htk=htk, norm=norm)
mel_basis = torch.tensor(mel_basis)
if verbose==True:
print("STFT filter created, time used = {:.4f} seconds".format(time()-start))
print("Mel filter created, time used = {:.4f} seconds".format(time()-start))
else:
pass
if trainable_mel:
# Making everything nn.Parameter, so that this model can support nn.DataParallel
mel_basis = torch.nn.Parameter(mel_basis, requires_grad=trainable_mel)
self.register_parameter('mel_basis', mel_basis)
else:
self.register_buffer('mel_basis', mel_basis)
# if trainable_mel==True:
# self.mel_basis = torch.nn.Parameter(self.mel_basis)
# if trainable_STFT==True:
# self.wsin = torch.nn.Parameter(self.wsin)
# self.wcos = torch.nn.Parameter(self.wcos)
def forward(self, x):
"""
Convert a batch of waveforms to Mel spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
x = broadcast_dim(x)
spec = self.stft(x, output_format='Magnitude')**self.power
melspec = torch.matmul(self.mel_basis, spec)
return melspec
def extra_repr(self) -> str:
return 'Mel filter banks size = {}, trainable_mel={}'.format(
(*self.mel_basis.shape,), self.trainable_mel, self.trainable_STFT
)
def to_stft(self, melspec, max_steps=1000, loss_threshold=1e-8, grad_threshold=1e-7, random_start=False, sgd_kwargs=None, eps=1e-12, return_extras=False, verbose=None):
"""
Best-attempt spectrogram inversion
"""
def loss_fn(pred, target):
pred = pred.unsqueeze(1) if pred.ndim == 3 else pred
target = target.unsqueeze(1) if target.ndim == 3 else target
loss = (pred - target).pow(2).sum(-2).mean()
return loss
verbose = verbose or self.verbose
# SGD arguments
default_sgd_kwargs = dict(lr=1e3, momentum=0.9)
if sgd_kwargs:
default_sgd_kwargs.update(sgd_kwargs)
sgd_kwargs = default_sgd_kwargs
mel_basis = self.mel_basis.detach()
shape = melspec.shape
batch_size, n_mels, time = shape[0], shape[-2], shape[-1]
_, n_freq = mel_basis.shape
melspec = melspec.detach().view(-1, n_mels, time)
if random_start:
pred_stft_shape = (batch_size, n_freq, time)
pred_stft = torch.zeros(*pred_stft_shape, dtype=torch.float32, device=mel_basis.device).normal_().clamp_(eps)
else:
pred_stft = (torch.pinverse(mel_basis) @ melspec).clamp(eps)
pred_stft = nn.Parameter(pred_stft, requires_grad=True)
sgd_kwargs["lr"] = sgd_kwargs["lr"] * batch_size
optimizer = torch.optim.SGD([pred_stft], **sgd_kwargs)
losses = []
for i in range(max_steps):
optimizer.zero_grad()
pred_mel = mel_basis @ pred_stft
loss = loss_fn(pred_mel, melspec)
losses.append(loss.item())
loss.backward()
optimizer.step()
# Check conditions
if not loss.isfinite():
raise OverflowError("Overflow encountered in Mel -> STFT optimization")
if loss_threshold and loss < loss_threshold:
if verbose:
print(f"Target error of {loss_threshold} reached. Stopping optimization.")
break
if grad_threshold and pred_stft.grad.max() < grad_threshold:
if verbose:
print(f"Target max gradient of {grad_threshold} reached. Stopping optimization.")
break
pred_stft = pred_stft.detach().clamp(eps) ** 0.5
pred_stft = pred_stft.view((*shape[:-2], n_freq, time))
if return_extras:
return pred_stft, pred_mel.detach(), losses
return pred_stft
def inverse(self, melspec, mel_inversion_params=None, stft_inversion_params=None):
default_mel_inversion_params = {}
default_stft_inversion_params = {}
mel_inversion_params = mel_inversion_params or {}
stft_inversion_params = stft_inversion_params or {}
if mel_inversion_params:
mel_inversion_params = {**default_mel_inversion_params, **mel_inversion_params}
if stft_inversion_params:
stft_inversion_params = {**default_stft_inversion_params, **stft_inversion_params}
recon_stft = self.to_stft(melspec, **mel_inversion_params)
recon_audio = self.stft.inverse(recon_stft, **stft_inversion_params)
return recon_audio
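# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original library code): Mel-spectrogram
# extraction for a synthetic 440 Hz sine, followed by the best-attempt
# ``to_stft`` inversion above. The SGD-based inversion is approximate and its
# default hyperparameters may need tuning; recovering a waveform additionally
# requires phase reconstruction, so only the magnitude STFT estimate is shown.
def _example_melspectrogram(sr=22050):
    t = torch.arange(sr, dtype=torch.float32) / sr
    x = torch.sin(2 * np.pi * 440 * t)
    mel_layer = MelSpectrogram(sr=sr, n_fft=2048, n_mels=128, hop_length=512,
                               verbose=False)
    melspec = mel_layer(x)                                # (1, n_mels, time_steps)
    # A conservative learning rate keeps this sketch numerically tame; the
    # library default (lr=1e3) is tuned for its own use cases.
    stft_mag = mel_layer.to_stft(melspec, max_steps=100,
                                 sgd_kwargs=dict(lr=1e-1, momentum=0.9))
    return melspec, stft_mag                              # stft_mag: (1, n_fft//2+1, time_steps)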
class MFCC(torch.nn.Module):
"""This function is to calculate the Mel-frequency cepstral coefficients (MFCCs) of the input signal.
This algorithm first extracts Mel spectrograms from the audio clips,
then the discrete cosine transform is calcuated to obtain the final MFCCs.
Therefore, the Mel spectrogram part can be made trainable using
``trainable_mel`` and ``trainable_STFT``.
    It only supports type-II DCT at the moment. Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
    The correct shape will be inferred automatically if the input follows these 3 shapes.
    Most of the arguments follow the convention from librosa.
    This class inherits from ``torch.nn.Module``, therefore, the usage is the same as ``torch.nn.Module``.
Parameters
----------
sr : int
The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
n_mfcc : int
The number of Mel-frequency cepstral coefficients
norm : string
The default value is 'ortho'. Normalization for DCT basis
**kwargs
Other arguments for Melspectrogram such as n_fft, n_mels, hop_length, and window
Returns
-------
MFCCs : torch.tensor
It returns a tensor of MFCCs. shape = ``(num_samples, n_mfcc, time_steps)``.
Examples
--------
>>> spec_layer = Spectrogram.MFCC()
>>> mfcc = spec_layer(x)
"""
def __init__(self, sr=22050, n_mfcc=20, norm='ortho', verbose=True, ref=1.0, amin=1e-10, top_db=80.0, **kwargs):
super().__init__()
self.melspec_layer = MelSpectrogram(sr=sr, verbose=verbose, **kwargs)
self.m_mfcc = n_mfcc
# attributes that will be used for _power_to_db
if amin <= 0:
raise ParameterError('amin must be strictly positive')
amin = torch.tensor([amin])
ref = torch.abs(torch.tensor([ref]))
self.register_buffer('amin', amin)
self.register_buffer('ref', ref)
self.top_db = top_db
self.n_mfcc = n_mfcc
def _power_to_db(self, S):
'''
Refer to https://librosa.github.io/librosa/_modules/librosa/core/spectrum.html#power_to_db
        for the original implementation.
'''
log_spec = 10.0 * torch.log10(torch.max(S, self.amin))
log_spec -= 10.0 * torch.log10(torch.max(self.amin, self.ref))
if self.top_db is not None:
if self.top_db < 0:
raise ParameterError('top_db must be non-negative')
# make the dim same as log_spec so that it can be broadcasted
batch_wise_max = log_spec.flatten(1).max(1)[0].unsqueeze(1).unsqueeze(1)
log_spec = torch.max(log_spec, batch_wise_max - self.top_db)
return log_spec
def _dct(self, x, norm=None):
'''
        Refer to https://github.com/zh217/torch-dct for the original implementation.
'''
x = x.permute(0,2,1) # make freq the last axis, since dct applies to the frequency axis
x_shape = x.shape
N = x_shape[-1]
v = torch.cat([x[:, :, ::2], x[:, :, 1::2].flip([2])], dim=2)
Vc = torch.rfft(v, 1, onesided=False)
# TODO: Can make the W_r and W_i trainable here
k = - torch.arange(N, dtype=x.dtype, device=x.device)[None, :] * np.pi / (2 * N)
W_r = torch.cos(k)
W_i = torch.sin(k)
V = Vc[:, :, :, 0] * W_r - Vc[:, :, :, 1] * W_i
if norm == 'ortho':
V[:, :, 0] /= np.sqrt(N) * 2
V[:, :, 1:] /= np.sqrt(N / 2) * 2
V = 2 * V
return V.permute(0,2,1) # swapping back the time axis and freq axis
def forward(self, x):
"""
Convert a batch of waveforms to MFCC.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
x = self.melspec_layer(x)
x = self._power_to_db(x)
x = self._dct(x, norm='ortho')[:,:self.m_mfcc,:]
return x
def extra_repr(self) -> str:
return 'n_mfcc = {}'.format(
(self.n_mfcc)
)
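# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original library code): extracting 13
# MFCCs from a synthetic 440 Hz sine; the Mel-spectrogram settings are passed
# through **kwargs to the underlying MelSpectrogram layer. Note that
# MFCC._dct relies on ``torch.rfft``, so this assumes a torch version that
# still provides it (as the module above already does).
def _example_mfcc(sr=22050):
    t = torch.arange(sr, dtype=torch.float32) / sr
    x = torch.sin(2 * np.pi * 440 * t)
    mfcc_layer = MFCC(sr=sr, n_mfcc=13, n_fft=2048, n_mels=128,
                      hop_length=512, verbose=False)
    return mfcc_layer(x)   # (1, n_mfcc, time_steps)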
class CQT1992(torch.nn.Module):
"""
    This algorithm uses the method proposed in [1]. Please refer to :func:`~nnAudio.Spectrogram.CQT1992v2` for a more
    computationally and memory-efficient version.
[1] Brown, <NAME>. and <NAME>. “An efficient algorithm for the calculation of a
constant Q transform.” (1992).
This function is to calculate the CQT of the input signal.
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
    The correct shape will be inferred automatically if the input follows these 3 shapes.
    Most of the arguments follow the convention from librosa.
    This class inherits from ``torch.nn.Module``, therefore, the usage is the same as ``torch.nn.Module``.
Parameters
----------
sr : int
        The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
hop_length : int
The hop (or stride) size. Default value is 512.
fmin : float
        The frequency for the lowest CQT bin. Default is 32.70Hz, which corresponds to the note C0.
fmax : float
        The frequency for the highest CQT bin. Default is ``None``, therefore the highest CQT bin is
inferred from the ``n_bins`` and ``bins_per_octave``.
If ``fmax`` is not ``None``, then the argument ``n_bins`` will be ignored and ``n_bins``
will be calculated automatically. Default is ``None``
n_bins : int
The total numbers of CQT bins. Default is 84. Will be ignored if ``fmax`` is not ``None``.
bins_per_octave : int
Number of bins per octave. Default is 12.
trainable_STFT : bool
Determine if the time to frequency domain transformation kernel for the input audio is trainable or not.
Default is ``False``
trainable_CQT : bool
Determine if the frequency domain CQT kernel is trainable or not.
Default is ``False``
norm : int
Normalization for the CQT kernels. ``1`` means L1 normalization, and ``2`` means L2 normalization.
Default is ``1``, which is same as the normalization used in librosa.
window : str
The windowing function for CQT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'.
center : bool
        Putting the CQT kernel at the center of the time-step or not. If ``False``, the time index is
        the beginning of the CQT kernel; if ``True``, the time index is the center of the CQT kernel.
        Default value is ``True``.
pad_mode : str
The padding method. Default value is 'reflect'.
trainable : bool
Determine if the CQT kernels are trainable or not. If ``True``, the gradients for CQT kernels
        will also be calculated and the CQT kernels will be updated during model training.
Default value is ``False``.
output_format : str
Determine the return type.
``Magnitude`` will return the magnitude of the STFT result, shape = ``(num_samples, freq_bins,time_steps)``;
``Complex`` will return the STFT result in complex number, shape = ``(num_samples, freq_bins,time_steps, 2)``;
        ``Phase`` will return the phase of the STFT result, shape = ``(num_samples, freq_bins,time_steps, 2)``.
The complex number is stored as ``(real, imag)`` in the last axis. Default value is 'Magnitude'.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints
Returns
-------
spectrogram : torch.tensor
It returns a tensor of spectrograms.
shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;
shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;
Examples
--------
    >>> spec_layer = Spectrogram.CQT1992()
>>> specs = spec_layer(x)
"""
def __init__(self, sr=22050, hop_length=512, fmin=220, fmax=None, n_bins=84,
trainable_STFT=False, trainable_CQT=False, bins_per_octave=12,
output_format='Complex', norm=1, window='hann', center=True, pad_mode='reflect'):
super().__init__()
# norm arg is not functioning
self.hop_length = hop_length
self.center = center
self.pad_mode = pad_mode
self.norm = norm
self.output_format = output_format
# creating kernels for CQT
Q = 1/(2**(1/bins_per_octave)-1)
print("Creating CQT kernels ...", end='\r')
start = time()
cqt_kernels, self.kernel_width, lenghts = create_cqt_kernels(Q,
sr,
fmin,
n_bins,
bins_per_octave,
norm,
window,
fmax)
self.register_buffer('lenghts', lenghts)
cqt_kernels = fft(cqt_kernels)[:,:self.kernel_width//2+1]
print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))
# creating kernels for stft
# self.cqt_kernels_real*=lenghts.unsqueeze(1)/self.kernel_width # Trying to normalize as librosa
# self.cqt_kernels_imag*=lenghts.unsqueeze(1)/self.kernel_width
print("Creating STFT kernels ...", end='\r')
start = time()
kernel_sin, kernel_cos, self.bins2freq, _, window = create_fourier_kernels(self.kernel_width,
window='ones',
freq_scale='no')
# Converting kernels from numpy arrays to torch tensors
wsin = torch.tensor(kernel_sin * window)
wcos = torch.tensor(kernel_cos * window)
cqt_kernels_real = torch.tensor(cqt_kernels.real.astype(np.float32))
cqt_kernels_imag = torch.tensor(cqt_kernels.imag.astype(np.float32))
if trainable_STFT:
            wsin = torch.nn.Parameter(wsin, requires_grad=trainable_STFT)
            wcos = torch.nn.Parameter(wcos, requires_grad=trainable_STFT)
self.register_parameter('wsin', wsin)
self.register_parameter('wcos', wcos)
else:
self.register_buffer('wsin', wsin)
self.register_buffer('wcos', wcos)
if trainable_CQT:
            cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_CQT)
            cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_CQT)
self.register_parameter('cqt_kernels_real', cqt_kernels_real)
self.register_parameter('cqt_kernels_imag', cqt_kernels_imag)
else:
self.register_buffer('cqt_kernels_real', cqt_kernels_real)
self.register_buffer('cqt_kernels_imag', cqt_kernels_imag)
print("STFT kernels created, time used = {:.4f} seconds".format(time()-start))
def forward(self, x, output_format=None):
"""
Convert a batch of waveforms to CQT spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
output_format = output_format or self.output_format
x = broadcast_dim(x)
if self.center:
if self.pad_mode == 'constant':
padding = nn.ConstantPad1d(self.kernel_width//2, 0)
elif self.pad_mode == 'reflect':
padding = nn.ReflectionPad1d(self.kernel_width//2)
x = padding(x)
# STFT
fourier_real = conv1d(x, self.wcos, stride=self.hop_length)
fourier_imag = conv1d(x, self.wsin, stride=self.hop_length)
# CQT
CQT_real, CQT_imag = complex_mul((self.cqt_kernels_real, self.cqt_kernels_imag),
(fourier_real, fourier_imag))
CQT = torch.stack((CQT_real,-CQT_imag),-1)
if self.norm:
CQT = CQT/self.kernel_width*torch.sqrt(self.lenghts.view(-1,1,1))
else:
CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1))
if output_format=='Magnitude':
# Getting CQT Amplitude
return torch.sqrt(CQT.pow(2).sum(-1))
elif output_format=='Complex':
return CQT
elif output_format=='Phase':
phase_real = torch.cos(torch.atan2(CQT_imag,CQT_real))
phase_imag = torch.sin(torch.atan2(CQT_imag,CQT_real))
return torch.stack((phase_real,phase_imag), -1)
def extra_repr(self) -> str:
return 'STFT kernel size = {}, CQT kernel size = {}'.format(
(*self.wcos.shape,), (*self.cqt_kernels_real.shape,)
)
class CQT2010(torch.nn.Module):
"""
This algorithm is using the resampling method proposed in [1].
    Instead of convolving the STFT results with a gigantic CQT kernel covering the full frequency
    spectrum, we make a small CQT kernel covering only the top octave.
    Then we keep downsampling the input audio by a factor of 2 and convolving it with the
    small CQT kernel. Every time the input audio is downsampled, the CQT relative to the downsampled
    input is equivalent to the next lower octave.
    The kernel creation process is still the same as in the 1992 algorithm. Therefore, we can reuse the code
    from the 1992 algorithm [2].
[1] <NAME>. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC PROCESSING.” (2010).
[2] Brown, <NAME>. and <NAME>. “An efficient algorithm for the calculation of a
constant Q transform.” (1992).
    The early downsampling factor is used to downsample the input audio so as to reduce the CQT kernel size.
    The results with and without early downsampling are more or less the same except in the very low
    frequency region where freq < 40Hz.
"""
def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12,
norm=True, basis_norm=1, window='hann', pad_mode='reflect', trainable_STFT=False,
trainable_CQT=False, output_format='Complex', earlydownsample=True, verbose=True):
super().__init__()
self.norm = norm # Now norm is used to normalize the final CQT result by dividing n_fft
# basis_norm is for normalizing basis
self.hop_length = hop_length
self.pad_mode = pad_mode
self.n_bins = n_bins
self.output_format = output_format
self.earlydownsample = earlydownsample # TODO: activate early downsampling later if possible
# This will be used to calculate filter_cutoff and creating CQT kernels
Q = 1/(2**(1/bins_per_octave)-1)
# Creating lowpass filter and make it a torch tensor
if verbose==True:
print("Creating low pass filter ...", end='\r')
start = time()
lowpass_filter = torch.tensor(create_lowpass_filter(
band_center = 0.5,
kernelLength=256,
transitionBandwidth=0.001
)
)
# Broadcast the tensor to the shape that fits conv1d
self.register_buffer('lowpass_filter', lowpass_filter[None,None,:])
if verbose==True:
print("Low pass filter created, time used = {:.4f} seconds".format(time()-start))
# Calculate num of filter requires for the kernel
# n_octaves determines how many resampling requires for the CQT
n_filters = min(bins_per_octave, n_bins)
self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))
# print("n_octaves = ", self.n_octaves)
# Calculate the lowest frequency bin for the top octave kernel
self.fmin_t = fmin*2**(self.n_octaves-1)
remainder = n_bins % bins_per_octave
# print("remainder = ", remainder)
if remainder==0:
# Calculate the top bin frequency
fmax_t = self.fmin_t*2**((bins_per_octave-1)/bins_per_octave)
else:
# Calculate the top bin frequency
fmax_t = self.fmin_t*2**((remainder-1)/bins_per_octave)
            self.fmin_t = fmax_t/2**(1-1/bins_per_octave) # Adjusting the top minimum bins
if fmax_t > sr/2:
raise ValueError('The top bin {}Hz has exceeded the Nyquist frequency, \
please reduce the n_bins'.format(fmax_t))
if self.earlydownsample == True: # Do early downsampling if this argument is True
if verbose==True:
print("Creating early downsampling filter ...", end='\r')
start = time()
sr, self.hop_length, self.downsample_factor, early_downsample_filter, \
self.earlydownsample = get_early_downsample_params(sr,
hop_length,
fmax_t,
Q,
self.n_octaves,
verbose)
self.register_buffer('early_downsample_filter', early_downsample_filter)
if verbose==True:
print("Early downsampling filter created, \
time used = {:.4f} seconds".format(time()-start))
else:
self.downsample_factor=1.
# Preparing CQT kernels
if verbose==True:
print("Creating CQT kernels ...", end='\r')
start = time()
# print("Q = {}, fmin_t = {}, n_filters = {}".format(Q, self.fmin_t, n_filters))
basis, self.n_fft, _ = create_cqt_kernels(Q,
sr,
self.fmin_t,
n_filters,
bins_per_octave,
norm=basis_norm,
topbin_check=False)
# This is for the normalization in the end
freqs = fmin * 2.0 ** (np.r_[0:n_bins] / np.float(bins_per_octave))
lenghts = np.ceil(Q * sr / freqs)
lenghts = torch.tensor(lenghts).float()
self.register_buffer('lenghts', lenghts)
self.basis=basis
        fft_basis = fft(basis)[:,:self.n_fft//2+1] # Convert CQT kernel from time domain to freq domain
# These cqt_kernel is already in the frequency domain
cqt_kernels_real = torch.tensor(fft_basis.real.astype(np.float32))
cqt_kernels_imag = torch.tensor(fft_basis.imag.astype(np.float32))
if verbose==True:
print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))
# print("Getting cqt kernel done, n_fft = ",self.n_fft)
# Preparing kernels for Short-Time Fourier Transform (STFT)
# We set the frequency range in the CQT filter instead of here.
if verbose==True:
print("Creating STFT kernels ...", end='\r')
start = time()
kernel_sin, kernel_cos, self.bins2freq, _, window = create_fourier_kernels(self.n_fft, window='ones', freq_scale='no')
wsin = kernel_sin * window
wcos = kernel_cos * window
wsin = torch.tensor(wsin)
wcos = torch.tensor(wcos)
if verbose==True:
print("STFT kernels created, time used = {:.4f} seconds".format(time()-start))
if trainable_STFT:
            wsin = torch.nn.Parameter(wsin, requires_grad=trainable_STFT)
            wcos = torch.nn.Parameter(wcos, requires_grad=trainable_STFT)
self.register_parameter('wsin', wsin)
self.register_parameter('wcos', wcos)
else:
self.register_buffer('wsin', wsin)
self.register_buffer('wcos', wcos)
if trainable_CQT:
            cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_CQT)
            cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_CQT)
self.register_parameter('cqt_kernels_real', cqt_kernels_real)
self.register_parameter('cqt_kernels_imag', cqt_kernels_imag)
else:
self.register_buffer('cqt_kernels_real', cqt_kernels_real)
self.register_buffer('cqt_kernels_imag', cqt_kernels_imag)
# If center==True, the STFT window will be put in the middle, and paddings at the beginning
# and ending are required.
if self.pad_mode == 'constant':
self.padding = nn.ConstantPad1d(self.n_fft//2, 0)
elif self.pad_mode == 'reflect':
self.padding = nn.ReflectionPad1d(self.n_fft//2)
def forward(self,x, output_format=None):
"""
Convert a batch of waveforms to CQT spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
output_format = output_format or self.output_format
x = broadcast_dim(x)
if self.earlydownsample==True:
x = downsampling_by_n(x, self.early_downsample_filter, self.downsample_factor)
hop = self.hop_length
CQT = get_cqt_complex(x, self.wcos, self.wsin, hop, self.padding) # Getting the top octave CQT
x_down = x # Preparing a new variable for downsampling
for i in range(self.n_octaves-1):
hop = hop//2
x_down = downsampling_by_2(x_down, self.lowpass_filter)
CQT1 = get_cqt_complex(x_down, self.wcos, self.wsin, hop, self.padding)
CQT = torch.cat((CQT1, CQT),1)
CQT = CQT[:,-self.n_bins:,:] # Removing unwanted top bins
if self.norm:
CQT = CQT/self.n_fft*torch.sqrt(self.lenghts.view(-1,1,1))
else:
CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1))
# Normalizing the output with the downsampling factor, 2**(self.n_octaves-1)
# is make it same mag as 1992
CQT = CQT*self.downsample_factor
if output_format=='Magnitude':
# Getting CQT Amplitude
return torch.sqrt(CQT.pow(2).sum(-1))
elif output_format=='Complex':
return CQT
elif output_format=='Phase':
phase_real = torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0]))
phase_imag = torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0]))
return torch.stack((phase_real,phase_imag), -1)
def extra_repr(self) -> str:
return 'STFT kernel size = {}, CQT kernel size = {}'.format(
(*self.wcos.shape,), (*self.cqt_kernels_real.shape,)
)
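# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original nnAudio API): the 2010-style
# CQT classes in this file repeatedly halve the hop size while downsampling the
# audio by a factor of 2, one octave at a time. The helper below only reproduces
# that bookkeeping so the loop in ``forward`` is easier to follow; the default
# numbers are arbitrary assumptions for demonstration.
def _sketch_octave_schedule(n_bins=84, bins_per_octave=12, hop_length=512, fmin=32.70):
    """Print the hop size and kernel fmin used for each octave of the 2010 algorithm."""
    n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))
    fmin_t = fmin * 2 ** (n_octaves - 1)   # lowest bin of the top-octave kernel
    hop = hop_length
    for octave in range(n_octaves):
        print("octave {}: hop = {}, kernel fmin = {:.2f} Hz".format(octave, hop, fmin_t / 2 ** octave))
        hop = hop // 2                     # matches ``hop = hop//2`` inside forward()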
class CQT1992v2(torch.nn.Module):
"""This function is to calculate the CQT of the input signal.
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
    The correct shape will be inferred automatically if the input follows these 3 shapes.
Most of the arguments follow the convention from librosa.
This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.
    This algorithm uses the method proposed in [1]. I slightly modified it so that it runs faster
than the original 1992 algorithm, that is why I call it version 2.
[1] Brown, <NAME>. and <NAME>. “An efficient algorithm for the calculation of a
constant Q transform.” (1992).
Parameters
----------
sr : int
        The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
hop_length : int
The hop (or stride) size. Default value is 512.
fmin : float
        The frequency for the lowest CQT bin. Default is 32.70Hz, which corresponds to the note C0.
fmax : float
        The frequency for the highest CQT bin. Default is ``None``, therefore the highest CQT bin is
inferred from the ``n_bins`` and ``bins_per_octave``.
If ``fmax`` is not ``None``, then the argument ``n_bins`` will be ignored and ``n_bins``
will be calculated automatically. Default is ``None``
n_bins : int
The total numbers of CQT bins. Default is 84. Will be ignored if ``fmax`` is not ``None``.
bins_per_octave : int
Number of bins per octave. Default is 12.
norm : int
Normalization for the CQT kernels. ``1`` means L1 normalization, and ``2`` means L2 normalization.
Default is ``1``, which is same as the normalization used in librosa.
window : str
The windowing function for CQT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'.
center : bool
        Putting the CQT kernel at the center of the time-step or not. If ``False``, the time index is
        the beginning of the CQT kernel, if ``True``, the time index is the center of the CQT kernel.
        Default value is ``True``.
pad_mode : str
The padding method. Default value is 'reflect'.
trainable : bool
Determine if the CQT kernels are trainable or not. If ``True``, the gradients for CQT kernels
        will also be calculated and the CQT kernels will be updated during model training.
Default value is ``False``.
output_format : str
Determine the return type.
        ``Magnitude`` will return the magnitude of the CQT result, shape = ``(num_samples, freq_bins, time_steps)``;
        ``Complex`` will return the CQT result as complex numbers, shape = ``(num_samples, freq_bins, time_steps, 2)``;
        ``Phase`` will return the phase of the CQT result, shape = ``(num_samples, freq_bins, time_steps, 2)``.
The complex number is stored as ``(real, imag)`` in the last axis. Default value is 'Magnitude'.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints
Returns
-------
spectrogram : torch.tensor
It returns a tensor of spectrograms.
shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;
shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;
Examples
--------
>>> spec_layer = Spectrogram.CQT1992v2()
>>> specs = spec_layer(x)
"""
def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84,
bins_per_octave=12, norm=1, window='hann', center=True, pad_mode='reflect',
trainable=False, output_format='Magnitude', verbose=True):
super().__init__()
# norm arg is not functioning
self.trainable = trainable
self.hop_length = hop_length
self.center = center
self.pad_mode = pad_mode
self.output_format = output_format
# creating kernels for CQT
Q = 1/(2**(1/bins_per_octave)-1)
if verbose==True:
print("Creating CQT kernels ...", end='\r')
start = time()
cqt_kernels, self.kernel_width, lenghts = create_cqt_kernels(Q,
sr,
fmin,
n_bins,
bins_per_octave,
norm,
window,
fmax)
self.register_buffer('lenghts', lenghts)
cqt_kernels_real = torch.tensor(cqt_kernels.real).unsqueeze(1)
cqt_kernels_imag = torch.tensor(cqt_kernels.imag).unsqueeze(1)
if trainable:
            # ``trainable`` is the flag accepted by __init__ and controls gradient flow
            cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable)
            cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable)
self.register_parameter('cqt_kernels_real', cqt_kernels_real)
self.register_parameter('cqt_kernels_imag', cqt_kernels_imag)
else:
self.register_buffer('cqt_kernels_real', cqt_kernels_real)
self.register_buffer('cqt_kernels_imag', cqt_kernels_imag)
if verbose==True:
print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))
def forward(self,x, output_format=None):
"""
Convert a batch of waveforms to CQT spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
output_format = output_format or self.output_format
x = broadcast_dim(x)
if self.center:
if self.pad_mode == 'constant':
padding = nn.ConstantPad1d(self.kernel_width//2, 0)
elif self.pad_mode == 'reflect':
padding = nn.ReflectionPad1d(self.kernel_width//2)
x = padding(x)
# CQT
CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length) * \
torch.sqrt(self.lenghts.view(-1,1))
CQT_imag = -conv1d(x, self.cqt_kernels_imag, stride=self.hop_length) * \
torch.sqrt(self.lenghts.view(-1,1))
if output_format=='Magnitude':
if self.trainable==False:
# Getting CQT Amplitude
CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2))
else:
CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)+1e-8)
return CQT
elif output_format=='Complex':
return torch.stack((CQT_real,CQT_imag),-1)
elif output_format=='Phase':
phase_real = torch.cos(torch.atan2(CQT_imag,CQT_real))
phase_imag = torch.sin(torch.atan2(CQT_imag,CQT_real))
return torch.stack((phase_real,phase_imag), -1)
def forward_manual(self,x):
"""
Method for debugging
"""
x = broadcast_dim(x)
if self.center:
if self.pad_mode == 'constant':
padding = nn.ConstantPad1d(self.kernel_width//2, 0)
elif self.pad_mode == 'reflect':
padding = nn.ReflectionPad1d(self.kernel_width//2)
x = padding(x)
# CQT
CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length)
CQT_imag = conv1d(x, self.cqt_kernels_imag, stride=self.hop_length)
# Getting CQT Amplitude
CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2))
return CQT*torch.sqrt(self.lenghts.view(-1,1))
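# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only, not part of the original source):
# run a synthetic 440 Hz tone through CQT1992v2 and inspect the output shapes.
# The sampling rate, tone frequency and hop size are arbitrary assumptions.
def _sketch_cqt1992v2_usage(sr=22050):
    t = torch.arange(sr, dtype=torch.float32) / sr      # one second of time stamps
    x = torch.sin(2 * np.pi * 440 * t)                   # (len_audio,)
    layer = CQT1992v2(sr=sr, hop_length=512, n_bins=84, verbose=False)
    mag = layer(x, output_format='Magnitude')            # (1, 84, time_steps)
    cplx = layer(x, output_format='Complex')             # (1, 84, time_steps, 2)
    return mag.shape, cplx.shape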
class CQT2010v2(torch.nn.Module):
"""This function is to calculate the CQT of the input signal.
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
    The correct shape will be inferred automatically if the input follows these 3 shapes.
Most of the arguments follow the convention from librosa.
This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.
    This algorithm uses the resampling method proposed in [1].
    Instead of convolving the STFT results with a gigantic CQT kernel covering the full frequency
    spectrum, we make a small CQT kernel covering only the top octave. Then we keep downsampling the
    input audio by a factor of 2 and convolving it with the small CQT kernel.
    Every time the input audio is downsampled, the CQT relative to the downsampled input is equivalent
    to the next lower octave.
    The kernel creation process is still the same as in the 1992 algorithm. Therefore, we can reuse the
    code from the 1992 algorithm [2].
[1] Schörkhuber, Christian. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC PROCESSING.” (2010).
[2] Brown, <NAME>. and <NAME>. “An efficient algorithm for the calculation of a
constant Q transform.” (1992).
    The early downsampling factor downsamples the input audio in order to reduce the CQT kernel size.
    The results with and without early downsampling are more or less the same, except in the very low
    frequency region where freq < 40Hz.
Parameters
----------
sr : int
        The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
hop_length : int
The hop (or stride) size. Default value is 512.
fmin : float
        The frequency for the lowest CQT bin. Default is 32.70Hz, which corresponds to the note C0.
fmax : float
        The frequency for the highest CQT bin. Default is ``None``, therefore the highest CQT bin is
inferred from the ``n_bins`` and ``bins_per_octave``. If ``fmax`` is not ``None``, then the
argument ``n_bins`` will be ignored and ``n_bins`` will be calculated automatically.
Default is ``None``
n_bins : int
The total numbers of CQT bins. Default is 84. Will be ignored if ``fmax`` is not ``None``.
bins_per_octave : int
Number of bins per octave. Default is 12.
norm : bool
Normalization for the CQT result.
basis_norm : int
Normalization for the CQT kernels. ``1`` means L1 normalization, and ``2`` means L2 normalization.
Default is ``1``, which is same as the normalization used in librosa.
window : str
The windowing function for CQT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'
pad_mode : str
The padding method. Default value is 'reflect'.
trainable : bool
Determine if the CQT kernels are trainable or not. If ``True``, the gradients for CQT kernels
        will also be calculated and the CQT kernels will be updated during model training.
Default value is ``False``
output_format : str
Determine the return type.
        'Magnitude' will return the magnitude of the CQT result, shape = ``(num_samples, freq_bins, time_steps)``;
        'Complex' will return the CQT result as complex numbers, shape = ``(num_samples, freq_bins, time_steps, 2)``;
        'Phase' will return the phase of the CQT result, shape = ``(num_samples, freq_bins, time_steps, 2)``.
The complex number is stored as ``(real, imag)`` in the last axis. Default value is 'Magnitude'.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints.
device : str
Choose which device to initialize this layer. Default value is 'cpu'.
Returns
-------
spectrogram : torch.tensor
It returns a tensor of spectrograms.
shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;
shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;
Examples
--------
>>> spec_layer = Spectrogram.CQT2010v2()
>>> specs = spec_layer(x)
"""
    # TODO: need to deal with the filter and other tensors
def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84,
bins_per_octave=12, norm=True, basis_norm=1, window='hann', pad_mode='reflect',
earlydownsample=True, trainable=False, output_format='Magnitude', verbose=True):
super().__init__()
self.norm = norm # Now norm is used to normalize the final CQT result by dividing n_fft
# basis_norm is for normalizing basis
self.hop_length = hop_length
self.pad_mode = pad_mode
self.n_bins = n_bins
self.earlydownsample = earlydownsample # We will activate early downsampling later if possible
self.trainable = trainable
self.output_format = output_format
# It will be used to calculate filter_cutoff and creating CQT kernels
Q = 1/(2**(1/bins_per_octave)-1)
# Creating lowpass filter and make it a torch tensor
if verbose==True:
print("Creating low pass filter ...", end='\r')
start = time()
# self.lowpass_filter = torch.tensor(
# create_lowpass_filter(
# band_center = 0.50,
# kernelLength=256,
# transitionBandwidth=0.001))
lowpass_filter = torch.tensor(create_lowpass_filter(
band_center = 0.50,
kernelLength=256,
transitionBandwidth=0.001)
)
# Broadcast the tensor to the shape that fits conv1d
self.register_buffer('lowpass_filter', lowpass_filter[None,None,:])
if verbose==True:
print("Low pass filter created, time used = {:.4f} seconds".format(time()-start))
        # Calculate the number of filters required for the kernel
        # n_octaves determines how many downsampling steps are required for the CQT
n_filters = min(bins_per_octave, n_bins)
self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))
if verbose==True:
print("num_octave = ", self.n_octaves)
# Calculate the lowest frequency bin for the top octave kernel
self.fmin_t = fmin*2**(self.n_octaves-1)
remainder = n_bins % bins_per_octave
# print("remainder = ", remainder)
if remainder==0:
# Calculate the top bin frequency
fmax_t = self.fmin_t*2**((bins_per_octave-1)/bins_per_octave)
else:
# Calculate the top bin frequency
fmax_t = self.fmin_t*2**((remainder-1)/bins_per_octave)
        self.fmin_t = fmax_t/2**(1-1/bins_per_octave) # Adjusting the minimum frequency of the top octave
if fmax_t > sr/2:
raise ValueError('The top bin {}Hz has exceeded the Nyquist frequency, \
please reduce the n_bins'.format(fmax_t))
if self.earlydownsample == True: # Do early downsampling if this argument is True
if verbose==True:
print("Creating early downsampling filter ...", end='\r')
start = time()
sr, self.hop_length, self.downsample_factor, early_downsample_filter, \
self.earlydownsample = get_early_downsample_params(sr,
hop_length,
fmax_t,
Q,
self.n_octaves,
verbose)
self.register_buffer('early_downsample_filter', early_downsample_filter)
if verbose==True:
print("Early downsampling filter created, \
time used = {:.4f} seconds".format(time()-start))
else:
self.downsample_factor=1.
# Preparing CQT kernels
if verbose==True:
print("Creating CQT kernels ...", end='\r')
start = time()
basis, self.n_fft, lenghts = create_cqt_kernels(Q,
sr,
self.fmin_t,
n_filters,
bins_per_octave,
norm=basis_norm,
topbin_check=False)
# For normalization in the end
        freqs = fmin * 2.0 ** (np.r_[0:n_bins] / float(bins_per_octave))
lenghts = np.ceil(Q * sr / freqs)
lenghts = torch.tensor(lenghts).float()
self.register_buffer('lenghts', lenghts)
self.basis = basis
# These cqt_kernel is already in the frequency domain
cqt_kernels_real = torch.tensor(basis.real.astype(np.float32)).unsqueeze(1)
cqt_kernels_imag = torch.tensor(basis.imag.astype(np.float32)).unsqueeze(1)
if trainable:
            # ``trainable`` is the flag accepted by __init__ and controls gradient flow
            cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable)
            cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable)
self.register_parameter('cqt_kernels_real', cqt_kernels_real)
self.register_parameter('cqt_kernels_imag', cqt_kernels_imag)
else:
self.register_buffer('cqt_kernels_real', cqt_kernels_real)
self.register_buffer('cqt_kernels_imag', cqt_kernels_imag)
if verbose==True:
print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))
# print("Getting cqt kernel done, n_fft = ",self.n_fft)
# If center==True, the STFT window will be put in the middle, and paddings at the beginning
# and ending are required.
if self.pad_mode == 'constant':
self.padding = nn.ConstantPad1d(self.n_fft//2, 0)
elif self.pad_mode == 'reflect':
self.padding = nn.ReflectionPad1d(self.n_fft//2)
def forward(self,x,output_format=None):
"""
Convert a batch of waveforms to CQT spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
output_format = output_format or self.output_format
x = broadcast_dim(x)
if self.earlydownsample==True:
x = downsampling_by_n(x, self.early_downsample_filter, self.downsample_factor)
hop = self.hop_length
CQT = get_cqt_complex(x, self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding) # Getting the top octave CQT
x_down = x # Preparing a new variable for downsampling
for i in range(self.n_octaves-1):
hop = hop//2
x_down = downsampling_by_2(x_down, self.lowpass_filter)
CQT1 = get_cqt_complex(x_down, self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding)
CQT = torch.cat((CQT1, CQT),1)
CQT = CQT[:,-self.n_bins:,:] # Removing unwanted bottom bins
# print("downsample_factor = ",self.downsample_factor)
# print(CQT.shape)
# print(self.lenghts.view(-1,1).shape)
        # Normalizing the output with the downsampling factor, 2**(self.n_octaves-1),
        # to make it the same magnitude as the 1992 algorithm
CQT = CQT*self.downsample_factor
# Normalize again to get same result as librosa
CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1))
if output_format=='Magnitude':
if self.trainable==False:
# Getting CQT Amplitude
return torch.sqrt(CQT.pow(2).sum(-1))
else:
return torch.sqrt(CQT.pow(2).sum(-1)+1e-8)
elif output_format=='Complex':
return CQT
elif output_format=='Phase':
phase_real = torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0]))
phase_imag = torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0]))
return torch.stack((phase_real,phase_imag), -1)
class CQT(CQT1992v2):
"""An abbreviation for :func:`~nnAudio.Spectrogram.CQT1992v2`. Please refer to the :func:`~nnAudio.Spectrogram.CQT1992v2` documentation"""
pass
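# ---------------------------------------------------------------------------
# Sketch (illustration only): the 'Complex' output of the CQT classes above
# stores (real, imag) in the last axis, so magnitude and phase can also be
# recovered manually as shown below. ``layer`` is assumed to be any of the CQT
# layers defined in this file and ``x`` a waveform tensor.
def _sketch_complex_to_mag_phase(layer, x):
    cqt = layer(x, output_format='Complex')           # (batch, n_bins, time_steps, 2)
    mag = torch.sqrt(cqt.pow(2).sum(-1))               # same as output_format='Magnitude' (non-trainable case)
    phase = torch.atan2(cqt[..., 1], cqt[..., 0])      # angle in radians, cf. output_format='Phase'
    return mag, phase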
# The section below is for developing purpose
# Please don't use the following classes
#
class DFT(torch.nn.Module):
"""
    Experimental feature before `torch.fft` was made available.
    The inverse function only works for a single frame, i.e. input shape = (batch, n_fft, 1).
"""
def __init__(self, n_fft=2048, freq_bins=None, hop_length=512,
window='hann', freq_scale='no', center=True, pad_mode='reflect',
fmin=50, fmax=6000, sr=22050):
super().__init__()
self.stride = hop_length
self.center = center
self.pad_mode = pad_mode
self.n_fft = n_fft
# Create filter windows for stft
        kernel_sin, kernel_cos, self.bins2freq, _, window_mask = create_fourier_kernels(n_fft=n_fft,
                                                                                         freq_bins=n_fft,
                                                                                         window=window,
                                                                                         freq_scale=freq_scale,
                                                                                         fmin=fmin,
                                                                                         fmax=fmax,
                                                                                         sr=sr)
        # Apply the window to the Fourier kernels, mirroring the STFT kernel preparation earlier in this file
        self.wsin = torch.tensor(kernel_sin * window_mask, dtype=torch.float)
        self.wcos = torch.tensor(kernel_cos * window_mask, dtype=torch.float)
def forward(self,x):
"""
Convert a batch of waveforms to spectrums.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
x = broadcast_dim(x)
if self.center:
if self.pad_mode == 'constant':
padding = nn.ConstantPad1d(self.n_fft//2, 0)
elif self.pad_mode == 'reflect':
padding = nn.ReflectionPad1d(self.n_fft//2)
x = padding(x)
imag = conv1d(x, self.wsin, stride=self.stride)
real = conv1d(x, self.wcos, stride=self.stride)
return (real, -imag)
def inverse(self,x_real,x_imag):
"""
Convert a batch of waveforms to CQT spectrograms.
Parameters
----------
x_real : torch tensor
Real part of the signal.
x_imag : torch tensor
Imaginary part of the signal.
"""
x_real = broadcast_dim(x_real)
x_imag = broadcast_dim(x_imag)
x_real.transpose_(1,2) # Prepare the right shape to do inverse
x_imag.transpose_(1,2) # Prepare the right shape to do inverse
# if self.center:
# if self.pad_mode == 'constant':
# padding = nn.ConstantPad1d(self.n_fft//2, 0)
# elif self.pad_mode == 'reflect':
# padding = nn.ReflectionPad1d(self.n_fft//2)
# x_real = padding(x_real)
# x_imag = padding(x_imag)
# Watch out for the positive and negative signs
# ifft = e^(+2\pi*j)*X
# ifft(X_real) = (a1, a2)
# ifft(X_imag)*1j = (b1, b2)*1j
# = (-b2, b1)
a1 = conv1d(x_real, self.wcos, stride=self.stride)
a2 = conv1d(x_real, self.wsin, stride=self.stride)
b1 = conv1d(x_imag, self.wcos, stride=self.stride)
b2 = conv1d(x_imag, self.wsin, stride=self.stride)
imag = a2+b1
real = a1-b2
return (real/self.n_fft, imag/self.n_fft)
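# ---------------------------------------------------------------------------
# Round-trip sketch (illustration only): DFT.inverse above is only valid for a
# single frame, i.e. spectra of shape (batch, n_fft, 1). Using hop_length=n_fft,
# center=False and a rectangular window makes the forward pass produce exactly
# one frame, so the frame can be recovered. All sizes are arbitrary assumptions.
def _sketch_dft_round_trip(n_fft=2048):
    x = torch.randn(1, n_fft)                                    # one frame of audio
    dft = DFT(n_fft=n_fft, hop_length=n_fft, window='ones', center=False)
    spec_real, spec_imag = dft(x)                                # each (1, n_fft, 1)
    x_rec, _ = dft.inverse(spec_real, spec_imag)                 # (1, n_fft, 1), matches x up to shape
    return x, x_rec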
class iSTFT(torch.nn.Module):
"""This class is to convert spectrograms back to waveforms. It only works for the complex value spectrograms.
If you have the magnitude spectrograms, please use :func:`~nnAudio.Spectrogram.Griffin_Lim`.
The parameters (e.g. n_fft, window) need to be the same as the STFT in order to obtain the correct inverse.
If trainability is not required, it is recommended to use the ``inverse`` method under the ``STFT`` class
to save GPU/RAM memory.
When ``trainable=True`` and ``freq_scale!='no'``, there is no guarantee that the inverse is perfect, please
use with extra care.
Parameters
----------
n_fft : int
The window size. Default value is 2048.
freq_bins : int
Number of frequency bins. Default is ``None``, which means ``n_fft//2+1`` bins
Please make sure the value is the same as the forward STFT.
hop_length : int
The hop (or stride) size. Default value is ``None`` which is equivalent to ``n_fft//4``.
Please make sure the value is the same as the forward STFT.
window : str
The windowing function for iSTFT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'.
Please make sure the value is the same as the forward STFT.
freq_scale : 'linear', 'log', or 'no'
Determine the spacing between each frequency bin. When `linear` or `log` is used,
the bin spacing can be controlled by ``fmin`` and ``fmax``. If 'no' is used, the bin will
start at 0Hz and end at Nyquist frequency with linear spacing.
Please make sure the value is the same as the forward STFT.
center : bool
        Putting the iSTFT kernel at the center of the time-step or not. If ``False``, the time
        index is the beginning of the iSTFT kernel, if ``True``, the time index is the center of
        the iSTFT kernel. Default value is ``True``.
Please make sure the value is the same as the forward STFT.
fmin : int
The starting frequency for the lowest frequency bin. If freq_scale is ``no``, this argument
does nothing. Please make sure the value is the same as the forward STFT.
fmax : int
The ending frequency for the highest frequency bin. If freq_scale is ``no``, this argument
does nothing. Please make sure the value is the same as the forward STFT.
sr : int
        The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
trainable_kernels : bool
        Determine if the STFT kernels are trainable or not. If ``True``, the gradients for STFT
        kernels will also be calculated and the STFT kernels will be updated during model training.
Default value is ``False``.
trainable_window : bool
Determine if the window function is trainable or not.
Default value is ``False``.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints.
device : str
Choose which device to initialize this layer. Default value is 'cpu'.
Returns
-------
spectrogram : torch.tensor
It returns a batch of waveforms.
Examples
--------
>>> spec_layer = Spectrogram.iSTFT()
>>> specs = spec_layer(x)
"""
def __init__(self, n_fft=2048, win_length=None, freq_bins=None, hop_length=None, window='hann',
freq_scale='no', center=True, fmin=50, fmax=6000, sr=22050, trainable_kernels=False,
trainable_window=False, verbose=True, refresh_win=True):
super().__init__()
# Trying to make the default setting same as librosa
if win_length==None: win_length = n_fft
if hop_length==None: hop_length = int(win_length // 4)
self.n_fft = n_fft
self.win_length = win_length
self.stride = hop_length
self.center = center
self.pad_amount = self.n_fft // 2
self.refresh_win = refresh_win
start = time()
# Create the window function and prepare the shape for batch-wise-time-wise multiplication
# Create filter windows for inverse
kernel_sin, kernel_cos, _, _, window_mask = create_fourier_kernels(n_fft,
win_length=win_length,
freq_bins=n_fft,
window=window,
freq_scale=freq_scale,
fmin=fmin,
fmax=fmax,
sr=sr,
verbose=False)
window_mask = get_window(window,int(win_length), fftbins=True)
# For inverse, the Fourier kernels do not need to be windowed
window_mask = torch.tensor(window_mask).unsqueeze(0).unsqueeze(-1)
# kernel_sin and kernel_cos have the shape (freq_bins, 1, n_fft, 1) to support 2D Conv
kernel_sin = torch.tensor(kernel_sin, dtype=torch.float).unsqueeze(-1)
kernel_cos = torch.tensor(kernel_cos, dtype=torch.float).unsqueeze(-1)
# Decide if the Fourier kernels are trainable
if trainable_kernels:
# Making all these variables trainable
kernel_sin = torch.nn.Parameter(kernel_sin, requires_grad=trainable_kernels)
kernel_cos = torch.nn.Parameter(kernel_cos, requires_grad=trainable_kernels)
self.register_parameter('kernel_sin', kernel_sin)
self.register_parameter('kernel_cos', kernel_cos)
else:
self.register_buffer('kernel_sin', kernel_sin)
self.register_buffer('kernel_cos', kernel_cos)
# Decide if the window function is trainable
if trainable_window:
window_mask = torch.nn.Parameter(window_mask, requires_grad=trainable_window)
self.register_parameter('window_mask', window_mask)
else:
self.register_buffer('window_mask', window_mask)
if verbose==True:
print("iSTFT kernels created, time used = {:.4f} seconds".format(time()-start))
else:
pass
def forward(self, X, onesided=False, length=None, refresh_win=None):
"""
If your spectrograms only have ``n_fft//2+1`` frequency bins, please use ``onesided=True``,
else use ``onesided=False``
To make sure the inverse STFT has the same output length of the original waveform, please
set `length` as your intended waveform length. By default, ``length=None``,
which will remove ``n_fft//2`` samples from the start and the end of the output.
If your input spectrograms X are of the same length, please use ``refresh_win=None`` to increase
computational speed.
"""
if refresh_win==None:
refresh_win=self.refresh_win
        assert X.dim()==4 , "iSTFT only works for complex-valued input, " \
            "make sure your tensor is in the shape of (batch, freq_bins, timesteps, 2)"
# If the input spectrogram contains only half of the n_fft
# Use extend_fbins function to get back another half
if onesided:
X = extend_fbins(X) # extend freq
X_real, X_imag = X[:, :, :, 0], X[:, :, :, 1]
# broadcast dimensions to support 2D convolution
X_real_bc = X_real.unsqueeze(1)
X_imag_bc = X_imag.unsqueeze(1)
a1 = conv2d(X_real_bc, self.kernel_cos, stride=(1,1))
b2 = conv2d(X_imag_bc, self.kernel_sin, stride=(1,1))
# compute real and imag part. signal lies in the real part
real = a1 - b2
real = real.squeeze(-2)*self.window_mask
# Normalize the amplitude with n_fft
real /= (self.n_fft)
# Overlap and Add algorithm to connect all the frames
real = overlap_add(real, self.stride)
        # Prepare the window sumsquare for division
# Only need to create this window once to save time
# Unless the input spectrograms have different time steps
if hasattr(self, 'w_sum')==False or refresh_win==True:
self.w_sum = torch_window_sumsquare(self.window_mask.flatten(), X.shape[2], self.stride, self.n_fft).flatten()
self.nonzero_indices = (self.w_sum>1e-10)
else:
pass
real[:, self.nonzero_indices] = real[:,self.nonzero_indices].div(self.w_sum[self.nonzero_indices])
# Remove padding
if length is None:
if self.center:
real = real[:, self.pad_amount:-self.pad_amount]
else:
if self.center:
real = real[:, self.pad_amount:self.pad_amount + length]
else:
real = real[:, :length]
return real
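# ---------------------------------------------------------------------------
# Round-trip sketch (illustration only): analyse a waveform with the STFT class
# assumed to be defined earlier in this module (same n_fft/hop/window arguments)
# and resynthesise it with iSTFT. Passing ``length`` trims the output to the
# original number of samples. All parameter values are arbitrary assumptions.
def _sketch_istft_round_trip(n_fft=2048, hop_length=512, num_samples=22050):
    x = torch.randn(1, num_samples)
    stft = STFT(n_fft=n_fft, hop_length=hop_length, window='hann',
                freq_scale='no', output_format='Complex', verbose=False)
    istft = iSTFT(n_fft=n_fft, hop_length=hop_length, window='hann',
                  freq_scale='no', verbose=False)
    spec = stft(x)                                      # (1, n_fft//2+1, time_steps, 2)
    x_rec = istft(spec, onesided=True, length=num_samples)
    return x, x_rec                                     # x_rec should closely match x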
class Griffin_Lim(torch.nn.Module):
"""
Converting Magnitude spectrograms back to waveforms based on the "fast Griffin-Lim"[1].
This Griffin Lim is a direct clone from librosa.griffinlim.
[1] <NAME>., <NAME>., & <NAME>. “A fast Griffin-Lim algorithm,”
IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (pp. 1-4), Oct. 2013.
Parameters
----------
n_fft : int
The window size. Default value is 2048.
n_iter=32 : int
The number of iterations for Griffin-Lim. The default value is ``32``
hop_length : int
The hop (or stride) size. Default value is ``None`` which is equivalent to ``n_fft//4``.
Please make sure the value is the same as the forward STFT.
window : str
The windowing function for iSTFT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'.
Please make sure the value is the same as the forward STFT.
center : bool
        Putting the iSTFT kernel at the center of the time-step or not. If ``False``, the time
        index is the beginning of the iSTFT kernel, if ``True``, the time index is the center of
        the iSTFT kernel. Default value is ``True``.
Please make sure the value is the same as the forward STFT.
momentum : float
The momentum for the update rule. The default value is ``0.99``.
device : str
Choose which device to initialize this layer. Default value is 'cpu'
"""
def __init__(self,
n_fft,
n_iter=32,
hop_length=None,
win_length=None,
window='hann',
center=True,
pad_mode='reflect',
momentum=0.99,
device='cpu'):
super().__init__()
self.n_fft = n_fft
self.win_length = win_length
self.n_iter = n_iter
self.center = center
self.pad_mode = pad_mode
self.momentum = momentum
self.device = device
if win_length==None:
self.win_length=n_fft
else:
self.win_length=win_length
if hop_length==None:
self.hop_length = n_fft//4
else:
self.hop_length = hop_length
# Creating window function for stft and istft later
self.w = torch.tensor(get_window(window,
int(self.win_length),
fftbins=True),
device=device).float()
def forward(self, S):
"""
Convert a batch of magnitude spectrograms to waveforms.
Parameters
----------
S : torch tensor
Spectrogram of the shape ``(batch, n_fft//2+1, timesteps)``
"""
assert S.dim()==3 , "Please make sure your input is in the shape of (batch, freq_bins, timesteps)"
# Initializing Random Phase
rand_phase = torch.randn(*S.shape, device=self.device)
angles = torch.empty((*S.shape,2), device=self.device)
angles[:, :,:,0] = torch.cos(2 * np.pi * rand_phase)
angles[:,:,:,1] = torch.sin(2 * np.pi * rand_phase)
# Initializing the rebuilt magnitude spectrogram
rebuilt = torch.zeros(*angles.shape, device=self.device)
for _ in range(self.n_iter):
tprev = rebuilt # Saving previous rebuilt magnitude spec
# spec2wav conversion
# print(f'win_length={self.win_length}\tw={self.w.shape}')
inverse = torch.istft(S.unsqueeze(-1) * angles,
self.n_fft,
self.hop_length,
win_length=self.win_length,
window=self.w,
center=self.center)
# wav2spec conversion
rebuilt = torch.stft(inverse,
self.n_fft,
self.hop_length,
win_length=self.win_length,
window=self.w,
pad_mode=self.pad_mode)
# Phase update rule
angles[:,:,:] = rebuilt[:,:,:] - (self.momentum / (1 + self.momentum)) * tprev[:,:,:]
# Phase normalization
angles = angles.div(torch.sqrt(angles.pow(2).sum(-1)).unsqueeze(-1) + 1e-16) # normalizing the phase
# Using the final phase to reconstruct the waveforms
inverse = torch.istft(S.unsqueeze(-1) * angles,
self.n_fft,
self.hop_length,
win_length=self.win_length,
window=self.w,
center=self.center)
return inverse
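# ---------------------------------------------------------------------------
# Usage sketch (illustration only, not part of the original source): rebuild a
# waveform from a magnitude spectrogram with the Griffin_Lim layer above. The
# magnitude spectrogram comes from the STFT class assumed to be defined earlier
# in this module; the noise input, n_fft, hop size and iteration count are
# arbitrary assumptions for the demo.
def _sketch_griffin_lim(n_fft=1024, hop_length=256, num_samples=22050):
    x = torch.randn(1, num_samples)                     # placeholder audio
    stft = STFT(n_fft=n_fft, hop_length=hop_length, window='hann',
                output_format='Magnitude', verbose=False)
    spec = stft(x)                                      # (1, n_fft//2+1, time_steps)
    gl = Griffin_Lim(n_fft=n_fft, n_iter=8, hop_length=hop_length, window='hann')
    return gl(spec)                                     # reconstructed waveform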
| [
"torch.nn.functional.conv2d",
"numpy.sqrt",
"torch.nn.functional.conv1d",
"torch.sin",
"torch.max",
"torch.sqrt",
"torch.cos",
"torch.arange",
"torch.nn.ReflectionPad1d",
"torch.nn.ConstantPad1d",
"torch.rfft",
"torch.matmul",
"torch.pinverse",
"torch.randn",
"torch.optim.SGD",
"numpy.ceil",
"torch.stft",
"torch.empty",
"time.time",
"torch.cat",
"numpy.float",
"torch.stack",
"torch.atan2",
"torch.tensor",
"torch.nn.Parameter",
"torch.zeros"
] | [((4751, 4757), 'time.time', 'time', ([], {}), '()\n', (4755, 4757), False, 'from time import time\n'), ((5660, 5703), 'torch.tensor', 'torch.tensor', (['kernel_sin'], {'dtype': 'torch.float'}), '(kernel_sin, dtype=torch.float)\n', (5672, 5703), False, 'import torch\n'), ((5725, 5768), 'torch.tensor', 'torch.tensor', (['kernel_cos'], {'dtype': 'torch.float'}), '(kernel_cos, dtype=torch.float)\n', (5737, 5768), False, 'import torch\n'), ((6576, 6601), 'torch.tensor', 'torch.tensor', (['window_mask'], {}), '(window_mask)\n', (6588, 6601), False, 'import torch\n'), ((8628, 8668), 'torch.nn.functional.conv1d', 'conv1d', (['x', 'self.wsin'], {'stride': 'self.stride'}), '(x, self.wsin, stride=self.stride)\n', (8634, 8668), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((8689, 8729), 'torch.nn.functional.conv1d', 'conv1d', (['x', 'self.wcos'], {'stride': 'self.stride'}), '(x, self.wcos, stride=self.stride)\n', (8695, 8729), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((11428, 11481), 'torch.nn.functional.conv2d', 'conv2d', (['X_real_bc', 'self.kernel_cos_inv'], {'stride': '(1, 1)'}), '(X_real_bc, self.kernel_cos_inv, stride=(1, 1))\n', (11434, 11481), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((11494, 11547), 'torch.nn.functional.conv2d', 'conv2d', (['X_imag_bc', 'self.kernel_sin_inv'], {'stride': '(1, 1)'}), '(X_imag_bc, self.kernel_sin_inv, stride=(1, 1))\n', (11500, 11547), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((16903, 16909), 'time.time', 'time', ([], {}), '()\n', (16907, 16909), False, 'from time import time\n'), ((16973, 16979), 'time.time', 'time', ([], {}), '()\n', (16977, 16979), False, 'from time import time\n'), ((17075, 17098), 'torch.tensor', 'torch.tensor', (['mel_basis'], {}), '(mel_basis)\n', (17087, 17098), False, 'import torch\n'), ((18463, 18497), 'torch.matmul', 'torch.matmul', (['self.mel_basis', 'spec'], {}), '(self.mel_basis, spec)\n', (18475, 18497), False, 'import torch\n'), ((19991, 20034), 'torch.nn.Parameter', 'nn.Parameter', (['pred_stft'], {'requires_grad': '(True)'}), '(pred_stft, requires_grad=True)\n', (20003, 20034), True, 'import torch.nn as nn\n'), ((20113, 20155), 'torch.optim.SGD', 'torch.optim.SGD', (['[pred_stft]'], {}), '([pred_stft], **sgd_kwargs)\n', (20128, 20155), False, 'import torch\n'), ((23995, 24015), 'torch.tensor', 'torch.tensor', (['[amin]'], {}), '([amin])\n', (24007, 24015), False, 'import torch\n'), ((25300, 25332), 'torch.rfft', 'torch.rfft', (['v', '(1)'], {'onesided': '(False)'}), '(v, 1, onesided=False)\n', (25310, 25332), False, 'import torch\n'), ((25493, 25505), 'torch.cos', 'torch.cos', (['k'], {}), '(k)\n', (25502, 25505), False, 'import torch\n'), ((25520, 25532), 'torch.sin', 'torch.sin', (['k'], {}), '(k)\n', (25529, 25532), False, 'import torch\n'), ((31204, 31210), 'time.time', 'time', ([], {}), '()\n', (31208, 31210), False, 'from time import time\n'), ((32272, 32278), 'time.time', 'time', ([], {}), '()\n', (32276, 32278), False, 'from time import time\n'), ((32619, 32652), 'torch.tensor', 'torch.tensor', (['(kernel_sin * window)'], {}), '(kernel_sin * window)\n', (32631, 32652), False, 'import torch\n'), ((32668, 32701), 'torch.tensor', 'torch.tensor', (['(kernel_cos * window)'], {}), '(kernel_cos * window)\n', (32680, 32701), False, 'import torch\n'), ((34726, 34770), 'torch.nn.functional.conv1d', 'conv1d', (['x', 'self.wcos'], {'stride': 'self.hop_length'}), '(x, self.wcos, stride=self.hop_length)\n', (34732, 34770), False, 
'from torch.nn.functional import conv1d, conv2d, fold\n'), ((34794, 34838), 'torch.nn.functional.conv1d', 'conv1d', (['x', 'self.wsin'], {'stride': 'self.hop_length'}), '(x, self.wsin, stride=self.hop_length)\n', (34800, 34838), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((35029, 35067), 'torch.stack', 'torch.stack', (['(CQT_real, -CQT_imag)', '(-1)'], {}), '((CQT_real, -CQT_imag), -1)\n', (35040, 35067), False, 'import torch\n'), ((37939, 37945), 'time.time', 'time', ([], {}), '()\n', (37943, 37945), False, 'from time import time\n'), ((40885, 40891), 'time.time', 'time', ([], {}), '()\n', (40889, 40891), False, 'from time import time\n'), ((41562, 41585), 'numpy.ceil', 'np.ceil', (['(Q * sr / freqs)'], {}), '(Q * sr / freqs)\n', (41569, 41585), True, 'import numpy as np\n'), ((42451, 42457), 'time.time', 'time', ([], {}), '()\n', (42455, 42457), False, 'from time import time\n'), ((42671, 42689), 'torch.tensor', 'torch.tensor', (['wsin'], {}), '(wsin)\n', (42683, 42689), False, 'import torch\n'), ((42705, 42723), 'torch.tensor', 'torch.tensor', (['wcos'], {}), '(wcos)\n', (42717, 42723), False, 'import torch\n'), ((50717, 50723), 'time.time', 'time', ([], {}), '()\n', (50721, 50723), False, 'from time import time\n'), ((54329, 54385), 'torch.nn.functional.conv1d', 'conv1d', (['x', 'self.cqt_kernels_real'], {'stride': 'self.hop_length'}), '(x, self.cqt_kernels_real, stride=self.hop_length)\n', (54335, 54385), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((54405, 54461), 'torch.nn.functional.conv1d', 'conv1d', (['x', 'self.cqt_kernels_imag'], {'stride': 'self.hop_length'}), '(x, self.cqt_kernels_imag, stride=self.hop_length)\n', (54411, 54461), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((60134, 60140), 'time.time', 'time', ([], {}), '()\n', (60138, 60140), False, 'from time import time\n'), ((63381, 63387), 'time.time', 'time', ([], {}), '()\n', (63385, 63387), False, 'from time import time\n'), ((64029, 64052), 'numpy.ceil', 'np.ceil', (['(Q * sr / freqs)'], {}), '(Q * sr / freqs)\n', (64036, 64052), True, 'import numpy as np\n'), ((69081, 69118), 'torch.tensor', 'torch.tensor', (['wsin'], {'dtype': 'torch.float'}), '(wsin, dtype=torch.float)\n', (69093, 69118), False, 'import torch\n'), ((69139, 69176), 'torch.tensor', 'torch.tensor', (['wcos'], {'dtype': 'torch.float'}), '(wcos, dtype=torch.float)\n', (69151, 69176), False, 'import torch\n'), ((69909, 69949), 'torch.nn.functional.conv1d', 'conv1d', (['x', 'self.wsin'], {'stride': 'self.stride'}), '(x, self.wsin, stride=self.stride)\n', (69915, 69949), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((69965, 70005), 'torch.nn.functional.conv1d', 'conv1d', (['x', 'self.wcos'], {'stride': 'self.stride'}), '(x, self.wcos, stride=self.stride)\n', (69971, 70005), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((71093, 71138), 'torch.nn.functional.conv1d', 'conv1d', (['x_real', 'self.wcos'], {'stride': 'self.stride'}), '(x_real, self.wcos, stride=self.stride)\n', (71099, 71138), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((71152, 71197), 'torch.nn.functional.conv1d', 'conv1d', (['x_real', 'self.wsin'], {'stride': 'self.stride'}), '(x_real, self.wsin, stride=self.stride)\n', (71158, 71197), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((71211, 71256), 'torch.nn.functional.conv1d', 'conv1d', (['x_imag', 'self.wcos'], {'stride': 'self.stride'}), '(x_imag, self.wcos, stride=self.stride)\n', (71217, 
71256), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((71270, 71315), 'torch.nn.functional.conv1d', 'conv1d', (['x_imag', 'self.wsin'], {'stride': 'self.stride'}), '(x_imag, self.wsin, stride=self.stride)\n', (71276, 71315), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((75587, 75593), 'time.time', 'time', ([], {}), '()\n', (75591, 75593), False, 'from time import time\n'), ((79208, 79257), 'torch.nn.functional.conv2d', 'conv2d', (['X_real_bc', 'self.kernel_cos'], {'stride': '(1, 1)'}), '(X_real_bc, self.kernel_cos, stride=(1, 1))\n', (79214, 79257), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((79270, 79319), 'torch.nn.functional.conv2d', 'conv2d', (['X_imag_bc', 'self.kernel_sin'], {'stride': '(1, 1)'}), '(X_imag_bc, self.kernel_sin, stride=(1, 1))\n', (79276, 79319), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((83586, 83627), 'torch.randn', 'torch.randn', (['*S.shape'], {'device': 'self.device'}), '(*S.shape, device=self.device)\n', (83597, 83627), False, 'import torch\n'), ((83645, 83691), 'torch.empty', 'torch.empty', (['(*S.shape, 2)'], {'device': 'self.device'}), '((*S.shape, 2), device=self.device)\n', (83656, 83691), False, 'import torch\n'), ((83718, 83751), 'torch.cos', 'torch.cos', (['(2 * np.pi * rand_phase)'], {}), '(2 * np.pi * rand_phase)\n', (83727, 83751), False, 'import torch\n'), ((83778, 83811), 'torch.sin', 'torch.sin', (['(2 * np.pi * rand_phase)'], {}), '(2 * np.pi * rand_phase)\n', (83787, 83811), False, 'import torch\n'), ((83888, 83934), 'torch.zeros', 'torch.zeros', (['*angles.shape'], {'device': 'self.device'}), '(*angles.shape, device=self.device)\n', (83899, 83934), False, 'import torch\n'), ((6864, 6918), 'torch.nn.Parameter', 'torch.nn.Parameter', (['wsin'], {'requires_grad': 'self.trainable'}), '(wsin, requires_grad=self.trainable)\n', (6882, 6918), False, 'import torch\n'), ((6938, 6992), 'torch.nn.Parameter', 'torch.nn.Parameter', (['wcos'], {'requires_grad': 'self.trainable'}), '(wcos, requires_grad=self.trainable)\n', (6956, 6992), False, 'import torch\n'), ((17476, 17534), 'torch.nn.Parameter', 'torch.nn.Parameter', (['mel_basis'], {'requires_grad': 'trainable_mel'}), '(mel_basis, requires_grad=trainable_mel)\n', (17494, 17534), False, 'import torch\n'), ((24040, 24059), 'torch.tensor', 'torch.tensor', (['[ref]'], {}), '([ref])\n', (24052, 24059), False, 'import torch\n'), ((24852, 24901), 'torch.max', 'torch.max', (['log_spec', '(batch_wise_max - self.top_db)'], {}), '(log_spec, batch_wise_max - self.top_db)\n', (24861, 24901), False, 'import torch\n'), ((32904, 32961), 'torch.nn.Parameter', 'torch.nn.Parameter', (['wsin'], {'requires_grad': 'trainable_kernels'}), '(wsin, requires_grad=trainable_kernels)\n', (32922, 32961), False, 'import torch\n'), ((32981, 33038), 'torch.nn.Parameter', 'torch.nn.Parameter', (['wcos'], {'requires_grad': 'trainable_kernels'}), '(wcos, requires_grad=trainable_kernels)\n', (32999, 33038), False, 'import torch\n'), ((33305, 33374), 'torch.nn.Parameter', 'torch.nn.Parameter', (['cqt_kernels_real'], {'requires_grad': 'trainable_kernels'}), '(cqt_kernels_real, requires_grad=trainable_kernels)\n', (33323, 33374), False, 'import torch\n'), ((33406, 33475), 'torch.nn.Parameter', 'torch.nn.Parameter', (['cqt_kernels_imag'], {'requires_grad': 'trainable_kernels'}), '(cqt_kernels_imag, requires_grad=trainable_kernels)\n', (33424, 33475), False, 'import torch\n'), ((39877, 39883), 'time.time', 'time', ([], {}), '()\n', (39881, 39883), False, 
'from time import time\n'), ((42889, 42946), 'torch.nn.Parameter', 'torch.nn.Parameter', (['wsin'], {'requires_grad': 'trainable_kernels'}), '(wsin, requires_grad=trainable_kernels)\n', (42907, 42946), False, 'import torch\n'), ((42966, 43023), 'torch.nn.Parameter', 'torch.nn.Parameter', (['wcos'], {'requires_grad': 'trainable_kernels'}), '(wcos, requires_grad=trainable_kernels)\n', (42984, 43023), False, 'import torch\n'), ((43290, 43359), 'torch.nn.Parameter', 'torch.nn.Parameter', (['cqt_kernels_real'], {'requires_grad': 'trainable_kernels'}), '(cqt_kernels_real, requires_grad=trainable_kernels)\n', (43308, 43359), False, 'import torch\n'), ((43391, 43460), 'torch.nn.Parameter', 'torch.nn.Parameter', (['cqt_kernels_imag'], {'requires_grad': 'trainable_kernels'}), '(cqt_kernels_imag, requires_grad=trainable_kernels)\n', (43409, 43460), False, 'import torch\n'), ((43968, 44004), 'torch.nn.ConstantPad1d', 'nn.ConstantPad1d', (['(self.n_fft // 2)', '(0)'], {}), '(self.n_fft // 2, 0)\n', (43984, 44004), True, 'import torch.nn as nn\n'), ((45215, 45240), 'torch.cat', 'torch.cat', (['(CQT1, CQT)', '(1)'], {}), '((CQT1, CQT), 1)\n', (45224, 45240), False, 'import torch\n'), ((51581, 51650), 'torch.nn.Parameter', 'torch.nn.Parameter', (['cqt_kernels_real'], {'requires_grad': 'trainable_kernels'}), '(cqt_kernels_real, requires_grad=trainable_kernels)\n', (51599, 51650), False, 'import torch\n'), ((51682, 51751), 'torch.nn.Parameter', 'torch.nn.Parameter', (['cqt_kernels_imag'], {'requires_grad': 'trainable_kernels'}), '(cqt_kernels_imag, requires_grad=trainable_kernels)\n', (51700, 51751), False, 'import torch\n'), ((53026, 53082), 'torch.nn.functional.conv1d', 'conv1d', (['x', 'self.cqt_kernels_real'], {'stride': 'self.hop_length'}), '(x, self.cqt_kernels_real, stride=self.hop_length)\n', (53032, 53082), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((62378, 62384), 'time.time', 'time', ([], {}), '()\n', (62382, 62384), False, 'from time import time\n'), ((64462, 64531), 'torch.nn.Parameter', 'torch.nn.Parameter', (['cqt_kernels_real'], {'requires_grad': 'trainable_kernels'}), '(cqt_kernels_real, requires_grad=trainable_kernels)\n', (64480, 64531), False, 'import torch\n'), ((64563, 64632), 'torch.nn.Parameter', 'torch.nn.Parameter', (['cqt_kernels_imag'], {'requires_grad': 'trainable_kernels'}), '(cqt_kernels_imag, requires_grad=trainable_kernels)\n', (64581, 64632), False, 'import torch\n'), ((65322, 65358), 'torch.nn.ConstantPad1d', 'nn.ConstantPad1d', (['(self.n_fft // 2)', '(0)'], {}), '(self.n_fft // 2, 0)\n', (65338, 65358), True, 'import torch.nn as nn\n'), ((66616, 66641), 'torch.cat', 'torch.cat', (['(CQT1, CQT)', '(1)'], {}), '((CQT1, CQT), 1)\n', (66625, 66641), False, 'import torch\n'), ((77039, 77102), 'torch.nn.Parameter', 'torch.nn.Parameter', (['kernel_sin'], {'requires_grad': 'trainable_kernels'}), '(kernel_sin, requires_grad=trainable_kernels)\n', (77057, 77102), False, 'import torch\n'), ((77128, 77191), 'torch.nn.Parameter', 'torch.nn.Parameter', (['kernel_cos'], {'requires_grad': 'trainable_kernels'}), '(kernel_cos, requires_grad=trainable_kernels)\n', (77146, 77191), False, 'import torch\n'), ((77558, 77621), 'torch.nn.Parameter', 'torch.nn.Parameter', (['window_mask'], {'requires_grad': 'trainable_window'}), '(window_mask, requires_grad=trainable_window)\n', (77576, 77621), False, 'import torch\n'), ((84526, 84645), 'torch.stft', 'torch.stft', (['inverse', 'self.n_fft', 'self.hop_length'], {'win_length': 'self.win_length', 'window': 'self.w', 
'pad_mode': 'self.pad_mode'}), '(inverse, self.n_fft, self.hop_length, win_length=self.win_length,\n window=self.w, pad_mode=self.pad_mode)\n', (84536, 84645), False, 'import torch\n'), ((8272, 8308), 'torch.nn.ConstantPad1d', 'nn.ConstantPad1d', (['self.pad_amount', '(0)'], {}), '(self.pad_amount, 0)\n', (8288, 8308), True, 'import torch.nn as nn\n'), ((9055, 9079), 'torch.sqrt', 'torch.sqrt', (['(spec + 1e-08)'], {}), '(spec + 1e-08)\n', (9065, 9079), False, 'import torch\n'), ((9171, 9187), 'torch.sqrt', 'torch.sqrt', (['spec'], {}), '(spec)\n', (9181, 9187), False, 'import torch\n'), ((9247, 9287), 'torch.stack', 'torch.stack', (['(spec_real, -spec_imag)', '(-1)'], {}), '((spec_real, -spec_imag), -1)\n', (9258, 9287), False, 'import torch\n'), ((24437, 24460), 'torch.max', 'torch.max', (['S', 'self.amin'], {}), '(S, self.amin)\n', (24446, 24460), False, 'import torch\n'), ((24501, 24531), 'torch.max', 'torch.max', (['self.amin', 'self.ref'], {}), '(self.amin, self.ref)\n', (24510, 24531), False, 'import torch\n'), ((25645, 25655), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (25652, 25655), True, 'import numpy as np\n'), ((25687, 25701), 'numpy.sqrt', 'np.sqrt', (['(N / 2)'], {}), '(N / 2)\n', (25694, 25701), True, 'import numpy as np\n'), ((34505, 34548), 'torch.nn.ConstantPad1d', 'nn.ConstantPad1d', (['(self.kernel_width // 2)', '(0)'], {}), '(self.kernel_width // 2, 0)\n', (34521, 34548), True, 'import torch.nn as nn\n'), ((41604, 41625), 'torch.tensor', 'torch.tensor', (['lenghts'], {}), '(lenghts)\n', (41616, 41625), False, 'import torch\n'), ((44071, 44106), 'torch.nn.ReflectionPad1d', 'nn.ReflectionPad1d', (['(self.n_fft // 2)'], {}), '(self.n_fft // 2)\n', (44089, 44106), True, 'import torch.nn as nn\n'), ((51412, 51442), 'torch.tensor', 'torch.tensor', (['cqt_kernels.real'], {}), '(cqt_kernels.real)\n', (51424, 51442), False, 'import torch\n'), ((51483, 51513), 'torch.tensor', 'torch.tensor', (['cqt_kernels.imag'], {}), '(cqt_kernels.imag)\n', (51495, 51513), False, 'import torch\n'), ((52810, 52853), 'torch.nn.ConstantPad1d', 'nn.ConstantPad1d', (['(self.kernel_width // 2)', '(0)'], {}), '(self.kernel_width // 2, 0)\n', (52826, 52853), True, 'import torch.nn as nn\n'), ((53163, 53219), 'torch.nn.functional.conv1d', 'conv1d', (['x', 'self.cqt_kernels_imag'], {'stride': 'self.hop_length'}), '(x, self.cqt_kernels_imag, stride=self.hop_length)\n', (53169, 53219), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((53635, 53672), 'torch.stack', 'torch.stack', (['(CQT_real, CQT_imag)', '(-1)'], {}), '((CQT_real, CQT_imag), -1)\n', (53646, 53672), False, 'import torch\n'), ((54113, 54156), 'torch.nn.ConstantPad1d', 'nn.ConstantPad1d', (['(self.kernel_width // 2)', '(0)'], {}), '(self.kernel_width // 2, 0)\n', (54129, 54156), True, 'import torch.nn as nn\n'), ((64071, 64092), 'torch.tensor', 'torch.tensor', (['lenghts'], {}), '(lenghts)\n', (64083, 64092), False, 'import torch\n'), ((65425, 65460), 'torch.nn.ReflectionPad1d', 'nn.ReflectionPad1d', (['(self.n_fft // 2)'], {}), '(self.n_fft // 2)\n', (65443, 65460), True, 'import torch.nn as nn\n'), ((69725, 69761), 'torch.nn.ConstantPad1d', 'nn.ConstantPad1d', (['(self.n_fft // 2)', '(0)'], {}), '(self.n_fft // 2, 0)\n', (69741, 69761), True, 'import torch.nn as nn\n'), ((76741, 76784), 'torch.tensor', 'torch.tensor', (['kernel_sin'], {'dtype': 'torch.float'}), '(kernel_sin, dtype=torch.float)\n', (76753, 76784), False, 'import torch\n'), ((76820, 76863), 'torch.tensor', 'torch.tensor', (['kernel_cos'], {'dtype': 
'torch.float'}), '(kernel_cos, dtype=torch.float)\n', (76832, 76863), False, 'import torch\n'), ((8544, 8579), 'torch.nn.ReflectionPad1d', 'nn.ReflectionPad1d', (['self.pad_amount'], {}), '(self.pad_amount)\n', (8562, 8579), True, 'import torch.nn as nn\n'), ((9390, 9430), 'torch.atan2', 'torch.atan2', (['(-spec_imag + 0.0)', 'spec_real'], {}), '(-spec_imag + 0.0, spec_real)\n', (9401, 9430), False, 'import torch\n'), ((31973, 31979), 'time.time', 'time', ([], {}), '()\n', (31977, 31979), False, 'from time import time\n'), ((33853, 33859), 'time.time', 'time', ([], {}), '()\n', (33857, 33859), False, 'from time import time\n'), ((34618, 34660), 'torch.nn.ReflectionPad1d', 'nn.ReflectionPad1d', (['(self.kernel_width // 2)'], {}), '(self.kernel_width // 2)\n', (34636, 34660), True, 'import torch.nn as nn\n'), ((35621, 35662), 'torch.stack', 'torch.stack', (['(phase_real, phase_imag)', '(-1)'], {}), '((phase_real, phase_imag), -1)\n', (35632, 35662), False, 'import torch\n'), ((41517, 41542), 'numpy.float', 'np.float', (['bins_per_octave'], {}), '(bins_per_octave)\n', (41525, 41542), True, 'import numpy as np\n'), ((46038, 46079), 'torch.stack', 'torch.stack', (['(phase_real, phase_imag)', '(-1)'], {}), '((phase_real, phase_imag), -1)\n', (46049, 46079), False, 'import torch\n'), ((52923, 52965), 'torch.nn.ReflectionPad1d', 'nn.ReflectionPad1d', (['(self.kernel_width // 2)'], {}), '(self.kernel_width // 2)\n', (52941, 52965), True, 'import torch.nn as nn\n'), ((53862, 53903), 'torch.stack', 'torch.stack', (['(phase_real, phase_imag)', '(-1)'], {}), '((phase_real, phase_imag), -1)\n', (53873, 53903), False, 'import torch\n'), ((54226, 54268), 'torch.nn.ReflectionPad1d', 'nn.ReflectionPad1d', (['(self.kernel_width // 2)'], {}), '(self.kernel_width // 2)\n', (54244, 54268), True, 'import torch.nn as nn\n'), ((63984, 64009), 'numpy.float', 'np.float', (['bins_per_octave'], {}), '(bins_per_octave)\n', (63992, 64009), True, 'import numpy as np\n'), ((67645, 67686), 'torch.stack', 'torch.stack', (['(phase_real, phase_imag)', '(-1)'], {}), '((phase_real, phase_imag), -1)\n', (67656, 67686), False, 'import torch\n'), ((69831, 69866), 'torch.nn.ReflectionPad1d', 'nn.ReflectionPad1d', (['(self.n_fft // 2)'], {}), '(self.n_fft // 2)\n', (69849, 69866), True, 'import torch.nn as nn\n'), ((7367, 7373), 'time.time', 'time', ([], {}), '()\n', (7371, 7373), False, 'from time import time\n'), ((17201, 17207), 'time.time', 'time', ([], {}), '()\n', (17205, 17207), False, 'from time import time\n'), ((17290, 17296), 'time.time', 'time', ([], {}), '()\n', (17294, 17296), False, 'from time import time\n'), ((19923, 19948), 'torch.pinverse', 'torch.pinverse', (['mel_basis'], {}), '(mel_basis)\n', (19937, 19948), False, 'import torch\n'), ((25404, 25451), 'torch.arange', 'torch.arange', (['N'], {'dtype': 'x.dtype', 'device': 'x.device'}), '(N, dtype=x.dtype, device=x.device)\n', (25416, 25451), False, 'import torch\n'), ((35503, 35534), 'torch.atan2', 'torch.atan2', (['CQT_imag', 'CQT_real'], {}), '(CQT_imag, CQT_real)\n', (35514, 35534), False, 'import torch\n'), ((35570, 35601), 'torch.atan2', 'torch.atan2', (['CQT_imag', 'CQT_real'], {}), '(CQT_imag, CQT_real)\n', (35581, 35601), False, 'import torch\n'), ((38595, 38601), 'time.time', 'time', ([], {}), '()\n', (38599, 38601), False, 'from time import time\n'), ((42130, 42136), 'time.time', 'time', ([], {}), '()\n', (42134, 42136), False, 'from time import time\n'), ((42827, 42833), 'time.time', 'time', ([], {}), '()\n', (42831, 42833), False, 'from time import 
time\n'), ((45904, 45949), 'torch.atan2', 'torch.atan2', (['CQT[:, :, :, 1]', 'CQT[:, :, :, 0]'], {}), '(CQT[:, :, :, 1], CQT[:, :, :, 0])\n', (45915, 45949), False, 'import torch\n'), ((45979, 46024), 'torch.atan2', 'torch.atan2', (['CQT[:, :, :, 1]', 'CQT[:, :, :, 0]'], {}), '(CQT[:, :, :, 1], CQT[:, :, :, 0])\n', (45990, 46024), False, 'import torch\n'), ((52158, 52164), 'time.time', 'time', ([], {}), '()\n', (52162, 52164), False, 'from time import time\n'), ((53744, 53775), 'torch.atan2', 'torch.atan2', (['CQT_imag', 'CQT_real'], {}), '(CQT_imag, CQT_real)\n', (53755, 53775), False, 'import torch\n'), ((53811, 53842), 'torch.atan2', 'torch.atan2', (['CQT_imag', 'CQT_real'], {}), '(CQT_imag, CQT_real)\n', (53822, 53842), False, 'import torch\n'), ((61071, 61077), 'time.time', 'time', ([], {}), '()\n', (61075, 61077), False, 'from time import time\n'), ((65040, 65046), 'time.time', 'time', ([], {}), '()\n', (65044, 65046), False, 'from time import time\n'), ((67511, 67556), 'torch.atan2', 'torch.atan2', (['CQT[:, :, :, 1]', 'CQT[:, :, :, 0]'], {}), '(CQT[:, :, :, 1], CQT[:, :, :, 0])\n', (67522, 67556), False, 'import torch\n'), ((67586, 67631), 'torch.atan2', 'torch.atan2', (['CQT[:, :, :, 1]', 'CQT[:, :, :, 0]'], {}), '(CQT[:, :, :, 1], CQT[:, :, :, 0])\n', (67597, 67631), False, 'import torch\n'), ((76571, 76596), 'torch.tensor', 'torch.tensor', (['window_mask'], {}), '(window_mask)\n', (76583, 76596), False, 'import torch\n'), ((77866, 77872), 'time.time', 'time', ([], {}), '()\n', (77870, 77872), False, 'from time import time\n'), ((19786, 19861), 'torch.zeros', 'torch.zeros', (['*pred_stft_shape'], {'dtype': 'torch.float32', 'device': 'mel_basis.device'}), '(*pred_stft_shape, dtype=torch.float32, device=mel_basis.device)\n', (19797, 19861), False, 'import torch\n'), ((40686, 40692), 'time.time', 'time', ([], {}), '()\n', (40690, 40692), False, 'from time import time\n'), ((63183, 63189), 'time.time', 'time', ([], {}), '()\n', (63187, 63189), False, 'from time import time\n')] |
"""
Custom management command to rebuild thumbnail images
- May be required after importing a new dataset, for example
"""
import os
import logging
from PIL import UnidentifiedImageError
from django.core.management.base import BaseCommand
from django.conf import settings
from django.db.utils import OperationalError, ProgrammingError
from company.models import Company
from part.models import Part
logger = logging.getLogger("inventree-thumbnails")
class Command(BaseCommand):
"""
Rebuild all thumbnail images
"""
def rebuild_thumbnail(self, model):
"""
Rebuild the thumbnail specified by the "image" field of the provided model
"""
if not model.image:
return
img = model.image
url = img.thumbnail.name
loc = os.path.join(settings.MEDIA_ROOT, url)
if not os.path.exists(loc):
logger.info(f"Generating thumbnail image for '{img}'")
try:
model.image.render_variations(replace=False)
except FileNotFoundError:
logger.error(f"ERROR: Image file '{img}' is missing")
except UnidentifiedImageError:
logger.error(f"ERROR: Image file '{img}' is not a valid image")
def handle(self, *args, **kwargs):
logger.setLevel(logging.INFO)
logger.info("Rebuilding Part thumbnails")
for part in Part.objects.exclude(image=None):
try:
self.rebuild_thumbnail(part)
except (OperationalError, ProgrammingError):
logger.error("ERROR: Database read error.")
break
logger.info("Rebuilding Company thumbnails")
for company in Company.objects.exclude(image=None):
try:
self.rebuild_thumbnail(company)
except (OperationalError, ProgrammingError):
logger.error("ERROR: abase read error.")
break
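# Usage sketch (assumption): Django management commands are invoked by the file
# name they are stored under; if this module is saved as
# management/commands/rebuild_thumbnails.py, the command would be run as
#
#     python manage.py rebuild_thumbnails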
| [
"logging.getLogger",
"os.path.exists",
"company.models.Company.objects.exclude",
"part.models.Part.objects.exclude",
"os.path.join"
] | [((415, 456), 'logging.getLogger', 'logging.getLogger', (['"""inventree-thumbnails"""'], {}), "('inventree-thumbnails')\n", (432, 456), False, 'import logging\n'), ((806, 844), 'os.path.join', 'os.path.join', (['settings.MEDIA_ROOT', 'url'], {}), '(settings.MEDIA_ROOT, url)\n', (818, 844), False, 'import os\n'), ((1418, 1450), 'part.models.Part.objects.exclude', 'Part.objects.exclude', ([], {'image': 'None'}), '(image=None)\n', (1438, 1450), False, 'from part.models import Part\n'), ((1731, 1766), 'company.models.Company.objects.exclude', 'Company.objects.exclude', ([], {'image': 'None'}), '(image=None)\n', (1754, 1766), False, 'from company.models import Company\n'), ((869, 888), 'os.path.exists', 'os.path.exists', (['loc'], {}), '(loc)\n', (883, 888), False, 'import os\n')] |
from glue.core.data_factories.helpers import has_extension
from glue.config import data_factory
__all__ = ['tabular_data']
@data_factory(label="ASCII Table",
identifier=has_extension('csv txt tsv tbl dat '
'csv.gz txt.gz tbl.bz '
'dat.gz'),
priority=1)
def tabular_data(path, **kwargs):
from glue.core.data_factories.astropy_table import astropy_tabular_data
from glue.core.data_factories.pandas import pandas_read_table
for fac in [astropy_tabular_data, pandas_read_table]:
try:
return fac(path, **kwargs)
except Exception:
pass
else:
raise IOError("Could not parse file: %s" % path)
| [
"glue.core.data_factories.helpers.has_extension"
] | [((187, 251), 'glue.core.data_factories.helpers.has_extension', 'has_extension', (['"""csv txt tsv tbl dat csv.gz txt.gz tbl.bz dat.gz"""'], {}), "('csv txt tsv tbl dat csv.gz txt.gz tbl.bz dat.gz')\n", (200, 251), False, 'from glue.core.data_factories.helpers import has_extension\n')] |
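A small usage sketch (the file path below is hypothetical): because the factory simply tries the astropy reader first and falls back to pandas, it can also be called directly on a delimited file.
# Hypothetical call; any keyword arguments are forwarded to the underlying reader.
data = tabular_data('measurements.csv')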
# External deps
import os, sys, json
from pathlib import Path
from typing import Dict, List
# Internal deps
os.chdir(sys.path[0])
sys.path.append("..")
import df_common as dfc
import analyses_common as ac
# Generated files directory
GEN_FILE_DIR = str(Path(__file__).resolve().parent.parent) + os.sep + "generated_files" # TODO: ugly parent.parent pathing
if os.path.exists(GEN_FILE_DIR):
sys.path.append(GEN_FILE_DIR)
if os.path.exists(os.path.join(GEN_FILE_DIR, "sloc_cnt.py")):
from sloc_cnt import DRIVER_NAME_TO_SLOC
else:
print("Error: no SLOC file! Run \'df_analyze.py\' with \'--linux-src-dir\'")
sys.exit(1)
if __name__ == "__main__":
json_files = ac.argparse_and_get_files("Graph SLOC/SoC data")
soc_sloc_by_arch: Dict[str, List[int]] = {}
print("Gathering SLOC average by arch...")
from graph_dd_sloc_by_arch import get_sloc_avg_and_list_by_arch
cmp_by_arch = ac.build_dict_two_lvl_cnt(json_files, dfc.JSON_ARC, dfc.JSON_CMP_STR)
avg_sloc_by_arch, sloc_list_by_arch = get_sloc_avg_and_list_by_arch(cmp_by_arch, verbose = False)
# Collection
print("Iterating DTBs/SoCs...")
for dtb_json in json_files:
with open(dtb_json) as json_file:
data = json.load(json_file)
soc_sloc = 0
arch = data[dfc.JSON_ARC]
cmp_strs = data[dfc.JSON_CMP_STR]
# Total SLOC for this SoC
for cmp_str in cmp_strs:
driver_sloc = dfc.cmp_str_to_sloc(cmp_str)
if not driver_sloc: # Closed-source driver
driver_sloc = avg_sloc_by_arch[arch]
soc_sloc += driver_sloc
#print("{}: {}".format(cmp_str, driver_sloc))
if arch not in soc_sloc_by_arch:
soc_sloc_by_arch[arch] = []
else:
soc_sloc_by_arch[arch].append(soc_sloc)
print("{} ({}): {}".format(dtb_json.split(os.sep)[-1], arch, soc_sloc))
# Final stats
ac.print_mean_median_std_dev_for_dict_of_lists(soc_sloc_by_arch,
"\nSloc Per Soc, format: [arch : (mean, median, std_dev)]\n")
| [
"os.path.exists",
"pathlib.Path",
"os.path.join",
"analyses_common.build_dict_two_lvl_cnt",
"os.chdir",
"graph_dd_sloc_by_arch.get_sloc_avg_and_list_by_arch",
"sys.exit",
"json.load",
"analyses_common.argparse_and_get_files",
"analyses_common.print_mean_median_std_dev_for_dict_of_lists",
"sys.path.append",
"df_common.cmp_str_to_sloc"
] | [((109, 130), 'os.chdir', 'os.chdir', (['sys.path[0]'], {}), '(sys.path[0])\n', (117, 130), False, 'import os, sys, json\n'), ((131, 152), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (146, 152), False, 'import os, sys, json\n'), ((361, 389), 'os.path.exists', 'os.path.exists', (['GEN_FILE_DIR'], {}), '(GEN_FILE_DIR)\n', (375, 389), False, 'import os, sys, json\n'), ((395, 424), 'sys.path.append', 'sys.path.append', (['GEN_FILE_DIR'], {}), '(GEN_FILE_DIR)\n', (410, 424), False, 'import os, sys, json\n'), ((631, 642), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (639, 642), False, 'import os, sys, json\n'), ((689, 737), 'analyses_common.argparse_and_get_files', 'ac.argparse_and_get_files', (['"""Graph SLOC/SoC data"""'], {}), "('Graph SLOC/SoC data')\n", (714, 737), True, 'import analyses_common as ac\n'), ((920, 989), 'analyses_common.build_dict_two_lvl_cnt', 'ac.build_dict_two_lvl_cnt', (['json_files', 'dfc.JSON_ARC', 'dfc.JSON_CMP_STR'], {}), '(json_files, dfc.JSON_ARC, dfc.JSON_CMP_STR)\n', (945, 989), True, 'import analyses_common as ac\n'), ((1032, 1089), 'graph_dd_sloc_by_arch.get_sloc_avg_and_list_by_arch', 'get_sloc_avg_and_list_by_arch', (['cmp_by_arch'], {'verbose': '(False)'}), '(cmp_by_arch, verbose=False)\n', (1061, 1089), False, 'from graph_dd_sloc_by_arch import get_sloc_avg_and_list_by_arch\n'), ((1936, 2068), 'analyses_common.print_mean_median_std_dev_for_dict_of_lists', 'ac.print_mean_median_std_dev_for_dict_of_lists', (['soc_sloc_by_arch', '"""\nSloc Per Soc, format: [arch : (mean, median, std_dev)]\n"""'], {}), '(soc_sloc_by_arch,\n """\nSloc Per Soc, format: [arch : (mean, median, std_dev)]\n""")\n', (1982, 2068), True, 'import analyses_common as ac\n'), ((447, 488), 'os.path.join', 'os.path.join', (['GEN_FILE_DIR', '"""sloc_cnt.py"""'], {}), "(GEN_FILE_DIR, 'sloc_cnt.py')\n", (459, 488), False, 'import os, sys, json\n'), ((1240, 1260), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (1249, 1260), False, 'import os, sys, json\n'), ((1453, 1481), 'df_common.cmp_str_to_sloc', 'dfc.cmp_str_to_sloc', (['cmp_str'], {}), '(cmp_str)\n', (1472, 1481), True, 'import df_common as dfc\n'), ((254, 268), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (258, 268), False, 'from pathlib import Path\n')] |
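The core of the script is the per-SoC roll-up: each driver compatible string is mapped to a SLOC count, and drivers with no SLOC entry (closed source) fall back to the per-architecture average. A stand-alone sketch of that fallback with made-up numbers:
# Hypothetical data -- real values come from dfc.cmp_str_to_sloc() and
# get_sloc_avg_and_list_by_arch() above.
sloc_by_driver = {"vendor,uart": 1200, "vendor,gpu": None}   # None => closed source
avg_sloc_for_arch = 3500
soc_sloc = 0
for cmp_str, sloc in sloc_by_driver.items():
    soc_sloc += sloc if sloc else avg_sloc_for_arch
print(soc_sloc)   # 1200 + 3500 = 4700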
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 <NAME> <<EMAIL>>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import re
import six
import yaml
import time
from .. import plugins
from ..AppriseAsset import AppriseAsset
from ..URLBase import URLBase
from ..common import ConfigFormat
from ..common import CONFIG_FORMATS
from ..common import ContentIncludeMode
from ..utils import GET_SCHEMA_RE
from ..utils import parse_list
from ..utils import parse_bool
from ..utils import parse_urls
from . import SCHEMA_MAP
# Test whether token is valid or not
VALID_TOKEN = re.compile(
r'(?P<token>[a-z0-9][a-z0-9_]+)', re.I)
class ConfigBase(URLBase):
"""
This is the base class for all supported configuration sources
"""
# The Default Encoding to use if not otherwise detected
encoding = 'utf-8'
# The default expected configuration format unless otherwise
# detected by the sub-modules
default_config_format = ConfigFormat.TEXT
# This is only set if the user overrides the config format on the URL
# this should always initialize itself as None
config_format = None
# Don't read any more of this amount of data into memory as there is no
    # reason we should be reading in more. This is more of a safeguard than
# anything else. 128KB (131072B)
max_buffer_size = 131072
# By default all configuration is not includable using the 'include'
# line found in configuration files.
allow_cross_includes = ContentIncludeMode.NEVER
# the config path manages the handling of relative include
config_path = os.getcwd()
def __init__(self, cache=True, recursion=0, insecure_includes=False,
**kwargs):
"""
Initialize some general logging and common server arguments that will
keep things consistent when working with the configurations that
inherit this class.
        By default we cache our responses so that subsequent calls do not
        cause the content to be retrieved again. For local file references
        this makes no difference at all. But for remote content, this does
        mean more than one call can be made to retrieve the (same) data. This
method can be somewhat inefficient if disabled. Only disable caching
if you understand the consequences.
        You can alternatively set the cache value to an int identifying the
        number of seconds the previously retrieved content can exist for
        before it should be considered expired.
recursion defines how deep we recursively handle entries that use the
`include` keyword. This keyword requires us to fetch more configuration
from another source and add it to our existing compilation. If the
file we remotely retrieve also has an `include` reference, we will only
advance through it if recursion is set to 2 deep. If set to zero
it is off. There is no limit to how high you set this value. It would
be recommended to keep it low if you do intend to use it.
        insecure_includes is disabled by default. When set to True, all
Apprise Config files marked to be in STRICT mode are treated as being
in ALWAYS mode.
        Take a file:// based configuration for example: only a file:// based
        configuration can include another file:// based one, because it is set
        to STRICT mode. If an http:// based configuration file attempted to
        include a file:// one it would fail. However this include would be
possible if insecure_includes is set to True.
There are cases where a self hosting apprise developer may wish to load
configuration from memory (in a string format) that contains 'include'
entries (even file:// based ones). In these circumstances if you want
these 'include' entries to be honored, this value must be set to True.
"""
super(ConfigBase, self).__init__(**kwargs)
        # Tracks the time the content was last retrieved on. This plays a role
# for cases where we are not caching our response and are required to
# re-retrieve our settings.
self._cached_time = None
# Tracks previously loaded content for speed
self._cached_servers = None
# Initialize our recursion value
self.recursion = recursion
# Initialize our insecure_includes flag
self.insecure_includes = insecure_includes
if 'encoding' in kwargs:
# Store the encoding
self.encoding = kwargs.get('encoding')
if 'format' in kwargs \
and isinstance(kwargs['format'], six.string_types):
# Store the enforced config format
self.config_format = kwargs.get('format').lower()
if self.config_format not in CONFIG_FORMATS:
# Simple error checking
err = 'An invalid config format ({}) was specified.'.format(
self.config_format)
self.logger.warning(err)
raise TypeError(err)
# Set our cache flag; it can be True or a (positive) integer
try:
self.cache = cache if isinstance(cache, bool) else int(cache)
if self.cache < 0:
err = 'A negative cache value ({}) was specified.'.format(
cache)
self.logger.warning(err)
raise TypeError(err)
except (ValueError, TypeError):
err = 'An invalid cache value ({}) was specified.'.format(cache)
self.logger.warning(err)
raise TypeError(err)
return
def servers(self, asset=None, **kwargs):
"""
        Reads the loaded configuration and returns all of the services
that could be parsed and loaded.
"""
if not self.expired():
# We already have cached results to return; use them
return self._cached_servers
# Our cached response object
self._cached_servers = list()
# read() causes the child class to do whatever it takes for the
# config plugin to load the data source and return unparsed content
# None is returned if there was an error or simply no data
content = self.read(**kwargs)
if not isinstance(content, six.string_types):
# Set the time our content was cached at
self._cached_time = time.time()
# Nothing more to do; return our empty cache list
return self._cached_servers
        # Our Configuration format uses a default if one wasn't detected
        # or enforced.
config_format = \
self.default_config_format \
if self.config_format is None else self.config_format
# Dynamically load our parse_ function based on our config format
fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format))
# Initialize our asset object
asset = asset if isinstance(asset, AppriseAsset) else self.asset
# Execute our config parse function which always returns a tuple
# of our servers and our configuration
servers, configs = fn(content=content, asset=asset)
self._cached_servers.extend(servers)
# Configuration files were detected; recursively populate them
# If we have been configured to do so
for url in configs:
if self.recursion > 0:
# Attempt to acquire the schema at the very least to allow
# our configuration based urls.
schema = GET_SCHEMA_RE.match(url)
if schema is None:
# Plan B is to assume we're dealing with a file
schema = 'file'
if not os.path.isabs(url):
# We're dealing with a relative path; prepend
# our current config path
url = os.path.join(self.config_path, url)
url = '{}://{}'.format(schema, URLBase.quote(url))
else:
# Ensure our schema is always in lower case
schema = schema.group('schema').lower()
# Some basic validation
if schema not in SCHEMA_MAP:
ConfigBase.logger.warning(
'Unsupported include schema {}.'.format(schema))
continue
# Parse our url details of the server object as dictionary
# containing all of the information parsed from our URL
results = SCHEMA_MAP[schema].parse_url(url)
if not results:
# Failed to parse the server URL
self.logger.warning(
'Unparseable include URL {}'.format(url))
continue
# Handle cross inclusion based on allow_cross_includes rules
if (SCHEMA_MAP[schema].allow_cross_includes ==
ContentIncludeMode.STRICT
and schema not in self.schemas()
and not self.insecure_includes) or \
SCHEMA_MAP[schema].allow_cross_includes == \
ContentIncludeMode.NEVER:
# Prevent the loading if insecure base protocols
ConfigBase.logger.warning(
'Including {}:// based configuration is prohibited. '
'Ignoring URL {}'.format(schema, url))
continue
# Prepare our Asset Object
results['asset'] = asset
# No cache is required because we're just lumping this in
# and associating it with the cache value we've already
# declared (prior to our recursion)
results['cache'] = False
# Recursion can never be parsed from the URL; we decrement
# it one level
results['recursion'] = self.recursion - 1
# Insecure Includes flag can never be parsed from the URL
results['insecure_includes'] = self.insecure_includes
try:
# Attempt to create an instance of our plugin using the
# parsed URL information
cfg_plugin = SCHEMA_MAP[results['schema']](**results)
except Exception as e:
# the arguments are invalid or can not be used.
self.logger.warning(
'Could not load include URL: {}'.format(url))
self.logger.debug('Loading Exception: {}'.format(str(e)))
continue
# if we reach here, we can now add this servers found
# in this configuration file to our list
self._cached_servers.extend(
cfg_plugin.servers(asset=asset))
# We no longer need our configuration object
del cfg_plugin
else:
self.logger.debug(
'Recursion limit reached; ignoring Include URL: %s' % url)
if self._cached_servers:
self.logger.info('Loaded {} entries from {}'.format(
len(self._cached_servers), self.url()))
else:
self.logger.warning(
'Failed to load Apprise configuration from {}'.format(
self.url()))
# Set the time our content was cached at
self._cached_time = time.time()
return self._cached_servers
def read(self):
"""
        This object should be implemented by the child classes
"""
return None
def expired(self):
"""
Simply returns True if the configuration should be considered
as expired or False if content should be retrieved.
"""
if isinstance(self._cached_servers, list) and self.cache:
# We have enough reason to look further into our cached content
# and verify it has not expired.
if self.cache is True:
# we have not expired, return False
return False
# Verify our cache time to determine whether we will get our
# content again.
age_in_sec = time.time() - self._cached_time
if age_in_sec <= self.cache:
# We have not expired; return False
return False
# If we reach here our configuration should be considered
# missing and/or expired.
return True
@staticmethod
def parse_url(url, verify_host=True):
"""Parses the URL and returns it broken apart into a dictionary.
This is very specific and customized for Apprise.
Args:
url (str): The URL you want to fully parse.
verify_host (:obj:`bool`, optional): a flag kept with the parsed
URL which some child classes will later use to verify SSL
keys (if SSL transactions take place). Unless under very
specific circumstances, it is strongly recomended that
you leave this default value set to True.
Returns:
A dictionary is returned containing the URL fully parsed if
successful, otherwise None is returned.
"""
results = URLBase.parse_url(url, verify_host=verify_host)
if not results:
# We're done; we failed to parse our url
return results
# Allow overriding the default config format
if 'format' in results['qsd']:
results['format'] = results['qsd'].get('format')
if results['format'] not in CONFIG_FORMATS:
URLBase.logger.warning(
'Unsupported format specified {}'.format(
results['format']))
del results['format']
# Defines the encoding of the payload
if 'encoding' in results['qsd']:
results['encoding'] = results['qsd'].get('encoding')
# Our cache value
if 'cache' in results['qsd']:
# First try to get it's integer value
try:
results['cache'] = int(results['qsd']['cache'])
except (ValueError, TypeError):
# No problem, it just isn't an integer; now treat it as a bool
# instead:
results['cache'] = parse_bool(results['qsd']['cache'])
return results
@staticmethod
def detect_config_format(content, **kwargs):
"""
Takes the specified content and attempts to detect the format type
The function returns the actual format type if detected, otherwise
it returns None
"""
# Detect Format Logic:
        # - A pound/hashtag (#) is always a comment character so we skip over
# lines matched here.
# - Detection begins on the first non-comment and non blank line
# matched.
# - If we find a string followed by a colon, we know we're dealing
# with a YAML file.
# - If we find a string that starts with a URL, or our tag
# definitions (accepting commas) followed by an equal sign we know
# we're dealing with a TEXT format.
# Define what a valid line should look like
valid_line_re = re.compile(
r'^\s*(?P<line>([;#]+(?P<comment>.*))|'
r'(?P<text>((?P<tag>[ \t,a-z0-9_-]+)=)?[a-z0-9]+://.*)|'
r'((?P<yaml>[a-z0-9]+):.*))?$', re.I)
try:
# split our content up to read line by line
content = re.split(r'\r*\n', content)
except TypeError:
# content was not expected string type
ConfigBase.logger.error(
'Invalid Apprise configuration specified.')
return None
# By default set our return value to None since we don't know
# what the format is yet
config_format = None
# iterate over each line of the file to attempt to detect it
# stop the moment a the type has been determined
for line, entry in enumerate(content, start=1):
result = valid_line_re.match(entry)
if not result:
# Invalid syntax
ConfigBase.logger.error(
'Undetectable Apprise configuration found '
'based on line {}.'.format(line))
# Take an early exit
return None
# Attempt to detect configuration
if result.group('yaml'):
config_format = ConfigFormat.YAML
ConfigBase.logger.debug(
'Detected YAML configuration '
'based on line {}.'.format(line))
break
elif result.group('text'):
config_format = ConfigFormat.TEXT
ConfigBase.logger.debug(
'Detected TEXT configuration '
'based on line {}.'.format(line))
break
# If we reach here, we have a comment entry
# Adjust default format to TEXT
config_format = ConfigFormat.TEXT
return config_format
@staticmethod
def config_parse(content, asset=None, config_format=None, **kwargs):
"""
Takes the specified config content and loads it based on the specified
config_format. If a format isn't specified, then it is auto detected.
"""
if config_format is None:
# Detect the format
config_format = ConfigBase.detect_config_format(content)
if not config_format:
# We couldn't detect configuration
ConfigBase.logger.error('Could not detect configuration')
return (list(), list())
if config_format not in CONFIG_FORMATS:
# Invalid configuration type specified
ConfigBase.logger.error(
'An invalid configuration format ({}) was specified'.format(
config_format))
return (list(), list())
# Dynamically load our parse_ function based on our config format
fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format))
# Execute our config parse function which always returns a list
return fn(content=content, asset=asset)
@staticmethod
def config_parse_text(content, asset=None):
"""
Parse the specified content as though it were a simple text file only
containing a list of URLs.
Return a tuple that looks like (servers, configs) where:
- servers contains a list of loaded notification plugins
- configs contains a list of additional configuration files
referenced.
You may also optionally associate an asset with the notification.
The file syntax is:
#
# pound/hashtag allow for line comments
#
            # One or more tags can be identified using commas (,) to separate
# them.
<Tag(s)>=<URL>
# Or you can use this format (no tags associated)
<URL>
# you can also use the keyword 'include' and identify a
# configuration location (like this file) which will be included
# as additional configuration entries when loaded.
include <ConfigURL>
"""
# A list of loaded Notification Services
servers = list()
# A list of additional configuration files referenced using
# the include keyword
configs = list()
# Define what a valid line should look like
valid_line_re = re.compile(
r'^\s*(?P<line>([;#]+(?P<comment>.*))|'
r'(\s*(?P<tags>[^=]+)=|=)?\s*'
r'(?P<url>[a-z0-9]{2,9}://.*)|'
r'include\s+(?P<config>.+))?\s*$', re.I)
try:
# split our content up to read line by line
content = re.split(r'\r*\n', content)
except TypeError:
# content was not expected string type
ConfigBase.logger.error(
'Invalid Apprise TEXT based configuration specified.')
return (list(), list())
for line, entry in enumerate(content, start=1):
result = valid_line_re.match(entry)
if not result:
# Invalid syntax
ConfigBase.logger.error(
'Invalid Apprise TEXT configuration format found '
'{} on line {}.'.format(entry, line))
# Assume this is a file we shouldn't be parsing. It's owner
# can read the error printed to screen and take action
# otherwise.
return (list(), list())
url, config = result.group('url'), result.group('config')
if not (url or config):
# Comment/empty line; do nothing
continue
if config:
ConfigBase.logger.debug('Include URL: {}'.format(config))
# Store our include line
configs.append(config.strip())
continue
# Acquire our url tokens
results = plugins.url_to_dict(url)
if results is None:
# Failed to parse the server URL
ConfigBase.logger.warning(
'Unparseable URL {} on line {}.'.format(url, line))
continue
# Build a list of tags to associate with the newly added
# notifications if any were set
results['tag'] = set(parse_list(result.group('tags')))
# Prepare our Asset Object
results['asset'] = \
asset if isinstance(asset, AppriseAsset) else AppriseAsset()
try:
# Attempt to create an instance of our plugin using the
# parsed URL information
plugin = plugins.SCHEMA_MAP[results['schema']](**results)
# Create log entry of loaded URL
ConfigBase.logger.debug('Loaded URL: {}'.format(plugin.url()))
except Exception as e:
# the arguments are invalid or can not be used.
ConfigBase.logger.warning(
'Could not load URL {} on line {}.'.format(
url, line))
ConfigBase.logger.debug('Loading Exception: %s' % str(e))
continue
# if we reach here, we successfully loaded our data
servers.append(plugin)
# Return what was loaded
return (servers, configs)
@staticmethod
def config_parse_yaml(content, asset=None):
"""
Parse the specified content as though it were a yaml file
specifically formatted for Apprise.
Return a tuple that looks like (servers, configs) where:
- servers contains a list of loaded notification plugins
- configs contains a list of additional configuration files
referenced.
You may optionally associate an asset with the notification.
"""
# A list of loaded Notification Services
servers = list()
# A list of additional configuration files referenced using
# the include keyword
configs = list()
try:
# Load our data (safely)
result = yaml.load(content, Loader=yaml.SafeLoader)
except (AttributeError,
yaml.parser.ParserError,
yaml.error.MarkedYAMLError) as e:
# Invalid content
ConfigBase.logger.error(
'Invalid Apprise YAML data specified.')
ConfigBase.logger.debug(
'YAML Exception:{}{}'.format(os.linesep, e))
return (list(), list())
if not isinstance(result, dict):
# Invalid content
ConfigBase.logger.error(
'Invalid Apprise YAML based configuration specified.')
return (list(), list())
# YAML Version
version = result.get('version', 1)
if version != 1:
# Invalid syntax
ConfigBase.logger.error(
'Invalid Apprise YAML version specified {}.'.format(version))
return (list(), list())
#
# global asset object
#
asset = asset if isinstance(asset, AppriseAsset) else AppriseAsset()
tokens = result.get('asset', None)
if tokens and isinstance(tokens, dict):
for k, v in tokens.items():
if k.startswith('_') or k.endswith('_'):
# Entries are considered reserved if they start or end
# with an underscore
ConfigBase.logger.warning(
'Ignored asset key "{}".'.format(k))
continue
if not (hasattr(asset, k) and
isinstance(getattr(asset, k),
(bool, six.string_types))):
# We can't set a function or non-string set value
ConfigBase.logger.warning(
'Invalid asset key "{}".'.format(k))
continue
if v is None:
# Convert to an empty string
v = ''
if (isinstance(v, (bool, six.string_types))
and isinstance(getattr(asset, k), bool)):
# If the object in the Asset is a boolean, then
# we want to convert the specified string to
# match that.
setattr(asset, k, parse_bool(v))
elif isinstance(v, six.string_types):
# Set our asset object with the new value
setattr(asset, k, v.strip())
else:
# we must set strings with a string
ConfigBase.logger.warning(
'Invalid asset value to "{}".'.format(k))
continue
#
# global tag root directive
#
global_tags = set()
tags = result.get('tag', None)
if tags and isinstance(tags, (list, tuple, six.string_types)):
# Store any preset tags
global_tags = set(parse_list(tags))
#
# include root directive
#
includes = result.get('include', None)
if isinstance(includes, six.string_types):
# Support a single inline string or multiple ones separated by a
# comma and/or space
includes = parse_urls(includes)
elif not isinstance(includes, (list, tuple)):
# Not a problem; we simply have no includes
includes = list()
# Iterate over each config URL
for no, url in enumerate(includes):
if isinstance(url, six.string_types):
# Support a single inline string or multiple ones separated by
# a comma and/or space
configs.extend(parse_urls(url))
elif isinstance(url, dict):
# Store the url and ignore arguments associated
configs.extend(u for u in url.keys())
#
# urls root directive
#
urls = result.get('urls', None)
if not isinstance(urls, (list, tuple)):
# Not a problem; we simply have no urls
urls = list()
# Iterate over each URL
for no, url in enumerate(urls):
# Our results object is what we use to instantiate our object if
# we can. Reset it to None on each iteration
results = list()
if isinstance(url, six.string_types):
# We're just a simple URL string...
schema = GET_SCHEMA_RE.match(url)
if schema is None:
                    # Log invalid entries so that the maintainer of the
                    # config file at least has something to take action
# with.
ConfigBase.logger.warning(
'Invalid URL {}, entry #{}'.format(url, no + 1))
continue
# We found a valid schema worthy of tracking; store it's
# details:
_results = plugins.url_to_dict(url)
if _results is None:
ConfigBase.logger.warning(
'Unparseable URL {}, entry #{}'.format(
url, no + 1))
continue
# add our results to our global set
results.append(_results)
elif isinstance(url, dict):
# We are a url string with additional unescaped options. In
# this case we want to iterate over all of our options so we
# can at least tell the end user what entries were ignored
# due to errors
if six.PY2:
it = url.iteritems()
else: # six.PY3
it = iter(url.items())
# Track the URL to-load
_url = None
# Track last acquired schema
schema = None
for key, tokens in it:
# Test our schema
_schema = GET_SCHEMA_RE.match(key)
if _schema is None:
                        # Log invalid entries so that the maintainer of the
                        # config file at least has something to take action
# with.
ConfigBase.logger.warning(
'Ignored entry {} found under urls, entry #{}'
.format(key, no + 1))
continue
# Store our schema
schema = _schema.group('schema').lower()
# Store our URL and Schema Regex
_url = key
if _url is None:
# the loop above failed to match anything
ConfigBase.logger.warning(
'Unsupported URL, entry #{}'.format(no + 1))
continue
_results = plugins.url_to_dict(_url)
if _results is None:
# Setup dictionary
_results = {
# Minimum requirements
'schema': schema,
}
if isinstance(tokens, (list, tuple, set)):
# populate and/or override any results populated by
# parse_url()
for entries in tokens:
# Copy ourselves a template of our parsed URL as a base
# to work with
r = _results.copy()
# We are a url string with additional unescaped options
if isinstance(entries, dict):
if six.PY2:
_url, tokens = next(url.iteritems())
else: # six.PY3
_url, tokens = next(iter(url.items()))
# Tags you just can't over-ride
if 'schema' in entries:
del entries['schema']
# support our special tokens (if they're present)
if schema in plugins.SCHEMA_MAP:
entries = ConfigBase.__extract_special_tokens(
schema, entries)
# Extend our dictionary with our new entries
r.update(entries)
# add our results to our global set
results.append(r)
elif isinstance(tokens, dict):
# support our special tokens (if they're present)
if schema in plugins.SCHEMA_MAP:
tokens = ConfigBase.__extract_special_tokens(
schema, tokens)
# Copy ourselves a template of our parsed URL as a base to
# work with
r = _results.copy()
# add our result set
r.update(tokens)
# add our results to our global set
results.append(r)
else:
# add our results to our global set
results.append(_results)
else:
# Unsupported
ConfigBase.logger.warning(
'Unsupported Apprise YAML entry #{}'.format(no + 1))
continue
# Track our entries
entry = 0
while len(results):
# Increment our entry count
entry += 1
# Grab our first item
_results = results.pop(0)
# tag is a special keyword that is managed by Apprise object.
# The below ensures our tags are set correctly
if 'tag' in _results:
# Tidy our list up
_results['tag'] = \
set(parse_list(_results['tag'])) | global_tags
else:
# Just use the global settings
_results['tag'] = global_tags
for key in list(_results.keys()):
# Strip out any tokens we know that we can't accept and
# warn the user
match = VALID_TOKEN.match(key)
if not match:
ConfigBase.logger.warning(
'Ignoring invalid token ({}) found in YAML '
'configuration entry #{}, item #{}'
.format(key, no + 1, entry))
del _results[key]
ConfigBase.logger.trace(
'URL #{}: {} unpacked as:{}{}'
.format(no + 1, url, os.linesep, os.linesep.join(
['{}="{}"'.format(k, a)
for k, a in _results.items()])))
# Prepare our Asset Object
_results['asset'] = asset
try:
# Attempt to create an instance of our plugin using the
# parsed URL information
plugin = plugins.SCHEMA_MAP[_results['schema']](**_results)
# Create log entry of loaded URL
ConfigBase.logger.debug(
'Loaded URL: {}'.format(plugin.url()))
except Exception as e:
# the arguments are invalid or can not be used.
ConfigBase.logger.warning(
'Could not load Apprise YAML configuration '
'entry #{}, item #{}'
.format(no + 1, entry))
ConfigBase.logger.debug('Loading Exception: %s' % str(e))
continue
# if we reach here, we successfully loaded our data
servers.append(plugin)
return (servers, configs)
def pop(self, index=-1):
"""
Removes an indexed Notification Service from the stack and returns it.
By default, the last element of the list is removed.
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
# Pop the element off of the stack
return self._cached_servers.pop(index)
@staticmethod
def __extract_special_tokens(schema, tokens):
"""
This function takes a list of tokens and updates them to no longer
include any special tokens such as +,-, and :
- schema must be a valid schema of a supported plugin type
- tokens must be a dictionary containing the yaml entries parsed.
The idea here is we can post process a set of tokens provided in
a YAML file where the user provided some of the special keywords.
        We effectively look up what these keywords map to and merge them into
        the values they're expected to populate.
"""
# Create a copy of our dictionary
tokens = tokens.copy()
for kw, meta in plugins.SCHEMA_MAP[schema]\
.template_kwargs.items():
# Determine our prefix:
prefix = meta.get('prefix', '+')
# Detect any matches
matches = \
{k[1:]: str(v) for k, v in tokens.items()
if k.startswith(prefix)}
if not matches:
# we're done with this entry
continue
if not isinstance(tokens.get(kw, None), dict):
# Invalid; correct it
tokens[kw] = dict()
# strip out processed tokens
tokens = {k: v for k, v in tokens.items()
if not k.startswith(prefix)}
# Update our entries
tokens[kw].update(matches)
# Return our tokens
return tokens
def __getitem__(self, index):
"""
Returns the indexed server entry associated with the loaded
notification servers
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
return self._cached_servers[index]
def __iter__(self):
"""
Returns an iterator to our server list
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
return iter(self._cached_servers)
def __len__(self):
"""
Returns the total number of servers loaded
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
return len(self._cached_servers)
def __bool__(self):
"""
        Allows the Apprise object to be wrapped in a Python 3.x based 'if
statement'. True is returned if our content was downloaded correctly.
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
return True if self._cached_servers else False
def __nonzero__(self):
"""
        Allows the Apprise object to be wrapped in a Python 2.x based 'if
statement'. True is returned if our content was downloaded correctly.
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
return True if self._cached_servers else False
| [
"re.split",
"os.path.isabs",
"re.compile",
"os.path.join",
"yaml.load",
"os.getcwd",
"time.time"
] | [((1658, 1707), 're.compile', 're.compile', (['"""(?P<token>[a-z0-9][a-z0-9_]+)"""', 're.I'], {}), "('(?P<token>[a-z0-9][a-z0-9_]+)', re.I)\n", (1668, 1707), False, 'import re\n'), ((2676, 2687), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2685, 2687), False, 'import os\n'), ((12727, 12738), 'time.time', 'time.time', ([], {}), '()\n', (12736, 12738), False, 'import time\n'), ((16615, 16763), 're.compile', 're.compile', (['"""^\\\\s*(?P<line>([;#]+(?P<comment>.*))|(?P<text>((?P<tag>[ \\\\t,a-z0-9_-]+)=)?[a-z0-9]+://.*)|((?P<yaml>[a-z0-9]+):.*))?$"""', 're.I'], {}), "(\n '^\\\\s*(?P<line>([;#]+(?P<comment>.*))|(?P<text>((?P<tag>[ \\\\t,a-z0-9_-]+)=)?[a-z0-9]+://.*)|((?P<yaml>[a-z0-9]+):.*))?$'\n , re.I)\n", (16625, 16763), False, 'import re\n'), ((21008, 21164), 're.compile', 're.compile', (['"""^\\\\s*(?P<line>([;#]+(?P<comment>.*))|(\\\\s*(?P<tags>[^=]+)=|=)?\\\\s*(?P<url>[a-z0-9]{2,9}://.*)|include\\\\s+(?P<config>.+))?\\\\s*$"""', 're.I'], {}), "(\n '^\\\\s*(?P<line>([;#]+(?P<comment>.*))|(\\\\s*(?P<tags>[^=]+)=|=)?\\\\s*(?P<url>[a-z0-9]{2,9}://.*)|include\\\\s+(?P<config>.+))?\\\\s*$'\n , re.I)\n", (21018, 21164), False, 'import re\n'), ((7534, 7545), 'time.time', 'time.time', ([], {}), '()\n', (7543, 7545), False, 'import time\n'), ((16890, 16918), 're.split', 're.split', (['"""\\\\r*\\\\n"""', 'content'], {}), "('\\\\r*\\\\n', content)\n", (16898, 16918), False, 'import re\n'), ((21304, 21332), 're.split', 're.split', (['"""\\\\r*\\\\n"""', 'content'], {}), "('\\\\r*\\\\n', content)\n", (21312, 21332), False, 'import re\n'), ((24750, 24792), 'yaml.load', 'yaml.load', (['content'], {'Loader': 'yaml.SafeLoader'}), '(content, Loader=yaml.SafeLoader)\n', (24759, 24792), False, 'import yaml\n'), ((13514, 13525), 'time.time', 'time.time', ([], {}), '()\n', (13523, 13525), False, 'import time\n'), ((8890, 8908), 'os.path.isabs', 'os.path.isabs', (['url'], {}), '(url)\n', (8903, 8908), False, 'import os\n'), ((9060, 9095), 'os.path.join', 'os.path.join', (['self.config_path', 'url'], {}), '(self.config_path, url)\n', (9072, 9095), False, 'import os\n')] |
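A short sketch (not from the source) of the format auto-detection described above; the import path and the example URLs are assumed:
from apprise.config.ConfigBase import ConfigBase
text_cfg = "devops=json://example.com\n"
yaml_cfg = "urls:\n  - json://example.com\n"
# The first non-comment line decides the format (TEXT vs YAML).
print(ConfigBase.detect_config_format(text_cfg))   # expected: ConfigFormat.TEXT
print(ConfigBase.detect_config_format(yaml_cfg))   # expected: ConfigFormat.YAML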
import sys
import subprocess
from subprocess import Popen, PIPE
AV_LOG_QUIET = "quiet"
AV_LOG_PANIC = "panic"
AV_LOG_FATAL = "fatal"
AV_LOG_ERROR = "error"
AV_LOG_WARNING = "warning"
AV_LOG_INFO = "info"
AV_LOG_VERBOSE = "verbose"
AV_LOG_DEBUG = "debug"
ffmpeg_loglevel = AV_LOG_ERROR
IS_WIN32 = 'win32' in str(sys.platform).lower()
SUBPROCESS_ARGS = {}
if IS_WIN32:
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = subprocess.CREATE_NEW_CONSOLE | subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
SUBPROCESS_ARGS['startupinfo'] = startupinfo
def popen_ffmpeg(inner_args):
cmd = [
'ffmpeg',
*inner_args,
# logging
'-loglevel', ffmpeg_loglevel,
'-hide_banner',
]
process = Popen(cmd, stdout=PIPE, stderr=PIPE, **SUBPROCESS_ARGS)
stdout, stderr = process.communicate()
print(stderr.decode(), end='', file=sys.stderr)
return stdout, stderr | [
"subprocess.Popen",
"subprocess.STARTUPINFO"
] | [((401, 425), 'subprocess.STARTUPINFO', 'subprocess.STARTUPINFO', ([], {}), '()\n', (423, 425), False, 'import subprocess\n'), ((801, 856), 'subprocess.Popen', 'Popen', (['cmd'], {'stdout': 'PIPE', 'stderr': 'PIPE'}), '(cmd, stdout=PIPE, stderr=PIPE, **SUBPROCESS_ARGS)\n', (806, 856), False, 'from subprocess import Popen, PIPE\n')] |
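A minimal usage sketch (file names are hypothetical): popen_ffmpeg() only prepends 'ffmpeg' and appends the logging flags, so the caller supplies the normal ffmpeg arguments.
# Extract the audio track of a (hypothetical) input file.
stdout, stderr = popen_ffmpeg(['-y', '-i', 'input.mp4', 'output.wav'])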
from setuptools import setup
setup(
name="nmn-iwp",
version="0.1",
keywords="",
packages=["vr", "vr.models"]
)
| [
"setuptools.setup"
] | [((30, 109), 'setuptools.setup', 'setup', ([], {'name': '"""nmn-iwp"""', 'version': '"""0.1"""', 'keywords': '""""""', 'packages': "['vr', 'vr.models']"}), "(name='nmn-iwp', version='0.1', keywords='', packages=['vr', 'vr.models'])\n", (35, 109), False, 'from setuptools import setup\n')] |
""" Represent a triangulated surface using a 3D boolean grid"""
import logging
import numpy as np
from rpl.tools.ray_tracing.bsp_tree_poly import BSP_Element
from rpl.tools.geometry import geom_utils
import data_io
class BSP_Grid(object):
def __init__(self, node_array, tris, allocate_step=100000):
"""
Store the triangles with an enumeration so that even when they are subdivided their
identity is not lost.
"""
tri_nums = np.arange(len(tris), dtype=np.int32).reshape((len(tris), 1))
minus_ones = -np.ones((len(tris), 6), dtype=np.int32)
self.tris = np.hstack((tris, minus_ones, tri_nums))
self.allocate_step = allocate_step
self.node_array = node_array # Reference to the full list of nodes
self._resize()
self.next_free = len(node_array)
self.split_cache = np.zeros(len(self.tris), dtype=np.int32)
def _resize(self):
"""
Increase node array size by the allocate_step amount.
"""
self.array_size = len(self.node_array) + self.allocate_step
self.node_array = np.concatenate((self.node_array, np.zeros((self.allocate_step, 3))))
def add_node(self, node):
"""
Adds a new node to the end of the node array (expanding if required). Returns the index of
the newly added node.
"""
if self.next_free == self.array_size:
self._resize()
self.node_array[self.next_free] = node
self.next_free += 1
return self.next_free - 1
def prepare_add(self, num_add_nodes):
"""
Make sure that ``num_add_nodes`` can be added later without needing a resize.
Useful if adding nodes from within cython where resizing is tricky.
"""
if self.next_free + num_add_nodes >= self.array_size:
self._resize()
return self.next_free
def make_grid(veh_surfs, settings):
"""
Make coordinates of voxelated grid based on overall list of vehicle surfaces
"""
## Find overall bounding box
x_min, x_max = 1e30, -1e30
y_min, y_max = 1e30, -1e30
z_min, z_max = 1e30, -1e30
for key, veh_surf in veh_surfs.items():
x_min, x_max = min(x_min, np.min(veh_surf["x"])), max(x_max, np.max(veh_surf["x"]))
y_min, y_max = min(y_min, np.min(veh_surf["y"])), max(y_max, np.max(veh_surf["y"]))
z_min, z_max = min(z_min, np.min(veh_surf["z"])), max(z_max, np.max(veh_surf["z"]))
x_min, x_max = x_min - settings["voxel_size"], x_max + settings["voxel_size"]
y_min, y_max = y_min - settings["voxel_size"], y_max + settings["voxel_size"]
z_min, z_max = z_min - settings["voxel_size"], z_max + settings["voxel_size"]
###########################################
# Create the uniformly spaced grid points
x_grid = np.arange(x_min, x_max + settings["voxel_size"], settings["voxel_size"])
y_grid = np.arange(y_min, y_max + settings["voxel_size"], settings["voxel_size"])
z_grid = np.arange(z_min, z_max + settings["voxel_size"], settings["voxel_size"])
return x_grid, y_grid, z_grid
def convert_geom(veh_surf, tr_mat):
"""
Rotate nodes using provided transformation matrix; convert xyz node dict to nodes array
"""
veh_surf["nodes"] = np.vstack((veh_surf["x"], veh_surf["y"], veh_surf["z"])).T
veh_surf['nodes'] = np.dot(veh_surf['nodes'], tr_mat[:3, :3])
veh_surf["x"] = veh_surf['nodes'][:, 0]
veh_surf["y"] = veh_surf['nodes'][:, 1]
veh_surf["z"] = veh_surf['nodes'][:, 2]
return veh_surf
def find_occupied_voxels(surf, surf_mask, voxel_data):
"""
    Voxels with any triangle from ``surf`` are considered occupied and or'ed with ``surf_mask``.
    If ``voxel_data["value"]`` is None a new voxel occupancy array is created before being filled.
"""
nodes = surf["nodes"]
tris = surf["tris"]
x_pts, y_pts, z_pts = [voxel_data[k] for k in ("x_grid", "y_grid", "z_grid")]
vox_size = voxel_data["vox_size"]
## Find the local extents of this part
min_x, max_x = np.min(surf["x"]) - vox_size, np.max(surf["x"]) + vox_size
min_y, max_y = np.min(surf["y"]) - vox_size, np.max(surf["y"]) + vox_size
min_z, max_z = np.min(surf["z"]) - vox_size, np.max(surf["z"]) + vox_size
b_tree = BSP_Grid(nodes, tris)
# Create BSP tree elements- we're not using a tree, but we are using some of the functions
b_x_root = BSP_Element(b_tree.tris, b_tree)
size_i, size_j, size_k = len(x_pts), len(y_pts), len(z_pts)
## Create the occupied voxels if none were supplied
if voxel_data["value"] is None:
voxel_data["value"] = np.zeros((size_i - 1, size_j - 1, size_k - 1), dtype=np.uint32)
occupied_voxels = voxel_data["value"]
## The [1:] is because to make n voxels in a given direction we need n-1 splits
for i, x_pos in enumerate(x_pts[1:]):
if x_pos < min_x: continue
if x_pos > max_x: break
b_above_x, b_below_x = b_x_root.split_at(0, x_pos)
b_y_root = b_below_x
for j, y_pos in enumerate(y_pts[1:]):
if b_y_root is None:
break
if y_pos < min_y: continue
if y_pos > max_y: break
b_above_y, b_below_y = b_y_root.split_at(1, y_pos)
b_z_root = b_below_y
for k, z_pos in enumerate(z_pts[1:]):
if b_z_root is None:
break
if z_pos < min_z: continue
if z_pos > max_z: break
b_above_z, b_below_z = b_z_root.split_at(2, z_pos)
if not (b_below_z and (len(b_below_z.tris) == 0)):
## There is at least part of triangle here so mark as occupied
occupied_voxels[i, j, k] |= surf_mask
b_z_root = b_above_z
b_y_root = b_above_y
b_x_root = b_above_x
return voxel_data
#############
# Main code
def main(vehicle_comp_coords, tr_mat, voxel_masks, settings):
"""
Perform voxelization for all vehicle geometries in a list of parts. Combine on a uniform grid.
"""
for key, veh_surf in vehicle_comp_coords.items():
# Convert coordinates and find overall best bounding box
veh_surf = convert_geom(veh_surf, tr_mat)
x_grid, y_grid, z_grid = make_grid(vehicle_comp_coords, settings)
voxel_data = {"x_grid": x_grid,
"y_grid": y_grid,
"z_grid": z_grid,
"vox_size": settings["voxel_size"],
"csys_trans": tr_mat,
"value": None}
for key, veh_surf in vehicle_comp_coords.items():
# Build up the voxel_data
logging.debug("Sampling component: {}".format(key))
## Default mask is 1 for anything not in an identified set
surf_mask = 1
for mask, geo_set in voxel_masks.items():
if veh_surf['part_class'] in geo_set:
surf_mask |= mask
voxel_data = find_occupied_voxels(veh_surf, surf_mask, voxel_data)
return voxel_data
if __name__ == "__main__":
from rpl.tools.api import test_bench_api as tb_api
SETTINGS = tb_api.load_settings("settings.js")
DOORS = {'Hatch_Assembly_Rear_Ramp', 'Hatch_Assembly_Personnel_Door'}
HATCHES = {'Hatch_Assembly_Driver_Commander', 'Hatch_Assembly_Cargo'}
HULLS = {"Hull_Assembly_Parametric", 'Hull_Assembly_Example_With_Connector'}
MANIKINS = {"Manikin"}
# Special labels applied to specific types of voxels
VOXEL_LABELS = {2: HULLS,
4: DOORS,
8: HATCHES,
16: MANIKINS}
vehicle_surfs = tb_api.load_geometry(tb_api.get_all_geom_set() - MANIKINS, single_file=False)
# Modify node coords so object aligns with cartesian axes of occ voxel grid, +z=up
# Vector to rotate around is cross product of current z axis and sfc normal
veh_up = np.array([0., 1., 0.])
rot_around = np.cross(veh_up, np.array([0, 0, 1]))
rot_ang = -np.arccos(veh_up[2])
tr_mat = geom_utils.rotation_about_vector(rot_around, rot_ang)
# voxel_data = main(vehicle_surfs, tr_mat, VOXEL_LABELS, SETTINGS)
vox_veh_folder = r"voxelated_models/vehicles/{}/{}".format(SETTINGS["run_id"],
SETTINGS["voxel_size"])
vox_veh_file = "voxels_{}_vox{}_hacked".format(SETTINGS["run_id"],
SETTINGS["voxel_size"])
try:
voxel_data = data_io.load_array(vox_veh_folder, vox_veh_file, True)
except:
voxel_data = main(vehicle_surfs, tr_mat, VOXEL_LABELS, SETTINGS)
from mayavi import mlab
xo, yo, zo = np.where(voxel_data["value"] == 1)
plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo],
voxel_data["y_grid"][yo],
voxel_data["z_grid"][zo],
color=(0.9, 0.9, 0.9),
scale_mode="none", scale_factor=voxel_data["vox_size"],
mode='cube', opacity=1)
xo, yo, zo = np.where(voxel_data["value"] & 2)
plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo],
voxel_data["y_grid"][yo],
voxel_data["z_grid"][zo],
color=(1, 1, 1),
scale_mode="none", scale_factor=voxel_data["vox_size"],
mode='cube', opacity=0.05)
xo, yo, zo = np.where(voxel_data["value"] & 4)
plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo],
voxel_data["y_grid"][yo],
voxel_data["z_grid"][zo],
color=(1.0, 0.5, 0.5),
scale_mode="none", scale_factor=voxel_data["vox_size"],
mode='cube', opacity=1)
xo, yo, zo = np.where(voxel_data["value"] & 8)
plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo],
voxel_data["y_grid"][yo],
voxel_data["z_grid"][zo],
color=(0.6, 0.6, 1.0),
scale_mode="none", scale_factor=voxel_data["vox_size"],
mode='cube', opacity=1)
# No manikins included, no need to plot them
# xo, yo, zo = np.where(voxel_data["value"] & 16)
# plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo],
# voxel_data["y_grid"][yo],
# voxel_data["z_grid"][zo],
# color=(0.5, 1.0, 0.8),
# scale_mode="none", scale_factor=voxel_data["vox_size"],
# mode='cube', opacity=1.0)
mlab.show()
# Save the voxelated model of the vehicle (sans door and other excluded parts)
data_io.save_multi_array(vox_veh_folder, vox_veh_file, voxel_data) | [
"mayavi.mlab.points3d",
"numpy.arccos",
"data_io.save_multi_array",
"mayavi.mlab.show",
"rpl.tools.geometry.geom_utils.rotation_about_vector",
"numpy.where",
"numpy.hstack",
"numpy.min",
"numpy.max",
"rpl.tools.ray_tracing.bsp_tree_poly.BSP_Element",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"numpy.vstack",
"rpl.tools.api.test_bench_api.load_settings",
"data_io.load_array",
"rpl.tools.api.test_bench_api.get_all_geom_set",
"numpy.arange"
] | [((2930, 3002), 'numpy.arange', 'np.arange', (['x_min', "(x_max + settings['voxel_size'])", "settings['voxel_size']"], {}), "(x_min, x_max + settings['voxel_size'], settings['voxel_size'])\n", (2939, 3002), True, 'import numpy as np\n'), ((3017, 3089), 'numpy.arange', 'np.arange', (['y_min', "(y_max + settings['voxel_size'])", "settings['voxel_size']"], {}), "(y_min, y_max + settings['voxel_size'], settings['voxel_size'])\n", (3026, 3089), True, 'import numpy as np\n'), ((3104, 3176), 'numpy.arange', 'np.arange', (['z_min', "(z_max + settings['voxel_size'])", "settings['voxel_size']"], {}), "(z_min, z_max + settings['voxel_size'], settings['voxel_size'])\n", (3113, 3176), True, 'import numpy as np\n'), ((3475, 3516), 'numpy.dot', 'np.dot', (["veh_surf['nodes']", 'tr_mat[:3, :3]'], {}), "(veh_surf['nodes'], tr_mat[:3, :3])\n", (3481, 3516), True, 'import numpy as np\n'), ((4565, 4597), 'rpl.tools.ray_tracing.bsp_tree_poly.BSP_Element', 'BSP_Element', (['b_tree.tris', 'b_tree'], {}), '(b_tree.tris, b_tree)\n', (4576, 4597), False, 'from rpl.tools.ray_tracing.bsp_tree_poly import BSP_Element\n'), ((7387, 7422), 'rpl.tools.api.test_bench_api.load_settings', 'tb_api.load_settings', (['"""settings.js"""'], {}), "('settings.js')\n", (7407, 7422), True, 'from rpl.tools.api import test_bench_api as tb_api\n'), ((8159, 8184), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (8167, 8184), True, 'import numpy as np\n'), ((8289, 8342), 'rpl.tools.geometry.geom_utils.rotation_about_vector', 'geom_utils.rotation_about_vector', (['rot_around', 'rot_ang'], {}), '(rot_around, rot_ang)\n', (8321, 8342), False, 'from rpl.tools.geometry import geom_utils\n'), ((8967, 9001), 'numpy.where', 'np.where', (["(voxel_data['value'] == 1)"], {}), "(voxel_data['value'] == 1)\n", (8975, 9001), True, 'import numpy as np\n'), ((9022, 9224), 'mayavi.mlab.points3d', 'mlab.points3d', (["voxel_data['x_grid'][xo]", "voxel_data['y_grid'][yo]", "voxel_data['z_grid'][zo]"], {'color': '(0.9, 0.9, 0.9)', 'scale_mode': '"""none"""', 'scale_factor': "voxel_data['vox_size']", 'mode': '"""cube"""', 'opacity': '(1)'}), "(voxel_data['x_grid'][xo], voxel_data['y_grid'][yo],\n voxel_data['z_grid'][zo], color=(0.9, 0.9, 0.9), scale_mode='none',\n scale_factor=voxel_data['vox_size'], mode='cube', opacity=1)\n", (9035, 9224), False, 'from mayavi import mlab\n'), ((9407, 9440), 'numpy.where', 'np.where', (["(voxel_data['value'] & 2)"], {}), "(voxel_data['value'] & 2)\n", (9415, 9440), True, 'import numpy as np\n'), ((9461, 9660), 'mayavi.mlab.points3d', 'mlab.points3d', (["voxel_data['x_grid'][xo]", "voxel_data['y_grid'][yo]", "voxel_data['z_grid'][zo]"], {'color': '(1, 1, 1)', 'scale_mode': '"""none"""', 'scale_factor': "voxel_data['vox_size']", 'mode': '"""cube"""', 'opacity': '(0.05)'}), "(voxel_data['x_grid'][xo], voxel_data['y_grid'][yo],\n voxel_data['z_grid'][zo], color=(1, 1, 1), scale_mode='none',\n scale_factor=voxel_data['vox_size'], mode='cube', opacity=0.05)\n", (9474, 9660), False, 'from mayavi import mlab\n'), ((9843, 9876), 'numpy.where', 'np.where', (["(voxel_data['value'] & 4)"], {}), "(voxel_data['value'] & 4)\n", (9851, 9876), True, 'import numpy as np\n'), ((9897, 10099), 'mayavi.mlab.points3d', 'mlab.points3d', (["voxel_data['x_grid'][xo]", "voxel_data['y_grid'][yo]", "voxel_data['z_grid'][zo]"], {'color': '(1.0, 0.5, 0.5)', 'scale_mode': '"""none"""', 'scale_factor': "voxel_data['vox_size']", 'mode': '"""cube"""', 'opacity': '(1)'}), "(voxel_data['x_grid'][xo], voxel_data['y_grid'][yo],\n 
voxel_data['z_grid'][zo], color=(1.0, 0.5, 0.5), scale_mode='none',\n scale_factor=voxel_data['vox_size'], mode='cube', opacity=1)\n", (9910, 10099), False, 'from mayavi import mlab\n'), ((10282, 10315), 'numpy.where', 'np.where', (["(voxel_data['value'] & 8)"], {}), "(voxel_data['value'] & 8)\n", (10290, 10315), True, 'import numpy as np\n'), ((10336, 10538), 'mayavi.mlab.points3d', 'mlab.points3d', (["voxel_data['x_grid'][xo]", "voxel_data['y_grid'][yo]", "voxel_data['z_grid'][zo]"], {'color': '(0.6, 0.6, 1.0)', 'scale_mode': '"""none"""', 'scale_factor': "voxel_data['vox_size']", 'mode': '"""cube"""', 'opacity': '(1)'}), "(voxel_data['x_grid'][xo], voxel_data['y_grid'][yo],\n voxel_data['z_grid'][zo], color=(0.6, 0.6, 1.0), scale_mode='none',\n scale_factor=voxel_data['vox_size'], mode='cube', opacity=1)\n", (10349, 10538), False, 'from mayavi import mlab\n'), ((11212, 11223), 'mayavi.mlab.show', 'mlab.show', ([], {}), '()\n', (11221, 11223), False, 'from mayavi import mlab\n'), ((11315, 11381), 'data_io.save_multi_array', 'data_io.save_multi_array', (['vox_veh_folder', 'vox_veh_file', 'voxel_data'], {}), '(vox_veh_folder, vox_veh_file, voxel_data)\n', (11339, 11381), False, 'import data_io\n'), ((635, 674), 'numpy.hstack', 'np.hstack', (['(tris, minus_ones, tri_nums)'], {}), '((tris, minus_ones, tri_nums))\n', (644, 674), True, 'import numpy as np\n'), ((3391, 3447), 'numpy.vstack', 'np.vstack', (["(veh_surf['x'], veh_surf['y'], veh_surf['z'])"], {}), "((veh_surf['x'], veh_surf['y'], veh_surf['z']))\n", (3400, 3447), True, 'import numpy as np\n'), ((4792, 4855), 'numpy.zeros', 'np.zeros', (['(size_i - 1, size_j - 1, size_k - 1)'], {'dtype': 'np.uint32'}), '((size_i - 1, size_j - 1, size_k - 1), dtype=np.uint32)\n', (4800, 4855), True, 'import numpy as np\n'), ((8217, 8236), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (8225, 8236), True, 'import numpy as np\n'), ((8254, 8274), 'numpy.arccos', 'np.arccos', (['veh_up[2]'], {}), '(veh_up[2])\n', (8263, 8274), True, 'import numpy as np\n'), ((8772, 8826), 'data_io.load_array', 'data_io.load_array', (['vox_veh_folder', 'vox_veh_file', '(True)'], {}), '(vox_veh_folder, vox_veh_file, True)\n', (8790, 8826), False, 'import data_io\n'), ((4192, 4209), 'numpy.min', 'np.min', (["surf['x']"], {}), "(surf['x'])\n", (4198, 4209), True, 'import numpy as np\n'), ((4222, 4239), 'numpy.max', 'np.max', (["surf['x']"], {}), "(surf['x'])\n", (4228, 4239), True, 'import numpy as np\n'), ((4271, 4288), 'numpy.min', 'np.min', (["surf['y']"], {}), "(surf['y'])\n", (4277, 4288), True, 'import numpy as np\n'), ((4301, 4318), 'numpy.max', 'np.max', (["surf['y']"], {}), "(surf['y'])\n", (4307, 4318), True, 'import numpy as np\n'), ((4350, 4367), 'numpy.min', 'np.min', (["surf['z']"], {}), "(surf['z'])\n", (4356, 4367), True, 'import numpy as np\n'), ((4380, 4397), 'numpy.max', 'np.max', (["surf['z']"], {}), "(surf['z'])\n", (4386, 4397), True, 'import numpy as np\n'), ((7917, 7942), 'rpl.tools.api.test_bench_api.get_all_geom_set', 'tb_api.get_all_geom_set', ([], {}), '()\n', (7940, 7942), True, 'from rpl.tools.api import test_bench_api as tb_api\n'), ((1187, 1220), 'numpy.zeros', 'np.zeros', (['(self.allocate_step, 3)'], {}), '((self.allocate_step, 3))\n', (1195, 1220), True, 'import numpy as np\n'), ((2320, 2341), 'numpy.min', 'np.min', (["veh_surf['x']"], {}), "(veh_surf['x'])\n", (2326, 2341), True, 'import numpy as np\n'), ((2355, 2376), 'numpy.max', 'np.max', (["veh_surf['x']"], {}), "(veh_surf['x'])\n", (2361, 2376), True, 'import numpy 
as np\n'), ((2413, 2434), 'numpy.min', 'np.min', (["veh_surf['y']"], {}), "(veh_surf['y'])\n", (2419, 2434), True, 'import numpy as np\n'), ((2448, 2469), 'numpy.max', 'np.max', (["veh_surf['y']"], {}), "(veh_surf['y'])\n", (2454, 2469), True, 'import numpy as np\n'), ((2506, 2527), 'numpy.min', 'np.min', (["veh_surf['z']"], {}), "(veh_surf['z'])\n", (2512, 2527), True, 'import numpy as np\n'), ((2541, 2562), 'numpy.max', 'np.max', (["veh_surf['z']"], {}), "(veh_surf['z'])\n", (2547, 2562), True, 'import numpy as np\n')] |
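The voxel labels are bit flags, so a single voxel can belong to several sets at once (e.g. a hatch on the hull). A small stand-alone sketch of the masking idea used above, with made-up values:
import numpy as np
# Hypothetical 1-D stand-in for voxel_data["value"]; 2=hull, 4=door, 8=hatch.
occupied = np.array([1, 2 | 4, 2, 2 | 8])
door_voxels = np.where(occupied & 4)[0]   # indices of voxels touched by a door
print(door_voxels)                           # -> [1]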
'''
>List of functions
1. encrypt(user_input,passphrase) - Encrypt the given string with the given passphrase. Returns cipher text and locked pad.
2. decrypt(cipher_text,locked_pad,passphrase) - Decrypt the cipher text encrypted with SBET. It requires cipher text, locked pad, and passphrase.
'''
# CODE ========================================================================
import zlib
import random
from hashlib import sha1
from silver_bullet.TRNG import trlist
from silver_bullet.contain_value import contain
ascii_value=256
def ciphering(target_list,pad,decrypt=False):
result=[]
for counter in range(len(pad)):
if decrypt==False:
operated=contain(target_list[counter]+pad[counter],ascii_value)
else:
operated=contain(int(target_list[counter])-pad[counter],ascii_value)
result.append(operated)
return result
def locker(pad,passphrase):
cutter=round(len(passphrase)/2)
splited=[passphrase[:cutter],passphrase[cutter:]]
locker=[0 for counter in range(len(pad))]
for element in splited:
bloated_seed=sha1(element.encode()).hexdigest()
random.seed(bloated_seed)
locker=[contain(random.randrange(ascii_value)+element,ascii_value) for element in locker]
holder=[]
for counter in range(len(pad)):
operated=int(pad[counter])^locker[counter]
holder.append(operated)
return holder
def encrypt(user_input,passphrase):
compressed=zlib.compress(user_input.encode())
ui_listed=list(compressed)
pad=trlist(len(ui_listed),ascii_value)
ct=ciphering(ui_listed,pad)
lp=locker(pad,passphrase)
cipher_text=' '.join(map(str,ct))
locked_pad=' '.join(map(str,lp))
return cipher_text, locked_pad
def decrypt(cipher_text,locked_pad,passphrase):
ct=cipher_text.split(' ')
lp=locked_pad.split(' ')
pad=locker(lp,passphrase)
pt=ciphering(ct,pad,True)
byted=bytes(pt)
decompressed=zlib.decompress(byted).decode()
return decompressed | [
"zlib.decompress",
"silver_bullet.contain_value.contain",
"random.seed",
"random.randrange"
] | [((1076, 1101), 'random.seed', 'random.seed', (['bloated_seed'], {}), '(bloated_seed)\n', (1087, 1101), False, 'import random\n'), ((663, 720), 'silver_bullet.contain_value.contain', 'contain', (['(target_list[counter] + pad[counter])', 'ascii_value'], {}), '(target_list[counter] + pad[counter], ascii_value)\n', (670, 720), False, 'from silver_bullet.contain_value import contain\n'), ((1831, 1853), 'zlib.decompress', 'zlib.decompress', (['byted'], {}), '(byted)\n', (1846, 1853), False, 'import zlib\n'), ((1120, 1149), 'random.randrange', 'random.randrange', (['ascii_value'], {}), '(ascii_value)\n', (1136, 1149), False, 'import random\n')] |
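A minimal round-trip sketch of the encrypt/decrypt API documented above; the message and passphrase are illustrative, and the silver_bullet helpers imported by the module (trlist, contain) are assumed to be installed.
# Illustrative round trip using the functions above (values are made up).
message = 'attack at dawn'
passphrase = 'correct horse battery staple'
cipher_text, locked_pad = encrypt(message, passphrase)
recovered = decrypt(cipher_text, locked_pad, passphrase)
assert recovered == message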
import random
class Status(object):
def getHeadPosition(gamedata):
me = gamedata['you']
my_position = me['body']
head = my_position[0]
return head
def getMyLength(gamedata):
me = gamedata['you']
my_position = me['body']
if my_position[0] == my_position[1] == my_position[2]:
return 1
elif my_position[1] == my_position[2]:
return 2
else: return len(my_position)
def getMyDirection(gamedata):
me = gamedata['you']
my_position = me['body']
if Status.getMyLength(gamedata) == 1:
return 'none'
elif my_position[0]['x'] > my_position[1]['x']:
return 'right'
elif my_position[0]['x'] < my_position[1]['x']:
return 'left'
elif my_position[0]['x'] == my_position[1]['x'] and my_position[0]['y'] < my_position[1]['y']:
return 'up'
else: return 'down'
def getHealth(gamedata):
pass
def getBoardSize(gamedata):
board_height = gamedata['board']['height']
board_width = gamedata['board']['width']
dimensions = {'height': board_height, 'width': board_width}
return dimensions
def getFoodPositions(gamedata):
pass
def getSnakesPositions(gamedata):
pass
class Assess(object):
def wallProximity(gamedata):
"""returns proximity to a wall
either parallel to, head-on or corner"""
head = Status.getHeadPosition(gamedata)
board_size = Status.getBoardSize(gamedata)
direction = Status.getMyDirection(gamedata)
height = board_size['height'] - 1
width = board_size['width'] - 1
#corners
if head['x'] == 0 and head['y'] == 0:
return {'type': 'corner', 'identifier': 'top left', 'direction': direction}
elif head['x'] == 0 and head['y'] == height:
return {'type': 'corner', 'identifier': 'bottom left', 'direction': direction}
elif head['x'] == width and head['y'] == 0:
return {'type': 'corner', 'identifier': 'top right', 'direction': direction}
elif head['x'] == width and head['y'] == height:
return {'type': 'corner', 'identifier': 'bottom right', 'direction': direction}
#headons
elif head['x'] == 0 and direction == 'left':
return {'type': 'head-on', 'identifier': 'left', 'direction': direction}
elif head['y'] == 0 and direction == 'up':
return {'type': 'head-on', 'identifier': 'top', 'direction': direction}
elif head['x'] == width and direction == 'right':
return {'type': 'head-on', 'identifier': 'right', 'direction': direction}
elif head['y'] == height and direction == 'down':
return {'type': 'head-on', 'identifier': 'bottom', 'direction': direction}
#parrallels
elif head['x'] == 0 and direction == 'up' or head['x'] == 0 and direction == 'down':
return {'type': 'parallel', 'identifier': 'left', 'direction': direction}
elif head['y'] == 0 and direction == 'right' or head['y'] == 0 and direction =='left':
return {'type': 'parallel', 'identifier': 'top', 'direction': direction}
elif head['x'] == width and direction =='down' or head['x'] == width and direction == 'up':
return {'type': 'parallel', 'identifier': 'right', 'direction': direction}
elif head['y'] == height and direction == 'left' or head['y'] == height and direction == 'right':
return {'type': 'parallel', 'identifier': 'bottom', 'direction': direction}
else: return False
def ownBodyProximity(gamedata):
pass
def killPossible(gamedata):
pass
def smallerSnakeNearby(gamedata):
pass
def biggerSnakeNearby(gamedata):
pass
def foodNearby(gamedata):
pass
class Action(object):
def avoidDeath():
pass
def chaseFood():
pass
def fleeSnake():
pass
def chaseSnake():
pass
class Decision(object):
def chooseBestOption(gamedata):
options = ['up', 'down', 'right', 'left']
current_direction = Status.getMyDirection(gamedata)
        #first go: no previous direction yet, so any move will do
        if current_direction == 'none':
            return random.choice(options)
        #remove the opposite direction (a snake cannot reverse into itself)
        if current_direction == 'up':
            options.remove('down')
        if current_direction == 'down':
            options.remove('up')
        if current_direction == 'right':
            options.remove('left')
        if current_direction == 'left':
            options.remove('right')
        proximity = Assess.wallProximity(gamedata)
        #no danger, keep going
        if proximity == False:
            choice = current_direction
        #in a corner: turn away from both walls
        elif proximity['type'] == 'corner':
            options.remove(current_direction)
            identifier = proximity['identifier']
            if identifier == 'top left':
                choice = 'down' if 'down' in options else 'right'
            elif identifier == 'top right':
                choice = 'down' if 'down' in options else 'left'
            elif identifier == 'bottom left':
                choice = 'up' if 'up' in options else 'right'
            else:  # bottom right
                choice = 'up' if 'up' in options else 'left'
        #head-on: forced to turn, either way along the wall
        elif proximity['type'] == 'head-on':
            options.remove(current_direction)
            choice = random.choice(options)
        #parallel: keep going along the wall
        elif proximity['type'] == 'parallel':
            choice = current_direction
        #fallback so choice is never left unbound
        else:
            choice = random.choice(options)
        return choice
| [
"random.choice"
] | [((3760, 3782), 'random.choice', 'random.choice', (['options'], {}), '(options)\n', (3773, 3782), False, 'import random\n'), ((4772, 4794), 'random.choice', 'random.choice', (['options'], {}), '(options)\n', (4785, 4794), False, 'import random\n')] |
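To see how the classes above fit together, here is a hedged sketch with a hand-built gamedata payload containing only the fields the methods actually read (board size and the 'you' body); the coordinates are made up.
# Hypothetical Battlesnake-style payload matching the fields read above.
gamedata = {
    'board': {'height': 11, 'width': 11},
    'you': {'body': [{'x': 0, 'y': 5}, {'x': 1, 'y': 5}, {'x': 2, 'y': 5}]},
}
print(Status.getMyDirection(gamedata))      # 'left'
print(Assess.wallProximity(gamedata))       # {'type': 'head-on', 'identifier': 'left', 'direction': 'left'}
print(Decision.chooseBestOption(gamedata))  # 'up' or 'down': turns away from the head-on wall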
from dataclasses import dataclass
import math
import chess
import chess.engine
from model import EngineMove, NextMovePair
from chess import Color, Board
from chess.pgn import GameNode
from chess.engine import SimpleEngine, Score
nps = []
def material_count(board: Board, side: Color) -> int:
values = { chess.PAWN: 1, chess.KNIGHT: 3, chess.BISHOP: 3, chess.ROOK: 5, chess.QUEEN: 9 }
return sum(len(board.pieces(piece_type, side)) * value for piece_type, value in values.items())
def material_diff(board: Board, side: Color) -> int:
return material_count(board, side) - material_count(board, not side)
def is_up_in_material(board: Board, side: Color) -> bool:
return material_diff(board, side) > 0
def get_next_move_pair(engine: SimpleEngine, node: GameNode, winner: Color, limit: chess.engine.Limit) -> NextMovePair:
info = engine.analyse(node.board(), multipv = 2, limit = limit)
global nps
nps.append(info[0]["nps"])
nps = nps[-20:]
# print(info)
best = EngineMove(info[0]["pv"][0], info[0]["score"].pov(winner))
second = EngineMove(info[1]["pv"][0], info[1]["score"].pov(winner)) if len(info) > 1 else None
return NextMovePair(node, winner, best, second)
def avg_knps():
global nps
return round(sum(nps) / len(nps) / 1000) if nps else 0
def win_chances(score: Score) -> float:
"""
winning chances from -1 to 1 https://graphsketch.com/?eqn1_color=1&eqn1_eqn=100+*+%282+%2F+%281+%2B+exp%28-0.004+*+x%29%29+-+1%29&eqn2_color=2&eqn2_eqn=&eqn3_color=3&eqn3_eqn=&eqn4_color=4&eqn4_eqn=&eqn5_color=5&eqn5_eqn=&eqn6_color=6&eqn6_eqn=&x_min=-1000&x_max=1000&y_min=-100&y_max=100&x_tick=100&y_tick=10&x_label_freq=2&y_label_freq=2&do_grid=0&do_grid=1&bold_labeled_lines=0&bold_labeled_lines=1&line_width=4&image_w=850&image_h=525
"""
mate = score.mate()
if mate is not None:
return 1 if mate > 0 else -1
cp = score.score()
return 2 / (1 + math.exp(-0.004 * cp)) - 1 if cp is not None else 0
CORRESP_TIME = 999999
def reject_by_time_control(line: str, has_master: bool, master_only: bool, bullet: bool, mates: bool) -> bool:
if not line.startswith("[TimeControl "):
return False
if master_only and not has_master:
return True
try:
seconds, increment = line[1:][:-2].split()[1].replace("\"", "").split("+")
total = int(seconds) + int(increment) * 40
if master_only or mates:
if bullet:
return total < 30 or total >= 160
else:
return total < 160 or total >= 480
else:
return total < (160 if has_master else 480)
except:
return True
def exclude_rating(line: str, mates: bool) -> bool:
if not line.startswith("[WhiteElo ") and not line.startswith("[BlackElo "):
return False
try:
return int(line[11:15]) < (1501 if mates else 1600)
except:
return True
| [
"model.NextMovePair",
"math.exp"
] | [((1172, 1212), 'model.NextMovePair', 'NextMovePair', (['node', 'winner', 'best', 'second'], {}), '(node, winner, best, second)\n', (1184, 1212), False, 'from model import EngineMove, NextMovePair\n'), ((1934, 1955), 'math.exp', 'math.exp', (['(-0.004 * cp)'], {}), '(-0.004 * cp)\n', (1942, 1955), False, 'import math\n')] |
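A few quick sanity checks of the helpers above using standard python-chess objects (chess.Board, chess.engine.Cp, chess.engine.Mate); the expected values follow directly from the formulas in the code.
import chess
import chess.engine
board = chess.Board()                        # starting position
print(material_diff(board, chess.WHITE))     # 0: material is equal at the start
print(win_chances(chess.engine.Cp(0)))       # 0.0 for a level evaluation
print(win_chances(chess.engine.Cp(200)))     # roughly 0.38 with the formula above
print(win_chances(chess.engine.Mate(2)))     # 1 for a forced mate for the side to move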
import time, morning
from datetime import datetime
def main():
while True:
a = time.mktime(datetime.now().timetuple())
n = datetime.now()
if n.hour == 6 and (n.minute-(n.minute%5)) == 15:
return morning.main()
time.sleep(300 - (time.mktime(datetime.now().timetuple())-a)) | [
"datetime.datetime.now",
"morning.main"
] | [((135, 149), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (147, 149), False, 'from datetime import datetime\n'), ((214, 228), 'morning.main', 'morning.main', ([], {}), '()\n', (226, 228), False, 'import time, morning\n'), ((100, 114), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (112, 114), False, 'from datetime import datetime\n'), ((262, 276), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (274, 276), False, 'from datetime import datetime\n')] |
import sys
import json
import subprocess
import re
import statistics
def get_complexity():
# Load the cyclomatic complexity info
cyclostats = subprocess.check_output(['./gocyclo', 'repo']).decode("utf-8")
results = re.findall('([0-9]+)\s([^\s]+)\s([^\s]+)\s([^:]+):([0-9]+):([0-9]+)', cyclostats)
# Setup a dictionary in which to keep track of the complixities
# for each file
files = {}
# Build an array of complexities for each file
for result in results:
if result[3] in files:
files[result[3]].append(int(result[0]))
else:
files[result[3]] = [int(result[0])]
# Pick out the median value (picking the highest of the two
# middle entries if needed) for each file
for name, values in files.items():
files[name] = statistics.median_high(values)
return files
def get_duplicate_const_strings():
# Load the const string duplication info
cyclostats = subprocess.check_output(['./goconst', './repo/...']).decode("utf-8")
results = re.findall('([^:]+).+ other occurrence\(s\) of \"(.+)\" found in: ([^:]+).+\n?', cyclostats)
files = {}
# Build an array containing the number of potentially duplicated
# constants by file
for result in results:
if result[0] in files:
files[result[0]] = files[result[0]]+1
else:
files[result[0]] = 1
return files
# Main service body
if __name__ == "__main__":
complexity = get_complexity()
duplicate_const_strings = get_duplicate_const_strings()
files = set()
files.update(complexity.keys())
files.update(duplicate_const_strings.keys())
result = []
for f in files:
result.append({
'filename': f,
'cyclomaticComplexity': complexity[f] if f in complexity else 0,
'duplicateConstStrings': duplicate_const_strings[f] if f in duplicate_const_strings else 0
})
print(json.dumps(result)) | [
"subprocess.check_output",
"re.findall",
"json.dumps",
"statistics.median_high"
] | [((230, 320), 're.findall', 're.findall', (['"""([0-9]+)\\\\s([^\\\\s]+)\\\\s([^\\\\s]+)\\\\s([^:]+):([0-9]+):([0-9]+)"""', 'cyclostats'], {}), "('([0-9]+)\\\\s([^\\\\s]+)\\\\s([^\\\\s]+)\\\\s([^:]+):([0-9]+):([0-9]+)',\n cyclostats)\n", (240, 320), False, 'import re\n'), ((1044, 1148), 're.findall', 're.findall', (['"""([^:]+).+ other occurrence\\\\(s\\\\) of "(.+)" found in: ([^:]+).+\n?"""', 'cyclostats'], {}), '(\n """([^:]+).+ other occurrence\\\\(s\\\\) of "(.+)" found in: ([^:]+).+\n?""",\n cyclostats)\n', (1054, 1148), False, 'import re\n'), ((812, 842), 'statistics.median_high', 'statistics.median_high', (['values'], {}), '(values)\n', (834, 842), False, 'import statistics\n'), ((1960, 1978), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (1970, 1978), False, 'import json\n'), ((152, 198), 'subprocess.check_output', 'subprocess.check_output', (["['./gocyclo', 'repo']"], {}), "(['./gocyclo', 'repo'])\n", (175, 198), False, 'import subprocess\n'), ((960, 1012), 'subprocess.check_output', 'subprocess.check_output', (["['./goconst', './repo/...']"], {}), "(['./goconst', './repo/...'])\n", (983, 1012), False, 'import subprocess\n')] |
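To make the regex parsing above concrete, here is a sketch against one hypothetical gocyclo-style line (the real tool output may differ slightly between versions), plus the median_high behaviour relied on for the per-file complexity.
import re
import statistics
# Hypothetical gocyclo-style line: "<complexity> <package> <function> <file>:<line>:<col>".
sample = "15 mypkg (*Server).Handle repo/server/handler.go:42:1"
print(re.findall(r'([0-9]+)\s([^\s]+)\s([^\s]+)\s([^:]+):([0-9]+):([0-9]+)', sample))
# [('15', 'mypkg', '(*Server).Handle', 'repo/server/handler.go', '42', '1')]
print(statistics.median_high([3, 7, 15, 4]))  # 7: the higher of the two middle values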
"""
Testing if parso finds syntax errors and indentation errors.
"""
import sys
import warnings
import pytest
import parso
from parso._compatibility import is_pypy
from .failing_examples import FAILING_EXAMPLES, indent, build_nested
if is_pypy:
# The errors in PyPy might be different. Just skip the module for now.
pytestmark = pytest.mark.skip()
def _get_error_list(code, version=None):
grammar = parso.load_grammar(version=version)
tree = grammar.parse(code)
return list(grammar.iter_errors(tree))
def assert_comparison(code, error_code, positions):
errors = [(error.start_pos, error.code) for error in _get_error_list(code)]
assert [(pos, error_code) for pos in positions] == errors
@pytest.mark.parametrize('code', FAILING_EXAMPLES)
def test_python_exception_matches(code):
wanted, line_nr = _get_actual_exception(code)
errors = _get_error_list(code)
actual = None
if errors:
error, = errors
actual = error.message
assert actual in wanted
# Somehow in Python3.3 the SyntaxError().lineno is sometimes None
assert line_nr is None or line_nr == error.start_pos[0]
def test_non_async_in_async():
"""
    This example doesn't work with FAILING_EXAMPLES, because the reported line
    numbers are not always the same, and are incorrect in Python 3.8.
"""
if sys.version_info[:2] < (3, 5):
pytest.skip()
# Raises multiple errors in previous versions.
code = 'async def foo():\n def nofoo():[x async for x in []]'
wanted, line_nr = _get_actual_exception(code)
errors = _get_error_list(code)
if errors:
error, = errors
actual = error.message
assert actual in wanted
if sys.version_info[:2] < (3, 8):
assert line_nr == error.start_pos[0]
else:
assert line_nr == 0 # For whatever reason this is zero in Python 3.8+
@pytest.mark.parametrize(
('code', 'positions'), [
('1 +', [(1, 3)]),
('1 +\n', [(1, 3)]),
('1 +\n2 +', [(1, 3), (2, 3)]),
('x + 2', []),
('[\n', [(2, 0)]),
('[\ndef x(): pass', [(2, 0)]),
('[\nif 1: pass', [(2, 0)]),
('1+?', [(1, 2)]),
('?', [(1, 0)]),
('??', [(1, 0)]),
('? ?', [(1, 0)]),
('?\n?', [(1, 0), (2, 0)]),
('? * ?', [(1, 0)]),
('1 + * * 2', [(1, 4)]),
('?\n1\n?', [(1, 0), (3, 0)]),
]
)
def test_syntax_errors(code, positions):
assert_comparison(code, 901, positions)
@pytest.mark.parametrize(
('code', 'positions'), [
(' 1', [(1, 0)]),
('def x():\n 1\n 2', [(3, 0)]),
('def x():\n 1\n 2', [(3, 0)]),
('def x():\n1', [(2, 0)]),
]
)
def test_indentation_errors(code, positions):
assert_comparison(code, 903, positions)
def _get_actual_exception(code):
with warnings.catch_warnings():
# We don't care about warnings where locals/globals misbehave here.
# It's as simple as either an error or not.
warnings.filterwarnings('ignore', category=SyntaxWarning)
try:
compile(code, '<unknown>', 'exec')
except (SyntaxError, IndentationError) as e:
wanted = e.__class__.__name__ + ': ' + e.msg
line_nr = e.lineno
except ValueError as e:
# The ValueError comes from byte literals in Python 2 like '\x'
# that are oddly enough not SyntaxErrors.
wanted = 'SyntaxError: (value error) ' + str(e)
line_nr = None
else:
assert False, "The piece of code should raise an exception."
# SyntaxError
# Python 2.6 has a bit different error messages here, so skip it.
if sys.version_info[:2] == (2, 6) and wanted == 'SyntaxError: unexpected EOF while parsing':
wanted = 'SyntaxError: invalid syntax'
if wanted == 'SyntaxError: non-keyword arg after keyword arg':
# The python 3.5+ way, a bit nicer.
wanted = 'SyntaxError: positional argument follows keyword argument'
elif wanted == 'SyntaxError: assignment to keyword':
return [wanted, "SyntaxError: can't assign to keyword",
'SyntaxError: cannot assign to __debug__'], line_nr
elif wanted == 'SyntaxError: assignment to None':
        # Python 2.6 has a slightly different error.
wanted = 'SyntaxError: cannot assign to None'
elif wanted == 'SyntaxError: can not assign to __debug__':
        # Python 2.6 has a slightly different error.
wanted = 'SyntaxError: cannot assign to __debug__'
elif wanted == 'SyntaxError: can use starred expression only as assignment target':
# Python 3.4/3.4 have a bit of a different warning than 3.5/3.6 in
# certain places. But in others this error makes sense.
return [wanted, "SyntaxError: can't use starred expression here"], line_nr
elif wanted == 'SyntaxError: f-string: unterminated string':
wanted = 'SyntaxError: EOL while scanning string literal'
elif wanted == 'SyntaxError: f-string expression part cannot include a backslash':
return [
wanted,
"SyntaxError: EOL while scanning string literal",
"SyntaxError: unexpected character after line continuation character",
], line_nr
elif wanted == "SyntaxError: f-string: expecting '}'":
wanted = 'SyntaxError: EOL while scanning string literal'
elif wanted == 'SyntaxError: f-string: empty expression not allowed':
wanted = 'SyntaxError: invalid syntax'
elif wanted == "SyntaxError: f-string expression part cannot include '#'":
wanted = 'SyntaxError: invalid syntax'
elif wanted == "SyntaxError: f-string: single '}' is not allowed":
wanted = 'SyntaxError: invalid syntax'
return [wanted], line_nr
def test_default_except_error_postition():
# For this error the position seemed to be one line off, but that doesn't
# really matter.
code = 'try: pass\nexcept: pass\nexcept X: pass'
wanted, line_nr = _get_actual_exception(code)
error, = _get_error_list(code)
assert error.message in wanted
assert line_nr != error.start_pos[0]
# I think this is the better position.
assert error.start_pos[0] == 2
def test_statically_nested_blocks():
def build(code, depth):
if depth == 0:
return code
new_code = 'if 1:\n' + indent(code)
return build(new_code, depth - 1)
def get_error(depth, add_func=False):
code = build('foo', depth)
if add_func:
code = 'def bar():\n' + indent(code)
errors = _get_error_list(code)
if errors:
assert errors[0].message == 'SyntaxError: too many statically nested blocks'
return errors[0]
return None
assert get_error(19) is None
assert get_error(19, add_func=True) is None
assert get_error(20)
assert get_error(20, add_func=True)
def test_future_import_first():
def is_issue(code, *args):
code = code % args
return bool(_get_error_list(code))
i1 = 'from __future__ import division'
i2 = 'from __future__ import absolute_import'
assert not is_issue(i1)
assert not is_issue(i1 + ';' + i2)
assert not is_issue(i1 + '\n' + i2)
assert not is_issue('"";' + i1)
assert not is_issue('"";' + i1)
assert not is_issue('""\n' + i1)
assert not is_issue('""\n%s\n%s', i1, i2)
assert not is_issue('""\n%s;%s', i1, i2)
assert not is_issue('"";%s;%s ', i1, i2)
assert not is_issue('"";%s\n%s ', i1, i2)
assert is_issue('1;' + i1)
assert is_issue('1\n' + i1)
assert is_issue('"";1\n' + i1)
assert is_issue('""\n%s\nfrom x import a\n%s', i1, i2)
assert is_issue('%s\n""\n%s', i1, i2)
def test_named_argument_issues(works_not_in_py):
message = works_not_in_py.get_error_message('def foo(*, **dict): pass')
message = works_not_in_py.get_error_message('def foo(*): pass')
if works_not_in_py.version.startswith('2'):
assert message == 'SyntaxError: invalid syntax'
else:
assert message == 'SyntaxError: named arguments must follow bare *'
works_not_in_py.assert_no_error_in_passing('def foo(*, name): pass')
works_not_in_py.assert_no_error_in_passing('def foo(bar, *, name=1): pass')
works_not_in_py.assert_no_error_in_passing('def foo(bar, *, name=1, **dct): pass')
def test_escape_decode_literals(each_version):
"""
    We are using internal functions to ensure that unicode/bytes escaping does
    not raise syntax errors. This is a bit of quality assurance that the checks
    keep working across versions, because the internal function might change
    over time.
"""
def get_msg(end, to=1):
base = "SyntaxError: (unicode error) 'unicodeescape' " \
"codec can't decode bytes in position 0-%s: " % to
return base + end
def get_msgs(escape):
return (get_msg('end of string in escape sequence'),
get_msg(r"truncated %s escape" % escape))
error, = _get_error_list(r'u"\x"', version=each_version)
assert error.message in get_msgs(r'\xXX')
error, = _get_error_list(r'u"\u"', version=each_version)
assert error.message in get_msgs(r'\uXXXX')
error, = _get_error_list(r'u"\U"', version=each_version)
assert error.message in get_msgs(r'\UXXXXXXXX')
error, = _get_error_list(r'u"\N{}"', version=each_version)
assert error.message == get_msg(r'malformed \N character escape', to=2)
error, = _get_error_list(r'u"\N{foo}"', version=each_version)
assert error.message == get_msg(r'unknown Unicode character name', to=6)
# Finally bytes.
error, = _get_error_list(r'b"\x"', version=each_version)
wanted = r'SyntaxError: (value error) invalid \x escape'
if sys.version_info >= (3, 0):
# The positioning information is only available in Python 3.
wanted += ' at position 0'
assert error.message == wanted
def test_too_many_levels_of_indentation():
assert not _get_error_list(build_nested('pass', 99))
assert _get_error_list(build_nested('pass', 100))
base = 'def x():\n if x:\n'
assert not _get_error_list(build_nested('pass', 49, base=base))
assert _get_error_list(build_nested('pass', 50, base=base))
@pytest.mark.parametrize(
'code', [
"f'{*args,}'",
r'f"\""',
r'f"\\\""',
r'fr"\""',
r'fr"\\\""',
r"print(f'Some {x:.2f} and some {y}')",
]
)
def test_valid_fstrings(code):
assert not _get_error_list(code, version='3.6')
@pytest.mark.parametrize(
('code', 'message'), [
("f'{1+}'", ('invalid syntax')),
(r'f"\"', ('invalid syntax')),
(r'fr"\"', ('invalid syntax')),
]
)
def test_invalid_fstrings(code, message):
"""
    Some fstring errors are handled differently in 3.6 than in other versions.
Therefore check specifically for these errors here.
"""
error, = _get_error_list(code, version='3.6')
assert message in error.message
@pytest.mark.parametrize(
'code', [
"from foo import (\nbar,\n rab,\n)",
"from foo import (bar, rab, )",
]
)
def test_trailing_comma(code):
errors = _get_error_list(code)
assert not errors
| [
"pytest.mark.skip",
"warnings.catch_warnings",
"parso.load_grammar",
"pytest.mark.parametrize",
"pytest.skip",
"warnings.filterwarnings"
] | [((726, 775), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""code"""', 'FAILING_EXAMPLES'], {}), "('code', FAILING_EXAMPLES)\n", (749, 775), False, 'import pytest\n'), ((1872, 2288), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('code', 'positions')", "[('1 +', [(1, 3)]), ('1 +\\n', [(1, 3)]), ('1 +\\n2 +', [(1, 3), (2, 3)]), (\n 'x + 2', []), ('[\\n', [(2, 0)]), ('[\\ndef x(): pass', [(2, 0)]), (\n '[\\nif 1: pass', [(2, 0)]), ('1+?', [(1, 2)]), ('?', [(1, 0)]), ('??',\n [(1, 0)]), ('? ?', [(1, 0)]), ('?\\n?', [(1, 0), (2, 0)]), ('? * ?', [(1,\n 0)]), ('1 + * * 2', [(1, 4)]), ('?\\n1\\n?', [(1, 0), (3, 0)])]"], {}), "(('code', 'positions'), [('1 +', [(1, 3)]), ('1 +\\n',\n [(1, 3)]), ('1 +\\n2 +', [(1, 3), (2, 3)]), ('x + 2', []), ('[\\n', [(2, \n 0)]), ('[\\ndef x(): pass', [(2, 0)]), ('[\\nif 1: pass', [(2, 0)]), (\n '1+?', [(1, 2)]), ('?', [(1, 0)]), ('??', [(1, 0)]), ('? ?', [(1, 0)]),\n ('?\\n?', [(1, 0), (2, 0)]), ('? * ?', [(1, 0)]), ('1 + * * 2', [(1, 4)]\n ), ('?\\n1\\n?', [(1, 0), (3, 0)])])\n", (1895, 2288), False, 'import pytest\n'), ((2487, 2660), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('code', 'positions')", '[(\' 1\', [(1, 0)]), ("""def x():\n 1\n 2""", [(3, 0)]), (\n \'def x():\\n 1\\n 2\', [(3, 0)]), (\'def x():\\n1\', [(2, 0)])]'], {}), '((\'code\', \'positions\'), [(\' 1\', [(1, 0)]), (\n """def x():\n 1\n 2""", [(3, 0)]), (\'def x():\\n 1\\n 2\', [(3, 0)]), (\n \'def x():\\n1\', [(2, 0)])])\n', (2510, 2660), False, 'import pytest\n'), ((10265, 10406), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""code"""', '["f\'{*args,}\'", \'f"\\\\""\', \'f"\\\\\\\\\\\\""\', \'fr"\\\\""\', \'fr"\\\\\\\\\\\\""\',\n "print(f\'Some {x:.2f} and some {y}\')"]'], {}), '(\'code\', ["f\'{*args,}\'", \'f"\\\\""\', \'f"\\\\\\\\\\\\""\',\n \'fr"\\\\""\', \'fr"\\\\\\\\\\\\""\', "print(f\'Some {x:.2f} and some {y}\')"])\n', (10288, 10406), False, 'import pytest\n'), ((10547, 10687), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('code', 'message')", '[("f\'{1+}\'", \'invalid syntax\'), (\'f"\\\\"\', \'invalid syntax\'), (\'fr"\\\\"\',\n \'invalid syntax\')]'], {}), '((\'code\', \'message\'), [("f\'{1+}\'", \'invalid syntax\'),\n (\'f"\\\\"\', \'invalid syntax\'), (\'fr"\\\\"\', \'invalid syntax\')])\n', (10570, 10687), False, 'import pytest\n'), ((11004, 11111), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""code"""', '["""from foo import (\nbar,\n rab,\n)""", \'from foo import (bar, rab, )\']'], {}), '(\'code\', ["""from foo import (\nbar,\n rab,\n)""",\n \'from foo import (bar, rab, )\'])\n', (11027, 11111), False, 'import pytest\n'), ((341, 359), 'pytest.mark.skip', 'pytest.mark.skip', ([], {}), '()\n', (357, 359), False, 'import pytest\n'), ((417, 452), 'parso.load_grammar', 'parso.load_grammar', ([], {'version': 'version'}), '(version=version)\n', (435, 452), False, 'import parso\n'), ((1377, 1390), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (1388, 1390), False, 'import pytest\n'), ((2828, 2853), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2851, 2853), False, 'import warnings\n'), ((2991, 3048), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'SyntaxWarning'}), "('ignore', category=SyntaxWarning)\n", (3014, 3048), False, 'import warnings\n')] |
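For readers unfamiliar with the parso API exercised above, a minimal standalone check looks like this; the error objects expose the same start_pos, code and message attributes used in the tests (exact messages vary by parso version).
import parso
grammar = parso.load_grammar()          # current Python version by default
module = grammar.parse('1 +\n')
for error in grammar.iter_errors(module):
    # Each issue carries a position, a numeric code and a human-readable message.
    print(error.start_pos, error.code, error.message)
    # e.g. (1, 3) 901 SyntaxError: invalid syntax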
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from shogitk.shogi import Coords, Move, BLACK, WHITE, DROP, PROMOTE
RANKNUM = {
'a': 1,
'b': 2,
'c': 3,
'd': 4,
'e': 5,
'f': 6,
'g': 7,
'h': 8,
'i': 9
}
def decoder(f):
color = [BLACK, WHITE]
step = 0
for line in f:
line = line.strip()
if line[0] == '[':
pass
elif line[0].isdigit():
src = Coords(int(line[0]), RANKNUM[line[1]])
dst = Coords(int(line[2]), RANKNUM[line[3]])
if line[-1] == '+':
modifier = PROMOTE
else:
modifier = None
yield Move(color[step & 1], dst, src, None, modifier=modifier)
step += 1
elif line[0].isupper():
dst = Coords(int(line[2]), RANKNUM[line[3]])
yield Move(color[step & 1], dst, None, line[0], modifier=DROP)
step += 1
| [
"shogitk.shogi.Move"
] | [((720, 776), 'shogitk.shogi.Move', 'Move', (['color[step & 1]', 'dst', 'src', 'None'], {'modifier': 'modifier'}), '(color[step & 1], dst, src, None, modifier=modifier)\n', (724, 776), False, 'from shogitk.shogi import Coords, Move, BLACK, WHITE, DROP, PROMOTE\n'), ((906, 962), 'shogitk.shogi.Move', 'Move', (['color[step & 1]', 'dst', 'None', 'line[0]'], {'modifier': 'DROP'}), '(color[step & 1], dst, None, line[0], modifier=DROP)\n', (910, 962), False, 'from shogitk.shogi import Coords, Move, BLACK, WHITE, DROP, PROMOTE\n')] |
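A sketch of feeding the decoder above with a few coordinate-notation move strings; the exact notation accepted by the surrounding shogitk tooling is assumed here, but the branches exercised (normal move, promotion, drop) follow directly from the code.
# Hypothetical move list in the four-character coordinate notation parsed above;
# a trailing '+' marks a promotion and a leading piece letter marks a drop.
moves = ['7g7f', '3c3d', '8h2b+', 'B*4e']
for move in decoder(moves):
    print(move)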
import getpass
from plumbum import local
from plumbum.machines.paramiko_machine import ParamikoMachine
from plumbum.path.utils import copy
def _once(f):
res = None
def wrapped(*args, **kwargs):
nonlocal res
if res is None:
res = f(*args, **kwargs)
return res
return wrapped
@_once
def get_remote_machine_with_password(host, user):
password = getpass.getpass(prompt=f"Password for {user}@{host}: ", stream=None)
rem = ParamikoMachine(host, user=user, password=password)
return rem
@_once
def get_remote_machine(host, user, keyfile):
rem = ParamikoMachine(host, user=user, keyfile=keyfile)
return rem
def get_local_machine():
return local
def with_machine_rule(cls):
old_init = cls.__init__
def new_init(self, config):
if "machine" not in config:
machine_type = "local"
else:
machine_type = config["machine"]["type"]
if machine_type == "local":
self.machine = get_local_machine()
self.files_to_copy = None
elif machine_type == "remote":
if "keyfile" in config["machine"]:
self.machine = get_remote_machine(config["machine"]["host"], config["machine"]["user"], config["machine"]["keyfile"])
else:
self.machine = get_remote_machine_with_password(config["machine"]["host"], config["machine"]["user"])
self.files_to_copy = config["machine"].get("files_to_copy")
else:
raise ValueError(f"Invalid machine type: {config['machine']['type']}")
self.machine_type = machine_type
old_init(self, config)
cls.__init__ = new_init
old_apply = cls.apply
def new_apply(self, project):
with self.machine.tempdir() as tempdir:
project_path = tempdir / "project"
project_path.mkdir()
existing_files = set([f.name for f in project.root.list()])
if self.files_to_copy:
for fname in self.files_to_copy:
if fname in existing_files:
copy(project.root / fname, project_path / fname)
else:
for f in project.files():
if f.name in existing_files:
copy(f.path, project_path / f.name)
with self.machine.cwd(project_path):
self.session = self.machine.session()
self.session.run(f"cd {project_path}")
return old_apply(self, project)
cls.apply = new_apply
return cls
| [
"plumbum.path.utils.copy",
"plumbum.machines.paramiko_machine.ParamikoMachine",
"getpass.getpass"
] | [((396, 464), 'getpass.getpass', 'getpass.getpass', ([], {'prompt': 'f"""Password for {user}@{host}: """', 'stream': 'None'}), "(prompt=f'Password for {user}@{host}: ', stream=None)\n", (411, 464), False, 'import getpass\n'), ((476, 527), 'plumbum.machines.paramiko_machine.ParamikoMachine', 'ParamikoMachine', (['host'], {'user': 'user', 'password': 'password'}), '(host, user=user, password=password)\n', (491, 527), False, 'from plumbum.machines.paramiko_machine import ParamikoMachine\n'), ((607, 656), 'plumbum.machines.paramiko_machine.ParamikoMachine', 'ParamikoMachine', (['host'], {'user': 'user', 'keyfile': 'keyfile'}), '(host, user=user, keyfile=keyfile)\n', (622, 656), False, 'from plumbum.machines.paramiko_machine import ParamikoMachine\n'), ((2109, 2157), 'plumbum.path.utils.copy', 'copy', (['(project.root / fname)', '(project_path / fname)'], {}), '(project.root / fname, project_path / fname)\n', (2113, 2157), False, 'from plumbum.path.utils import copy\n'), ((2291, 2326), 'plumbum.path.utils.copy', 'copy', (['f.path', '(project_path / f.name)'], {}), '(f.path, project_path / f.name)\n', (2295, 2326), False, 'from plumbum.path.utils import copy\n')] |
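The decorator above only reads a handful of config keys; a hedged sketch of a rule class using it might look like this (the class, host, paths and command are illustrative).
# Hypothetical rule class wired up with the decorator above; the config keys
# mirror the ones read in new_init (type/host/user/keyfile/files_to_copy).
@with_machine_rule
class BuildRule:
    def __init__(self, config):
        self.config = config
    def apply(self, project):
        # self.machine and self.session are injected by the decorator.
        return self.session.run('make build')
config = {
    'machine': {
        'type': 'remote',
        'host': 'build.example.com',
        'user': 'ci',
        'keyfile': '/home/ci/.ssh/id_rsa',
        'files_to_copy': ['Makefile', 'src'],
    }
}
rule = BuildRule(config)  # note: constructing the rule opens the SSH connection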
from quart import Quart, jsonify, request
from quart_jwt_extended import (
JWTManager,
jwt_required,
create_access_token,
jwt_refresh_token_required,
create_refresh_token,
get_jwt_identity,
fresh_jwt_required,
)
app = Quart(__name__)
app.config["JWT_SECRET_KEY"] = "super-secret" # Change this!
jwt = JWTManager(app)
# Standard login endpoint. Will return a fresh access token and
# a refresh token
@app.route("/login", methods=["POST"])
async def login():
username = (await request.get_json()).get("username", None)
password = (await request.get_json()).get("password", None)
if username != "test" or password != "<PASSWORD>":
return {"msg": "Bad username or password"}, 401
# create_access_token supports an optional 'fresh' argument,
# which marks the token as fresh or non-fresh accordingly.
# As we just verified their username and password, we are
# going to mark the token as fresh here.
ret = {
"access_token": create_access_token(identity=username, fresh=True),
"refresh_token": create_refresh_token(identity=username),
}
return ret, 200
# Refresh token endpoint. This will generate a new access token from
# the refresh token, but will mark that access token as non-fresh,
# as we do not actually verify a password in this endpoint.
@app.route("/refresh", methods=["POST"])
@jwt_refresh_token_required
async def refresh():
current_user = get_jwt_identity()
new_token = create_access_token(identity=current_user, fresh=False)
ret = {"access_token": new_token}
return ret, 200
# Fresh login endpoint. This is designed to be used if we need to
# make a fresh token for a user (by verifying they have the
# correct username and password). Unlike the standard login endpoint,
# this will only return a new access token, so that we don't keep
# generating new refresh tokens, which entirely defeats their point.
@app.route("/fresh-login", methods=["POST"])
async def fresh_login():
username = (await request.get_json()).get("username", None)
password = (await request.get_json()).get("password", None)
if username != "test" or password != "<PASSWORD>":
return {"msg": "Bad username or password"}, 401
new_token = create_access_token(identity=username, fresh=True)
ret = {"access_token": new_token}
return ret, 200
# Any valid JWT can access this endpoint
@app.route("/protected", methods=["GET"])
@jwt_required
async def protected():
username = get_jwt_identity()
return dict(logged_in_as=username), 200
# Only fresh JWTs can access this endpoint
@app.route("/protected-fresh", methods=["GET"])
@fresh_jwt_required
async def protected_fresh():
username = get_jwt_identity()
return dict(fresh_logged_in_as=username), 200
if __name__ == "__main__":
app.run()
| [
"quart.Quart",
"quart_jwt_extended.create_refresh_token",
"quart_jwt_extended.JWTManager",
"quart.request.get_json",
"quart_jwt_extended.create_access_token",
"quart_jwt_extended.get_jwt_identity"
] | [((247, 262), 'quart.Quart', 'Quart', (['__name__'], {}), '(__name__)\n', (252, 262), False, 'from quart import Quart, jsonify, request\n'), ((332, 347), 'quart_jwt_extended.JWTManager', 'JWTManager', (['app'], {}), '(app)\n', (342, 347), False, 'from quart_jwt_extended import JWTManager, jwt_required, create_access_token, jwt_refresh_token_required, create_refresh_token, get_jwt_identity, fresh_jwt_required\n'), ((1452, 1470), 'quart_jwt_extended.get_jwt_identity', 'get_jwt_identity', ([], {}), '()\n', (1468, 1470), False, 'from quart_jwt_extended import JWTManager, jwt_required, create_access_token, jwt_refresh_token_required, create_refresh_token, get_jwt_identity, fresh_jwt_required\n'), ((1487, 1542), 'quart_jwt_extended.create_access_token', 'create_access_token', ([], {'identity': 'current_user', 'fresh': '(False)'}), '(identity=current_user, fresh=False)\n', (1506, 1542), False, 'from quart_jwt_extended import JWTManager, jwt_required, create_access_token, jwt_refresh_token_required, create_refresh_token, get_jwt_identity, fresh_jwt_required\n'), ((2260, 2310), 'quart_jwt_extended.create_access_token', 'create_access_token', ([], {'identity': 'username', 'fresh': '(True)'}), '(identity=username, fresh=True)\n', (2279, 2310), False, 'from quart_jwt_extended import JWTManager, jwt_required, create_access_token, jwt_refresh_token_required, create_refresh_token, get_jwt_identity, fresh_jwt_required\n'), ((2506, 2524), 'quart_jwt_extended.get_jwt_identity', 'get_jwt_identity', ([], {}), '()\n', (2522, 2524), False, 'from quart_jwt_extended import JWTManager, jwt_required, create_access_token, jwt_refresh_token_required, create_refresh_token, get_jwt_identity, fresh_jwt_required\n'), ((2726, 2744), 'quart_jwt_extended.get_jwt_identity', 'get_jwt_identity', ([], {}), '()\n', (2742, 2744), False, 'from quart_jwt_extended import JWTManager, jwt_required, create_access_token, jwt_refresh_token_required, create_refresh_token, get_jwt_identity, fresh_jwt_required\n'), ((1001, 1051), 'quart_jwt_extended.create_access_token', 'create_access_token', ([], {'identity': 'username', 'fresh': '(True)'}), '(identity=username, fresh=True)\n', (1020, 1051), False, 'from quart_jwt_extended import JWTManager, jwt_required, create_access_token, jwt_refresh_token_required, create_refresh_token, get_jwt_identity, fresh_jwt_required\n'), ((1078, 1117), 'quart_jwt_extended.create_refresh_token', 'create_refresh_token', ([], {'identity': 'username'}), '(identity=username)\n', (1098, 1117), False, 'from quart_jwt_extended import JWTManager, jwt_required, create_access_token, jwt_refresh_token_required, create_refresh_token, get_jwt_identity, fresh_jwt_required\n'), ((512, 530), 'quart.request.get_json', 'request.get_json', ([], {}), '()\n', (528, 530), False, 'from quart import Quart, jsonify, request\n'), ((576, 594), 'quart.request.get_json', 'request.get_json', ([], {}), '()\n', (592, 594), False, 'from quart import Quart, jsonify, request\n'), ((2026, 2044), 'quart.request.get_json', 'request.get_json', ([], {}), '()\n', (2042, 2044), False, 'from quart import Quart, jsonify, request\n'), ((2090, 2108), 'quart.request.get_json', 'request.get_json', ([], {}), '()\n', (2106, 2108), False, 'from quart import Quart, jsonify, request\n')] |
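A hedged client-side sketch of the fresh/non-fresh token flow above, using the requests library against a locally running instance; the port, credentials and expected status codes are assumptions based on the endpoints' behaviour.
import requests  # any HTTP client works; requests is assumed to be installed
BASE = 'http://localhost:5000'  # default Quart dev-server port (an assumption)
# Log in: returns a fresh access token and a refresh token.
tokens = requests.post(BASE + '/login', json={'username': 'test', 'password': '...'}).json()
fresh_hdr = {'Authorization': 'Bearer ' + tokens['access_token']}
print(requests.get(BASE + '/protected-fresh', headers=fresh_hdr).status_code)   # expect 200
# Refreshing yields a non-fresh access token: /protected works, /protected-fresh should not.
refresh_hdr = {'Authorization': 'Bearer ' + tokens['refresh_token']}
new_access = requests.post(BASE + '/refresh', headers=refresh_hdr).json()['access_token']
stale_hdr = {'Authorization': 'Bearer ' + new_access}
print(requests.get(BASE + '/protected', headers=stale_hdr).status_code)        # expect 200
print(requests.get(BASE + '/protected-fresh', headers=stale_hdr).status_code)  # expect 401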
from unittest import TestCase
from unittest.mock import Mock, patch
import sys
sys.modules['smbus'] = Mock() # Mock the hardware layer to avoid errors.
from ledshimdemo.canvas import Canvas
from ledshimdemo.effects.cheerlights import CheerLightsEffect
class TestCheerLights(TestCase):
TEST_CANVAS_SIZE = 3 # type: int
def test_cheerlight_call(self):
canvas = Canvas(self.TEST_CANVAS_SIZE)
effect = CheerLightsEffect(canvas)
self.assertIsNone(effect.get_colour_from_channel("http://ejiferfneciudwedwojcmeiocnw.com"))
@patch('ledshimdemo.effects.cheerlights.CheerLightsEffect.get_colour_from_channel', return_value=None)
def test_effect_failed_cheerlights(self, patch_function):
canvas = Canvas(self.TEST_CANVAS_SIZE)
effect = CheerLightsEffect(canvas)
effect.compose()
patch_function.assert_called_once()
for i in range(canvas.get_size()):
self.assertEqual(canvas.get_pixel(i), canvas.BLANK_PIXEL)
def test_effect_working_cheerlights(self):
canvas = Canvas(self.TEST_CANVAS_SIZE)
effect = CheerLightsEffect(canvas)
# Must check before and after in case it changes during the test.
before = effect.get_colour_from_channel(effect.URL)
effect.compose()
after = effect.get_colour_from_channel(effect.URL)
self.assertRegex(repr(effect), "^CheerLights\\(Colour:({0}|{1})\\)$".format(before, after))
| [
"ledshimdemo.canvas.Canvas",
"unittest.mock.patch",
"ledshimdemo.effects.cheerlights.CheerLightsEffect",
"unittest.mock.Mock"
] | [((103, 109), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (107, 109), False, 'from unittest.mock import Mock, patch\n'), ((562, 673), 'unittest.mock.patch', 'patch', (['"""ledshimdemo.effects.cheerlights.CheerLightsEffect.get_colour_from_channel"""'], {'return_value': 'None'}), "(\n 'ledshimdemo.effects.cheerlights.CheerLightsEffect.get_colour_from_channel'\n , return_value=None)\n", (567, 673), False, 'from unittest.mock import Mock, patch\n'), ((383, 412), 'ledshimdemo.canvas.Canvas', 'Canvas', (['self.TEST_CANVAS_SIZE'], {}), '(self.TEST_CANVAS_SIZE)\n', (389, 412), False, 'from ledshimdemo.canvas import Canvas\n'), ((430, 455), 'ledshimdemo.effects.cheerlights.CheerLightsEffect', 'CheerLightsEffect', (['canvas'], {}), '(canvas)\n', (447, 455), False, 'from ledshimdemo.effects.cheerlights import CheerLightsEffect\n'), ((743, 772), 'ledshimdemo.canvas.Canvas', 'Canvas', (['self.TEST_CANVAS_SIZE'], {}), '(self.TEST_CANVAS_SIZE)\n', (749, 772), False, 'from ledshimdemo.canvas import Canvas\n'), ((790, 815), 'ledshimdemo.effects.cheerlights.CheerLightsEffect', 'CheerLightsEffect', (['canvas'], {}), '(canvas)\n', (807, 815), False, 'from ledshimdemo.effects.cheerlights import CheerLightsEffect\n'), ((1063, 1092), 'ledshimdemo.canvas.Canvas', 'Canvas', (['self.TEST_CANVAS_SIZE'], {}), '(self.TEST_CANVAS_SIZE)\n', (1069, 1092), False, 'from ledshimdemo.canvas import Canvas\n'), ((1110, 1135), 'ledshimdemo.effects.cheerlights.CheerLightsEffect', 'CheerLightsEffect', (['canvas'], {}), '(canvas)\n', (1127, 1135), False, 'from ledshimdemo.effects.cheerlights import CheerLightsEffect\n')] |
from colicoords.synthetic_data import add_readout_noise, draw_poisson
from colicoords import load
import numpy as np
import mahotas as mh
from tqdm import tqdm
import os
import tifffile
def chunk_list(l, sizes):
prev = 0
for s in sizes:
result = l[prev:prev+s]
prev += s
yield result
def generate_images(cell_list, num_images, cell_per_img, cell_per_img_std, shape):
nums = np.round(np.random.normal(cell_per_img, cell_per_img_std, num_images)).astype(int)
nums = nums[nums > 0]
assert sum(nums) < len(cell_list), 'Not enough cells'
chunked = [chunk for chunk in tqdm(chunk_list(cell_list, nums))]
dicts = [generate_image(cells, shape) for cells in tqdm(chunked)]
out_dict = {}
for i, d in enumerate(dicts):
for k, v in d.items():
if 'storm' in k:
v['frame'] = i + 1
if k in out_dict:
out_dict[k] = np.append(out_dict[k], v)
else:
out_dict[k] = v
else:
if k in out_dict:
out_dict[k][i] = v
else:
out_dict[k] = np.zeros((num_images, *shape))
out_dict[k][i] = v
return out_dict
def generate_image(cells, shape, max_dist=5):
thetas = 360 * np.random.rand(len(cells))
data_list = [cell.data.rotate(theta) for cell, theta in zip(cells, thetas)]
assert all([data.names == data_list[0].names for data in data_list]), 'All cells must have the same data elements'
out_dict = {name: np.zeros(shape) for name, dclass in zip(data_list[0].names, data_list[0].dclasses) if dclass != 'storm'}
for i, data in enumerate(data_list):
valid_position = False
while not valid_position:
pos_x = int(np.round(shape[1] * np.random.rand()))
pos_y = int(np.round(shape[0] * np.random.rand()))
min1 = pos_y - int(np.floor(data.shape[0]))
max1 = min1 + data.shape[0]
min2 = pos_x - int(np.floor(data.shape[1]))
max2 = min2 + data.shape[1]
# Crop the data for when the cell is on the border of the image
d_min1 = np.max([0 - min1, 0])
d_max1 = np.min([data.shape[0] + (shape[0] - pos_y), data.shape[0]])
d_min2 = np.max([0 - min2, 0])
d_max2 = np.min([data.shape[1] + (shape[1] - pos_x), data.shape[1]])
data_cropped = data[d_min1:d_max1, d_min2:d_max2]
# Limit image position to the edges of the image
min1 = np.max([min1, 0])
max1 = np.min([max1, shape[0]])
min2 = np.max([min2, 0])
max2 = np.min([max2, shape[1]])
temp_binary = np.zeros(shape)
temp_binary[min1:max1, min2:max2] = data_cropped.binary_img
out_binary = (out_dict['binary'] > 0).astype(int)
distance_map = mh.distance(1 - out_binary, metric='euclidean')
if np.any(distance_map[temp_binary.astype(bool)] < max_dist):
continue
valid_position = True
for name in data.names:
data_elem = data_cropped.data_dict[name]
if data_elem.dclass == 'storm':
data_elem['x'] += min2
data_elem['y'] += min1
xmax, ymax = shape[1], shape[0]
bools = (data_elem['x'] < 0) + (data_elem['x'] > xmax) + (data_elem['y'] < 0) + (data_elem['y'] > ymax)
data_out = data_elem[~bools].copy()
if name in out_dict:
out_dict[name] = np.append(out_dict[name], data_out)
else:
out_dict[name] = data_out
continue
elif data_elem.dclass == 'binary':
out_dict[name][min1:max1, min2:max2] += ((i+1)*data_elem)
else:
out_dict[name][min1:max1, min2:max2] += data_elem
return out_dict
def gen_image_from_storm(storm_table, shape, sigma=1.54, sigma_std=0.3):
xmax = shape[1]
ymax = shape[0]
step = 1
xi = np.arange(step / 2, xmax, step)
yi = np.arange(step / 2, ymax, step)
x_coords = np.repeat(xi, len(yi)).reshape(len(xi), len(yi)).T
y_coords = np.repeat(yi, len(xi)).reshape(len(yi), len(xi))
x, y = storm_table['x'], storm_table['y']
img = np.zeros_like(x_coords)
intensities = storm_table['intensity']
sigma = sigma * np.ones_like(x) if not sigma_std else np.random.normal(sigma, sigma_std, size=len(x))
for _sigma, _int, _x, _y in zip(sigma, intensities, x, y):
img += _int * np.exp(-(((_x - x_coords) / _sigma) ** 2 + ((_y - y_coords) / _sigma) ** 2) / 2)
return img
def gen_im(data_dir):
"""Generate microscopy images from a list of cell objects by placing them randomly oriented in the image."""
cell_list = load(os.path.join(data_dir, 'cell_obj', 'cells_final_selected.hdf5'))
out_dict = generate_images(cell_list, 1000, 10, 3, (512, 512))
if not os.path.exists(os.path.join(data_dir, 'images')):
os.mkdir(os.path.join(data_dir, 'images'))
np.save(os.path.join(data_dir, 'images', 'binary.npy'), out_dict['binary'])
np.save(os.path.join(data_dir, 'images', 'brightfield.npy'), out_dict['brightfield'])
np.save(os.path.join(data_dir, 'images', 'foci_inner.npy'), out_dict['foci_inner'])
np.save(os.path.join(data_dir, 'images', 'foci_outer.npy'), out_dict['foci_outer'])
np.save(os.path.join(data_dir, 'images', 'storm_inner.npy'), out_dict['storm_inner'])
np.save(os.path.join(data_dir, 'images', 'storm_outer.npy'), out_dict['storm_outer'])
tifffile.imsave(os.path.join(data_dir, 'images', 'binary.tif'), out_dict['binary'])
tifffile.imsave(os.path.join(data_dir, 'images', 'brightfield.tif'), out_dict['brightfield'])
tifffile.imsave(os.path.join(data_dir, 'images', 'foci_inner.tif'), out_dict['foci_inner'])
tifffile.imsave(os.path.join(data_dir, 'images', 'foci_outer.tif'), out_dict['foci_outer'])
np.savetxt(os.path.join(data_dir, 'images', 'storm_inner.txt'), out_dict['storm_inner'])
    np.savetxt(os.path.join(data_dir, 'images', 'storm_outer.txt'), out_dict['storm_outer'])
def noise_bf(data_dir):
"""add poissonian and readout noise to brightfield images"""
noise = 20
img_stack = np.load(os.path.join(data_dir, 'images', 'brightfield.npy'))
for photons in [10000, 1000, 500]:
ratio = 1.0453 # ratio between 'background' (no cells) and cell wall
img = (photons*(ratio-1))*img_stack + photons
img = draw_poisson(img)
img = add_readout_noise(img, noise)
tifffile.imsave(os.path.join(data_dir, 'images', 'bf_noise_{}_photons.tif'.format(photons)), img)
np.save(os.path.join(data_dir, 'images', 'bf_noise_{}_photons.npy'.format(photons)), img)
if __name__ == '__main__':
np.random.seed(42)
data_dir = r'.'
if not os.path.exists(os.path.join(data_dir, 'images')):
os.mkdir(os.path.join(data_dir, 'images'))
gen_im(data_dir)
noise_bf(data_dir)
| [
"numpy.random.normal",
"numpy.ones_like",
"mahotas.distance",
"colicoords.synthetic_data.draw_poisson",
"numpy.random.rand",
"tqdm.tqdm",
"os.path.join",
"numpy.floor",
"numpy.max",
"numpy.exp",
"numpy.append",
"numpy.zeros",
"colicoords.synthetic_data.add_readout_noise",
"numpy.random.seed",
"numpy.min",
"numpy.zeros_like",
"numpy.arange"
] | [((4100, 4131), 'numpy.arange', 'np.arange', (['(step / 2)', 'xmax', 'step'], {}), '(step / 2, xmax, step)\n', (4109, 4131), True, 'import numpy as np\n'), ((4141, 4172), 'numpy.arange', 'np.arange', (['(step / 2)', 'ymax', 'step'], {}), '(step / 2, ymax, step)\n', (4150, 4172), True, 'import numpy as np\n'), ((4361, 4384), 'numpy.zeros_like', 'np.zeros_like', (['x_coords'], {}), '(x_coords)\n', (4374, 4384), True, 'import numpy as np\n'), ((6879, 6897), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (6893, 6897), True, 'import numpy as np\n'), ((1573, 1588), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (1581, 1588), True, 'import numpy as np\n'), ((4874, 4937), 'os.path.join', 'os.path.join', (['data_dir', '"""cell_obj"""', '"""cells_final_selected.hdf5"""'], {}), "(data_dir, 'cell_obj', 'cells_final_selected.hdf5')\n", (4886, 4937), False, 'import os\n'), ((5133, 5179), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""binary.npy"""'], {}), "(data_dir, 'images', 'binary.npy')\n", (5145, 5179), False, 'import os\n'), ((5213, 5264), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""brightfield.npy"""'], {}), "(data_dir, 'images', 'brightfield.npy')\n", (5225, 5264), False, 'import os\n'), ((5303, 5353), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""foci_inner.npy"""'], {}), "(data_dir, 'images', 'foci_inner.npy')\n", (5315, 5353), False, 'import os\n'), ((5391, 5441), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""foci_outer.npy"""'], {}), "(data_dir, 'images', 'foci_outer.npy')\n", (5403, 5441), False, 'import os\n'), ((5479, 5530), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""storm_inner.npy"""'], {}), "(data_dir, 'images', 'storm_inner.npy')\n", (5491, 5530), False, 'import os\n'), ((5569, 5620), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""storm_outer.npy"""'], {}), "(data_dir, 'images', 'storm_outer.npy')\n", (5581, 5620), False, 'import os\n'), ((5668, 5714), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""binary.tif"""'], {}), "(data_dir, 'images', 'binary.tif')\n", (5680, 5714), False, 'import os\n'), ((5756, 5807), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""brightfield.tif"""'], {}), "(data_dir, 'images', 'brightfield.tif')\n", (5768, 5807), False, 'import os\n'), ((5854, 5904), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""foci_inner.tif"""'], {}), "(data_dir, 'images', 'foci_inner.tif')\n", (5866, 5904), False, 'import os\n'), ((5950, 6000), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""foci_outer.tif"""'], {}), "(data_dir, 'images', 'foci_outer.tif')\n", (5962, 6000), False, 'import os\n'), ((6041, 6092), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""storm_inner.txt"""'], {}), "(data_dir, 'images', 'storm_inner.txt')\n", (6053, 6092), False, 'import os\n'), ((6134, 6185), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""storm_outer.txt"""'], {}), "(data_dir, 'images', 'storm_outer.txt')\n", (6146, 6185), False, 'import os\n'), ((6342, 6393), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""brightfield.npy"""'], {}), "(data_dir, 'images', 'brightfield.npy')\n", (6354, 6393), False, 'import os\n'), ((6580, 6597), 'colicoords.synthetic_data.draw_poisson', 'draw_poisson', (['img'], {}), '(img)\n', (6592, 6597), False, 'from colicoords.synthetic_data import add_readout_noise, draw_poisson\n'), ((6612, 
6641), 'colicoords.synthetic_data.add_readout_noise', 'add_readout_noise', (['img', 'noise'], {}), '(img, noise)\n', (6629, 6641), False, 'from colicoords.synthetic_data import add_readout_noise, draw_poisson\n'), ((706, 719), 'tqdm.tqdm', 'tqdm', (['chunked'], {}), '(chunked)\n', (710, 719), False, 'from tqdm import tqdm\n'), ((2202, 2223), 'numpy.max', 'np.max', (['[0 - min1, 0]'], {}), '([0 - min1, 0])\n', (2208, 2223), True, 'import numpy as np\n'), ((2245, 2304), 'numpy.min', 'np.min', (['[data.shape[0] + (shape[0] - pos_y), data.shape[0]]'], {}), '([data.shape[0] + (shape[0] - pos_y), data.shape[0]])\n', (2251, 2304), True, 'import numpy as np\n'), ((2327, 2348), 'numpy.max', 'np.max', (['[0 - min2, 0]'], {}), '([0 - min2, 0])\n', (2333, 2348), True, 'import numpy as np\n'), ((2370, 2429), 'numpy.min', 'np.min', (['[data.shape[1] + (shape[1] - pos_x), data.shape[1]]'], {}), '([data.shape[1] + (shape[1] - pos_x), data.shape[1]])\n', (2376, 2429), True, 'import numpy as np\n'), ((2574, 2591), 'numpy.max', 'np.max', (['[min1, 0]'], {}), '([min1, 0])\n', (2580, 2591), True, 'import numpy as np\n'), ((2611, 2635), 'numpy.min', 'np.min', (['[max1, shape[0]]'], {}), '([max1, shape[0]])\n', (2617, 2635), True, 'import numpy as np\n'), ((2655, 2672), 'numpy.max', 'np.max', (['[min2, 0]'], {}), '([min2, 0])\n', (2661, 2672), True, 'import numpy as np\n'), ((2692, 2716), 'numpy.min', 'np.min', (['[max2, shape[1]]'], {}), '([max2, shape[1]])\n', (2698, 2716), True, 'import numpy as np\n'), ((2744, 2759), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (2752, 2759), True, 'import numpy as np\n'), ((2921, 2968), 'mahotas.distance', 'mh.distance', (['(1 - out_binary)'], {'metric': '"""euclidean"""'}), "(1 - out_binary, metric='euclidean')\n", (2932, 2968), True, 'import mahotas as mh\n'), ((4448, 4463), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (4460, 4463), True, 'import numpy as np\n'), ((4619, 4704), 'numpy.exp', 'np.exp', (['(-(((_x - x_coords) / _sigma) ** 2 + ((_y - y_coords) / _sigma) ** 2) / 2)'], {}), '(-(((_x - x_coords) / _sigma) ** 2 + ((_y - y_coords) / _sigma) ** 2) / 2\n )\n', (4625, 4704), True, 'import numpy as np\n'), ((5034, 5066), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""'], {}), "(data_dir, 'images')\n", (5046, 5066), False, 'import os\n'), ((5086, 5118), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""'], {}), "(data_dir, 'images')\n", (5098, 5118), False, 'import os\n'), ((6944, 6976), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""'], {}), "(data_dir, 'images')\n", (6956, 6976), False, 'import os\n'), ((6996, 7028), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""'], {}), "(data_dir, 'images')\n", (7008, 7028), False, 'import os\n'), ((423, 483), 'numpy.random.normal', 'np.random.normal', (['cell_per_img', 'cell_per_img_std', 'num_images'], {}), '(cell_per_img, cell_per_img_std, num_images)\n', (439, 483), True, 'import numpy as np\n'), ((936, 961), 'numpy.append', 'np.append', (['out_dict[k]', 'v'], {}), '(out_dict[k], v)\n', (945, 961), True, 'import numpy as np\n'), ((1167, 1197), 'numpy.zeros', 'np.zeros', (['(num_images, *shape)'], {}), '((num_images, *shape))\n', (1175, 1197), True, 'import numpy as np\n'), ((1942, 1965), 'numpy.floor', 'np.floor', (['data.shape[0]'], {}), '(data.shape[0])\n', (1950, 1965), True, 'import numpy as np\n'), ((2039, 2062), 'numpy.floor', 'np.floor', (['data.shape[1]'], {}), '(data.shape[1])\n', (2047, 2062), True, 'import numpy as np\n'), ((3607, 3642), 
'numpy.append', 'np.append', (['out_dict[name]', 'data_out'], {}), '(out_dict[name], data_out)\n', (3616, 3642), True, 'import numpy as np\n'), ((1828, 1844), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1842, 1844), True, 'import numpy as np\n'), ((1891, 1907), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1905, 1907), True, 'import numpy as np\n')] |
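A minimal sketch of gen_image_from_storm above with a hand-made STORM table; only the 'x', 'y' and 'intensity' fields are required by the function, and the emitter positions here are made up.
import numpy as np
# Tiny synthetic STORM table with the three fields gen_image_from_storm reads.
storm = np.zeros(2, dtype=[('x', float), ('y', float), ('intensity', float)])
storm['x'] = [10.0, 40.0]
storm['y'] = [12.0, 30.0]
storm['intensity'] = [500.0, 800.0]
img = gen_image_from_storm(storm, shape=(64, 64))
print(img.shape, img.max())  # (64, 64) and a peak near the brightest emitter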
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import ast
from typing import Text, Optional, Union, List
from ai_flow.rest_endpoint.protobuf.metric_service_pb2 import MetricMetaResponse, ListMetricMetaResponse, \
MetricSummaryResponse, ListMetricSummaryResponse
from ai_flow.rest_endpoint.service import int64Value, stringValue
from ai_flow.common.properties import Properties
from ai_flow.meta.metric_meta import MetricMeta, MetricType, MetricSummary
from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, \
SUCCESS, RESOURCE_DOES_NOT_EXIST
from ai_flow.store.db.db_model import SqlMetricMeta, SqlMetricSummary
from ai_flow.store.db.db_model import MongoMetricSummary, MongoMetricMeta
def table_to_metric_meta(metric_meta_result) -> MetricMeta:
properties = metric_meta_result.properties
if properties is not None:
properties = ast.literal_eval(properties)
return MetricMeta(uuid=metric_meta_result.uuid,
name=metric_meta_result.name,
dataset_id=metric_meta_result.dataset_id,
model_name=metric_meta_result.model_name,
model_version=metric_meta_result.model_version,
job_id=metric_meta_result.job_id,
start_time=metric_meta_result.start_time,
end_time=metric_meta_result.end_time,
metric_type=MetricType.value_of(metric_meta_result.metric_type),
uri=metric_meta_result.uri,
tags=metric_meta_result.tags,
metric_description=metric_meta_result.metric_description,
properties=properties)
def table_to_metric_summary(metric_summary_result) -> MetricSummary:
return MetricSummary(uuid=metric_summary_result.uuid,
metric_id=metric_summary_result.metric_id,
metric_key=metric_summary_result.metric_key,
metric_value=metric_summary_result.metric_value)
def metric_meta_to_table(name: Text,
dataset_id: int,
model_name: Optional[Text],
model_version: Optional[Text],
job_id: int,
start_time: int,
end_time: int,
metric_type: MetricType,
uri: Text,
tags: Text,
metric_description: Text,
properties: Properties,
store_type: Text = 'SqlAlchemyStore'):
if properties is not None:
properties = str(properties)
if store_type == 'MongoStore':
_class = MongoMetricMeta
else:
_class = SqlMetricMeta
return _class(name=name,
dataset_id=dataset_id,
model_name=model_name,
model_version=model_version,
job_id=job_id,
start_time=start_time,
end_time=end_time,
metric_type=metric_type.value,
uri=uri,
tags=tags,
metric_description=metric_description,
properties=properties)
def metric_summary_to_table(metric_id: int,
metric_key: Text,
metric_value: Text,
store_type: Text = 'SqlAlchemyStore'):
if store_type == 'MongoStore':
_class = MongoMetricSummary
else:
_class = SqlMetricSummary
return _class(metric_id=metric_id,
metric_key=metric_key,
metric_value=metric_value)
def metric_meta_to_proto(metric_meta: MetricMeta) -> MetricMetaProto:
if metric_meta.metric_type == MetricType.DATASET:
metric_type = MetricTypeProto.DATASET
else:
metric_type = MetricTypeProto.MODEL
return MetricMetaProto(uuid=metric_meta.uuid,
name=stringValue(metric_meta.name),
dataset_id=int64Value(metric_meta.dataset_id),
model_name=stringValue(metric_meta.model_name),
model_version=stringValue(metric_meta.model_version),
job_id=int64Value(metric_meta.job_id),
start_time=int64Value(metric_meta.start_time),
end_time=int64Value(metric_meta.end_time),
metric_type=metric_type,
uri=stringValue(metric_meta.uri),
tags=stringValue(metric_meta.tags),
metric_description=stringValue(metric_meta.metric_description),
properties=metric_meta.properties)
def metric_summary_to_proto(metric_summary: MetricSummary) -> MetricSummaryProto:
return MetricSummaryProto(uuid=metric_summary.uuid,
metric_id=int64Value(metric_summary.metric_id),
metric_key=stringValue(metric_summary.metric_key),
metric_value=stringValue(metric_summary.metric_value))
def proto_to_metric_meta(metric_meta_proto: MetricMetaProto) -> MetricMeta:
if MetricTypeProto.DATASET == metric_meta_proto.metric_type:
metric_type = MetricType.DATASET
else:
metric_type = MetricType.MODEL
return MetricMeta(uuid=metric_meta_proto.uuid,
name=metric_meta_proto.name.value,
dataset_id=metric_meta_proto.dataset_id.value,
model_name=metric_meta_proto.model_name.value,
model_version=metric_meta_proto.model_version.value,
job_id=metric_meta_proto.job_id.value,
start_time=metric_meta_proto.start_time.value,
end_time=metric_meta_proto.end_time.value,
metric_type=metric_type,
uri=metric_meta_proto.uri.value if metric_meta_proto.HasField('uri') else None,
tags=metric_meta_proto.tags.value if metric_meta_proto.HasField('tags') else None,
metric_description=metric_meta_proto.metric_description.value
if metric_meta_proto.HasField('metric_description') else None,
properties=metric_meta_proto.properties
)
def proto_to_metric_summary(metric_summary_proto: MetricSummaryProto) -> MetricSummary:
return MetricSummary(uuid=metric_summary_proto.uuid,
metric_id=metric_summary_proto.metric_id.value,
metric_key=metric_summary_proto.metric_key.value
if metric_summary_proto.HasField('metric_key') else None,
metric_value=metric_summary_proto.metric_value.value
if metric_summary_proto.HasField('metric_value') else None
)
def _warp_metric_meta_response(metric_meta: Optional[MetricMeta]) -> MetricMetaResponse:
if metric_meta is not None:
return MetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(),
metric_meta=metric_meta_to_proto(metric_meta))
else:
return MetricMetaResponse(return_code=1,
return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(),
metric_meta=None)
def _warp_list_metric_meta_response(metric_meta: Union[None, MetricMeta, List[MetricMeta]]) -> MetricMetaResponse:
if metric_meta is not None:
if isinstance(metric_meta, MetricMeta):
return ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(),
metric_meta=[metric_meta_to_proto(metric_meta)])
else:
res = []
for meta in metric_meta:
res.append(metric_meta_to_proto(meta))
return ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(),
metric_meta=res)
else:
return ListMetricMetaResponse(return_code=1,
return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(),
metric_meta=None)
def _warp_metric_summary_response(metric_summary: Optional[MetricSummary]) -> MetricSummaryResponse:
if metric_summary is not None:
return MetricSummaryResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(),
metric_summary=metric_summary_to_proto(metric_summary))
else:
return MetricSummaryResponse(return_code=1,
return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(),
metric_summary=None)
def _warp_list_metric_summary_response(metric_summary: Optional[List[MetricSummary]]) -> ListMetricSummaryResponse:
if metric_summary is not None:
res = []
for summary in metric_summary:
res.append(metric_summary_to_proto(summary))
return ListMetricSummaryResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(),
metric_summary=res)
else:
return ListMetricSummaryResponse(return_code=1,
return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(),
metric_summary=None)
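# --- Editor's usage sketch (illustrative, not part of the original module) ---
# Shows how the converters above are typically chained: domain object -> protobuf
# -> RPC response wrapper. All field values below are invented, and the sketch
# assumes the ai_flow package and its generated protobuf classes are importable.
def _example_metric_meta_flow():
    meta = MetricMeta(uuid=1, name='auc', dataset_id=2, model_name='model_a',
                      model_version='1', job_id=3, start_time=0, end_time=10,
                      metric_type=MetricType.MODEL, uri='/tmp/auc', tags='eval',
                      metric_description='model AUC', properties={})
    proto = metric_meta_to_proto(meta)          # scalars wrapped into protobuf value types
    found = _warp_metric_meta_response(meta)    # return_code 0 (SUCCESS)
    missing = _warp_metric_meta_response(None)  # return_code 1 (RESOURCE_DOES_NOT_EXIST)
    return proto, found, missing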
| [
"ai_flow.rest_endpoint.service.stringValue",
"ai_flow.meta.metric_meta.MetricType.value_of",
"ai_flow.rest_endpoint.protobuf.message_pb2.ReturnCode.Name",
"ast.literal_eval",
"ai_flow.meta.metric_meta.MetricSummary",
"ai_flow.rest_endpoint.service.int64Value"
] | [((2570, 2763), 'ai_flow.meta.metric_meta.MetricSummary', 'MetricSummary', ([], {'uuid': 'metric_summary_result.uuid', 'metric_id': 'metric_summary_result.metric_id', 'metric_key': 'metric_summary_result.metric_key', 'metric_value': 'metric_summary_result.metric_value'}), '(uuid=metric_summary_result.uuid, metric_id=\n metric_summary_result.metric_id, metric_key=metric_summary_result.\n metric_key, metric_value=metric_summary_result.metric_value)\n', (2583, 2763), False, 'from ai_flow.meta.metric_meta import MetricMeta, MetricType, MetricSummary\n'), ((1663, 1691), 'ast.literal_eval', 'ast.literal_eval', (['properties'], {}), '(properties)\n', (1679, 1691), False, 'import ast\n'), ((2208, 2259), 'ai_flow.meta.metric_meta.MetricType.value_of', 'MetricType.value_of', (['metric_meta_result.metric_type'], {}), '(metric_meta_result.metric_type)\n', (2227, 2259), False, 'from ai_flow.meta.metric_meta import MetricMeta, MetricType, MetricSummary\n'), ((4831, 4860), 'ai_flow.rest_endpoint.service.stringValue', 'stringValue', (['metric_meta.name'], {}), '(metric_meta.name)\n', (4842, 4860), False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((4900, 4934), 'ai_flow.rest_endpoint.service.int64Value', 'int64Value', (['metric_meta.dataset_id'], {}), '(metric_meta.dataset_id)\n', (4910, 4934), False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((4974, 5009), 'ai_flow.rest_endpoint.service.stringValue', 'stringValue', (['metric_meta.model_name'], {}), '(metric_meta.model_name)\n', (4985, 5009), False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((5052, 5090), 'ai_flow.rest_endpoint.service.stringValue', 'stringValue', (['metric_meta.model_version'], {}), '(metric_meta.model_version)\n', (5063, 5090), False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((5126, 5156), 'ai_flow.rest_endpoint.service.int64Value', 'int64Value', (['metric_meta.job_id'], {}), '(metric_meta.job_id)\n', (5136, 5156), False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((5196, 5230), 'ai_flow.rest_endpoint.service.int64Value', 'int64Value', (['metric_meta.start_time'], {}), '(metric_meta.start_time)\n', (5206, 5230), False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((5268, 5300), 'ai_flow.rest_endpoint.service.int64Value', 'int64Value', (['metric_meta.end_time'], {}), '(metric_meta.end_time)\n', (5278, 5300), False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((5385, 5413), 'ai_flow.rest_endpoint.service.stringValue', 'stringValue', (['metric_meta.uri'], {}), '(metric_meta.uri)\n', (5396, 5413), False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((5447, 5476), 'ai_flow.rest_endpoint.service.stringValue', 'stringValue', (['metric_meta.tags'], {}), '(metric_meta.tags)\n', (5458, 5476), False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((5524, 5567), 'ai_flow.rest_endpoint.service.stringValue', 'stringValue', (['metric_meta.metric_description'], {}), '(metric_meta.metric_description)\n', (5535, 5567), False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((5811, 5847), 'ai_flow.rest_endpoint.service.int64Value', 'int64Value', (['metric_summary.metric_id'], {}), '(metric_summary.metric_id)\n', (5821, 5847), False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((5890, 5928), 'ai_flow.rest_endpoint.service.stringValue', 'stringValue', 
(['metric_summary.metric_key'], {}), '(metric_summary.metric_key)\n', (5901, 5928), False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((5973, 6013), 'ai_flow.rest_endpoint.service.stringValue', 'stringValue', (['metric_summary.metric_value'], {}), '(metric_summary.metric_value)\n', (5984, 6013), False, 'from ai_flow.rest_endpoint.service import int64Value, stringValue\n'), ((8023, 8047), 'ai_flow.rest_endpoint.protobuf.message_pb2.ReturnCode.Name', 'ReturnCode.Name', (['SUCCESS'], {}), '(SUCCESS)\n', (8038, 8047), False, 'from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, SUCCESS, RESOURCE_DOES_NOT_EXIST\n'), ((8242, 8282), 'ai_flow.rest_endpoint.protobuf.message_pb2.ReturnCode.Name', 'ReturnCode.Name', (['RESOURCE_DOES_NOT_EXIST'], {}), '(RESOURCE_DOES_NOT_EXIST)\n', (8257, 8282), False, 'from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, SUCCESS, RESOURCE_DOES_NOT_EXIST\n'), ((9134, 9174), 'ai_flow.rest_endpoint.protobuf.message_pb2.ReturnCode.Name', 'ReturnCode.Name', (['RESOURCE_DOES_NOT_EXIST'], {}), '(RESOURCE_DOES_NOT_EXIST)\n', (9149, 9174), False, 'from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, SUCCESS, RESOURCE_DOES_NOT_EXIST\n'), ((9441, 9465), 'ai_flow.rest_endpoint.protobuf.message_pb2.ReturnCode.Name', 'ReturnCode.Name', (['SUCCESS'], {}), '(SUCCESS)\n', (9456, 9465), False, 'from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, SUCCESS, RESOURCE_DOES_NOT_EXIST\n'), ((9678, 9718), 'ai_flow.rest_endpoint.protobuf.message_pb2.ReturnCode.Name', 'ReturnCode.Name', (['RESOURCE_DOES_NOT_EXIST'], {}), '(RESOURCE_DOES_NOT_EXIST)\n', (9693, 9718), False, 'from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, SUCCESS, RESOURCE_DOES_NOT_EXIST\n'), ((10119, 10143), 'ai_flow.rest_endpoint.protobuf.message_pb2.ReturnCode.Name', 'ReturnCode.Name', (['SUCCESS'], {}), '(SUCCESS)\n', (10134, 10143), False, 'from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, SUCCESS, RESOURCE_DOES_NOT_EXIST\n'), ((10332, 10372), 'ai_flow.rest_endpoint.protobuf.message_pb2.ReturnCode.Name', 'ReturnCode.Name', (['RESOURCE_DOES_NOT_EXIST'], {}), '(RESOURCE_DOES_NOT_EXIST)\n', (10347, 10372), False, 'from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, SUCCESS, RESOURCE_DOES_NOT_EXIST\n'), ((8609, 8633), 'ai_flow.rest_endpoint.protobuf.message_pb2.ReturnCode.Name', 'ReturnCode.Name', (['SUCCESS'], {}), '(SUCCESS)\n', (8624, 8633), False, 'from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, SUCCESS, RESOURCE_DOES_NOT_EXIST\n'), ((8929, 8953), 'ai_flow.rest_endpoint.protobuf.message_pb2.ReturnCode.Name', 'ReturnCode.Name', (['SUCCESS'], {}), '(SUCCESS)\n', (8944, 8953), False, 'from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, SUCCESS, RESOURCE_DOES_NOT_EXIST\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import numpy as np
def flow_to_img(flow, normalize=True):
"""Convert flow to viewable image, using color hue to encode flow vector orientation, and color saturation to
encode vector length. This is similar to the OpenCV tutorial on dense optical flow, except that they map vector
length to the value plane of the HSV color model, instead of the saturation plane, as we do here.
Args:
flow: optical flow
normalize: Normalize flow to 0..255
Returns:
img: viewable representation of the dense optical flow in RGB format
Ref:
https://github.com/philferriere/tfoptflow/blob/33e8a701e34c8ce061f17297d40619afbd459ade/tfoptflow/optflow.py
"""
hsv = np.zeros((flow.shape[0], flow.shape[1], 3), dtype=np.uint8)
flow_magnitude, flow_angle = cv2.cartToPolar(flow[..., 0].astype(np.float32), flow[..., 1].astype(np.float32))
# A couple times, we've gotten NaNs out of the above...
nans = np.isnan(flow_magnitude)
if np.any(nans):
nans = np.where(nans)
flow_magnitude[nans] = 0.
# Normalize
hsv[..., 0] = flow_angle * 180 / np.pi / 2
if normalize is True:
hsv[..., 1] = cv2.normalize(flow_magnitude, None, 0, 255, cv2.NORM_MINMAX)
else:
hsv[..., 1] = flow_magnitude
hsv[..., 2] = 255
img = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
return img | [
"cv2.normalize",
"numpy.where",
"numpy.any",
"numpy.zeros",
"numpy.isnan",
"cv2.cvtColor"
] | [((764, 823), 'numpy.zeros', 'np.zeros', (['(flow.shape[0], flow.shape[1], 3)'], {'dtype': 'np.uint8'}), '((flow.shape[0], flow.shape[1], 3), dtype=np.uint8)\n', (772, 823), True, 'import numpy as np\n'), ((1011, 1035), 'numpy.isnan', 'np.isnan', (['flow_magnitude'], {}), '(flow_magnitude)\n', (1019, 1035), True, 'import numpy as np\n'), ((1043, 1055), 'numpy.any', 'np.any', (['nans'], {}), '(nans)\n', (1049, 1055), True, 'import numpy as np\n'), ((1373, 1409), 'cv2.cvtColor', 'cv2.cvtColor', (['hsv', 'cv2.COLOR_HSV2RGB'], {}), '(hsv, cv2.COLOR_HSV2RGB)\n', (1385, 1409), False, 'import cv2\n'), ((1072, 1086), 'numpy.where', 'np.where', (['nans'], {}), '(nans)\n', (1080, 1086), True, 'import numpy as np\n'), ((1233, 1293), 'cv2.normalize', 'cv2.normalize', (['flow_magnitude', 'None', '(0)', '(255)', 'cv2.NORM_MINMAX'], {}), '(flow_magnitude, None, 0, 255, cv2.NORM_MINMAX)\n', (1246, 1293), False, 'import cv2\n')] |
# Copyright (c) 2015 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import binascii
import netaddr
from oslo_log import log as logging
from oslo_utils import excutils
from neutron.agent.l3 import dvr_fip_ns
from neutron.agent.l3 import dvr_snat_ns
from neutron.agent.l3 import router_info as router
from neutron.agent.linux import ip_lib
from neutron.common import constants as l3_constants
from neutron.common import utils as common_utils
from neutron.i18n import _LE
LOG = logging.getLogger(__name__)
# xor-folding mask used for IPv6 rule index
MASK_30 = 0x3fffffff
class DvrRouter(router.RouterInfo):
def __init__(self, agent, host, *args, **kwargs):
super(DvrRouter, self).__init__(*args, **kwargs)
self.agent = agent
self.host = host
self.floating_ips_dict = {}
self.snat_iptables_manager = None
# Linklocal subnet for router and floating IP namespace link
self.rtr_fip_subnet = None
self.dist_fip_count = None
self.snat_namespace = None
def get_floating_ips(self):
"""Filter Floating IPs to be hosted on this agent."""
floating_ips = super(DvrRouter, self).get_floating_ips()
return [i for i in floating_ips if i['host'] == self.host]
def get_snat_interfaces(self):
return self.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, [])
def get_snat_int_device_name(self, port_id):
long_name = dvr_snat_ns.SNAT_INT_DEV_PREFIX + port_id
return long_name[:self.driver.DEV_NAME_LEN]
def _handle_fip_nat_rules(self, interface_name, action):
"""Configures NAT rules for Floating IPs for DVR.
Remove all the rules. This is safe because if
use_namespaces is set as False then the agent can
only configure one router, otherwise each router's
NAT rules will be in their own namespace.
"""
self.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING')
self.iptables_manager.ipv4['nat'].empty_chain('snat')
# Add back the jump to float-snat
self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
# And add them back if the action is add_rules
if action == 'add_rules' and interface_name:
rule = ('POSTROUTING', '! -i %(interface_name)s '
'! -o %(interface_name)s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' %
{'interface_name': interface_name})
self.iptables_manager.ipv4['nat'].add_rule(*rule)
self.iptables_manager.apply()
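    # Editor's note (illustrative): the POSTROUTING rule added above ACCEPTs traffic
    # that neither entered nor leaves through `interface_name` and is not part of a
    # DNATed connection, so such traffic skips the snat/float-snat processing; all
    # other traffic falls through to those chains.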
def floating_ip_added_dist(self, fip, fip_cidr):
"""Add floating IP to FIP namespace."""
floating_ip = fip['floating_ip_address']
fixed_ip = fip['fixed_ip_address']
rule_pr = self.fip_ns.allocate_rule_priority()
self.floating_ips_dict[floating_ip] = rule_pr
fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id)
ip_rule = ip_lib.IPRule(namespace=self.ns_name)
ip_rule.rule.add(fixed_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr)
#Add routing rule in fip namespace
fip_ns_name = self.fip_ns.get_name()
rtr_2_fip, _ = self.rtr_fip_subnet.get_pair()
device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name)
device.route.add_route(fip_cidr, str(rtr_2_fip.ip))
interface_name = (
self.fip_ns.get_ext_device_name(
self.fip_ns.agent_gateway_port['id']))
ip_lib.send_garp_for_proxyarp(fip_ns_name,
interface_name,
floating_ip,
self.agent_conf.send_arp_for_ha)
# update internal structures
self.dist_fip_count = self.dist_fip_count + 1
def floating_ip_removed_dist(self, fip_cidr):
"""Remove floating IP from FIP namespace."""
floating_ip = fip_cidr.split('/')[0]
rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id)
fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id)
if self.rtr_fip_subnet is None:
self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate(
self.router_id)
rtr_2_fip, fip_2_rtr = self.rtr_fip_subnet.get_pair()
fip_ns_name = self.fip_ns.get_name()
if floating_ip in self.floating_ips_dict:
rule_pr = self.floating_ips_dict[floating_ip]
ip_rule = ip_lib.IPRule(namespace=self.ns_name)
ip_rule.rule.delete(floating_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr)
self.fip_ns.deallocate_rule_priority(rule_pr)
#TODO(rajeev): Handle else case - exception/log?
device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name)
device.route.delete_route(fip_cidr, str(rtr_2_fip.ip))
# check if this is the last FIP for this router
self.dist_fip_count = self.dist_fip_count - 1
if self.dist_fip_count == 0:
#remove default route entry
device = ip_lib.IPDevice(rtr_2_fip_name, namespace=self.ns_name)
ns_ip = ip_lib.IPWrapper(namespace=fip_ns_name)
device.route.delete_gateway(str(fip_2_rtr.ip),
table=dvr_fip_ns.FIP_RT_TBL)
self.fip_ns.local_subnets.release(self.router_id)
self.rtr_fip_subnet = None
ns_ip.del_veth(fip_2_rtr_name)
is_last = self.fip_ns.unsubscribe(self.router_id)
if is_last:
# TODO(Carl) I can't help but think that another router could
# come in and want to start using this namespace while this is
# destroying it. The two could end up conflicting on
# creating/destroying interfaces and such. I think I'd like a
# semaphore to sync creation/deletion of this namespace.
self.fip_ns.delete()
self.fip_ns = None
def add_floating_ip(self, fip, interface_name, device):
if not self._add_fip_addr_to_device(fip, device):
return l3_constants.FLOATINGIP_STATUS_ERROR
# Special Handling for DVR - update FIP namespace
ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
self.floating_ip_added_dist(fip, ip_cidr)
return l3_constants.FLOATINGIP_STATUS_ACTIVE
def remove_floating_ip(self, device, ip_cidr):
super(DvrRouter, self).remove_floating_ip(device, ip_cidr)
self.floating_ip_removed_dist(ip_cidr)
def create_snat_namespace(self):
# TODO(mlavalle): in the near future, this method should contain the
# code in the L3 agent that creates a gateway for a dvr. The first step
# is to move the creation of the snat namespace here
self.snat_namespace = dvr_snat_ns.SnatNamespace(self.router['id'],
self.agent_conf,
self.driver,
self.use_ipv6)
self.snat_namespace.create()
return self.snat_namespace
def delete_snat_namespace(self):
# TODO(mlavalle): in the near future, this method should contain the
# code in the L3 agent that removes an external gateway for a dvr. The
# first step is to move the deletion of the snat namespace here
self.snat_namespace.delete()
self.snat_namespace = None
def _get_internal_port(self, subnet_id):
"""Return internal router port based on subnet_id."""
router_ports = self.router.get(l3_constants.INTERFACE_KEY, [])
for port in router_ports:
fips = port['fixed_ips']
for f in fips:
if f['subnet_id'] == subnet_id:
return port
def _update_arp_entry(self, ip, mac, subnet_id, operation):
"""Add or delete arp entry into router namespace for the subnet."""
port = self._get_internal_port(subnet_id)
# update arp entry only if the subnet is attached to the router
if not port:
return
try:
# TODO(mrsmith): optimize the calls below for bulk calls
interface_name = self.get_internal_device_name(port['id'])
device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
if operation == 'add':
device.neigh.add(ip, mac)
elif operation == 'delete':
device.neigh.delete(ip, mac)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("DVR: Failed updating arp entry"))
def _set_subnet_arp_info(self, port):
"""Set ARP info retrieved from Plugin for existing ports."""
if 'id' not in port['subnet']:
return
subnet_id = port['subnet']['id']
# TODO(Carl) Can we eliminate the need to make this RPC while
# processing a router.
subnet_ports = self.agent.get_ports_by_subnet(subnet_id)
for p in subnet_ports:
if p['device_owner'] not in l3_constants.ROUTER_INTERFACE_OWNERS:
for fixed_ip in p['fixed_ips']:
self._update_arp_entry(fixed_ip['ip_address'],
p['mac_address'],
subnet_id,
'add')
def _map_internal_interfaces(self, int_port, snat_ports):
"""Return the SNAT port for the given internal interface port."""
fixed_ip = int_port['fixed_ips'][0]
subnet_id = fixed_ip['subnet_id']
match_port = [p for p in snat_ports if
p['fixed_ips'][0]['subnet_id'] == subnet_id]
if match_port:
return match_port[0]
else:
LOG.error(_LE('DVR: no map match_port found!'))
@staticmethod
def _get_snat_idx(ip_cidr):
"""Generate index for DVR snat rules and route tables.
The index value has to be 32 bits or less but more than the system
generated entries i.e. 32768. For IPv4 use the numeric value of the
cidr. For IPv6 generate a crc32 bit hash and xor-fold to 30 bits.
Use the freed range to extend smaller values so that they become
greater than system generated entries.
"""
net = netaddr.IPNetwork(ip_cidr)
if net.version == 6:
# the crc32 & 0xffffffff is for Python 2.6 and 3.0 compatibility
snat_idx = binascii.crc32(ip_cidr) & 0xffffffff
# xor-fold the hash to reserve upper range to extend smaller values
snat_idx = (snat_idx >> 30) ^ (snat_idx & MASK_30)
if snat_idx < 32768:
snat_idx = snat_idx + MASK_30
else:
snat_idx = net.value
return snat_idx
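    # Worked example (editor's note, values illustrative): for an IPv4 port with
    # ip_cidr '10.1.2.3/24', netaddr.IPNetwork('10.1.2.3/24').value is 167838211,
    # already above the 32768 floor of system-generated entries, so it is used
    # directly as the rule/table index. For IPv6, the 32-bit crc32 hash h is
    # xor-folded to 30 bits as (h >> 30) ^ (h & MASK_30) and bumped up by MASK_30
    # whenever the folded value lands below 32768.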
def _snat_redirect_add(self, gateway, sn_port, sn_int):
"""Adds rules and routes for SNAT redirection."""
try:
ip_cidr = sn_port['ip_cidr']
snat_idx = self._get_snat_idx(ip_cidr)
ns_ipr = ip_lib.IPRule(namespace=self.ns_name)
ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name)
ns_ipwrapr = ip_lib.IPWrapper(namespace=self.ns_name)
ns_ipd.route.add_gateway(gateway, table=snat_idx)
ns_ipr.rule.add(ip_cidr, snat_idx, snat_idx)
ns_ipwrapr.netns.execute(['sysctl', '-w', 'net.ipv4.conf.%s.'
'send_redirects=0' % sn_int])
except Exception:
LOG.exception(_LE('DVR: error adding redirection logic'))
def _snat_redirect_remove(self, gateway, sn_port, sn_int):
"""Removes rules and routes for SNAT redirection."""
try:
ip_cidr = sn_port['ip_cidr']
snat_idx = self._get_snat_idx(ip_cidr)
ns_ipr = ip_lib.IPRule(namespace=self.ns_name)
ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name)
ns_ipd.route.delete_gateway(gateway, table=snat_idx)
ns_ipr.rule.delete(ip_cidr, snat_idx, snat_idx)
except Exception:
LOG.exception(_LE('DVR: removed snat failed'))
def get_gw_port_host(self):
host = self.router.get('gw_port_host')
if not host:
LOG.debug("gw_port_host missing from router: %s",
self.router['id'])
return host
def internal_network_added(self, port):
super(DvrRouter, self).internal_network_added(port)
ex_gw_port = self.get_ex_gw_port()
if not ex_gw_port:
return
snat_ports = self.get_snat_interfaces()
sn_port = self._map_internal_interfaces(port, snat_ports)
if not sn_port:
return
interface_name = self.get_internal_device_name(port['id'])
self._snat_redirect_add(sn_port['fixed_ips'][0]['ip_address'],
port,
interface_name)
# TODO(Carl) This is a sign that dvr needs two router classes.
is_this_snat_host = (self.agent_conf.agent_mode == 'dvr_snat' and
self.get_gw_port_host() == self.host)
if not is_this_snat_host:
return
ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(self.router['id'])
self._set_subnet_info(sn_port)
interface_name = self.get_snat_int_device_name(sn_port['id'])
self._internal_network_added(
ns_name,
sn_port['network_id'],
sn_port['id'],
sn_port['ip_cidr'],
sn_port['mac_address'],
interface_name,
dvr_snat_ns.SNAT_INT_DEV_PREFIX)
self._set_subnet_arp_info(port)
def _dvr_internal_network_removed(self, port):
if not self.ex_gw_port:
return
sn_port = self._map_internal_interfaces(port, self.snat_ports)
if not sn_port:
return
# DVR handling code for SNAT
interface_name = self.get_internal_device_name(port['id'])
self._snat_redirect_remove(sn_port['fixed_ips'][0]['ip_address'],
port,
interface_name)
is_this_snat_host = (self.agent_conf.agent_mode == 'dvr_snat' and
self.ex_gw_port['binding:host_id'] == self.host)
if not is_this_snat_host:
return
snat_interface = (
self.get_snat_int_device_name(sn_port['id']))
ns_name = self.snat_namespace.name
prefix = dvr_snat_ns.SNAT_INT_DEV_PREFIX
if ip_lib.device_exists(snat_interface, namespace=ns_name):
self.driver.unplug(snat_interface, namespace=ns_name,
prefix=prefix)
def internal_network_removed(self, port):
self._dvr_internal_network_removed(port)
super(DvrRouter, self).internal_network_removed(port)
def get_floating_agent_gw_interface(self, ext_net_id):
"""Filter Floating Agent GW port for the external network."""
fip_ports = self.router.get(l3_constants.FLOATINGIP_AGENT_INTF_KEY, [])
return next(
(p for p in fip_ports if p['network_id'] == ext_net_id), None)
| [
"netaddr.IPNetwork",
"oslo_utils.excutils.save_and_reraise_exception",
"neutron.agent.linux.ip_lib.IPRule",
"neutron.agent.linux.ip_lib.send_garp_for_proxyarp",
"neutron.agent.linux.ip_lib.IPDevice",
"neutron.agent.linux.ip_lib.device_exists",
"neutron.common.utils.ip_to_cidr",
"neutron.agent.l3.dvr_snat_ns.SnatNamespace",
"neutron.agent.l3.dvr_snat_ns.SnatNamespace.get_snat_ns_name",
"neutron.agent.linux.ip_lib.IPWrapper",
"binascii.crc32",
"neutron.i18n._LE",
"oslo_log.log.getLogger"
] | [((1027, 1054), 'oslo_log.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1044, 1054), True, 'from oslo_log import log as logging\n'), ((3521, 3558), 'neutron.agent.linux.ip_lib.IPRule', 'ip_lib.IPRule', ([], {'namespace': 'self.ns_name'}), '(namespace=self.ns_name)\n', (3534, 3558), False, 'from neutron.agent.linux import ip_lib\n'), ((3785, 3839), 'neutron.agent.linux.ip_lib.IPDevice', 'ip_lib.IPDevice', (['fip_2_rtr_name'], {'namespace': 'fip_ns_name'}), '(fip_2_rtr_name, namespace=fip_ns_name)\n', (3800, 3839), False, 'from neutron.agent.linux import ip_lib\n'), ((4035, 4143), 'neutron.agent.linux.ip_lib.send_garp_for_proxyarp', 'ip_lib.send_garp_for_proxyarp', (['fip_ns_name', 'interface_name', 'floating_ip', 'self.agent_conf.send_arp_for_ha'], {}), '(fip_ns_name, interface_name, floating_ip,\n self.agent_conf.send_arp_for_ha)\n', (4064, 4143), False, 'from neutron.agent.linux import ip_lib\n'), ((5276, 5330), 'neutron.agent.linux.ip_lib.IPDevice', 'ip_lib.IPDevice', (['fip_2_rtr_name'], {'namespace': 'fip_ns_name'}), '(fip_2_rtr_name, namespace=fip_ns_name)\n', (5291, 5330), False, 'from neutron.agent.linux import ip_lib\n'), ((6780, 6831), 'neutron.common.utils.ip_to_cidr', 'common_utils.ip_to_cidr', (["fip['floating_ip_address']"], {}), "(fip['floating_ip_address'])\n", (6803, 6831), True, 'from neutron.common import utils as common_utils\n'), ((7387, 7480), 'neutron.agent.l3.dvr_snat_ns.SnatNamespace', 'dvr_snat_ns.SnatNamespace', (["self.router['id']", 'self.agent_conf', 'self.driver', 'self.use_ipv6'], {}), "(self.router['id'], self.agent_conf, self.driver,\n self.use_ipv6)\n", (7412, 7480), False, 'from neutron.agent.l3 import dvr_snat_ns\n'), ((10981, 11007), 'netaddr.IPNetwork', 'netaddr.IPNetwork', (['ip_cidr'], {}), '(ip_cidr)\n', (10998, 11007), False, 'import netaddr\n'), ((13879, 13940), 'neutron.agent.l3.dvr_snat_ns.SnatNamespace.get_snat_ns_name', 'dvr_snat_ns.SnatNamespace.get_snat_ns_name', (["self.router['id']"], {}), "(self.router['id'])\n", (13921, 13940), False, 'from neutron.agent.l3 import dvr_snat_ns\n'), ((15220, 15275), 'neutron.agent.linux.ip_lib.device_exists', 'ip_lib.device_exists', (['snat_interface'], {'namespace': 'ns_name'}), '(snat_interface, namespace=ns_name)\n', (15240, 15275), False, 'from neutron.agent.linux import ip_lib\n'), ((5024, 5061), 'neutron.agent.linux.ip_lib.IPRule', 'ip_lib.IPRule', ([], {'namespace': 'self.ns_name'}), '(namespace=self.ns_name)\n', (5037, 5061), False, 'from neutron.agent.linux import ip_lib\n'), ((5603, 5658), 'neutron.agent.linux.ip_lib.IPDevice', 'ip_lib.IPDevice', (['rtr_2_fip_name'], {'namespace': 'self.ns_name'}), '(rtr_2_fip_name, namespace=self.ns_name)\n', (5618, 5658), False, 'from neutron.agent.linux import ip_lib\n'), ((5679, 5718), 'neutron.agent.linux.ip_lib.IPWrapper', 'ip_lib.IPWrapper', ([], {'namespace': 'fip_ns_name'}), '(namespace=fip_ns_name)\n', (5695, 5718), False, 'from neutron.agent.linux import ip_lib\n'), ((8890, 8945), 'neutron.agent.linux.ip_lib.IPDevice', 'ip_lib.IPDevice', (['interface_name'], {'namespace': 'self.ns_name'}), '(interface_name, namespace=self.ns_name)\n', (8905, 8945), False, 'from neutron.agent.linux import ip_lib\n'), ((11712, 11749), 'neutron.agent.linux.ip_lib.IPRule', 'ip_lib.IPRule', ([], {'namespace': 'self.ns_name'}), '(namespace=self.ns_name)\n', (11725, 11749), False, 'from neutron.agent.linux import ip_lib\n'), ((11771, 11818), 'neutron.agent.linux.ip_lib.IPDevice', 'ip_lib.IPDevice', (['sn_int'], {'namespace': 'self.ns_name'}), 
'(sn_int, namespace=self.ns_name)\n', (11786, 11818), False, 'from neutron.agent.linux import ip_lib\n'), ((11844, 11884), 'neutron.agent.linux.ip_lib.IPWrapper', 'ip_lib.IPWrapper', ([], {'namespace': 'self.ns_name'}), '(namespace=self.ns_name)\n', (11860, 11884), False, 'from neutron.agent.linux import ip_lib\n'), ((12492, 12529), 'neutron.agent.linux.ip_lib.IPRule', 'ip_lib.IPRule', ([], {'namespace': 'self.ns_name'}), '(namespace=self.ns_name)\n', (12505, 12529), False, 'from neutron.agent.linux import ip_lib\n'), ((12551, 12598), 'neutron.agent.linux.ip_lib.IPDevice', 'ip_lib.IPDevice', (['sn_int'], {'namespace': 'self.ns_name'}), '(sn_int, namespace=self.ns_name)\n', (12566, 12598), False, 'from neutron.agent.linux import ip_lib\n'), ((10457, 10493), 'neutron.i18n._LE', '_LE', (['"""DVR: no map match_port found!"""'], {}), "('DVR: no map match_port found!')\n", (10460, 10493), False, 'from neutron.i18n import _LE\n'), ((11137, 11160), 'binascii.crc32', 'binascii.crc32', (['ip_cidr'], {}), '(ip_cidr)\n', (11151, 11160), False, 'import binascii\n'), ((9151, 9188), 'oslo_utils.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (9186, 9188), False, 'from oslo_utils import excutils\n'), ((12197, 12239), 'neutron.i18n._LE', '_LE', (['"""DVR: error adding redirection logic"""'], {}), "('DVR: error adding redirection logic')\n", (12200, 12239), False, 'from neutron.i18n import _LE\n'), ((12776, 12807), 'neutron.i18n._LE', '_LE', (['"""DVR: removed snat failed"""'], {}), "('DVR: removed snat failed')\n", (12779, 12807), False, 'from neutron.i18n import _LE\n'), ((9220, 9257), 'neutron.i18n._LE', '_LE', (['"""DVR: Failed updating arp entry"""'], {}), "('DVR: Failed updating arp entry')\n", (9223, 9257), False, 'from neutron.i18n import _LE\n')] |
#!/usr/bin/env python
import chainer
from algs import trpo
from env_makers import EnvMaker
from models import GaussianMLPPolicy, MLPBaseline
from utils import SnapshotSaver
import numpy as np
import os
import logger
log_dir = "data/local/trpo-pendulum"
np.random.seed(42)
# Clean up existing logs
os.system("rm -rf {}".format(log_dir))
with logger.session(log_dir):
env_maker = EnvMaker('Pendulum-v0')
env = env_maker.make()
policy = GaussianMLPPolicy(
observation_space=env.observation_space,
action_space=env.action_space,
env_spec=env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=chainer.functions.tanh,
)
baseline = MLPBaseline(
observation_space=env.observation_space,
action_space=env.action_space,
env_spec=env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=chainer.functions.tanh,
)
trpo(
env=env,
env_maker=env_maker,
n_envs=16,
policy=policy,
baseline=baseline,
batch_size=10000,
n_iters=100,
snapshot_saver=SnapshotSaver(log_dir),
)
| [
"models.GaussianMLPPolicy",
"env_makers.EnvMaker",
"models.MLPBaseline",
"numpy.random.seed",
"logger.session",
"utils.SnapshotSaver"
] | [((256, 274), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (270, 274), True, 'import numpy as np\n'), ((346, 369), 'logger.session', 'logger.session', (['log_dir'], {}), '(log_dir)\n', (360, 369), False, 'import logger\n'), ((387, 410), 'env_makers.EnvMaker', 'EnvMaker', (['"""Pendulum-v0"""'], {}), "('Pendulum-v0')\n", (395, 410), False, 'from env_makers import EnvMaker\n'), ((451, 635), 'models.GaussianMLPPolicy', 'GaussianMLPPolicy', ([], {'observation_space': 'env.observation_space', 'action_space': 'env.action_space', 'env_spec': 'env.spec', 'hidden_sizes': '(64, 64)', 'hidden_nonlinearity': 'chainer.functions.tanh'}), '(observation_space=env.observation_space, action_space=env\n .action_space, env_spec=env.spec, hidden_sizes=(64, 64),\n hidden_nonlinearity=chainer.functions.tanh)\n', (468, 635), False, 'from models import GaussianMLPPolicy, MLPBaseline\n'), ((689, 867), 'models.MLPBaseline', 'MLPBaseline', ([], {'observation_space': 'env.observation_space', 'action_space': 'env.action_space', 'env_spec': 'env.spec', 'hidden_sizes': '(64, 64)', 'hidden_nonlinearity': 'chainer.functions.tanh'}), '(observation_space=env.observation_space, action_space=env.\n action_space, env_spec=env.spec, hidden_sizes=(64, 64),\n hidden_nonlinearity=chainer.functions.tanh)\n', (700, 867), False, 'from models import GaussianMLPPolicy, MLPBaseline\n'), ((1101, 1123), 'utils.SnapshotSaver', 'SnapshotSaver', (['log_dir'], {}), '(log_dir)\n', (1114, 1123), False, 'from utils import SnapshotSaver\n')] |
#!/usr/bin/python3.7
# -*- coding: utf-8 -*-
# @Time : 2019/12/2 10:17
# @Author: <EMAIL>
"""
Regex-based parser.
"""
try:
import xml.etree.cElementTree as et
except ModuleNotFoundError:
import xml.etree.ElementTree as et
import re
class RegexEngine:
def __init__(self, xml, str_):
"""加载正则表。正则表为xml
:param xml: 正则表的位置
:param str_: 要匹配的字符串
"""
self._string = str_
self._root = et.parse(xml).getroot()
self.re = ''
self.data = []
def select(self, tag):
"""根据xml的tag来实现不同的正则提取
:param tag: xml的tag标签
:return: 正则提取的数据
"""
root = self._root.find(tag)
attrib = root.attrib
if attrib.get('part', 'False').lower() == 'true':
self._part_tag(root)
return list(filter(lambda x: x[1], self.data))
else:
sf = self._no_part(root)
self.re = ''.join(self.data) + sf
return re.findall(self.re, self._string)
def _no_part(self, tags):
"""tag标签不分开抽取"""
for tag in tags:
if tag:
if tag.attrib.get('must', 'true').lower() == 'true':
self.data.append(self.re)
self.re = ''
self.re = '(?:' + self._no_part(tag) + ')'
else:
self.re = self._no_part(tag)
else:
attrib = tag.attrib
text = tag.text.strip()
if attrib.get('must', 'true').lower() == 'true':
self.re = '(?:' + text + ')'
else:
self.re += '(?:' + text + ')?'
return self.re
def _part_tag(self, tags):
"""tag标签分开提取"""
for tag in tags:
if tag:
self._part_tag(tag)
else:
self.data.append((tag.tag, re.findall(tag.text.strip(), self._string)))
@property
def string(self):
return self._string
@string.setter
def string(self, str_):
self._string = str_
self.re, self.data = '', []
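# --- Editor's usage sketch (illustrative, not part of the original module) ---
# Demonstrates the XML layout this class expects and a `select` call on a group
# marked part="true"; the table contents, file name and phone number are invented.
def _demo_regex_engine():
    import tempfile
    xml_doc = ('<root>'
               '<phone part="true">'
               '<mobile>1\\d{10}</mobile>'
               '<landline>0\\d{2,3}-\\d{7,8}</landline>'
               '</phone>'
               '</root>')
    with tempfile.NamedTemporaryFile('w', suffix='.xml', delete=False) as f:
        f.write(xml_doc)
    engine = RegexEngine(f.name, 'call me at 13812345678')
    return engine.select('phone')  # -> [('mobile', ['13812345678'])]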
| [
"re.findall",
"xml.etree.ElementTree.parse"
] | [((951, 984), 're.findall', 're.findall', (['self.re', 'self._string'], {}), '(self.re, self._string)\n', (961, 984), False, 'import re\n'), ((426, 439), 'xml.etree.ElementTree.parse', 'et.parse', (['xml'], {}), '(xml)\n', (434, 439), True, 'import xml.etree.ElementTree as et\n')] |
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.collections as mcoll
from matplotlib.ticker import MaxNLocator
plt.style.use('seaborn-darkgrid')
class BaseTraj:
def __init__(self, model, X):
self.model = model
assert len(X.shape) == 2, f"X should be 2-d, instead got shape {X.shape}"
self.X = X
self.means = self.model.means_.copy()
self.states = self.model.predict(X)
self.n_states = len(np.unique(self.states))
self.trans = self.model.transmat_.copy()
def rho_dt_bins(self, rho, theta, dt, bins=12):
"""
Bin rho values and dwell time on polar coordinates.
        :param rho: radial distances (vector lengths)
        :param theta: angles in radians, in [-pi, pi]
        :param dt: dwell times associated with the vectors
        :param bins: number of angular bins
        :return: bin centers, mean radial distance per bin, summed dwell time per bin
"""
bins = np.linspace(-np.pi, np.pi, bins+1)
bin_means = (bins[:-1] + bins[1:]) / 2
bin_ix = np.digitize(theta, bins)
bin_rd = [rho[(bin_ix == i) & (rho > 0)].mean()
if len(rho[(bin_ix == i) & (rho > 0)]) > 0 else
0 for i in range(1, len(bins))]
bin_dt = [dt[(bin_ix == i) & (dt > 0)].sum()
if len(dt[(bin_ix == i) & (dt > 0)]) > 0 else
0 for i in range(1, len(bins))]
return bin_means, bin_rd, bin_dt
def transition_vectors(self):
"""
Transition vectors between states on polar coordinates.
:return:
"""
mu_x, mu_y = self.means[:, 0], self.means[:, 1]
mu_x_dist = mu_x - mu_x[:, np.newaxis]
mu_y_dist = mu_y - mu_y[:, np.newaxis]
dist_vect = np.column_stack((mu_x_dist.flatten(), mu_y_dist.flatten()))
trans_rho, trans_theta = self.cart2pol(dist_vect)
trans_rho = (trans_rho.reshape((self.n_states, self.n_states)) * self.design_transition()).flatten()
return trans_rho, trans_theta
def design_transition(self, thresh=0.1):
design_trans = self.trans
diag_ix = np.diag_indices(len(design_trans))
design_trans[diag_ix] = 0
design_trans[design_trans < thresh] = 0
design_trans[design_trans >= thresh] = 1
return design_trans
def norm_trans_time(self):
"""
Normalized transition time.
:return:
"""
unique, counts = np.unique(self.states, return_counts=True)
sort_ix = unique.argsort()
counts = counts[sort_ix]
# normalize by transition probability
dt = (counts * self.design_transition()).flatten()
return dt / dt.sum()
def norm_state_time(self):
"""
Normalized state time.
:return:
"""
unique, counts = np.unique(self.states, return_counts=True)
sort_ix = unique.argsort()
counts = counts[sort_ix]
return counts / counts.sum()
@staticmethod
def cart2pol(arr):
"""
Cartesion space to polar space.
Args:
arr (numpy.array): Array of shape [n_state x dims]
"""
x, y = arr[:, 0], arr[:, 1]
rho = np.sqrt(x ** 2 + y ** 2)
theta = np.arctan2(y, x)
return rho, theta
class PhenoSign(BaseTraj):
"""Phenotypic Signature class."""
def __init__(self, model, X):
super(PhenoSign, self).__init__(model, X)
self.bin_means, self.signature = self.get_signature()
def get_signature(self):
"""
Calculate phenotypic signature for a given model.
:return: bin_means, array of shape [4 x n_bins] with
1. state radial distances
2. state dwell times
3. transition distances
            4. transition dwell times
"""
# states
mu_rho, mu_theta = self.cart2pol(self.means)
state_dt = self.norm_state_time()
bin_means_1, state_rd_bins, state_dt_bins = self.rho_dt_bins(mu_rho, mu_theta, state_dt)
# transitions
trans_rho, trans_theta = self.transition_vectors()
trans_dt = self.norm_trans_time()
bin_means_2, trans_rd_bins, trans_dt_bins = self.rho_dt_bins(trans_rho, trans_theta, trans_dt)
assert (bin_means_1 == bin_means_2).all(), "state and transition vectors are binned differently and can" \
"not be concatenated."
return bin_means_1, np.vstack((state_rd_bins, state_dt_bins, trans_rd_bins, trans_dt_bins))
class Saphire(PhenoSign):
"""Implementation of the SAPHIRE algorithm for plotting Hidden Markov Models.
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>. Time series modeling of live-cell shape dynamics for
image-based phenotypic profiling. Integr Biol (Camb). 2016;8(1):73-90.
"""
def __init__(self, model, X):
super(Saphire, self).__init__(model, X)
def plot_traj(self, projection='cartesian', ymax=None):
"""
Plot cell trajectory.
Args:
projection (str): cartesian or polar.
ymax (int)
"""
avail_proj = ['cartesian', 'polar']
projection = projection.lower()
assert projection in avail_proj, f"projection unknown: {projection}"
if projection == 'cartesian':
projection = None
cmap = plt.get_cmap('binary')
cmap = truncate_colormap(cmap, minval=0.2)
if projection == 'polar':
y, x = self.cart2pol(self.X)
y_mu, x_mu = self.cart2pol(self.means)
else:
x, y = self.X[:, 0], self.X[:, 1]
x_mu, y_mu = self.means[:, 0], self.means[:, 1]
fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': projection})
ax.scatter(x, y,
c=self.states, cmap='Set1', zorder=2)
traj = ax.scatter(x_mu, y_mu,
c=np.unique(self.states), cmap='Set1',
s=200, zorder=2, edgecolor='black', alpha=0.6)
legend = ax.legend(*traj.legend_elements(),
loc="upper right", bbox_to_anchor=(1.2, 0.94),
title="States")
ax.add_artist(legend)
if ymax is not None:
ax.set_ylim(0, ymax)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
colorline(x, y, cmap=cmap, zorder=1)
norm = mpl.colors.Normalize(vmin=0, vmax=48)
cax = fig.add_axes([0.94, 0.15, 0.05, 0.3])
fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax,
orientation='vertical', label='Time')
plt.show()
return fig, ax
def plot_states(self, ymax=None):
"""
Plot cell states.
"""
bin_rd, bin_dt = self.signature[0, :], self.signature[1, :]
fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': 'polar'})
cmap = plt.get_cmap("Oranges")
N = 12
width = (2 * np.pi) / N
ax.bar(self.bin_means, bin_rd, width=width, color=cmap(bin_dt))
if ymax is not None:
ax.set_ylim(0, ymax)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
norm = mpl.colors.Normalize(vmin=0, vmax=1)
cax = fig.add_axes([0.94, 0.15, 0.05, 0.3])
fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax,
orientation='vertical', label='Increasing state dwell time',
ticks=[0, 0.5, 1])
return fig, ax
def plot_transition(self, ymax=None):
"""
Plot transition between cell states.
"""
bin_rd, bin_dt = self.signature[2, :], self.signature[3, :]
fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': 'polar'})
cmap = plt.get_cmap("Blues")
N = 12
width = (2 * np.pi) / N
ax.bar(self.bin_means, bin_rd, width=width, color=cmap(bin_dt))
if ymax is not None:
ax.set_ylim(0, ymax)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
norm = mpl.colors.Normalize(vmin=0, vmax=1)
cax = fig.add_axes([0.94, 0.15, 0.05, 0.3])
fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax,
orientation='vertical', label='Increasing transition dwell time',
ticks=[0, 0.5, 1])
return fig, ax
def colorline(x, y, z=None, cmap=plt.get_cmap('copper'), norm=plt.Normalize(0.0, 1.0),
linewidth=3, alpha=1.0, zorder=1):
"""
Plot a colored line with coordinates x and y
Optionally specify colors in the array z
Optionally specify a colormap, a norm function and a line width
"""
# Default colors equally spaced on [0,1]:
if z is None:
z = np.linspace(0.0, 1.0, len(x))
# Special case if a single number:
if not hasattr(z, "__iter__"): # to check for numerical input -- this is a hack
z = np.array([z])
z = np.asarray(z)
segments = make_segments(x, y)
lc = mcoll.LineCollection(segments, array=z, cmap=cmap, norm=norm,
linewidth=linewidth, alpha=alpha, zorder=zorder)
ax = plt.gca()
ax.add_collection(lc)
return lc
def make_segments(x, y):
"""
Create list of line segments from x and y coordinates, in the correct format
for LineCollection: an array of the form numlines x (points per line) x 2 (x
and y) array
"""
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
return segments
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
'''
https://stackoverflow.com/a/18926541
'''
if isinstance(cmap, str):
cmap = plt.get_cmap(cmap)
new_cmap = mpl.colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
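# --- Editor's usage sketch (illustrative, not part of the original module) ---
# Intended call pattern with a fitted hmmlearn GaussianHMM (any model exposing
# means_, transmat_ and predict() should work). hmmlearn is an assumed external
# dependency and the random-walk trajectory below is purely for illustration.
def _demo_saphire():
    from hmmlearn.hmm import GaussianHMM
    X = np.cumsum(np.random.randn(300, 2), axis=0)  # fake 2-d cell trajectory
    model = GaussianHMM(n_components=3, covariance_type='full').fit(X)
    traj = Saphire(model, X)
    traj.plot_traj(projection='polar')
    traj.plot_states()
    traj.plot_transition()
    return traj.signature  # 4 x 12 phenotypic signature (state/transition rho and dwell times)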
| [
"numpy.sqrt",
"matplotlib.collections.LineCollection",
"numpy.array",
"matplotlib.ticker.MaxNLocator",
"numpy.arctan2",
"matplotlib.pyplot.Normalize",
"numpy.asarray",
"matplotlib.pyplot.style.use",
"numpy.linspace",
"matplotlib.cm.ScalarMappable",
"numpy.vstack",
"numpy.concatenate",
"numpy.digitize",
"matplotlib.pyplot.gca",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.show",
"matplotlib.pyplot.get_cmap",
"numpy.unique",
"matplotlib.pyplot.subplots"
] | [((158, 191), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-darkgrid"""'], {}), "('seaborn-darkgrid')\n", (171, 191), True, 'import matplotlib.pyplot as plt\n'), ((8329, 8351), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""copper"""'], {}), "('copper')\n", (8341, 8351), True, 'import matplotlib.pyplot as plt\n'), ((8358, 8381), 'matplotlib.pyplot.Normalize', 'plt.Normalize', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (8371, 8381), True, 'import matplotlib.pyplot as plt\n'), ((8871, 8884), 'numpy.asarray', 'np.asarray', (['z'], {}), '(z)\n', (8881, 8884), True, 'import numpy as np\n'), ((8930, 9045), 'matplotlib.collections.LineCollection', 'mcoll.LineCollection', (['segments'], {'array': 'z', 'cmap': 'cmap', 'norm': 'norm', 'linewidth': 'linewidth', 'alpha': 'alpha', 'zorder': 'zorder'}), '(segments, array=z, cmap=cmap, norm=norm, linewidth=\n linewidth, alpha=alpha, zorder=zorder)\n', (8950, 9045), True, 'import matplotlib.collections as mcoll\n'), ((9081, 9090), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9088, 9090), True, 'import matplotlib.pyplot as plt\n'), ((9420, 9469), 'numpy.concatenate', 'np.concatenate', (['[points[:-1], points[1:]]'], {'axis': '(1)'}), '([points[:-1], points[1:]], axis=1)\n', (9434, 9469), True, 'import numpy as np\n'), ((815, 851), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi', '(bins + 1)'], {}), '(-np.pi, np.pi, bins + 1)\n', (826, 851), True, 'import numpy as np\n'), ((914, 938), 'numpy.digitize', 'np.digitize', (['theta', 'bins'], {}), '(theta, bins)\n', (925, 938), True, 'import numpy as np\n'), ((2323, 2365), 'numpy.unique', 'np.unique', (['self.states'], {'return_counts': '(True)'}), '(self.states, return_counts=True)\n', (2332, 2365), True, 'import numpy as np\n'), ((2699, 2741), 'numpy.unique', 'np.unique', (['self.states'], {'return_counts': '(True)'}), '(self.states, return_counts=True)\n', (2708, 2741), True, 'import numpy as np\n'), ((3077, 3101), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (3084, 3101), True, 'import numpy as np\n'), ((3118, 3134), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (3128, 3134), True, 'import numpy as np\n'), ((5254, 5276), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""binary"""'], {}), "('binary')\n", (5266, 5276), True, 'import matplotlib.pyplot as plt\n'), ((5594, 5661), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 5)', 'subplot_kw': "{'projection': projection}"}), "(figsize=(5, 5), subplot_kw={'projection': projection})\n", (5606, 5661), True, 'import matplotlib.pyplot as plt\n'), ((6303, 6340), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(0)', 'vmax': '(48)'}), '(vmin=0, vmax=48)\n', (6323, 6340), True, 'import matplotlib as mpl\n'), ((6535, 6545), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6543, 6545), True, 'import matplotlib.pyplot as plt\n'), ((6747, 6811), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 5)', 'subplot_kw': "{'projection': 'polar'}"}), "(figsize=(5, 5), subplot_kw={'projection': 'polar'})\n", (6759, 6811), True, 'import matplotlib.pyplot as plt\n'), ((6827, 6850), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Oranges"""'], {}), "('Oranges')\n", (6839, 6850), True, 'import matplotlib.pyplot as plt\n'), ((7109, 7145), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(0)', 'vmax': '(1)'}), '(vmin=0, vmax=1)\n', (7129, 7145), True, 'import matplotlib as mpl\n'), ((7619, 7683), 
'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 5)', 'subplot_kw': "{'projection': 'polar'}"}), "(figsize=(5, 5), subplot_kw={'projection': 'polar'})\n", (7631, 7683), True, 'import matplotlib.pyplot as plt\n'), ((7699, 7720), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Blues"""'], {}), "('Blues')\n", (7711, 7720), True, 'import matplotlib.pyplot as plt\n'), ((7979, 8015), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(0)', 'vmax': '(1)'}), '(vmin=0, vmax=1)\n', (7999, 8015), True, 'import matplotlib as mpl\n'), ((8848, 8861), 'numpy.array', 'np.array', (['[z]'], {}), '([z])\n', (8856, 8861), True, 'import numpy as np\n'), ((9654, 9672), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (9666, 9672), True, 'import matplotlib.pyplot as plt\n'), ((490, 512), 'numpy.unique', 'np.unique', (['self.states'], {}), '(self.states)\n', (499, 512), True, 'import numpy as np\n'), ((4349, 4420), 'numpy.vstack', 'np.vstack', (['(state_rd_bins, state_dt_bins, trans_rd_bins, trans_dt_bins)'], {}), '((state_rd_bins, state_dt_bins, trans_rd_bins, trans_dt_bins))\n', (4358, 4420), True, 'import numpy as np\n'), ((6216, 6241), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (6227, 6241), False, 'from matplotlib.ticker import MaxNLocator\n'), ((6414, 6457), 'matplotlib.cm.ScalarMappable', 'mpl.cm.ScalarMappable', ([], {'norm': 'norm', 'cmap': 'cmap'}), '(norm=norm, cmap=cmap)\n', (6435, 6457), True, 'import matplotlib as mpl\n'), ((7067, 7092), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (7078, 7092), False, 'from matplotlib.ticker import MaxNLocator\n'), ((7219, 7262), 'matplotlib.cm.ScalarMappable', 'mpl.cm.ScalarMappable', ([], {'norm': 'norm', 'cmap': 'cmap'}), '(norm=norm, cmap=cmap)\n', (7240, 7262), True, 'import matplotlib as mpl\n'), ((7937, 7962), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (7948, 7962), False, 'from matplotlib.ticker import MaxNLocator\n'), ((8089, 8132), 'matplotlib.cm.ScalarMappable', 'mpl.cm.ScalarMappable', ([], {'norm': 'norm', 'cmap': 'cmap'}), '(norm=norm, cmap=cmap)\n', (8110, 8132), True, 'import matplotlib as mpl\n'), ((9825, 9855), 'numpy.linspace', 'np.linspace', (['minval', 'maxval', 'n'], {}), '(minval, maxval, n)\n', (9836, 9855), True, 'import numpy as np\n'), ((5810, 5832), 'numpy.unique', 'np.unique', (['self.states'], {}), '(self.states)\n', (5819, 5832), True, 'import numpy as np\n'), ((9368, 9384), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (9376, 9384), True, 'import numpy as np\n')] |
from django.shortcuts import render, redirect
from django.views import View
from django.views.generic import (
ListView,
)
from account.models import *
from account.forms import *
from data.models import *
from django.contrib.auth import login as auth_login
from django.contrib.auth.models import auth
from django.contrib import messages
from django.contrib.auth.mixins import PermissionRequiredMixin, LoginRequiredMixin
# Create your views here.
def login(request):
if request.method == "POST":
form = loginForm(data=request.POST)
if form.is_valid():
user = form.get_user()
auth_login(request, user)
print("succesful login")
remember_me = form.cleaned_data["remember_me"]
if remember_me:
request.session.set_expiry(1209600)
return redirect("home")
else:
messages.warning(request, 'There is an issue with your login processes')
return redirect("login")
else:
form = loginForm()
create_form = createUserForm()
context = {
"form": form,
"create_form": create_form
}
return render(request, "login.html", context)
def logout(request):
auth.logout(request)
return redirect("login")
def register(request):
if request.method == "POST":
create_form = createUserForm(data=request.POST)
if create_form.is_valid():
user = create_form.save(commit=False)
user.save()
messages.success(request, "User created successfully!")
return redirect("login")
else:
messages.error(request, "User creation failed")
else:
create_form = createUserForm()
return render(request, "login.html", {"create_form": create_form})
def homepage(request):
user = Account.objects.filter(is_superuser=False).count()
rest = Restaurant.objects.all().count()
rating = RestaurantReview.objects.exclude(rating__isnull=True).count()
review = RestaurantReview.objects.exclude(review__isnull=True).count()
context = {
"user_count" : user,
"rest_count" : rest,
"rating_count" : rating,
"review_count" : review,
}
return render(request, "home.html", context)
class ViewUserView(View, LoginRequiredMixin, PermissionRequiredMixin):
permission_required = 'accounts.view_account'
raise_exception = True
def post(self, request, pk, *args, **kwargs):
user = Account.objects.get(account_id=pk)
form = viewUserForm(request.POST, instance=user)
return redirect("userlist")
def get(self, request, pk, *args, **kwargs):
user = Account.objects.get(account_id=pk)
form = viewUserForm(instance=user)
context = {
"form": form,
"pk": pk
}
return render(request, "profile.html", context)
class EditUserView(View, LoginRequiredMixin, PermissionRequiredMixin):
permission_required = 'accounts.change_account'
raise_exception = True
def post(self, request, pk, *args, **kwargs):
user = Account.objects.get(account_id=pk)
form = editUserForm(request.POST, instance=user)
if form.is_valid():
user = form.save()
role = request.POST.get("role")
user.save()
messages.success(request, "Successfully updated profile!")
return redirect(f'/viewUser/{user.account_id}')
else:
form = editUserForm(instance=user)
extra_context = {
"form": form,
}
print('something wrong')
messages.error(request, "Invalid input! Please input a valid information.")
return render(request, "editUser.html", extra_context)
def get(self, request, pk, *args, **kwargs):
user = Account.objects.get(account_id=pk)
form = editUserForm(instance=user)
extra_context = {
"form": form,
}
return render(request, "editUser.html", extra_context)
class UserListView(LoginRequiredMixin, PermissionRequiredMixin, ListView):
permission_required = 'accounts.view_account'
template_name = "userList.html"
queryset = Account.objects.all()
class UpdateProfilePicView(View, LoginRequiredMixin, PermissionRequiredMixin):
permission_required = 'accounts.change_account'
raise_exception = True
def post(self, request, pk, *args, **kwargs):
user = Account.objects.get(account_id=pk)
user.profile_pic = request.FILES.get('profile-pic')
user.save()
return redirect('viewUser', pk)
def deleteUser(request, event_id):
event = Account.objects.get(pk=event_id)
event.delete()
return redirect('userlist') | [
"django.shortcuts.render",
"django.contrib.messages.error",
"django.contrib.messages.warning",
"django.contrib.auth.login",
"django.contrib.auth.models.auth.logout",
"django.shortcuts.redirect",
"django.contrib.messages.success"
] | [((1219, 1257), 'django.shortcuts.render', 'render', (['request', '"""login.html"""', 'context'], {}), "(request, 'login.html', context)\n", (1225, 1257), False, 'from django.shortcuts import render, redirect\n'), ((1284, 1304), 'django.contrib.auth.models.auth.logout', 'auth.logout', (['request'], {}), '(request)\n', (1295, 1304), False, 'from django.contrib.auth.models import auth\n'), ((1316, 1333), 'django.shortcuts.redirect', 'redirect', (['"""login"""'], {}), "('login')\n", (1324, 1333), False, 'from django.shortcuts import render, redirect\n'), ((1797, 1856), 'django.shortcuts.render', 'render', (['request', '"""login.html"""', "{'create_form': create_form}"], {}), "(request, 'login.html', {'create_form': create_form})\n", (1803, 1856), False, 'from django.shortcuts import render, redirect\n'), ((2294, 2331), 'django.shortcuts.render', 'render', (['request', '"""home.html"""', 'context'], {}), "(request, 'home.html', context)\n", (2300, 2331), False, 'from django.shortcuts import render, redirect\n'), ((4804, 4824), 'django.shortcuts.redirect', 'redirect', (['"""userlist"""'], {}), "('userlist')\n", (4812, 4824), False, 'from django.shortcuts import render, redirect\n'), ((2654, 2674), 'django.shortcuts.redirect', 'redirect', (['"""userlist"""'], {}), "('userlist')\n", (2662, 2674), False, 'from django.shortcuts import render, redirect\n'), ((2910, 2950), 'django.shortcuts.render', 'render', (['request', '"""profile.html"""', 'context'], {}), "(request, 'profile.html', context)\n", (2916, 2950), False, 'from django.shortcuts import render, redirect\n'), ((4066, 4113), 'django.shortcuts.render', 'render', (['request', '"""editUser.html"""', 'extra_context'], {}), "(request, 'editUser.html', extra_context)\n", (4072, 4113), False, 'from django.shortcuts import render, redirect\n'), ((4668, 4692), 'django.shortcuts.redirect', 'redirect', (['"""viewUser"""', 'pk'], {}), "('viewUser', pk)\n", (4676, 4692), False, 'from django.shortcuts import render, redirect\n'), ((661, 686), 'django.contrib.auth.login', 'auth_login', (['request', 'user'], {}), '(request, user)\n', (671, 686), True, 'from django.contrib.auth import login as auth_login\n'), ((883, 899), 'django.shortcuts.redirect', 'redirect', (['"""home"""'], {}), "('home')\n", (891, 899), False, 'from django.shortcuts import render, redirect\n'), ((926, 998), 'django.contrib.messages.warning', 'messages.warning', (['request', '"""There is an issue with your login processes"""'], {}), "(request, 'There is an issue with your login processes')\n", (942, 998), False, 'from django.contrib import messages\n'), ((1018, 1035), 'django.shortcuts.redirect', 'redirect', (['"""login"""'], {}), "('login')\n", (1026, 1035), False, 'from django.shortcuts import render, redirect\n'), ((1570, 1625), 'django.contrib.messages.success', 'messages.success', (['request', '"""User created successfully!"""'], {}), "(request, 'User created successfully!')\n", (1586, 1625), False, 'from django.contrib import messages\n'), ((1645, 1662), 'django.shortcuts.redirect', 'redirect', (['"""login"""'], {}), "('login')\n", (1653, 1662), False, 'from django.shortcuts import render, redirect\n'), ((1689, 1736), 'django.contrib.messages.error', 'messages.error', (['request', '"""User creation failed"""'], {}), "(request, 'User creation failed')\n", (1703, 1736), False, 'from django.contrib import messages\n'), ((3400, 3458), 'django.contrib.messages.success', 'messages.success', (['request', '"""Successfully updated profile!"""'], {}), "(request, 'Successfully updated 
profile!')\n", (3416, 3458), False, 'from django.contrib import messages\n'), ((3478, 3518), 'django.shortcuts.redirect', 'redirect', (['f"""/viewUser/{user.account_id}"""'], {}), "(f'/viewUser/{user.account_id}')\n", (3486, 3518), False, 'from django.shortcuts import render, redirect\n'), ((3703, 3778), 'django.contrib.messages.error', 'messages.error', (['request', '"""Invalid input! Please input a valid information."""'], {}), "(request, 'Invalid input! Please input a valid information.')\n", (3717, 3778), False, 'from django.contrib import messages\n'), ((3798, 3845), 'django.shortcuts.render', 'render', (['request', '"""editUser.html"""', 'extra_context'], {}), "(request, 'editUser.html', extra_context)\n", (3804, 3845), False, 'from django.shortcuts import render, redirect\n')] |
import logging
import os
import pickle
import sys
import threading
import time
from typing import List
from Giveme5W1H.extractor.root import path
from Giveme5W1H.extractor.tools.util import bytes_2_human_readable
class KeyValueCache(object):
def __init__(self, cache_path):
"""
:param cache_path: path to cache, must be relative to the root.py file
"""
self.log = logging.getLogger('GiveMe5W')
# resolve path relative to the path file
self._cache_path = path(cache_path)
# ad a meaningful extension
self._cache_path = self._cache_path + '.prickle'
self._cache = {}
if cache_path and os.path.isfile(self._cache_path) and os.path.getsize(self._cache_path) > 0:
            # reload the cache object from disk, if any
with open(self._cache_path, 'rb') as ff:
self._cache = pickle.load(ff)
self.log.debug('KeyValueCache: ' + self._cache_path + ' restored')
self.log_stats()
else:
self._cache = {}
self._lock = threading.Lock()
def log_stats(self):
        # note: the reported size does not include nested child objects
self.log.info(self._cache_path + ' entries: ' + str(len(self._cache)) + ' size: ' + bytes_2_human_readable(
sys.getsizeof(self._cache)))
def persist(self):
with open(self._cache_path, 'wb') as f:
pickle.dump(self._cache, f, pickle.HIGHEST_PROTOCOL)
def cache(self, key: str, value: object):
"""
        None values are considered invalid results (ToughRequest produces None on exceptions).
        Set -1 if you want to store "no distance".
:param key:
:param value:
:return:
"""
self._lock.acquire()
if value is not None:
self._cache[key] = self._pack(value);
self.log.debug(self._cache_path + ' CACHED: ' + str(key) + ': ' + str(value))
self.persist()
self._lock.release()
def get(self, key):
"""
Read cache entries
:param key:
:return:
"""
self._lock.acquire()
result = None
value = self._cache.get(key)
if value is not None:
self.log.debug(self._cache_path + ' LOADED: ' + str(key) + ': ' + str(value))
result = self._unpack(value)
self._lock.release()
return result
def get_complex(self, list_of_keys: List[str]):
"""
Read complex cache entries
"""
return self.get(self._get_id(list_of_keys))
def cache_complex(self, list_of_keys: List[str], value):
"""
        Helper for caching a value under multiple string keys.
        The keys are sorted before concatenation, so the key order does not matter.
"""
self.cache(self._get_id(list_of_keys), value)
def _get_id(self, list_of_keys: List[str]):
"""
sorts list_of_keys, concatenates with # for readability
:param list_of_keys:
:return:
"""
        list_of_keys = sorted(list_of_keys)
return "#".join(list_of_keys)
def _pack(self, value):
"""
cache tracks the age of an entry, may be helpful in the future
:param value:
:return:
"""
return [value, str(time.time())]
def _unpack(self, value):
"""
removes the timestamp around the cached value, if any
:param value:
:return:
"""
# there are some old entries without timestamp
if isinstance(value, str) or isinstance(value, int):
return value
return value[0]
| [
"logging.getLogger",
"os.path.getsize",
"pickle.dump",
"threading.Lock",
"sys.getsizeof",
"pickle.load",
"os.path.isfile",
"time.time",
"Giveme5W1H.extractor.root.path"
] | [((404, 433), 'logging.getLogger', 'logging.getLogger', (['"""GiveMe5W"""'], {}), "('GiveMe5W')\n", (421, 433), False, 'import logging\n'), ((510, 526), 'Giveme5W1H.extractor.root.path', 'path', (['cache_path'], {}), '(cache_path)\n', (514, 526), False, 'from Giveme5W1H.extractor.root import path\n'), ((1080, 1096), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1094, 1096), False, 'import threading\n'), ((673, 705), 'os.path.isfile', 'os.path.isfile', (['self._cache_path'], {}), '(self._cache_path)\n', (687, 705), False, 'import os\n'), ((1406, 1458), 'pickle.dump', 'pickle.dump', (['self._cache', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(self._cache, f, pickle.HIGHEST_PROTOCOL)\n', (1417, 1458), False, 'import pickle\n'), ((710, 743), 'os.path.getsize', 'os.path.getsize', (['self._cache_path'], {}), '(self._cache_path)\n', (725, 743), False, 'import os\n'), ((884, 899), 'pickle.load', 'pickle.load', (['ff'], {}), '(ff)\n', (895, 899), False, 'import pickle\n'), ((3284, 3295), 'time.time', 'time.time', ([], {}), '()\n', (3293, 3295), False, 'import time\n'), ((1293, 1319), 'sys.getsizeof', 'sys.getsizeof', (['self._cache'], {}), '(self._cache)\n', (1306, 1319), False, 'import sys\n')] |
from argparse import ArgumentParser
from tqdm import tqdm
import NSST
from nsst_translate import best_transition_sequence
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("--nsst_file", default="output/nsst_tss20_th4_nSt100_Q0.pkl", help="nsst file")
parser.add_argument("--src_lang", default="output/europarl-v7.de-en.de.clean")
parser.add_argument("--tgt_lang", default="output/europarl-v7.de-en.en.clean")
parser.add_argument("--enforce_n_reg", default=True)
parser.add_argument("--output", default=f"output/nsst_stat_nreg_100Q0.csv")
args = parser.parse_args()
args.enforce_n_final_reg = False
# load NSST
nsst = NSST.NSST()
nsst.load(args.nsst_file)
args.nsst = nsst
# open files
src_file = open(args.src_lang, 'r')
tgt_file = open(args.tgt_lang, 'r')
output_file = open(args.output, 'w')
# iterate over sentences, first 4096 -> test sentences
for src, tgt, _ in tqdm(list(zip(src_file, tgt_file, range(4096))), desc="Processing sentences"):
# remove line breaks
src = src[:-1]
tgt = tgt[:-1]
# try to translate
try:
# prepare tokenisations
token_src = [nsst.tokenization_src[word] if word in nsst.tokenization_src else 0
for word in src.split(" ") if len(word)]
token_tgt = [nsst.tokenization_tgt[word] if word in nsst.tokenization_tgt else 0
for word in tgt.split(" ") if len(word)]
# run nsst
args.input = src
args.token_src = token_src
result = best_transition_sequence(args)
# get best result
pred = sorted((k for k in result
if ('Qf' in args.nsst_file or not args.enforce_n_final_reg or len(k[1]) == 1)
and ('Q0' in args.nsst_file or k[0] == -1)
),
key=lambda x: x[2],
reverse=True)[0]
n_res = len(result)
q, reg, prob = pred
# write to csv
if not len(reg): # catch empty registers
continue
token_pred = [w for w in reg[0].split(' ') if len(w)]
pred_str = ""
for t in token_pred:
pred_str += f"{nsst.tokenization_tgt_lut[int(t)]} "
token_src_str = ""
for t in token_src:
token_src_str += f"{t} "
token_tgt_str = ""
for t in token_tgt:
token_tgt_str += f"{t} "
token_pred_str = ""
for t in token_pred:
token_pred_str += f"{t} "
print(f"{src};{token_src_str[:-1]};"
f"{tgt};{token_tgt_str[:-1]};"
f"{pred_str};{token_pred_str[:-1]};"
f"{prob};{len(reg)};{n_res}",
file=output_file)
output_file.flush()
except RuntimeError:
pass
# close files
src_file.close()
tgt_file.close()
output_file.close()
| [
"nsst_translate.best_transition_sequence",
"NSST.NSST",
"argparse.ArgumentParser"
] | [((165, 181), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (179, 181), False, 'from argparse import ArgumentParser\n'), ((685, 696), 'NSST.NSST', 'NSST.NSST', ([], {}), '()\n', (694, 696), False, 'import NSST\n'), ((1633, 1663), 'nsst_translate.best_transition_sequence', 'best_transition_sequence', (['args'], {}), '(args)\n', (1657, 1663), False, 'from nsst_translate import best_transition_sequence\n')] |
# Import library
import math
# Define functions
def mean(data):
return sum(data) / len(data)
def stddev(data, size):
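    # population standard deviation: square root of the mean squared deviation from the mean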
sum = 0
for i in range(size):
sum = sum + (data[i] - mean(data)) ** 2
return math.sqrt(sum / size)
# Set data
size = int(input())
numbers = list(map(int, input().split()))
# Get standard deviation
print(round(stddev(numbers, size), 1))
| [
"math.sqrt"
] | [((221, 242), 'math.sqrt', 'math.sqrt', (['(sum / size)'], {}), '(sum / size)\n', (230, 242), False, 'import math\n')] |
#!/usr/bin/env python3
# Python threading test
import _thread
import time
from yvhai.demo.base import YHDemo
def print_time(thread_name, interval, times):
for cnt in range(times):
time.sleep(interval)
print(" -- %s: %s" % (thread_name, time.ctime(time.time())))
class RawThreadDemo(YHDemo):
def __init__(self):
super(RawThreadDemo, self).__init__('_thread')
@staticmethod
def main():
try:
_thread.start_new_thread(print_time, ("Thread-01", 1, 10))
_thread.start_new_thread(print_time, ("Thread-02", 2, 6))
except:
print("Error: 无法启动线程")
# 主线程无限等待
while 1:
pass
@staticmethod
def demo(args=[]):
RawThreadDemo.main()
if __name__ == '__main__':
RawThreadDemo.demo()
| [
"_thread.start_new_thread",
"time.sleep",
"time.time"
] | [((187, 207), 'time.sleep', 'time.sleep', (['interval'], {}), '(interval)\n', (197, 207), False, 'import time\n'), ((447, 505), '_thread.start_new_thread', '_thread.start_new_thread', (['print_time', "('Thread-01', 1, 10)"], {}), "(print_time, ('Thread-01', 1, 10))\n", (471, 505), False, 'import _thread\n'), ((518, 575), '_thread.start_new_thread', '_thread.start_new_thread', (['print_time', "('Thread-02', 2, 6)"], {}), "(print_time, ('Thread-02', 2, 6))\n", (542, 575), False, 'import _thread\n'), ((262, 273), 'time.time', 'time.time', ([], {}), '()\n', (271, 273), False, 'import time\n')] |
import cv2
from cv2 import *
import numpy as np
from matplotlib import pyplot as plt
###############################SIFT MATCH Function#################################
def SIFTMATCH(img1,img2):
# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1,des2,k=2)
# store all the good matches as per Lowe's ratio test.
good = []
for m,n in matches:
if m.distance < 0.7*n.distance:
good.append(m)
if len(good)>MIN_MATCH_COUNT:
src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
matchesMask = mask.ravel().tolist()
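        # project the query image corners into the scene using the estimated homography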
h,w = img1.shape
pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
dst = cv2.perspectiveTransform(pts,M)
img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA)
else:
print("Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT))
matchesMask = None
draw_params = dict(matchColor = (0,255,0), # draw matches in green color
singlePointColor = None,
matchesMask = matchesMask, # draw only inliers
flags = 2)
img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params)
cv2.moveWindow('output', 150,150) # Move it to (40,30)
cv2.imshow('output',img3)
cv2.waitKey(0) #The function waits for specified milliseconds for any keyboard event
cv2.destroyAllWindows() #cv2.destroyAllWindows() simply destroys all the windows we created
###################################################################################################
#################################Function#########################
def CercleDetection(img1):
# Read Image
raw_image = cv2.imread(img1)
# Bilateral filtering forms a very good way to preserve edges. It is a non-linear filter and helps reduce noise
# The parameters used are: the image, window size for averaging the neighbour, sigmaColor(Sigma value in the color space.
bilateral_filtered_image = cv2.bilateralFilter(raw_image, 5, 175, 175)
# Canny edge detector to detect edges in the image It takes 3 parameters: image, lower threshold and upper threshold.
edge_detected_image = cv2.Canny(bilateral_filtered_image, 75, 200)
# Find Contours
_, contours, hierarchy = cv2.findContours(edge_detected_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contour_list = []
for contour in contours:
approx = cv2.approxPolyDP(contour,0.01*cv2.arcLength(contour,True),True)
area = cv2.contourArea(contour)
if ((len(approx) > 8) & (len(approx) < 23) & (area > 50000) ):
contour_list.append(contour)
print("area %.3f"%(area))
M = cv2.moments(contour)
# calculate x,y coordinate of center
if M["m00"] != 0:
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
else:
cX, cY = 0, 0
cv2.circle(raw_image, (cX, cY), 5, (255, 255, 255), -1)
cv2.putText(raw_image, "centroid", (cX - 25, cY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
# Draw Contours of circles
cv2.drawContours(raw_image, contour_list, -1, (0, 255, 0), 3)
# Display Images
cv2.imshow("Objects Detected",raw_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
return cX,cY
############################################################
###########################MAIN#############################
MIN_MATCH_COUNT = 10
e1 = cv2.getTickCount()
# # initialize the camera
# cam = VideoCapture(0) # 0 -> index of camera
# s, img1 = cam.read()
# ret = cam.set(3,1920);
# ret = cam.set(4,1080);
# if s: # frame captured without any errors
# cv2.namedWindow("output", cv2.WINDOW_NORMAL)
# cv2.imshow("cam-test",img1)
# waitKey(0)
# destroyWindow("cam-test")
# imwrite("Scene.jpg",img1) #save image
# del(cam)
# Scene image in Grayscale
# imgray = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)
imgray = cv2.imread('Scene.jpg', 0)          # trainImage (scene)
# Reference Piece Image
img1 = cv2.imread('img3.jpg',0) # queryImage
# SIFT Algorithm fore Object Detection
SIFTMATCH(img1, imgray)
# image de reference
cX, cY = CercleDetection('img3.jpg')
print('cX = %.3f , cY =%.3f' % (cX, cY))
# Image Webcam
cX2, cY2 = CercleDetection('img3.jpg')
print('cX2 = %.3f , cY2 =%.3f' % (cX2, cY2))
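# displacement between the two detected circle centres (note the sign flip on the Y component below)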
deltaX = (cX2-cX)
deltaY = -(cY2-cY)
# Write X and Y values to File
file = open("values.txt", "w")
file.write("%.3f \n" % deltaX)
file.write("%.3f \n" % deltaY)
file.close()
#Calculate time of execution
e2 = cv2.getTickCount()
time = (e2 - e1)/ cv2.getTickFrequency()
print('time needed to execute')
print(time)
| [
"numpy.int32",
"cv2.imshow",
"cv2.destroyAllWindows",
"cv2.xfeatures2d.SIFT_create",
"cv2.moveWindow",
"cv2.arcLength",
"cv2.contourArea",
"cv2.perspectiveTransform",
"cv2.waitKey",
"cv2.getTickFrequency",
"cv2.drawContours",
"cv2.findHomography",
"cv2.putText",
"cv2.circle",
"cv2.moments",
"cv2.Canny",
"cv2.imread",
"cv2.bilateralFilter",
"cv2.getTickCount",
"cv2.FlannBasedMatcher",
"cv2.findContours",
"cv2.drawMatches",
"numpy.float32"
] | [((4277, 4295), 'cv2.getTickCount', 'cv2.getTickCount', ([], {}), '()\n', (4293, 4295), False, 'import cv2\n'), ((4792, 4818), 'cv2.imread', 'cv2.imread', (['"""Scene.jpg"""', '(0)'], {}), "('Scene.jpg', 0)\n", (4802, 4818), False, 'import cv2\n'), ((4869, 4894), 'cv2.imread', 'cv2.imread', (['"""img3.jpg"""', '(0)'], {}), "('img3.jpg', 0)\n", (4879, 4894), False, 'import cv2\n'), ((5432, 5450), 'cv2.getTickCount', 'cv2.getTickCount', ([], {}), '()\n', (5448, 5450), False, 'import cv2\n'), ((252, 281), 'cv2.xfeatures2d.SIFT_create', 'cv2.xfeatures2d.SIFT_create', ([], {}), '()\n', (279, 281), False, 'import cv2\n'), ((588, 638), 'cv2.FlannBasedMatcher', 'cv2.FlannBasedMatcher', (['index_params', 'search_params'], {}), '(index_params, search_params)\n', (609, 638), False, 'import cv2\n'), ((1808, 1872), 'cv2.drawMatches', 'cv2.drawMatches', (['img1', 'kp1', 'img2', 'kp2', 'good', 'None'], {}), '(img1, kp1, img2, kp2, good, None, **draw_params)\n', (1823, 1872), False, 'import cv2\n'), ((1876, 1910), 'cv2.moveWindow', 'cv2.moveWindow', (['"""output"""', '(150)', '(150)'], {}), "('output', 150, 150)\n", (1890, 1910), False, 'import cv2\n'), ((1937, 1963), 'cv2.imshow', 'cv2.imshow', (['"""output"""', 'img3'], {}), "('output', img3)\n", (1947, 1963), False, 'import cv2\n'), ((1968, 1982), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1979, 1982), False, 'import cv2\n'), ((2062, 2085), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2083, 2085), False, 'import cv2\n'), ((2394, 2410), 'cv2.imread', 'cv2.imread', (['img1'], {}), '(img1)\n', (2404, 2410), False, 'import cv2\n'), ((2691, 2734), 'cv2.bilateralFilter', 'cv2.bilateralFilter', (['raw_image', '(5)', '(175)', '(175)'], {}), '(raw_image, 5, 175, 175)\n', (2710, 2734), False, 'import cv2\n'), ((2887, 2931), 'cv2.Canny', 'cv2.Canny', (['bilateral_filtered_image', '(75)', '(200)'], {}), '(bilateral_filtered_image, 75, 200)\n', (2896, 2931), False, 'import cv2\n'), ((2987, 3064), 'cv2.findContours', 'cv2.findContours', (['edge_detected_image', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(edge_detected_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (3003, 3064), False, 'import cv2\n'), ((3906, 3967), 'cv2.drawContours', 'cv2.drawContours', (['raw_image', 'contour_list', '(-1)', '(0, 255, 0)', '(3)'], {}), '(raw_image, contour_list, -1, (0, 255, 0), 3)\n', (3922, 3967), False, 'import cv2\n'), ((4000, 4041), 'cv2.imshow', 'cv2.imshow', (['"""Objects Detected"""', 'raw_image'], {}), "('Objects Detected', raw_image)\n", (4010, 4041), False, 'import cv2\n'), ((4046, 4060), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4057, 4060), False, 'import cv2\n'), ((4066, 4089), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4087, 4089), False, 'import cv2\n'), ((5470, 5492), 'cv2.getTickFrequency', 'cv2.getTickFrequency', ([], {}), '()\n', (5490, 5492), False, 'import cv2\n'), ((1085, 1138), 'cv2.findHomography', 'cv2.findHomography', (['src_pts', 'dst_pts', 'cv2.RANSAC', '(5.0)'], {}), '(src_pts, dst_pts, cv2.RANSAC, 5.0)\n', (1103, 1138), False, 'import cv2\n'), ((1305, 1337), 'cv2.perspectiveTransform', 'cv2.perspectiveTransform', (['pts', 'M'], {}), '(pts, M)\n', (1329, 1337), False, 'import cv2\n'), ((3218, 3242), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (3233, 3242), False, 'import cv2\n'), ((3413, 3433), 'cv2.moments', 'cv2.moments', (['contour'], {}), '(contour)\n', (3424, 3433), False, 'import cv2\n'), ((3663, 3718), 'cv2.circle', 
'cv2.circle', (['raw_image', '(cX, cY)', '(5)', '(255, 255, 255)', '(-1)'], {}), '(raw_image, (cX, cY), 5, (255, 255, 255), -1)\n', (3673, 3718), False, 'import cv2\n'), ((3732, 3842), 'cv2.putText', 'cv2.putText', (['raw_image', '"""centroid"""', '(cX - 25, cY - 25)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(255, 255, 255)', '(2)'], {}), "(raw_image, 'centroid', (cX - 25, cY - 25), cv2.\n FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)\n", (3743, 3842), False, 'import cv2\n'), ((915, 961), 'numpy.float32', 'np.float32', (['[kp1[m.queryIdx].pt for m in good]'], {}), '([kp1[m.queryIdx].pt for m in good])\n', (925, 961), True, 'import numpy as np\n'), ((999, 1045), 'numpy.float32', 'np.float32', (['[kp2[m.trainIdx].pt for m in good]'], {}), '([kp2[m.trainIdx].pt for m in good])\n', (1009, 1045), True, 'import numpy as np\n'), ((1226, 1286), 'numpy.float32', 'np.float32', (['[[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]'], {}), '([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]])\n', (1236, 1286), True, 'import numpy as np\n'), ((1375, 1388), 'numpy.int32', 'np.int32', (['dst'], {}), '(dst)\n', (1383, 1388), True, 'import numpy as np\n'), ((3168, 3196), 'cv2.arcLength', 'cv2.arcLength', (['contour', '(True)'], {}), '(contour, True)\n', (3181, 3196), False, 'import cv2\n')] |
import dash
from dash.exceptions import PreventUpdate
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_bootstrap_components as dbc
import dash_table
import plotly.express as ex
import plotly.graph_objects as go
import pandas as pd
import numpy as np
data = pd.read_csv("./data/Phone_dataset_new.csv", header=0)
details = pd.read_csv("./data/Phone_details.csv", header=0)
names = details.loc[0]
data = data.rename(columns=names)
details = details.rename(columns=names)
maxi = details.loc[1].astype(int)
details_on_card = details.loc[2].astype(int)
details_on_card = details.columns[details_on_card == 1]
fitness_columns = {
"Memory": -1,
"RAM": -1,
"Camera (MP)": -1,
"Price (Euros)": 1,
}
fitness_data = data[fitness_columns] * maxi[fitness_columns].values
external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
app = dash.Dash(
__name__,
external_stylesheets=[dbc.themes.LITERA],
eager_loading=True,
suppress_callback_exceptions=True,
)
app.layout = html.Div(
children=[
# .container class is fixed, .container.scalable is scalable
dbc.Row(
[
dbc.Col(
html.H1(
children="What is your optimal phone?",
className="text-center mt-4",
)
)
]
),
dbc.Row(
[
dbc.Col(
children=[
# Top card with details(?)
dbc.Card(
children=[
dbc.CardBody(
[
html.H4(
"Researcher's Night Event",
className="card-title text-center",
),
html.P(
(
"This app uses decision support tools to "
"quickly and easily find phones which reflect "
"the user's desires. Input your preferences "
"below. The box on top right shows the phone "
"which matches the preferences the best. "
"The box on bottom right provides some "
"close alternatives."
),
className="card-text",
),
]
)
],
className="mr-3 ml-3 mb-2 mt-2",
),
dbc.Form(
[
dbc.FormGroup(
children=[
dbc.Label(
"Choose desired operating system",
html_for="os-choice",
),
dbc.RadioItems(
options=[
{
"label": "Android",
"value": "Android",
},
{"label": "iOS", "value": "IOS"},
{
"label": "No preference",
"value": "both",
},
],
id="os-choice",
value="both",
inline=True,
# className="text-center mt-4",
),
],
className="mr-3 ml-3 mb-2 mt-2",
),
dbc.FormGroup(
children=[
dbc.Label(
"Choose desired Memory capacity (GB)",
html_for="memory-choice",
),
dcc.Slider(
id="memory-choice",
min=16,
max=256,
step=None,
included=False,
value=256,
marks={
16: "16",
32: "32",
64: "64",
128: "128",
256: "256",
},
# className="text-center mt-5",
),
],
className="mr-3 ml-3 mb-2 mt-2",
),
dbc.FormGroup(
children=[
dbc.Label(
"Choose desired RAM capacity (GB)",
html_for="ram-choice",
),
dcc.Slider(
id="ram-choice",
min=2,
max=12,
step=1,
value=12,
included=False,
marks={
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
10: "10",
11: "11",
12: "12",
},
className="text-center mt-5",
),
],
className="mr-3 ml-3 mb-2 mt-2",
),
dbc.FormGroup(
children=[
dbc.Label(
"Choose desired camera resolution (MP)",
html_for="cam-choice",
),
dcc.Slider(
id="cam-choice",
min=0,
max=130,
step=1,
included=False,
value=70,
marks={
0: "0",
10: "10",
30: "30",
50: "50",
70: "70",
90: "90",
110: "110",
130: "130",
},
className="text-center mt-5",
),
],
className="mr-3 ml-3 mb-2 mt-2",
),
dbc.FormGroup(
children=[
dbc.Label(
"Choose desired budget (Euros)",
html_for="cost-choice",
),
dcc.Slider(
id="cost-choice",
min=0,
max=1400,
step=1,
included=False,
value=100,
marks={
0: "0",
200: "200",
400: "400",
600: "600",
800: "800",
1000: "1000",
1200: "1200",
1400: "1400",
},
className="text-center mt-5",
),
],
className="mr-3 ml-3 mb-2 mt-2",
),
],
style={"maxHeight": "560px", "overflow": "auto"},
),
],
width={"size": 5, "offset": 1},
),
dbc.Col(
children=[
dbc.Card(
children=[
dbc.CardHeader("The best phone for you is:"),
dbc.CardBody(id="results"),
],
className="mb-4",
),
dbc.Card(
children=[
dbc.CardHeader("Other great phones:"),
dbc.CardBody(
id="other-results",
children=(
[
html.P(
html.Span(
f"{i}. ",
id=f"other-results-list-{i}",
)
)
for i in range(2, 6)
]
+ [
dbc.Tooltip(
id=f"other-results-tooltip-{i}",
target=f"other-results-list-{i}",
placement="right",
style={
"maxWidth": 700,
"background-color": "white",
"color": "white",
"border-style": "solid",
"border-color": "black",
},
)
for i in range(2, 6)
]
),
),
],
className="mt-4",
),
html.Div(id="tooltips"),
],
width={"size": 5, "offset": 0},
className="mb-2 mt-2",
),
]
),
dbc.Row([html.Div(id="callback-dump")]),
],
)
@app.callback(
[
Output("results", "children"),
*[Output(f"other-results-list-{i}", "children") for i in range(2, 6)],
*[Output(f"other-results-tooltip-{i}", "children") for i in range(2, 6)],
],
[
Input(f"{attr}-choice", "value")
for attr in ["os", "memory", "ram", "cam", "cost"]
],
)
def results(*choices):
if choices[0] == "both":
choice_data = data
elif choices[0] == "IOS":
choice_data = data[[True if "IOS" in st else False for st in data["OS"]]]
if choices[0] == "Android":
choice_data = data[[True if "Android" in st else False for st in data["OS"]]]
relevant_data = choice_data[
["Memory", "RAM", "Camera (MP)", "Price (Euros)",]
].reset_index(drop=True)
card_data = choice_data[details_on_card].reset_index(drop=True)
maxi = np.asarray([-1, -1, -1, 1])
relevant_data = relevant_data * maxi
ideal = relevant_data.min().values
nadir = relevant_data.max().values
aspirations = choices[1:] * maxi
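    # normalise each attribute's deviation from the aspiration by the ideal-nadir range; phones are ranked by their worst (max) deviation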
distance = (aspirations - relevant_data) / (ideal - nadir)
distance = distance.max(axis=1)
distance_order = np.argsort(distance)
best = table_from_data(card_data.loc[distance_order.values[0]], choices[1:])
total_number = len(distance_order)
if total_number >= 4:
others, tooltips = other_options(card_data.loc[distance_order.values[1:5]])
else:
others, tooltips = other_options(
card_data.loc[distance_order.values[1:total_number]]
)
others = others + [f"{i}. -" for i in range(len(others) + 2, 6)]
tooltips = tooltips + [None for i in range(len(tooltips) + 2, 6)]
return (best, *others, *tooltips)
"""@app.callback(Output("tooltips", "children"), [Input("callback-dump", "children")])
def tooltips(tooldict):
num = len(tooldict["ids"])
content = []
for i in range(num):
content.append(dbc.Tooltip(tooldict["tables"][i], target=tooldict["ids"][i]))
return content"""
def table_from_data(data, choices):
# print(choices)
to_compare = ["Memory", "RAM", "Camera (MP)", "Price (Euros)"]
# print(data[to_compare].values)
diff = (data[to_compare].values - choices) * [1, 1, 1, -1]
colors = [None, None, None] + ["green" if x >= 0 else "red" for x in diff]
# print(np.sign(diff))
return dbc.Table(
[
html.Tbody(
[
html.Tr(
[
html.Th(col),
html.Td([str(data[col]),],),
html.Td([html.Span(" ▉", style={"color": c,},)],),
]
)
for (col, c) in zip(data.index, colors)
]
)
]
)
def table_from_data_horizontal(data):
header = [html.Thead(html.Tr([html.Th(col) for col in data.index]))]
body = [html.Tbody([html.Tr([html.Td(data[col]) for col in data.index])])]
return dbc.Table(header + body)
def other_options(data):
contents = []
tables = []
ids = []
i = 2
for index, row in data.iterrows():
contents.append(f"{i}. {row['Model']}")
tables.append(table_from_data_horizontal(row))
i = i + 1
return contents, tables
if __name__ == "__main__":
app.run_server(debug=False)
| [
"pandas.read_csv",
"dash.dependencies.Input",
"numpy.argsort",
"dash_html_components.Td",
"dash_bootstrap_components.Label",
"dash_html_components.Div",
"dash.Dash",
"dash_bootstrap_components.Tooltip",
"dash.dependencies.Output",
"dash_html_components.Span",
"numpy.asarray",
"dash_bootstrap_components.Table",
"dash_html_components.Th",
"dash_bootstrap_components.RadioItems",
"dash_html_components.H1",
"dash_core_components.Slider",
"dash_html_components.P",
"dash_html_components.H4",
"dash_bootstrap_components.CardHeader",
"dash_bootstrap_components.CardBody"
] | [((345, 398), 'pandas.read_csv', 'pd.read_csv', (['"""./data/Phone_dataset_new.csv"""'], {'header': '(0)'}), "('./data/Phone_dataset_new.csv', header=0)\n", (356, 398), True, 'import pandas as pd\n'), ((409, 458), 'pandas.read_csv', 'pd.read_csv', (['"""./data/Phone_details.csv"""'], {'header': '(0)'}), "('./data/Phone_details.csv', header=0)\n", (420, 458), True, 'import pandas as pd\n'), ((947, 1068), 'dash.Dash', 'dash.Dash', (['__name__'], {'external_stylesheets': '[dbc.themes.LITERA]', 'eager_loading': '(True)', 'suppress_callback_exceptions': '(True)'}), '(__name__, external_stylesheets=[dbc.themes.LITERA], eager_loading\n =True, suppress_callback_exceptions=True)\n', (956, 1068), False, 'import dash\n'), ((14486, 14513), 'numpy.asarray', 'np.asarray', (['[-1, -1, -1, 1]'], {}), '([-1, -1, -1, 1])\n', (14496, 14513), True, 'import numpy as np\n'), ((14790, 14810), 'numpy.argsort', 'np.argsort', (['distance'], {}), '(distance)\n', (14800, 14810), True, 'import numpy as np\n'), ((16645, 16669), 'dash_bootstrap_components.Table', 'dbc.Table', (['(header + body)'], {}), '(header + body)\n', (16654, 16669), True, 'import dash_bootstrap_components as dbc\n'), ((13663, 13692), 'dash.dependencies.Output', 'Output', (['"""results"""', '"""children"""'], {}), "('results', 'children')\n", (13669, 13692), False, 'from dash.dependencies import Input, Output, State\n'), ((13876, 13908), 'dash.dependencies.Input', 'Input', (['f"""{attr}-choice"""', '"""value"""'], {}), "(f'{attr}-choice', 'value')\n", (13881, 13908), False, 'from dash.dependencies import Input, Output, State\n'), ((13704, 13749), 'dash.dependencies.Output', 'Output', (['f"""other-results-list-{i}"""', '"""children"""'], {}), "(f'other-results-list-{i}', 'children')\n", (13710, 13749), False, 'from dash.dependencies import Input, Output, State\n'), ((13783, 13831), 'dash.dependencies.Output', 'Output', (['f"""other-results-tooltip-{i}"""', '"""children"""'], {}), "(f'other-results-tooltip-{i}', 'children')\n", (13789, 13831), False, 'from dash.dependencies import Input, Output, State\n'), ((13591, 13619), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""callback-dump"""'}), "(id='callback-dump')\n", (13599, 13619), True, 'import dash_html_components as html\n'), ((16516, 16528), 'dash_html_components.Th', 'html.Th', (['col'], {}), '(col)\n', (16523, 16528), True, 'import dash_html_components as html\n'), ((1268, 1345), 'dash_html_components.H1', 'html.H1', ([], {'children': '"""What is your optimal phone?"""', 'className': '"""text-center mt-4"""'}), "(children='What is your optimal phone?', className='text-center mt-4')\n", (1275, 1345), True, 'import dash_html_components as html\n'), ((16588, 16606), 'dash_html_components.Td', 'html.Td', (['data[col]'], {}), '(data[col])\n', (16595, 16606), True, 'import dash_html_components as html\n'), ((16136, 16148), 'dash_html_components.Th', 'html.Th', (['col'], {}), '(col)\n', (16143, 16148), True, 'import dash_html_components as html\n'), ((13387, 13410), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""tooltips"""'}), "(id='tooltips')\n", (13395, 13410), True, 'import dash_html_components as html\n'), ((16244, 16279), 'dash_html_components.Span', 'html.Span', (['""" ▉"""'], {'style': "{'color': c}"}), "(' ▉', style={'color': c})\n", (16253, 16279), True, 'import dash_html_components as html\n'), ((11217, 11261), 'dash_bootstrap_components.CardHeader', 'dbc.CardHeader', (['"""The best phone for you is:"""'], {}), "('The best phone for you is:')\n", (11231, 11261), True, 
'import dash_bootstrap_components as dbc\n'), ((11295, 11321), 'dash_bootstrap_components.CardBody', 'dbc.CardBody', ([], {'id': '"""results"""'}), "(id='results')\n", (11307, 11321), True, 'import dash_bootstrap_components as dbc\n'), ((11532, 11569), 'dash_bootstrap_components.CardHeader', 'dbc.CardHeader', (['"""Other great phones:"""'], {}), "('Other great phones:')\n", (11546, 11569), True, 'import dash_bootstrap_components as dbc\n'), ((1795, 1866), 'dash_html_components.H4', 'html.H4', (['"""Researcher\'s Night Event"""'], {'className': '"""card-title text-center"""'}), '("Researcher\'s Night Event", className=\'card-title text-center\')\n', (1802, 1866), True, 'import dash_html_components as html\n'), ((2039, 2351), 'dash_html_components.P', 'html.P', (['"""This app uses decision support tools to quickly and easily find phones which reflect the user\'s desires. Input your preferences below. The box on top right shows the phone which matches the preferences the best. The box on bottom right provides some close alternatives."""'], {'className': '"""card-text"""'}), '(\n "This app uses decision support tools to quickly and easily find phones which reflect the user\'s desires. Input your preferences below. The box on top right shows the phone which matches the preferences the best. The box on bottom right provides some close alternatives."\n , className=\'card-text\')\n', (2045, 2351), True, 'import dash_html_components as html\n'), ((3265, 3331), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Choose desired operating system"""'], {'html_for': '"""os-choice"""'}), "('Choose desired operating system', html_for='os-choice')\n", (3274, 3331), True, 'import dash_bootstrap_components as dbc\n'), ((3504, 3700), 'dash_bootstrap_components.RadioItems', 'dbc.RadioItems', ([], {'options': "[{'label': 'Android', 'value': 'Android'}, {'label': 'iOS', 'value': 'IOS'},\n {'label': 'No preference', 'value': 'both'}]", 'id': '"""os-choice"""', 'value': '"""both"""', 'inline': '(True)'}), "(options=[{'label': 'Android', 'value': 'Android'}, {'label':\n 'iOS', 'value': 'IOS'}, {'label': 'No preference', 'value': 'both'}],\n id='os-choice', value='both', inline=True)\n", (3518, 3700), True, 'import dash_bootstrap_components as dbc\n'), ((4767, 4841), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Choose desired Memory capacity (GB)"""'], {'html_for': '"""memory-choice"""'}), "('Choose desired Memory capacity (GB)', html_for='memory-choice')\n", (4776, 4841), True, 'import dash_bootstrap_components as dbc\n'), ((5014, 5180), 'dash_core_components.Slider', 'dcc.Slider', ([], {'id': '"""memory-choice"""', 'min': '(16)', 'max': '(256)', 'step': 'None', 'included': '(False)', 'value': '(256)', 'marks': "{(16): '16', (32): '32', (64): '64', (128): '128', (256): '256'}"}), "(id='memory-choice', min=16, max=256, step=None, included=False,\n value=256, marks={(16): '16', (32): '32', (64): '64', (128): '128', (\n 256): '256'})\n", (5024, 5180), True, 'import dash_core_components as dcc\n'), ((6154, 6222), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Choose desired RAM capacity (GB)"""'], {'html_for': '"""ram-choice"""'}), "('Choose desired RAM capacity (GB)', html_for='ram-choice')\n", (6163, 6222), True, 'import dash_bootstrap_components as dbc\n'), ((6395, 6638), 'dash_core_components.Slider', 'dcc.Slider', ([], {'id': '"""ram-choice"""', 'min': '(2)', 'max': '(12)', 'step': '(1)', 'value': '(12)', 'included': '(False)', 'marks': "{(2): '2', (3): '3', (4): '4', (5): '5', (6): '6', (7): 
'7', (8): '8', (9):\n '9', (10): '10', (11): '11', (12): '12'}", 'className': '"""text-center mt-5"""'}), "(id='ram-choice', min=2, max=12, step=1, value=12, included=False,\n marks={(2): '2', (3): '3', (4): '4', (5): '5', (6): '6', (7): '7', (8):\n '8', (9): '9', (10): '10', (11): '11', (12): '12'}, className=\n 'text-center mt-5')\n", (6405, 6638), True, 'import dash_core_components as dcc\n'), ((7852, 7925), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Choose desired camera resolution (MP)"""'], {'html_for': '"""cam-choice"""'}), "('Choose desired camera resolution (MP)', html_for='cam-choice')\n", (7861, 7925), True, 'import dash_bootstrap_components as dbc\n'), ((8098, 8321), 'dash_core_components.Slider', 'dcc.Slider', ([], {'id': '"""cam-choice"""', 'min': '(0)', 'max': '(130)', 'step': '(1)', 'included': '(False)', 'value': '(70)', 'marks': "{(0): '0', (10): '10', (30): '30', (50): '50', (70): '70', (90): '90', (110\n ): '110', (130): '130'}", 'className': '"""text-center mt-5"""'}), "(id='cam-choice', min=0, max=130, step=1, included=False, value=\n 70, marks={(0): '0', (10): '10', (30): '30', (50): '50', (70): '70', (\n 90): '90', (110): '110', (130): '130'}, className='text-center mt-5')\n", (8108, 8321), True, 'import dash_core_components as dcc\n'), ((9400, 9466), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Choose desired budget (Euros)"""'], {'html_for': '"""cost-choice"""'}), "('Choose desired budget (Euros)', html_for='cost-choice')\n", (9409, 9466), True, 'import dash_bootstrap_components as dbc\n'), ((9639, 9885), 'dash_core_components.Slider', 'dcc.Slider', ([], {'id': '"""cost-choice"""', 'min': '(0)', 'max': '(1400)', 'step': '(1)', 'included': '(False)', 'value': '(100)', 'marks': "{(0): '0', (200): '200', (400): '400', (600): '600', (800): '800', (1000):\n '1000', (1200): '1200', (1400): '1400'}", 'className': '"""text-center mt-5"""'}), "(id='cost-choice', min=0, max=1400, step=1, included=False, value\n =100, marks={(0): '0', (200): '200', (400): '400', (600): '600', (800):\n '800', (1000): '1000', (1200): '1200', (1400): '1400'}, className=\n 'text-center mt-5')\n", (9649, 9885), True, 'import dash_core_components as dcc\n'), ((12308, 12540), 'dash_bootstrap_components.Tooltip', 'dbc.Tooltip', ([], {'id': 'f"""other-results-tooltip-{i}"""', 'target': 'f"""other-results-list-{i}"""', 'placement': '"""right"""', 'style': "{'maxWidth': 700, 'background-color': 'white', 'color': 'white',\n 'border-style': 'solid', 'border-color': 'black'}"}), "(id=f'other-results-tooltip-{i}', target=\n f'other-results-list-{i}', placement='right', style={'maxWidth': 700,\n 'background-color': 'white', 'color': 'white', 'border-style': 'solid',\n 'border-color': 'black'})\n", (12319, 12540), True, 'import dash_bootstrap_components as dbc\n'), ((11862, 11911), 'dash_html_components.Span', 'html.Span', (['f"""{i}. """'], {'id': 'f"""other-results-list-{i}"""'}), "(f'{i}. ', id=f'other-results-list-{i}')\n", (11871, 11911), True, 'import dash_html_components as html\n')] |
# Segmentation script
# -------------------
# This script lets the user segment automatically one or many images based on the default segmentation models: SEM or
# TEM.
#
# <NAME> - 2017-08-30
# Imports
import sys
from pathlib import Path
import json
import argparse
from argparse import RawTextHelpFormatter
from tqdm import tqdm
import pkg_resources
import AxonDeepSeg
import AxonDeepSeg.ads_utils as ads
from AxonDeepSeg.apply_model import axon_segmentation
from AxonDeepSeg.ads_utils import convert_path
# Global variables
SEM_DEFAULT_MODEL_NAME = "default_SEM_model_v1"
TEM_DEFAULT_MODEL_NAME = "default_TEM_model_v1"
MODELS_PATH = pkg_resources.resource_filename('AxonDeepSeg', 'models')
MODELS_PATH = Path(MODELS_PATH)
default_SEM_path = MODELS_PATH / SEM_DEFAULT_MODEL_NAME
default_TEM_path = MODELS_PATH / TEM_DEFAULT_MODEL_NAME
default_overlap = 25
# Definition of the functions
def segment_image(path_testing_image, path_model,
overlap_value, config, resolution_model,
acquired_resolution = None, verbosity_level=0):
'''
Segment the image located at the path_testing_image location.
:param path_testing_image: the path of the image to segment.
:param path_model: where to access the model
    :param overlap_value: the number of pixels of overlap to use between patches during prediction. A higher value
    reduces border effects but increases segmentation time.
:param config: dict containing the configuration of the network
:param resolution_model: the resolution the model was trained on.
:param verbosity_level: Level of verbosity. The higher, the more information is given about the segmentation
process.
:return: Nothing.
'''
# If string, convert to Path objects
path_testing_image = convert_path(path_testing_image)
path_model = convert_path(path_model)
if path_testing_image.exists():
# Extracting the image name and its folder path from the total path.
path_parts = path_testing_image.parts
acquisition_name = Path(path_parts[-1])
path_acquisition = Path(*path_parts[:-1])
# Get type of model we are using
selected_model = path_model.name
# Read image
img = ads.imread(str(path_testing_image))
# Generate tmp file
fp = open(path_acquisition / '__tmp_segment__.png', 'wb+')
img_name_original = acquisition_name.stem
if selected_model == "default_TEM_model_v1":
ads.imwrite(fp,255-img, format='png')
else:
ads.imwrite(fp, img, format='png')
acquisition_name = Path(fp.name).name
segmented_image_name = img_name_original + '_seg-axonmyelin' + '.png'
# Performing the segmentation
axon_segmentation(path_acquisitions_folders=path_acquisition, acquisitions_filenames=[acquisition_name],
path_model_folder=path_model, config_dict=config, ckpt_name='model',
inference_batch_size=1, overlap_value=overlap_value,
segmentations_filenames=segmented_image_name,
resampled_resolutions=resolution_model, verbosity_level=verbosity_level,
acquired_resolution=acquired_resolution,
prediction_proba_activate=False, write_mode=True)
if verbosity_level >= 1:
print(("Image {0} segmented.".format(path_testing_image)))
# Remove temporary file used for the segmentation
fp.close()
(path_acquisition / '__tmp_segment__.png').unlink()
else:
print(("The path {0} does not exist.".format(path_testing_image)))
return None
def segment_folders(path_testing_images_folder, path_model,
overlap_value, config, resolution_model,
acquired_resolution = None,
verbosity_level=0):
'''
Segments the images contained in the image folders located in the path_testing_images_folder.
:param path_testing_images_folder: the folder where all image folders are located (the images to segment are located
in those image folders)
:param path_model: where to access the model.
    :param overlap_value: the number of pixels of overlap to use between patches during prediction. A higher value
    reduces border effects but increases segmentation time.
:param config: dict containing the configuration of the network
:param resolution_model: the resolution the model was trained on.
:param verbosity_level: Level of verbosity. The higher, the more information is given about the segmentation
process.
:return: Nothing.
'''
# If string, convert to Path objects
path_testing_images_folder = convert_path(path_testing_images_folder)
path_model = convert_path(path_model)
# Update list of images to segment by selecting only image files (not already segmented or not masks)
img_files = [file for file in path_testing_images_folder.iterdir() if (file.suffix.lower() in ('.png','.jpg','.jpeg','.tif','.tiff'))
and (not str(file).endswith(('_seg-axonmyelin.png','_seg-axon.png','_seg-myelin.png','mask.png')))]
# Pre-processing: convert to png if not already done and adapt to model contrast
for file_ in tqdm(img_files, desc="Segmentation..."):
print(path_testing_images_folder / file_)
try:
height, width, _ = ads.imread(str(path_testing_images_folder / file_)).shape
except:
try:
height, width = ads.imread(str(path_testing_images_folder / file_)).shape
except Exception as e:
raise e
image_size = [height, width]
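        # smallest pixel size for which the resampled image still spans at least one full training patch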
minimum_resolution = config["trainingset_patchsize"] * resolution_model / min(image_size)
if acquired_resolution < minimum_resolution:
print("EXCEPTION: The size of one of the images ({0}x{1}) is too small for the provided pixel size ({2}).\n".format(height, width, acquired_resolution),
"The image size must be at least {0}x{0} after resampling to a resolution of {1} to create standard sized patches.\n".format(config["trainingset_patchsize"], resolution_model),
"One of the dimensions of the image has a size of {0} after resampling to that resolution.\n".format(round(acquired_resolution * min(image_size) / resolution_model)),
"Image file location: {0}".format(str(path_testing_images_folder / file_))
)
sys.exit(2)
selected_model = path_model.name
# Read image for conversion
img = ads.imread(str(path_testing_images_folder / file_))
# Generate tmpfile for segmentation pipeline
fp = open(path_testing_images_folder / '__tmp_segment__.png', 'wb+')
img_name_original = file_.stem
if selected_model == "default_TEM_model_v1":
ads.imwrite(fp,255-img, format='png')
else:
ads.imwrite(fp,img, format='png')
acquisition_name = Path(fp.name).name
segmented_image_name = img_name_original + '_seg-axonmyelin' + '.png'
axon_segmentation(path_acquisitions_folders=path_testing_images_folder, acquisitions_filenames=[acquisition_name],
path_model_folder=path_model, config_dict=config, ckpt_name='model',
inference_batch_size=1, overlap_value=overlap_value,
segmentations_filenames=[segmented_image_name],
acquired_resolution=acquired_resolution,
verbosity_level=verbosity_level,
resampled_resolutions=resolution_model, prediction_proba_activate=False,
write_mode=True)
if verbosity_level >= 1:
tqdm.write("Image {0} segmented.".format(str(path_testing_images_folder / file_)))
# Remove temporary file used for the segmentation
fp.close()
(path_testing_images_folder / '__tmp_segment__.png').unlink()
return None
def generate_default_parameters(type_acquisition, new_path):
'''
    Generates the segmentation parameters for the default model corresponding to the given acquisition type.
    :param type_acquisition: String, the acquisition type ('SEM' or 'TEM') to get the parameters for.
:param new_path: Path to the model to use.
:return: the config dictionary.
'''
# If string, convert to Path objects
new_path = convert_path(new_path)
# Building the path of the requested model if it exists and was supplied, else we load the default model.
if type_acquisition == 'SEM':
if (new_path is not None) and new_path.exists():
path_model = new_path
else:
path_model = MODELS_PATH / SEM_DEFAULT_MODEL_NAME
elif type_acquisition == 'TEM':
if (new_path is not None) and new_path.exists():
path_model = new_path
else:
path_model = MODELS_PATH / TEM_DEFAULT_MODEL_NAME
path_config_file = path_model / 'config_network.json'
config = generate_config_dict(path_config_file)
return path_model, config
def generate_config_dict(path_to_config_file):
'''
Generates the dictionary version of the configuration file from the path where it is located.
:param path_to_config: relative path where the file config_network.json is located.
:return: dict containing the configuration of the network, or None if no configuration file was found at the
mentioned path.
'''
# If string, convert to Path objects
path_to_config_file = convert_path(path_to_config_file)
try:
with open(path_to_config_file, 'r') as fd:
config_network = json.loads(fd.read())
except:
raise ValueError("No configuration file available at this path.")
return config_network
def generate_resolution(type_acquisition, model_input_size):
'''
    Generates the resolution to use with the trained model.
:param type_acquisition: String, "SEM" or "TEM"
:param model_input_size: String or Int, the size of the input.
:return: Float, the resolution of the model.
'''
dict_size = {
"SEM":{
"512":0.1,
"256":0.2
},
"TEM":{
"512":0.01
}
}
return dict_size[str(type_acquisition)][str(model_input_size)]
# Main loop
def main(argv=None):
'''
Main loop.
:return: Exit code.
0: Success
2: Invalid argument value
3: Missing value or file
'''
print(('AxonDeepSeg v.{}'.format(AxonDeepSeg.__version__)))
ap = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter)
requiredName = ap.add_argument_group('required arguments')
# Setting the arguments of the segmentation
requiredName.add_argument('-t', '--type', required=True, choices=['SEM','TEM'], help='Type of acquisition to segment. \n'+
'SEM: scanning electron microscopy samples. \n'+
'TEM: transmission electron microscopy samples. ')
requiredName.add_argument('-i', '--imgpath', required=True, nargs='+', help='Path to the image to segment or path to the folder \n'+
'where the image(s) to segment is/are located.')
ap.add_argument("-m", "--model", required=False, help='Folder where the model is located. \n'+
'The default SEM model path is: \n'+str(default_SEM_path)+'\n'+
'The default TEM model path is: \n'+str(default_TEM_path)+'\n')
ap.add_argument('-s', '--sizepixel', required=False, help='Pixel size of the image(s) to segment, in micrometers. \n'+
'If no pixel size is specified, a pixel_size_in_micrometer.txt \n'+
'file needs to be added to the image folder path. The pixel size \n'+
'in that file will be used for the segmentation.',
default=None)
ap.add_argument('-v', '--verbose', required=False, type=int, choices=list(range(0,4)), help='Verbosity level. \n'+
'0 (default) : Displays the progress bar for the segmentation. \n'+
'1: Also displays the path of the image(s) being segmented. \n'+
'2: Also displays the information about the prediction step \n'+
' for the segmentation of current sample. \n'+
'3: Also displays the patch number being processed in the current sample.',
default=0)
ap.add_argument('-o', '--overlap', required=False, type=int, help='Overlap value (in pixels) of the patches when doing the segmentation. \n'+
'Higher values of overlap can improve the segmentation at patch borders, \n'+
'but also increase the segmentation time. \n'+
'Default value: '+str(default_overlap)+'\n'+
'Recommended range of values: [10-100]. \n',
default=25)
ap._action_groups.reverse()
# Processing the arguments
args = vars(ap.parse_args(argv))
type_ = str(args["type"])
verbosity_level = int(args["verbose"])
overlap_value = int(args["overlap"])
if args["sizepixel"] is not None:
psm = float(args["sizepixel"])
else:
psm = None
path_target_list = [Path(p) for p in args["imgpath"]]
new_path = Path(args["model"]) if args["model"] else None
# Preparing the arguments to axon_segmentation function
path_model, config = generate_default_parameters(type_, new_path)
resolution_model = generate_resolution(type_, config["trainingset_patchsize"])
# Tuple of valid file extensions
validExtensions = (
".jpeg",
".jpg",
".tif",
".tiff",
".png"
)
# Going through all paths passed into arguments
for current_path_target in path_target_list:
if not current_path_target.is_dir():
if current_path_target.suffix.lower() in validExtensions:
# Handle cases if no resolution is provided on the CLI
if psm == None:
# Check if a pixel size file exists, if so read it.
if (current_path_target.parent / 'pixel_size_in_micrometer.txt').exists():
resolution_file = open(current_path_target.parent / 'pixel_size_in_micrometer.txt', 'r')
psm = float(resolution_file.read())
else:
print("ERROR: No pixel size is provided, and there is no pixel_size_in_micrometer.txt file in image folder. ",
"Please provide a pixel size (using argument -s), or add a pixel_size_in_micrometer.txt file ",
"containing the pixel size value."
)
sys.exit(3)
# Check that image size is large enough for given resolution to reach minimum patch size after resizing.
try:
height, width, _ = ads.imread(str(current_path_target)).shape
except:
try:
height, width = ads.imread(str(current_path_target)).shape
except Exception as e:
raise e
image_size = [height, width]
minimum_resolution = config["trainingset_patchsize"] * resolution_model / min(image_size)
if psm < minimum_resolution:
print("EXCEPTION: The size of one of the images ({0}x{1}) is too small for the provided pixel size ({2}).\n".format(height, width, psm),
"The image size must be at least {0}x{0} after resampling to a resolution of {1} to create standard sized patches.\n".format(config["trainingset_patchsize"], resolution_model),
"One of the dimensions of the image has a size of {0} after resampling to that resolution.\n".format(round(psm * min(image_size) / resolution_model)),
"Image file location: {0}".format(current_path_target)
)
sys.exit(2)
# Performing the segmentation over the image
segment_image(current_path_target, path_model, overlap_value, config,
resolution_model,
acquired_resolution=psm,
verbosity_level=verbosity_level)
print("Segmentation finished.")
else:
print("The path(s) specified is/are not image(s). Please update the input path(s) and try again.")
break
else:
# Handle cases if no resolution is provided on the CLI
if psm == None:
# Check if a pixel size file exists, if so read it.
if (current_path_target / 'pixel_size_in_micrometer.txt').exists():
resolution_file = open(current_path_target / 'pixel_size_in_micrometer.txt', 'r')
psm = float(resolution_file.read())
else:
print("ERROR: No pixel size is provided, and there is no pixel_size_in_micrometer.txt file in image folder. ",
"Please provide a pixel size (using argument -s), or add a pixel_size_in_micrometer.txt file ",
"containing the pixel size value."
)
sys.exit(3)
# Performing the segmentation over all folders in the specified folder containing acquisitions to segment.
segment_folders(current_path_target, path_model, overlap_value, config,
resolution_model,
acquired_resolution=psm,
verbosity_level=verbosity_level)
print("Segmentation finished.")
sys.exit(0)
# Calling the script
if __name__ == '__main__':
main()
| [
"argparse.ArgumentParser",
"pathlib.Path",
"AxonDeepSeg.ads_utils.imwrite",
"AxonDeepSeg.ads_utils.convert_path",
"tqdm.tqdm",
"pkg_resources.resource_filename",
"AxonDeepSeg.apply_model.axon_segmentation",
"sys.exit"
] | [((643, 699), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""AxonDeepSeg"""', '"""models"""'], {}), "('AxonDeepSeg', 'models')\n", (674, 699), False, 'import pkg_resources\n'), ((714, 731), 'pathlib.Path', 'Path', (['MODELS_PATH'], {}), '(MODELS_PATH)\n', (718, 731), False, 'from pathlib import Path\n'), ((1802, 1834), 'AxonDeepSeg.ads_utils.convert_path', 'convert_path', (['path_testing_image'], {}), '(path_testing_image)\n', (1814, 1834), False, 'from AxonDeepSeg.ads_utils import convert_path\n'), ((1852, 1876), 'AxonDeepSeg.ads_utils.convert_path', 'convert_path', (['path_model'], {}), '(path_model)\n', (1864, 1876), False, 'from AxonDeepSeg.ads_utils import convert_path\n'), ((4778, 4818), 'AxonDeepSeg.ads_utils.convert_path', 'convert_path', (['path_testing_images_folder'], {}), '(path_testing_images_folder)\n', (4790, 4818), False, 'from AxonDeepSeg.ads_utils import convert_path\n'), ((4836, 4860), 'AxonDeepSeg.ads_utils.convert_path', 'convert_path', (['path_model'], {}), '(path_model)\n', (4848, 4860), False, 'from AxonDeepSeg.ads_utils import convert_path\n'), ((5326, 5365), 'tqdm.tqdm', 'tqdm', (['img_files'], {'desc': '"""Segmentation..."""'}), "(img_files, desc='Segmentation...')\n", (5330, 5365), False, 'from tqdm import tqdm\n'), ((8552, 8574), 'AxonDeepSeg.ads_utils.convert_path', 'convert_path', (['new_path'], {}), '(new_path)\n', (8564, 8574), False, 'from AxonDeepSeg.ads_utils import convert_path\n'), ((9684, 9717), 'AxonDeepSeg.ads_utils.convert_path', 'convert_path', (['path_to_config_file'], {}), '(path_to_config_file)\n', (9696, 9717), False, 'from AxonDeepSeg.ads_utils import convert_path\n'), ((10720, 10781), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'RawTextHelpFormatter'}), '(formatter_class=RawTextHelpFormatter)\n', (10743, 10781), False, 'import argparse\n'), ((19031, 19042), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (19039, 19042), False, 'import sys\n'), ((2065, 2085), 'pathlib.Path', 'Path', (['path_parts[-1]'], {}), '(path_parts[-1])\n', (2069, 2085), False, 'from pathlib import Path\n'), ((2113, 2135), 'pathlib.Path', 'Path', (['*path_parts[:-1]'], {}), '(*path_parts[:-1])\n', (2117, 2135), False, 'from pathlib import Path\n'), ((2776, 3238), 'AxonDeepSeg.apply_model.axon_segmentation', 'axon_segmentation', ([], {'path_acquisitions_folders': 'path_acquisition', 'acquisitions_filenames': '[acquisition_name]', 'path_model_folder': 'path_model', 'config_dict': 'config', 'ckpt_name': '"""model"""', 'inference_batch_size': '(1)', 'overlap_value': 'overlap_value', 'segmentations_filenames': 'segmented_image_name', 'resampled_resolutions': 'resolution_model', 'verbosity_level': 'verbosity_level', 'acquired_resolution': 'acquired_resolution', 'prediction_proba_activate': '(False)', 'write_mode': '(True)'}), "(path_acquisitions_folders=path_acquisition,\n acquisitions_filenames=[acquisition_name], path_model_folder=path_model,\n config_dict=config, ckpt_name='model', inference_batch_size=1,\n overlap_value=overlap_value, segmentations_filenames=\n segmented_image_name, resampled_resolutions=resolution_model,\n verbosity_level=verbosity_level, acquired_resolution=\n acquired_resolution, prediction_proba_activate=False, write_mode=True)\n", (2793, 3238), False, 'from AxonDeepSeg.apply_model import axon_segmentation\n'), ((7182, 7655), 'AxonDeepSeg.apply_model.axon_segmentation', 'axon_segmentation', ([], {'path_acquisitions_folders': 'path_testing_images_folder', 'acquisitions_filenames': 
'[acquisition_name]', 'path_model_folder': 'path_model', 'config_dict': 'config', 'ckpt_name': '"""model"""', 'inference_batch_size': '(1)', 'overlap_value': 'overlap_value', 'segmentations_filenames': '[segmented_image_name]', 'acquired_resolution': 'acquired_resolution', 'verbosity_level': 'verbosity_level', 'resampled_resolutions': 'resolution_model', 'prediction_proba_activate': '(False)', 'write_mode': '(True)'}), "(path_acquisitions_folders=path_testing_images_folder,\n acquisitions_filenames=[acquisition_name], path_model_folder=path_model,\n config_dict=config, ckpt_name='model', inference_batch_size=1,\n overlap_value=overlap_value, segmentations_filenames=[\n segmented_image_name], acquired_resolution=acquired_resolution,\n verbosity_level=verbosity_level, resampled_resolutions=resolution_model,\n prediction_proba_activate=False, write_mode=True)\n", (7199, 7655), False, 'from AxonDeepSeg.apply_model import axon_segmentation\n'), ((14313, 14320), 'pathlib.Path', 'Path', (['p'], {}), '(p)\n', (14317, 14320), False, 'from pathlib import Path\n'), ((14362, 14381), 'pathlib.Path', 'Path', (["args['model']"], {}), "(args['model'])\n", (14366, 14381), False, 'from pathlib import Path\n'), ((2504, 2544), 'AxonDeepSeg.ads_utils.imwrite', 'ads.imwrite', (['fp', '(255 - img)'], {'format': '"""png"""'}), "(fp, 255 - img, format='png')\n", (2515, 2544), True, 'import AxonDeepSeg.ads_utils as ads\n'), ((2568, 2602), 'AxonDeepSeg.ads_utils.imwrite', 'ads.imwrite', (['fp', 'img'], {'format': '"""png"""'}), "(fp, img, format='png')\n", (2579, 2602), True, 'import AxonDeepSeg.ads_utils as ads\n'), ((2631, 2644), 'pathlib.Path', 'Path', (['fp.name'], {}), '(fp.name)\n', (2635, 2644), False, 'from pathlib import Path\n'), ((6556, 6567), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (6564, 6567), False, 'import sys\n'), ((6950, 6990), 'AxonDeepSeg.ads_utils.imwrite', 'ads.imwrite', (['fp', '(255 - img)'], {'format': '"""png"""'}), "(fp, 255 - img, format='png')\n", (6961, 6990), True, 'import AxonDeepSeg.ads_utils as ads\n'), ((7014, 7048), 'AxonDeepSeg.ads_utils.imwrite', 'ads.imwrite', (['fp', 'img'], {'format': '"""png"""'}), "(fp, img, format='png')\n", (7025, 7048), True, 'import AxonDeepSeg.ads_utils as ads\n'), ((7076, 7089), 'pathlib.Path', 'Path', (['fp.name'], {}), '(fp.name)\n', (7080, 7089), False, 'from pathlib import Path\n'), ((17264, 17275), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (17272, 17275), False, 'import sys\n'), ((18609, 18620), 'sys.exit', 'sys.exit', (['(3)'], {}), '(3)\n', (18617, 18620), False, 'import sys\n'), ((15960, 15971), 'sys.exit', 'sys.exit', (['(3)'], {}), '(3)\n', (15968, 15971), False, 'import sys\n')] |
from dcstats.hedges import Hedges_d
from dcstats.statistics_EJ import simple_stats as mean_SD
import random
import math
def generate_sample (length, mean, sigma):
    # generate a list of normally distributed samples
sample = []
for n in range(length):
sample.append(random.gauss(mean, sigma))
return sample
def close_enough (a, b, count_error):
if math.fabs (a - b) < math.fabs((a + b) / (count_error * 2)) :
return True
else:
return False
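# Worked example (added for clarity; not part of the original test file):
# close_enough(a, b, n) accepts a and b when |a - b| < |a + b| / (2 * n),
# i.e. when the difference is within roughly 1/n of the mean of a and b.
# With sample_size = 200 below, n = sqrt(200) ~= 14.1, so the compared values
# must agree to within about 7% of their mean.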
def gaussian_case (sig):
sample_size = 200
count_error = math.sqrt(sample_size)
m1 = 1
m2 = 2
s1 = generate_sample (sample_size, m1, sig)
s2 = generate_sample (sample_size, m2, sig)
h_testing = Hedges_d(s1, s2)
h_testing.hedges_d_unbiased() #answer is in self.d
approx_95CI_lower, approx_95CI_upper = h_testing.approx_CI()
bs_95CI_lower, bs_95CI_upper = h_testing.bootstrap_CI(5000)
print (mean_SD(s1), mean_SD(s2))
print ("h_testing.d, analytic, correction = ", h_testing.d, (m2 - m1) / sig, h_testing.correction)
print ("lower: approx, bootstrap", approx_95CI_lower, bs_95CI_lower)
print ("upper: approx, bootstrap", approx_95CI_upper, bs_95CI_upper)
#bootstrap is similar at high d but gives wider intervals at low d
assert close_enough(approx_95CI_lower, bs_95CI_lower, count_error)
assert close_enough(approx_95CI_upper, bs_95CI_upper, count_error)
assert close_enough(h_testing.d, (m2 - m1) / sig, count_error)
###tests
def test_gaussian_case_low():
gaussian_case(0.2) #expect d = 5
def test_gaussian_case_med():
gaussian_case(0.5) #expect d = 2
def test_gaussian_case_high():
gaussian_case(1.0) #expect d = 1, fail
| [
"dcstats.hedges.Hedges_d",
"math.sqrt",
"math.fabs",
"dcstats.statistics_EJ.simple_stats",
"random.gauss"
] | [((551, 573), 'math.sqrt', 'math.sqrt', (['sample_size'], {}), '(sample_size)\n', (560, 573), False, 'import math\n'), ((723, 739), 'dcstats.hedges.Hedges_d', 'Hedges_d', (['s1', 's2'], {}), '(s1, s2)\n', (731, 739), False, 'from dcstats.hedges import Hedges_d\n'), ((373, 389), 'math.fabs', 'math.fabs', (['(a - b)'], {}), '(a - b)\n', (382, 389), False, 'import math\n'), ((393, 431), 'math.fabs', 'math.fabs', (['((a + b) / (count_error * 2))'], {}), '((a + b) / (count_error * 2))\n', (402, 431), False, 'import math\n'), ((959, 970), 'dcstats.statistics_EJ.simple_stats', 'mean_SD', (['s1'], {}), '(s1)\n', (966, 970), True, 'from dcstats.statistics_EJ import simple_stats as mean_SD\n'), ((972, 983), 'dcstats.statistics_EJ.simple_stats', 'mean_SD', (['s2'], {}), '(s2)\n', (979, 983), True, 'from dcstats.statistics_EJ import simple_stats as mean_SD\n'), ((281, 306), 'random.gauss', 'random.gauss', (['mean', 'sigma'], {}), '(mean, sigma)\n', (293, 306), False, 'import random\n')] |
from django.urls import path
from . import views
urlpatterns = [
path('', views.Records, name ="fRec"),
] | [
"django.urls.path"
] | [((70, 106), 'django.urls.path', 'path', (['""""""', 'views.Records'], {'name': '"""fRec"""'}), "('', views.Records, name='fRec')\n", (74, 106), False, 'from django.urls import path\n')] |
import pytest
from spacy.training.example import Example
from spacy.util import make_tempdir
from spacy import util
from thinc.api import Config
TRAIN_DATA = [
("I'm so happy.", {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}}),
("I'm so angry", {"cats": {"POSITIVE": 0.0, "NEGATIVE": 1.0}}),
]
cfg_string = """
[nlp]
lang = "en"
pipeline = ["transformer","textcat"]
[components]
[components.textcat]
factory = "textcat"
[components.textcat.model]
@architectures = "spacy.TextCatEnsemble.v2"
[components.textcat.model.tok2vec]
@architectures = "spacy-transformers.TransformerListener.v1"
grad_factor = 1.0
[components.textcat.model.tok2vec.pooling]
@layers = "reduce_mean.v1"
[components.transformer]
factory = "transformer"
"""
# Xfail this until the new spaCy rc is up.
@pytest.mark.xfail
def test_transformer_pipeline_textcat():
"""Test that a pipeline with just a transformer+textcat runs and trains properly.
This used to throw an error because of shape inference issues -
cf https://github.com/explosion/spaCy/issues/6401"""
orig_config = Config().from_str(cfg_string)
nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
assert nlp.pipe_names == ["transformer", "textcat"]
train_examples = []
for text, annotations in TRAIN_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
optimizer = nlp.initialize(get_examples=lambda: train_examples)
for i in range(2):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
doc = nlp("We're interested at underwater basket weaving.")
cats1 = doc.cats
# ensure IO goes OK
with make_tempdir() as d:
file_path = d / "trained_nlp"
nlp.to_disk(file_path)
nlp2 = util.load_model_from_path(file_path)
doc2 = nlp2("We're interested at underwater basket weaving.")
cats2 = doc2.cats
assert cats1 == cats2
| [
"spacy.util.load_model_from_config",
"spacy.util.load_model_from_path",
"thinc.api.Config",
"spacy.util.make_tempdir"
] | [((1181, 1252), 'spacy.util.load_model_from_config', 'util.load_model_from_config', (['orig_config'], {'auto_fill': '(True)', 'validate': '(True)'}), '(orig_config, auto_fill=True, validate=True)\n', (1208, 1252), False, 'from spacy import util\n'), ((1754, 1768), 'spacy.util.make_tempdir', 'make_tempdir', ([], {}), '()\n', (1766, 1768), False, 'from spacy.util import make_tempdir\n'), ((1859, 1895), 'spacy.util.load_model_from_path', 'util.load_model_from_path', (['file_path'], {}), '(file_path)\n', (1884, 1895), False, 'from spacy import util\n'), ((1141, 1149), 'thinc.api.Config', 'Config', ([], {}), '()\n', (1147, 1149), False, 'from thinc.api import Config\n')] |
import asyncio
from ..core.common.io import input
from .action_creator import ActionCreator
class REPL:
def __init__(self, action_queue, config, *args, **kwargs):
self.action_queue = action_queue
self.config = config
async def run(self):
await asyncio.sleep(1)
print("Insert command: ")
action_creator = ActionCreator()
while True:
input_data = await input("~> ")
if not input_data:
for task in asyncio.all_tasks():
task.cancel()
break
action = action_creator.parse(*input_data.split())
if action:
self.action_queue.push_action(action)
| [
"asyncio.all_tasks",
"asyncio.sleep"
] | [((280, 296), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (293, 296), False, 'import asyncio\n'), ((495, 514), 'asyncio.all_tasks', 'asyncio.all_tasks', ([], {}), '()\n', (512, 514), False, 'import asyncio\n')] |
# %% [markdown]
# # Testing python-som with audio dataset
# %% [markdown]
# # Imports
# %%
import matplotlib.pyplot as plt
# import librosa as lr
# import librosa.display as lrdisp
import numpy as np
import pandas as pd
import pickle
import seaborn as sns
import sklearn.preprocessing
from python_som import SOM
FILE_PREFIX = 'som64_u_grupo1'
# %% [markdown]
# # Loading dataset
# %%
df = pd.read_csv('features_means.csv', index_col=0, verbose=True)
df.index = pd.to_datetime(df.index)
df['rac'] = False
df.loc['2020-09-22':, 'rac'] = True # type: ignore
df.sort_index(inplace=True)
# %% [markdown]
# ## Checking for and dropping duplicates
# %%
# Resetting index for duplicate analysis
df.reset_index(inplace=True)
print("Duplicates by filename:",
df.duplicated(subset=['file_name']).value_counts(),
sep='\n')
df.drop_duplicates(subset=['file_name'], inplace=True)
print("Duplicates by (datetime, ala, grupo):",
df.duplicated(subset=['datetime', 'ala', 'grupo']).value_counts(),
sep='\n')
df.drop_duplicates(subset=['datetime', 'ala', 'grupo'], inplace=True)
# Rebuilding dataframe index
df.set_index('datetime', inplace=True)
# %%
# Filtering dataset by 'group'
df = df[df['grupo'] == 1]
# %%
# Dropping tail of dataset for class balancing
# tail_size = abs(
# len(df[df['rac'].astype(int) == 1]) - len(df[df['rac'].astype(int) == 0]))
# df.drop(df.tail(tail_size).index, inplace=True)
# %% [markdown]
# ## Visualizing distribution of sample dates
# %%
df_tmp = pd.DataFrame(df['file_name'].resample('1D').count())
df_tmp['count'] = df_tmp['file_name']
del df_tmp['file_name']
df_tmp['rac'] = False
df_tmp.loc['2020-09-22':, 'rac'] = True # type: ignore
plt.figure(figsize=(10, 10))
sns.set(style="whitegrid",
palette=sns.color_palette("muted", n_colors=6, desat=1.0))
sns.barplot(y=df_tmp.index, x=df_tmp['count'], hue=df_tmp['rac'])
plt.draw()
df_tmp = pd.DataFrame(df['file_name'].resample('1H').count())
df_tmp['count'] = df_tmp['file_name']
del df_tmp['file_name']
df_tmp['rac'] = False
df_tmp.loc['2020-09-22':, 'rac'] = True # type: ignore
df_tmp = df_tmp.reset_index()
df_tmp['hour'] = df_tmp['datetime'].dt.hour
plt.figure(figsize=(10, 10))
sns.set(style="whitegrid",
palette=sns.color_palette("muted", n_colors=6, desat=1.0))
sns.barplot(y=df_tmp['hour'], x=df_tmp['count'], hue=df_tmp['rac'], orient='h')
plt.draw()
# %%
df_melt = pd.melt(df, value_vars=['rac'], value_name='ractopamine')
plt.figure(figsize=(10, 10))
sns.set(style="whitegrid",
palette=sns.color_palette("muted", n_colors=6, desat=1.0))
ax = sns.countplot(data=df_melt, x='ractopamine', hue='ractopamine')
for p in ax.patches:
ax.annotate(f'\n{p.get_height()}', (p.get_x() + 0.2, p.get_height()),
ha='center',
va='top',
color='white',
size=18)
plt.draw()
# %%
# using sklearn's MinMaxScaler
scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(0, 1))
df_train = df.iloc[:, 3:-1].copy()
df_train = scaler.fit_transform(df_train)
# %%
# Defining first element of SOM shape
# Second element will be assigned based on the ratio between the
# first two principal components of the train dataset
som_x: int = 64
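# %%
# Illustrative sketch (added; not part of the original analysis): one plausible
# way a ratio of the first two principal components could suggest the second
# SOM dimension. This heuristic is an assumption for illustration only, not
# necessarily what python_som's SOM(y=None) computes internally.
from sklearn.decomposition import PCA
_pca = PCA(n_components=2).fit(df_train)
_pc_ratio = _pca.explained_variance_[0] / _pca.explained_variance_[1]
print('PC1/PC2 variance ratio:', _pc_ratio,
      '-> suggested som_y ~', max(1, round(som_x / _pc_ratio)))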
try:
with open(f'./{FILE_PREFIX}.obj', 'rb') as f:
som = pickle.load(f)
except FileNotFoundError:
som = SOM(x=som_x,
y=None,
input_len=df_train.shape[1],
learning_rate=0.5,
neighborhood_radius=1.0,
neighborhood_function='gaussian',
cyclic_x=True,
cyclic_y=True,
data=df_train)
# Training SOM
som.weight_initialization(mode='linear', data=df_train)
som.train(data=df_train, mode='random', verbose=True)
with open(f'./{FILE_PREFIX}.obj', 'wb') as f:
pickle.dump(som, f)
# %%
som_x, som_y = som.get_shape()
print('SOM shape:', (som_x, som_y))
# %%
# Visualizing distance matrix and activation matrix
umatrix = som.distance_matrix()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 9))
sns.heatmap(umatrix.T, cmap='bone_r', ax=ax1, robust=True)
sns.heatmap(som.activation_matrix(data=df_train).T,
cmap='mako',
ax=ax2,
robust=True)
ax1.invert_yaxis()
ax2.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix_activation.png',
bbox_inches='tight',
transparent=True)
plt.draw()
# %%
# Visualizing distance matrix and activation matrix separately
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(umatrix.T, cmap='bone_r', robust=True)
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix.png',
bbox_inches='tight',
transparent=True)
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(som.activation_matrix(data=df_train).T,
cmap='mako',
robust=True)
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_activation_matrix.png',
bbox_inches='tight',
transparent=True)
# %% [markdown]
# ## Visualizing distribution of features
# %%
for column in df.iloc[:, 3:-1].columns:
hmap = som.get_weights()[:, :, df.iloc[:, 3:-1].columns.get_loc(column)].T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(hmap, robust=True, cmap='BrBG')
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',
bbox_inches='tight',
transparent=True)
plt.close(fig=fig)
# %% [markdown]
# ## Visualizing distribution of audio samples by metadata (day, hour, ...)
# Each node is colorized according to its most frequent label
# %%
df['days'] = df.index.date
df['days'] = (df['days'] - df['days'][0])
df['days'] = df['days'].apply(lambda x: x.days)
df['hour'] = df.index.hour
# %%
# Visualizing 'rac' distribution
class_assignments = som.label_map(np.array(df_train), np.array(df['rac']))
hmap = np.zeros((som_x, som_y))
for i, j in sorted(class_assignments.keys()):
try:
hmap[i][j] = class_assignments[(i, j)].most_common()[0][0] + 1
except Exception:
continue
hmap = hmap.T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(hmap,
cmap=sns.color_palette(palette=["#000000", "blue", "orange"],
n_colors=3),
cbar_kws={'ticks': [0, 1, 2]})
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_rac.png',
bbox_inches='tight',
transparent=True)
plt.show()
# %%
# Visualizing by 'grupo'
print(df.groupby('grupo')['rac'].count())
column = 'grupo'
class_assignments = som.label_map(np.array(df_train), np.array(df[column]))
hmap = np.zeros((som_x, som_y))
for i, j in sorted(class_assignments.keys()):
try:
hmap[i][j] = class_assignments[(i, j)].most_common()[0][0]
except Exception:
hmap[i][j] = 0
hmap = hmap.T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(hmap,
cmap=sns.color_palette(palette=["#000000", "blue", "orange"],
n_colors=3),
cbar_kws={'ticks': [0, 1, 2]})
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',
bbox_inches='tight',
transparent=True)
plt.show()
# %%
# Visualizing by 'days'
print(df.groupby('days')['rac'].count())
column = 'days'
class_assignments = som.label_map(np.array(df_train), np.array(df[column]))
hmap = np.zeros((som_x, som_y))
for i, j in sorted(class_assignments.keys()):
try:
hmap[i][j] = class_assignments[(i, j)].most_common()[0][0]
except Exception:
hmap[i][j] = -1
hmap = hmap.T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(hmap, cmap='viridis')
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',
bbox_inches='tight',
transparent=True)
plt.show()
# %%
# Visualizing by 'hour'
print(df.groupby('hour')['rac'].count())
column = 'hour'
class_assignments = som.label_map(np.array(df_train), np.array(df[column]))
hmap = np.zeros((som_x, som_y))
for i, j in sorted(class_assignments.keys()):
try:
hmap[i][j] = class_assignments[(i, j)].most_common()[0][0]
except Exception:
hmap[i][j] = -1
hmap = hmap.T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(hmap,
cmap=sns.diverging_palette(150,
250,
s=100,
l=20,
sep=1,
n=26,
center='light'),
center=12)
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',
bbox_inches='tight',
transparent=True)
plt.show()
# %%
| [
"pickle.dump",
"pandas.read_csv",
"seaborn.color_palette",
"seaborn.diverging_palette",
"pickle.load",
"seaborn.heatmap",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.zeros",
"python_som.SOM",
"seaborn.barplot",
"seaborn.countplot",
"matplotlib.pyplot.draw",
"pandas.melt",
"matplotlib.pyplot.subplots",
"pandas.to_datetime",
"matplotlib.pyplot.show"
] | [((395, 455), 'pandas.read_csv', 'pd.read_csv', (['"""features_means.csv"""'], {'index_col': '(0)', 'verbose': '(True)'}), "('features_means.csv', index_col=0, verbose=True)\n", (406, 455), True, 'import pandas as pd\n'), ((467, 491), 'pandas.to_datetime', 'pd.to_datetime', (['df.index'], {}), '(df.index)\n', (481, 491), True, 'import pandas as pd\n'), ((1699, 1727), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1709, 1727), True, 'import matplotlib.pyplot as plt\n'), ((1822, 1887), 'seaborn.barplot', 'sns.barplot', ([], {'y': 'df_tmp.index', 'x': "df_tmp['count']", 'hue': "df_tmp['rac']"}), "(y=df_tmp.index, x=df_tmp['count'], hue=df_tmp['rac'])\n", (1833, 1887), True, 'import seaborn as sns\n'), ((1888, 1898), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (1896, 1898), True, 'import matplotlib.pyplot as plt\n'), ((2177, 2205), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (2187, 2205), True, 'import matplotlib.pyplot as plt\n'), ((2300, 2379), 'seaborn.barplot', 'sns.barplot', ([], {'y': "df_tmp['hour']", 'x': "df_tmp['count']", 'hue': "df_tmp['rac']", 'orient': '"""h"""'}), "(y=df_tmp['hour'], x=df_tmp['count'], hue=df_tmp['rac'], orient='h')\n", (2311, 2379), True, 'import seaborn as sns\n'), ((2380, 2390), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (2388, 2390), True, 'import matplotlib.pyplot as plt\n'), ((2407, 2464), 'pandas.melt', 'pd.melt', (['df'], {'value_vars': "['rac']", 'value_name': '"""ractopamine"""'}), "(df, value_vars=['rac'], value_name='ractopamine')\n", (2414, 2464), True, 'import pandas as pd\n'), ((2465, 2493), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (2475, 2493), True, 'import matplotlib.pyplot as plt\n'), ((2593, 2656), 'seaborn.countplot', 'sns.countplot', ([], {'data': 'df_melt', 'x': '"""ractopamine"""', 'hue': '"""ractopamine"""'}), "(data=df_melt, x='ractopamine', hue='ractopamine')\n", (2606, 2656), True, 'import seaborn as sns\n'), ((2865, 2875), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (2873, 2875), True, 'import matplotlib.pyplot as plt\n'), ((4037, 4072), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(16, 9)'}), '(1, 2, figsize=(16, 9))\n', (4049, 4072), True, 'import matplotlib.pyplot as plt\n'), ((4073, 4131), 'seaborn.heatmap', 'sns.heatmap', (['umatrix.T'], {'cmap': '"""bone_r"""', 'ax': 'ax1', 'robust': '(True)'}), "(umatrix.T, cmap='bone_r', ax=ax1, robust=True)\n", (4084, 4131), True, 'import seaborn as sns\n'), ((4431, 4441), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (4439, 4441), True, 'import matplotlib.pyplot as plt\n'), ((4517, 4544), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (4527, 4544), True, 'import matplotlib.pyplot as plt\n'), ((4550, 4600), 'seaborn.heatmap', 'sns.heatmap', (['umatrix.T'], {'cmap': '"""bone_r"""', 'robust': '(True)'}), "(umatrix.T, cmap='bone_r', robust=True)\n", (4561, 4600), True, 'import seaborn as sns\n'), ((4754, 4781), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (4764, 4781), True, 'import matplotlib.pyplot as plt\n'), ((5935, 5959), 'numpy.zeros', 'np.zeros', (['(som_x, som_y)'], {}), '((som_x, som_y))\n', (5943, 5959), True, 'import numpy as np\n'), ((6145, 6172), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (6155, 
6172), True, 'import matplotlib.pyplot as plt\n'), ((6518, 6528), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6526, 6528), True, 'import matplotlib.pyplot as plt\n'), ((6702, 6726), 'numpy.zeros', 'np.zeros', (['(som_x, som_y)'], {}), '((som_x, som_y))\n', (6710, 6726), True, 'import numpy as np\n'), ((6914, 6941), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (6924, 6941), True, 'import matplotlib.pyplot as plt\n'), ((7292, 7302), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7300, 7302), True, 'import matplotlib.pyplot as plt\n'), ((7473, 7497), 'numpy.zeros', 'np.zeros', (['(som_x, som_y)'], {}), '((som_x, som_y))\n', (7481, 7497), True, 'import numpy as np\n'), ((7686, 7713), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (7696, 7713), True, 'import matplotlib.pyplot as plt\n'), ((7719, 7752), 'seaborn.heatmap', 'sns.heatmap', (['hmap'], {'cmap': '"""viridis"""'}), "(hmap, cmap='viridis')\n", (7730, 7752), True, 'import seaborn as sns\n'), ((7900, 7910), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7908, 7910), True, 'import matplotlib.pyplot as plt\n'), ((8081, 8105), 'numpy.zeros', 'np.zeros', (['(som_x, som_y)'], {}), '((som_x, som_y))\n', (8089, 8105), True, 'import numpy as np\n'), ((8294, 8321), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (8304, 8321), True, 'import matplotlib.pyplot as plt\n'), ((8881, 8891), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8889, 8891), True, 'import matplotlib.pyplot as plt\n'), ((5249, 5276), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (5259, 5276), True, 'import matplotlib.pyplot as plt\n'), ((5286, 5329), 'seaborn.heatmap', 'sns.heatmap', (['hmap'], {'robust': '(True)', 'cmap': '"""BrBG"""'}), "(hmap, robust=True, cmap='BrBG')\n", (5297, 5329), True, 'import seaborn as sns\n'), ((5497, 5515), 'matplotlib.pyplot.close', 'plt.close', ([], {'fig': 'fig'}), '(fig=fig)\n', (5506, 5515), True, 'import matplotlib.pyplot as plt\n'), ((5887, 5905), 'numpy.array', 'np.array', (['df_train'], {}), '(df_train)\n', (5895, 5905), True, 'import numpy as np\n'), ((5907, 5926), 'numpy.array', 'np.array', (["df['rac']"], {}), "(df['rac'])\n", (5915, 5926), True, 'import numpy as np\n'), ((6653, 6671), 'numpy.array', 'np.array', (['df_train'], {}), '(df_train)\n', (6661, 6671), True, 'import numpy as np\n'), ((6673, 6693), 'numpy.array', 'np.array', (['df[column]'], {}), '(df[column])\n', (6681, 6693), True, 'import numpy as np\n'), ((7424, 7442), 'numpy.array', 'np.array', (['df_train'], {}), '(df_train)\n', (7432, 7442), True, 'import numpy as np\n'), ((7444, 7464), 'numpy.array', 'np.array', (['df[column]'], {}), '(df[column])\n', (7452, 7464), True, 'import numpy as np\n'), ((8032, 8050), 'numpy.array', 'np.array', (['df_train'], {}), '(df_train)\n', (8040, 8050), True, 'import numpy as np\n'), ((8052, 8072), 'numpy.array', 'np.array', (['df[column]'], {}), '(df[column])\n', (8060, 8072), True, 'import numpy as np\n'), ((1771, 1820), 'seaborn.color_palette', 'sns.color_palette', (['"""muted"""'], {'n_colors': '(6)', 'desat': '(1.0)'}), "('muted', n_colors=6, desat=1.0)\n", (1788, 1820), True, 'import seaborn as sns\n'), ((2249, 2298), 'seaborn.color_palette', 'sns.color_palette', (['"""muted"""'], {'n_colors': '(6)', 'desat': '(1.0)'}), "('muted', n_colors=6, desat=1.0)\n", (2266, 2298), True, 'import seaborn as 
sns\n'), ((2537, 2586), 'seaborn.color_palette', 'sns.color_palette', (['"""muted"""'], {'n_colors': '(6)', 'desat': '(1.0)'}), "('muted', n_colors=6, desat=1.0)\n", (2554, 2586), True, 'import seaborn as sns\n'), ((3305, 3319), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3316, 3319), False, 'import pickle\n'), ((3356, 3537), 'python_som.SOM', 'SOM', ([], {'x': 'som_x', 'y': 'None', 'input_len': 'df_train.shape[1]', 'learning_rate': '(0.5)', 'neighborhood_radius': '(1.0)', 'neighborhood_function': '"""gaussian"""', 'cyclic_x': '(True)', 'cyclic_y': '(True)', 'data': 'df_train'}), "(x=som_x, y=None, input_len=df_train.shape[1], learning_rate=0.5,\n neighborhood_radius=1.0, neighborhood_function='gaussian', cyclic_x=\n True, cyclic_y=True, data=df_train)\n", (3359, 3537), False, 'from python_som import SOM\n'), ((6218, 6286), 'seaborn.color_palette', 'sns.color_palette', ([], {'palette': "['#000000', 'blue', 'orange']", 'n_colors': '(3)'}), "(palette=['#000000', 'blue', 'orange'], n_colors=3)\n", (6235, 6286), True, 'import seaborn as sns\n'), ((6987, 7055), 'seaborn.color_palette', 'sns.color_palette', ([], {'palette': "['#000000', 'blue', 'orange']", 'n_colors': '(3)'}), "(palette=['#000000', 'blue', 'orange'], n_colors=3)\n", (7004, 7055), True, 'import seaborn as sns\n'), ((8367, 8440), 'seaborn.diverging_palette', 'sns.diverging_palette', (['(150)', '(250)'], {'s': '(100)', 'l': '(20)', 'sep': '(1)', 'n': '(26)', 'center': '"""light"""'}), "(150, 250, s=100, l=20, sep=1, n=26, center='light')\n", (8388, 8440), True, 'import seaborn as sns\n'), ((3836, 3855), 'pickle.dump', 'pickle.dump', (['som', 'f'], {}), '(som, f)\n', (3847, 3855), False, 'import pickle\n')] |
"""Script to embed pydeck examples into .rst pages with code
These populate the files you see once you click into a grid cell
on the pydeck gallery page
"""
from multiprocessing import Pool
import os
import subprocess
import sys
from const import DECKGL_URL_BASE, EXAMPLE_GLOB, GALLERY_DIR, HTML_DIR, HOSTED_STATIC_PATH
from utils import to_presentation_name, to_snake_case_string
from templates import DOC_TEMPLATE
if not os.environ.get("MAPBOX_API_KEY"):
# If running for rtfd.io, set this variable from the Admin panel
raise Exception("MAPBOX_API_KEY not set")
def create_rst(pydeck_example_file_name):
asset_name = to_snake_case_string(file_name=pydeck_example_file_name)
deckgl_docs_layer_name = asset_name.replace("_", "-")
deckgl_doc_url = None
if "layer" in deckgl_docs_layer_name:
# Don't add a deck.gl docs link if we're not referencing a layer
# Obviously very rough, should change this eventually to handle views etc
deckgl_doc_url = DECKGL_URL_BASE + deckgl_docs_layer_name
# Create new .html examples
html_fname = os.path.basename(pydeck_example_file_name).replace(".py", ".html")
# Run the pydeck example and move the .html output
subprocess.call(
"{python} {fname}; mv {html_src} {html_dest}".format(
python=sys.executable, fname=pydeck_example_file_name, html_src=html_fname, html_dest=HTML_DIR
),
shell=True,
)
python_code = open(pydeck_example_file_name, "r").read()
doc_source = DOC_TEMPLATE.render(
page_title=to_presentation_name(asset_name),
snake_name=asset_name,
python_code=python_code,
hosted_html_path=os.path.join(HOSTED_STATIC_PATH, html_fname),
deckgl_doc_url=deckgl_doc_url,
)
rst_path = os.path.join(GALLERY_DIR, asset_name + ".rst")
f = open(rst_path, "w+")
print("* Converted %s to %s" % (pydeck_example_file_name, rst_path))
f.write(doc_source)
f.close()
def main():
pool = Pool(processes=4)
candidate_files = [f for f in EXAMPLE_GLOB]
if not candidate_files:
raise Exception("No files found to convert")
subprocess.call("mkdir -p %s" % HTML_DIR, shell=True)
pool.map(create_rst, candidate_files)
if __name__ == "__main__":
main()
| [
"os.path.join",
"os.environ.get",
"utils.to_snake_case_string",
"subprocess.call",
"multiprocessing.Pool",
"os.path.basename",
"utils.to_presentation_name"
] | [((428, 460), 'os.environ.get', 'os.environ.get', (['"""MAPBOX_API_KEY"""'], {}), "('MAPBOX_API_KEY')\n", (442, 460), False, 'import os\n'), ((638, 694), 'utils.to_snake_case_string', 'to_snake_case_string', ([], {'file_name': 'pydeck_example_file_name'}), '(file_name=pydeck_example_file_name)\n', (658, 694), False, 'from utils import to_presentation_name, to_snake_case_string\n'), ((1787, 1833), 'os.path.join', 'os.path.join', (['GALLERY_DIR', "(asset_name + '.rst')"], {}), "(GALLERY_DIR, asset_name + '.rst')\n", (1799, 1833), False, 'import os\n'), ((1999, 2016), 'multiprocessing.Pool', 'Pool', ([], {'processes': '(4)'}), '(processes=4)\n', (2003, 2016), False, 'from multiprocessing import Pool\n'), ((2150, 2203), 'subprocess.call', 'subprocess.call', (["('mkdir -p %s' % HTML_DIR)"], {'shell': '(True)'}), "('mkdir -p %s' % HTML_DIR, shell=True)\n", (2165, 2203), False, 'import subprocess\n'), ((1091, 1133), 'os.path.basename', 'os.path.basename', (['pydeck_example_file_name'], {}), '(pydeck_example_file_name)\n', (1107, 1133), False, 'import os\n'), ((1558, 1590), 'utils.to_presentation_name', 'to_presentation_name', (['asset_name'], {}), '(asset_name)\n', (1578, 1590), False, 'from utils import to_presentation_name, to_snake_case_string\n'), ((1681, 1725), 'os.path.join', 'os.path.join', (['HOSTED_STATIC_PATH', 'html_fname'], {}), '(HOSTED_STATIC_PATH, html_fname)\n', (1693, 1725), False, 'import os\n')] |
from django.conf.urls import include, url
from django.views.generic.base import TemplateView
from . import views as core_views
from .category.urls import urlpatterns as category_urls
from .collection.urls import urlpatterns as collection_urls
from .customer.urls import urlpatterns as customer_urls
from .discount.urls import urlpatterns as discount_urls
from .menu.urls import urlpatterns as menu_urls
from .order.urls import urlpatterns as order_urls
from .page.urls import urlpatterns as page_urls
from .product.urls import urlpatterns as product_urls
from .search.urls import urlpatterns as search_urls
from .shipping.urls import urlpatterns as shipping_urls
from .sites.urls import urlpatterns as site_urls
from .staff.urls import urlpatterns as staff_urls
from .taxes.urls import urlpatterns as taxes_urls
# BEGIN :: SoftButterfly Extensions --------------------------------------------
from .brand.urls import urlpatterns as brand_urls
from .widget.slider.urls import urlpatterns as slider_urls
from .widget.banner.urls import urlpatterns as banner_urls
from .widget.scene.urls import urlpatterns as scene_urls
from .widget.benefit.urls import urlpatterns as benefit_urls
from .store.physical_store.urls import urlpatterns as store_urls
from .store.social_network.urls import urlpatterns as social_network_urls
from .store.special_page.urls import urlpatterns as special_page_urls
from .store.bank_account.urls import urlpatterns as bank_account_urls
from .store.footer_item.urls import urlpatterns as footer_item_urls
# END :: SoftButterfly Extensions ----------------------------------------------
urlpatterns = [
url(r'^$', core_views.index, name='index'),
url(r'^categories/', include(category_urls)),
url(r'^collections/', include(collection_urls)),
url(r'^orders/', include(order_urls)),
url(r'^page/', include(page_urls)),
url(r'^products/', include(product_urls)),
url(r'^customers/', include(customer_urls)),
url(r'^staff/', include(staff_urls)),
url(r'^discounts/', include(discount_urls)),
url(r'^settings/', include(
site_urls + social_network_urls
+ special_page_urls + bank_account_urls + footer_item_urls)), # Extensions
url(r'^menu/', include(menu_urls)),
url(r'^shipping/', include(shipping_urls)),
url(r'^style-guide/', core_views.styleguide, name='styleguide'),
url(r'^search/', include(search_urls)),
url(r'^taxes/', include(taxes_urls)),
url(r'^next/', TemplateView.as_view(template_name='dashboard/next.html')),
# BEGIN :: SoftButterfly Extensions ----------------------------------------
url(r'^brand/', include(brand_urls)),
url(r'^slider/', include(slider_urls)),
url(r'^banner/', include(banner_urls)),
url(r'^scene/', include(scene_urls)),
url(r'^store/', include(store_urls)),
url(r'^benefit/', include(benefit_urls)),
# END :: SoftButterfly Extensions ------------------------------------------
]
| [
"django.conf.urls.include",
"django.conf.urls.url",
"django.views.generic.base.TemplateView.as_view"
] | [((1630, 1671), 'django.conf.urls.url', 'url', (['"""^$"""', 'core_views.index'], {'name': '"""index"""'}), "('^$', core_views.index, name='index')\n", (1633, 1671), False, 'from django.conf.urls import include, url\n'), ((2295, 2357), 'django.conf.urls.url', 'url', (['"""^style-guide/"""', 'core_views.styleguide'], {'name': '"""styleguide"""'}), "('^style-guide/', core_views.styleguide, name='styleguide')\n", (2298, 2357), False, 'from django.conf.urls import include, url\n'), ((1699, 1721), 'django.conf.urls.include', 'include', (['category_urls'], {}), '(category_urls)\n', (1706, 1721), False, 'from django.conf.urls import include, url\n'), ((1750, 1774), 'django.conf.urls.include', 'include', (['collection_urls'], {}), '(collection_urls)\n', (1757, 1774), False, 'from django.conf.urls import include, url\n'), ((1798, 1817), 'django.conf.urls.include', 'include', (['order_urls'], {}), '(order_urls)\n', (1805, 1817), False, 'from django.conf.urls import include, url\n'), ((1839, 1857), 'django.conf.urls.include', 'include', (['page_urls'], {}), '(page_urls)\n', (1846, 1857), False, 'from django.conf.urls import include, url\n'), ((1883, 1904), 'django.conf.urls.include', 'include', (['product_urls'], {}), '(product_urls)\n', (1890, 1904), False, 'from django.conf.urls import include, url\n'), ((1931, 1953), 'django.conf.urls.include', 'include', (['customer_urls'], {}), '(customer_urls)\n', (1938, 1953), False, 'from django.conf.urls import include, url\n'), ((1976, 1995), 'django.conf.urls.include', 'include', (['staff_urls'], {}), '(staff_urls)\n', (1983, 1995), False, 'from django.conf.urls import include, url\n'), ((2022, 2044), 'django.conf.urls.include', 'include', (['discount_urls'], {}), '(discount_urls)\n', (2029, 2044), False, 'from django.conf.urls import include, url\n'), ((2070, 2173), 'django.conf.urls.include', 'include', (['(site_urls + social_network_urls + special_page_urls + bank_account_urls +\n footer_item_urls)'], {}), '(site_urls + social_network_urls + special_page_urls +\n bank_account_urls + footer_item_urls)\n', (2077, 2173), False, 'from django.conf.urls import include, url\n'), ((2222, 2240), 'django.conf.urls.include', 'include', (['menu_urls'], {}), '(menu_urls)\n', (2229, 2240), False, 'from django.conf.urls import include, url\n'), ((2266, 2288), 'django.conf.urls.include', 'include', (['shipping_urls'], {}), '(shipping_urls)\n', (2273, 2288), False, 'from django.conf.urls import include, url\n'), ((2381, 2401), 'django.conf.urls.include', 'include', (['search_urls'], {}), '(search_urls)\n', (2388, 2401), False, 'from django.conf.urls import include, url\n'), ((2424, 2443), 'django.conf.urls.include', 'include', (['taxes_urls'], {}), '(taxes_urls)\n', (2431, 2443), False, 'from django.conf.urls import include, url\n'), ((2465, 2522), 'django.views.generic.base.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""dashboard/next.html"""'}), "(template_name='dashboard/next.html')\n", (2485, 2522), False, 'from django.views.generic.base import TemplateView\n'), ((2626, 2645), 'django.conf.urls.include', 'include', (['brand_urls'], {}), '(brand_urls)\n', (2633, 2645), False, 'from django.conf.urls import include, url\n'), ((2669, 2689), 'django.conf.urls.include', 'include', (['slider_urls'], {}), '(slider_urls)\n', (2676, 2689), False, 'from django.conf.urls import include, url\n'), ((2713, 2733), 'django.conf.urls.include', 'include', (['banner_urls'], {}), '(banner_urls)\n', (2720, 2733), False, 'from django.conf.urls import 
include, url\n'), ((2756, 2775), 'django.conf.urls.include', 'include', (['scene_urls'], {}), '(scene_urls)\n', (2763, 2775), False, 'from django.conf.urls import include, url\n'), ((2798, 2817), 'django.conf.urls.include', 'include', (['store_urls'], {}), '(store_urls)\n', (2805, 2817), False, 'from django.conf.urls import include, url\n'), ((2842, 2863), 'django.conf.urls.include', 'include', (['benefit_urls'], {}), '(benefit_urls)\n', (2849, 2863), False, 'from django.conf.urls import include, url\n')] |
import os
import sys
import torch
import yaml
from functools import partial
sys.path.append('../../../../')
from trainers import trainer, frn_train
from datasets import dataloaders
from models.FRN import FRN
args = trainer.train_parser()
with open('../../../../config.yml', 'r') as f:
temp = yaml.safe_load(f)
data_path = os.path.abspath(temp['data_path'])
fewshot_path = os.path.join(data_path,'CUB_fewshot_raw')
pm = trainer.Path_Manager(fewshot_path=fewshot_path,args=args)
train_way = args.train_way
shots = [args.train_shot, args.train_query_shot]
train_loader = dataloaders.meta_train_dataloader(data_path=pm.train,
way=train_way,
shots=shots,
transform_type=args.train_transform_type)
model = FRN(way=train_way,
shots=[args.train_shot, args.train_query_shot],
resnet=args.resnet)
train_func = partial(frn_train.default_train,train_loader=train_loader)
tm = trainer.Train_Manager(args,path_manager=pm,train_func=train_func)
tm.train(model)
tm.evaluate(model) | [
"models.FRN.FRN",
"trainers.trainer.Path_Manager",
"trainers.trainer.Train_Manager",
"os.path.join",
"yaml.safe_load",
"functools.partial",
"os.path.abspath",
"sys.path.append",
"datasets.dataloaders.meta_train_dataloader",
"trainers.trainer.train_parser"
] | [((76, 107), 'sys.path.append', 'sys.path.append', (['"""../../../../"""'], {}), "('../../../../')\n", (91, 107), False, 'import sys\n'), ((217, 239), 'trainers.trainer.train_parser', 'trainer.train_parser', ([], {}), '()\n', (237, 239), False, 'from trainers import trainer, frn_train\n'), ((328, 362), 'os.path.abspath', 'os.path.abspath', (["temp['data_path']"], {}), "(temp['data_path'])\n", (343, 362), False, 'import os\n'), ((378, 420), 'os.path.join', 'os.path.join', (['data_path', '"""CUB_fewshot_raw"""'], {}), "(data_path, 'CUB_fewshot_raw')\n", (390, 420), False, 'import os\n'), ((426, 484), 'trainers.trainer.Path_Manager', 'trainer.Path_Manager', ([], {'fewshot_path': 'fewshot_path', 'args': 'args'}), '(fewshot_path=fewshot_path, args=args)\n', (446, 484), False, 'from trainers import trainer, frn_train\n'), ((577, 705), 'datasets.dataloaders.meta_train_dataloader', 'dataloaders.meta_train_dataloader', ([], {'data_path': 'pm.train', 'way': 'train_way', 'shots': 'shots', 'transform_type': 'args.train_transform_type'}), '(data_path=pm.train, way=train_way, shots=\n shots, transform_type=args.train_transform_type)\n', (610, 705), False, 'from datasets import dataloaders\n'), ((854, 945), 'models.FRN.FRN', 'FRN', ([], {'way': 'train_way', 'shots': '[args.train_shot, args.train_query_shot]', 'resnet': 'args.resnet'}), '(way=train_way, shots=[args.train_shot, args.train_query_shot], resnet=\n args.resnet)\n', (857, 945), False, 'from models.FRN import FRN\n'), ((979, 1038), 'functools.partial', 'partial', (['frn_train.default_train'], {'train_loader': 'train_loader'}), '(frn_train.default_train, train_loader=train_loader)\n', (986, 1038), False, 'from functools import partial\n'), ((1044, 1111), 'trainers.trainer.Train_Manager', 'trainer.Train_Manager', (['args'], {'path_manager': 'pm', 'train_func': 'train_func'}), '(args, path_manager=pm, train_func=train_func)\n', (1065, 1111), False, 'from trainers import trainer, frn_train\n'), ((298, 315), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (312, 315), False, 'import yaml\n')] |
from gym_pcgrl.envs.reps.representation3D import Representation3D
from PIL import Image
from gym import spaces
import numpy as np
from gym_pcgrl.envs.probs.minecraft.mc_render import reps_3D_render
"""
The wide representation where the agent can pick the tile position and tile value at each update.
"""
class Wide3DRepresentation(Representation3D):
"""
Initialize all the parameters used by that representation
"""
def __init__(self):
super().__init__()
"""
Gets the action space used by the wide representation
Parameters:
length: the current map length
width: the current map width
height: the current map height
num_tiles: the total number of the tile values
Returns:
MultiDiscrete: the action space used by that wide representation which
consists of the x position, y position, z position and the tile value
"""
def get_action_space(self, length, width, height, num_tiles):
return spaces.MultiDiscrete([length, width, height, num_tiles])
"""
Get the observation space used by the wide representation
Parameters:
length: the current map length
width: the current map width
height: the current map height
num_tiles: the total number of the tile values
Returns:
Box: the observation space used by that representation. A 3D array of tile numbers
"""
def get_observation_space(self, length, width, height, num_tiles):
return spaces.Dict({
"map": spaces.Box(low=0, high=num_tiles-1, dtype=np.uint8, shape=(height, width, length))
})
"""
    Get the current representation observation object
    Returns:
        observation: the current observation, a 3D array of tile numbers
"""
def get_observation(self):
return {
"map": self._map.copy()
}
"""
Update the wide representation with the input action
Parameters:
action: an action that is used to advance the environment (same as action space)
Returns:
        boolean: True if the action changes the map, False if nothing changed
"""
def update(self, action):
change = [0,1][self._map[action[2]][action[1]][action[0]] != action[3]]
self._map[action[2]][action[1]][action[0]] = action[3]
return change, action[0], action[1], action[2]
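# --- Illustrative note (appended; not part of the original module) ---
# Minimal sketch of the action layout this representation expects, assuming a
# hypothetical 4x4x4 map with 3 tile types. The action space is
# MultiDiscrete([length, width, height, num_tiles]), so a sampled action is
# (x, y, z, tile_value), and update() writes tile_value at self._map[z][y][x].
def _example_wide3d_action():
    example_space = spaces.MultiDiscrete([4, 4, 4, 3])
    x, y, z, tile_value = example_space.sample()
    return x, y, z, tile_value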
| [
"gym.spaces.MultiDiscrete",
"gym.spaces.Box"
] | [((994, 1050), 'gym.spaces.MultiDiscrete', 'spaces.MultiDiscrete', (['[length, width, height, num_tiles]'], {}), '([length, width, height, num_tiles])\n', (1014, 1050), False, 'from gym import spaces\n'), ((1541, 1629), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(num_tiles - 1)', 'dtype': 'np.uint8', 'shape': '(height, width, length)'}), '(low=0, high=num_tiles - 1, dtype=np.uint8, shape=(height, width,\n length))\n', (1551, 1629), False, 'from gym import spaces\n')] |
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
Description :
Author : cmy
date: 2020/1/2
-------------------------------------------------
"""
import datetime
import heapq
import numpy as np
import tensorflow as tf
import time
from metrics import ndcg_at_k
from train import get_user_record
from DMF import DMF
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5  # allocate at most 50% of GPU memory
config.gpu_options.allow_growth = True  # grow GPU memory allocation dynamically as needed
def train(args, data, show_loss, show_topk, log_dir):
n_user, n_item = data[0], data[1]
train_data, eval_data, test_data = data[2], data[3], data[4]
model = DMF(args, n_user, n_item)
user_num = 100
k_list = [1, 2, 5, 10, 20, 50, 100]
train_record = get_user_record(train_data, True)
test_record = get_user_record(test_data, False)
user_list = list(set(train_record.keys()) & set(test_record.keys()))
if len(user_list) > user_num:
user_list = np.random.choice(user_list, size=user_num, replace=False)
item_set = set(list(range(n_item)))
with tf.Session(config=config) as sess,\
open(log_dir + 'result_' + str(args.epochs) + '_' + str(args.lr) + '_' + str(int(time.time())) + '.txt', 'w') as f_result:
sess.run(tf.global_variables_initializer())
for step in range(args.epochs):
f_result.write('**************************epoch_i:' + str(step) + '********************' + '\n')
# RS training
np.random.shuffle(train_data)
start = 0
batch_i = 0
while start < train_data.shape[0]:
_, loss = model.train_dmf(sess, get_feed_dict_for_dmf(model, train_data, start, start + args.batch_size, 0.5))
start += args.batch_size
if show_loss:
if (step * (len(train_data) // args.batch_size) + batch_i) % 20 == 0:
time_str = datetime.datetime.now().isoformat()
print('{}: Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
time_str,
step,
batch_i,
(len(train_data) // args.batch_size),
loss))
# print(loss)
batch_i += 1
# CTR evaluation
# train_auc, train_acc = model.eval(sess, get_feed_dict_for_dmf(model, train_data, 0, train_data.shape[0]))
eval_auc, eval_acc = model.eval(sess, get_feed_dict_for_dmf(model, eval_data, 0, eval_data.shape[0]))
test_auc, test_acc = model.eval(sess, get_feed_dict_for_dmf(model, test_data, 0, test_data.shape[0]))
# eval_str = 'epoch %d train auc: %.4f acc: %.4f eval auc: %.4f acc: %.4f test auc: %.4f acc: %.4f' \
# % (step, train_auc, train_acc, eval_auc, eval_acc, test_auc, test_acc)
eval_str = 'epoch %d eval auc: %.4f acc: %.4f test auc: %.4f acc: %.4f' \
% (step, eval_auc, eval_acc, test_auc, test_acc)
print(eval_str)
f_result.write(eval_str + '\n')
# top-K evaluation
if show_topk:
topk_str = ''
precision, recall, f1, hr, ndcg = topk_eval(
sess, model, user_list, train_record, test_record, item_set, k_list)
print('precision: ', end='')
topk_str += 'precision: '
for i in precision:
print('%.4f\t' % i, end='')
topk_str += '%.4f\t' % i
print()
print('recall: ', end='')
topk_str += '\n' + 'recall: '
for i in recall:
print('%.4f\t' % i, end='')
topk_str += '%.4f\t' % i
print()
print('f1: ', end='')
topk_str += '\n' + 'f1: '
for i in f1:
print('%.4f\t' % i, end='')
topk_str += '%.4f\t' % i
print()
print('hr: ', end='')
topk_str += '\n' + 'hr: '
for i in hr:
print('%.4f\t' % i, end='')
topk_str += '%.4f\t' % i
print()
print('ndcg: ', end='')
topk_str += '\n' + 'ndcg: '
for i in ndcg:
print('%.4f\t' % i, end='')
topk_str += '%.4f\t' % i
print()
f_result.write(topk_str + '\n')
def get_feed_dict_for_dmf(model, data, start, end, keep_drop=0.0):
feed_dict = {model.user_indices: data[start:end, 0],
model.item_indices: data[start:end, 1],
model.labels: data[start:end, 2],
model.keep_drop: keep_drop}
return feed_dict
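# Worked example (added for clarity; not part of the original script): in
# topk_eval below, a user with 2 hits among the top k = 5 items and 4 relevant
# test items contributes precision@5 = 2/5 = 0.4 and recall@5 = 2/4 = 0.5.
# After averaging over users, f1 = 2 / (1/precision + 1/recall), e.g.
# 2 / (1/0.4 + 1/0.5) = 4/9 ~= 0.444, and hr = total hits / total test items.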
def topk_eval(sess, model, user_list, train_record, test_record, item_set, k_list):
precision_list = {k: [] for k in k_list}
recall_list = {k: [] for k in k_list}
hr_list = {k: [] for k in k_list}
ndcg_list = {k: [] for k in k_list}
total_test = 0
for user in user_list:
test_item_list = list(item_set - train_record[user])
item_score_map = dict()
items, scores = model.get_scores(sess, {model.user_indices: [user] * len(test_item_list),
model.item_indices: test_item_list, model.keep_drop: 0.0})
for item, score in zip(items, scores):
item_score_map[item] = score
item_score_pair_sorted = sorted(item_score_map.items(), key=lambda x: x[1], reverse=True)
item_sorted = [i[0] for i in item_score_pair_sorted]
K_max_item_score = heapq.nlargest(k_list[-1], item_score_map, key=item_score_map.get)
r = []
for i in K_max_item_score:
if i in test_record[user]:
r.append(1)
else:
r.append(0)
for k in k_list:
hit_num = len(set(item_sorted[:k]) & test_record[user])
precision_list[k].append(hit_num / k)
recall_list[k].append(hit_num / len(test_record[user]))
hr_list[k].append(hit_num)
ndcg_list[k].append(ndcg_at_k(r, k))
total_test += len(test_record[user])
precision = [np.mean(precision_list[k]) for k in k_list]
recall = [np.mean(recall_list[k]) for k in k_list]
f1 = [2 / (1 / precision[i] + 1 / recall[i]) for i in range(len(k_list))]
hr = [np.sum(hr_list[k]) / total_test for k in k_list]
ndcg = [np.mean(ndcg_list[k]) for k in k_list]
return precision, recall, f1, hr, ndcg | [
"numpy.mean",
"train.get_user_record",
"numpy.random.choice",
"metrics.ndcg_at_k",
"tensorflow.Session",
"tensorflow.global_variables_initializer",
"heapq.nlargest",
"numpy.sum",
"datetime.datetime.now",
"DMF.DMF",
"tensorflow.ConfigProto",
"time.time",
"numpy.random.shuffle"
] | [((429, 445), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (443, 445), True, 'import tensorflow as tf\n'), ((766, 791), 'DMF.DMF', 'DMF', (['args', 'n_user', 'n_item'], {}), '(args, n_user, n_item)\n', (769, 791), False, 'from DMF import DMF\n'), ((870, 903), 'train.get_user_record', 'get_user_record', (['train_data', '(True)'], {}), '(train_data, True)\n', (885, 903), False, 'from train import get_user_record\n'), ((922, 955), 'train.get_user_record', 'get_user_record', (['test_data', '(False)'], {}), '(test_data, False)\n', (937, 955), False, 'from train import get_user_record\n'), ((1083, 1140), 'numpy.random.choice', 'np.random.choice', (['user_list'], {'size': 'user_num', 'replace': '(False)'}), '(user_list, size=user_num, replace=False)\n', (1099, 1140), True, 'import numpy as np\n'), ((1191, 1216), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (1201, 1216), True, 'import tensorflow as tf\n'), ((5904, 5970), 'heapq.nlargest', 'heapq.nlargest', (['k_list[-1]', 'item_score_map'], {'key': 'item_score_map.get'}), '(k_list[-1], item_score_map, key=item_score_map.get)\n', (5918, 5970), False, 'import heapq\n'), ((6499, 6525), 'numpy.mean', 'np.mean', (['precision_list[k]'], {}), '(precision_list[k])\n', (6506, 6525), True, 'import numpy as np\n'), ((6557, 6580), 'numpy.mean', 'np.mean', (['recall_list[k]'], {}), '(recall_list[k])\n', (6564, 6580), True, 'import numpy as np\n'), ((6747, 6768), 'numpy.mean', 'np.mean', (['ndcg_list[k]'], {}), '(ndcg_list[k])\n', (6754, 6768), True, 'import numpy as np\n'), ((1375, 1408), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1406, 1408), True, 'import tensorflow as tf\n'), ((1597, 1626), 'numpy.random.shuffle', 'np.random.shuffle', (['train_data'], {}), '(train_data)\n', (1614, 1626), True, 'import numpy as np\n'), ((6686, 6704), 'numpy.sum', 'np.sum', (['hr_list[k]'], {}), '(hr_list[k])\n', (6692, 6704), True, 'import numpy as np\n'), ((6418, 6433), 'metrics.ndcg_at_k', 'ndcg_at_k', (['r', 'k'], {}), '(r, k)\n', (6427, 6433), False, 'from metrics import ndcg_at_k\n'), ((1316, 1327), 'time.time', 'time.time', ([], {}), '()\n', (1325, 1327), False, 'import time\n'), ((2043, 2066), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2064, 2066), False, 'import datetime\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import uuid
import random
import math
import time
import typing as t
from . import experiment as hip
# Demos from the README. If one of these is modified, please update the README as well
def demo_change_column_properties() -> hip.Experiment:
data = [{"param": 1, "loss": 10, "hidden_field": "value1", "c": "red"}, {"param": 2, "loss": 5, "hidden_field": "value2", "c": "black"}]
exp = hip.Experiment.from_iterable(data)
exp.parameters_definition["c"].colors = {"red": "rgb(255, 0, 0)", "black": "rgb(0, 0, 0)"}
exp.parameters_definition["loss"].type = hip.ValueType.NUMERIC_LOG
exp.display_data(hip.Displays.PARALLEL_PLOT).update({
'hide': ['hidden_field'], # This column won't appear in the parallel plot
        'order': ['c'] # Column `c` will be displayed first in the parallel plot
})
return exp
def demo_basic_usage() -> hip.Experiment:
data = [{'dropout': 0.1, 'lr': 0.001, 'loss': 10.0, 'optimizer': 'SGD'},
{'dropout': 0.15, 'lr': 0.01, 'loss': 3.5, 'optimizer': 'Adam'},
{'dropout': 0.3, 'lr': 0.1, 'loss': 4.5, 'optimizer': 'Adam'}]
return hip.Experiment.from_iterable(data)
def demo_line_xy() -> hip.Experiment:
# DEMO_LINE_XY_BEGIN
exp = hip.Experiment()
exp.display_data(hip.Displays.XY).update({
'axis_x': 'generation',
'axis_y': 'loss',
})
for i in range(200):
dp = hip.Datapoint(
uid=str(i),
values={
'generation': i,
'param': 10 ** random.uniform(-1, 1),
'loss': random.uniform(-5, 5),
})
if i > 10:
from_parent = random.choice(exp.datapoints[-10:])
dp.from_uid = from_parent.uid # <-- Connect the parent to the child
dp.values['loss'] += from_parent.values['loss'] # type: ignore
dp.values['param'] *= from_parent.values['param'] # type: ignore
exp.datapoints.append(dp)
# DEMO_LINE_XY_END
return exp
def demo_bug_uid() -> hip.Experiment:
return hip.Experiment.from_iterable([{'a': 1, 'b': 2, 'uid': 50.0}, {'a': 2, 'b': 3, 'uid': 49.33}])
def demo(n: int = 100) -> hip.Experiment:
xp = hip.Experiment()
xp.display_data(hip.Displays.XY).update({
'axis_x': 'time',
'axis_y': 'exp_metric',
})
# Some fake PBT-ish data
def fake_params() -> t.Dict[str, hip.DisplayableType]:
r = random.random()
p: t.Dict[str, hip.DisplayableType] = {
"lr": 10 ** random.uniform(-5, 0),
"seed": random.uniform(0, 10),
"name": uuid.uuid4().hex[:6],
"optimizer": random.choice(["sgd", "adam", "adamw"]),
"r": r,
"c": random.choice(["red", "green", "black"]),
}
if r < 0.1:
del p['optimizer']
if r > 0.3:
p["optionA"] = random.uniform(1, 5)
else:
p["optionB"] = random.uniform(1, 5)
if r < 0.2:
p["pctile"] = -1.0
elif r < 0.5:
p["pctile"] = random.uniform(-1.0, 10.0)
elif r < 0.8:
p["pctile"] = 10 ** random.uniform(1, 2)
else:
p["pctile"] = random.uniform(100, 101)
if random.random() > 0.3:
p["special_values"] = random.uniform(1, 5)
else:
p["special_values"] = random.choice([math.inf, -math.inf, math.nan])
return p
def fake_metrics(tm: float) -> t.Dict[str, hip.DisplayableType]:
return {
"exp_metric": 10 ** random.uniform(-5, 0),
"pct_success": random.uniform(10, 90),
"chkpt": uuid.uuid4().hex[:6],
"time": tm + random.uniform(-0.2, 0.2),
"force_numericlog": random.uniform(1, 100),
'timestamp': int(time.time() + (task_idx * 2000)),
}
current_pop: t.List[t.Dict[str, t.Any]] = [dict(uid=f"init{i}", params=fake_params(), last_ckpt_uid=None) for i in range(10)]
continue_num = 0
for task_idx in range(n):
        # Every population member drops a checkpoint
for p in current_pop:
ckpt_uid = f"{p['uid']}_{uuid.uuid4().hex[:6]}"
xp.datapoints.append(hip.Datapoint(uid=ckpt_uid, from_uid=p['last_ckpt_uid'], values={**p['params'], **fake_metrics(task_idx)}))
p['last_ckpt_uid'] = ckpt_uid
# Randomly drop some
current_pop = [p for p in current_pop if random.random() > 0.3]
# Respawn as needed
for _ in range(10 - len(current_pop)):
continue_num += 1
parent = random.choice(xp.datapoints[-10:])
current_pop.append(dict(uid=f"continue{continue_num}", params=fake_params(), last_ckpt_uid=parent.uid))
xp.parameters_definition["c"].colors = {"red": "rgb(255, 0, 0)", "green": "rgb(0, 255, 0)", "black": "rgb(0, 0, 0)"}
xp.parameters_definition["force_numericlog"].type = hip.ValueType.NUMERIC_LOG
xp.parameters_definition["pctile"].type = hip.ValueType.NUMERIC_PERCENTILE
xp.parameters_definition["timestamp"].type = hip.ValueType.TIMESTAMP
return xp
def demo_customize() -> hip.Experiment:
exp = demo()
# EXPERIMENT_SETTINGS_SNIPPET2_BEGIN
# Provide configuration for the parallel plot
exp.display_data(hip.Displays.PARALLEL_PLOT).update({
# Hide some columns in the parallel plot
'hide': ['optionB'],
# Specify the order for others
'order': ['time'], # Put column time first on the left
})
# Provide configuration for the table with all the rows
exp.display_data(hip.Displays.TABLE).update({
# Don't display `uid` and `from_uid` columns to the user
'hide': ['uid', 'from_uid'],
# In the table, order rows by default
'order_by': [['pct_success', 'desc']],
# Specify the order for columns
'order': ['time'], # Put column time first on the left
})
# Provide configuration for the XY graph
exp.display_data(hip.Displays.XY).update({
# Default X axis for the XY plot
'axis_x': 'time',
# Default Y axis
'axis_y': 'lr',
# Configure lines
'lines_thickness': 1.0,
'lines_opacity': 0.1,
# Configure dots
'dots_thickness': 2.0,
'dots_opacity': 0.3,
})
# EXPERIMENT_SETTINGS_SNIPPET2_END
return exp
def demo_force_scale() -> hip.Experiment:
xp = hip.Experiment()
for _ in range(100):
values = [abs(random.gauss(0.0, 1.0)) for _ in range(4)]
xp.datapoints.append(hip.Datapoint({
f"value{i}": v / sum(values)
for i, v in enumerate(values)
}))
for i in range(4):
xp.parameters_definition[f"value{i}"].force_range(0.0, 1.0)
return xp
def demo_distribution(**kwargs: t.Any) -> hip.Experiment:
xp = hip.Experiment.from_iterable([{
'cat': random.choice(["a", "b", "c", "d", "e", "f", "g", "h"]),
'numeric': random.uniform(0.0, 1.0),
} for i in range(1000)])
xp.display_data(hip.Displays.DISTRIBUTION).update(kwargs)
return xp
def demo_bool() -> hip.Experiment:
return hip.Experiment.from_iterable([
{"bool": True},
{"bool": False}
])
def demo_color_interpolate() -> hip.Experiment:
exp = demo()
exp.parameters_definition["exp_metric"].colormap = "interpolateSinebow"
return exp
def demo_color_scheme_ylrd() -> hip.Experiment:
exp = demo()
exp.parameters_definition["exp_metric"].colormap = "schemeYlOrRd"
return exp
def demo_color_scheme_accent() -> hip.Experiment:
exp = demo()
exp.parameters_definition["exp_metric"].colormap = "schemeAccent"
return exp
def demo_color_interpolate_inverse() -> hip.Experiment:
exp = demo_color_interpolate()
assert exp.parameters_definition["exp_metric"].colormap is not None
exp.parameters_definition["exp_metric"].colormap += "#inverse"
return exp
def demo_axis_style() -> hip.Experiment:
data: t.List[t.Dict[str, t.Any]] = []
for _ in range(100):
data.append({
**{
f'param{i}': random.uniform(0, 1)
for i in range(6)
},
'loss': random.uniform(0, 100),
'metric': 10 ** random.uniform(0, 10)
})
xp = hip.Experiment.from_iterable(data)
for i in range(6):
xp.parameters_definition[f"param{i}"].label_css = "badge badge-pill badge-secondary"
xp.parameters_definition["loss"].label_css = "badge badge-pill badge-primary"
xp.parameters_definition["metric"].label_css = "badge badge-pill badge-info"
return xp
def demo_categorical() -> hip.Experiment:
data: t.List[t.Dict[str, t.Any]] = []
for _ in range(100):
data.append({
'cat_num_05': random.randint(0, 5),
'cat_num_15': random.randint(0, 10),
'cat_num_25': random.randint(0, 25),
'cat_str_05': f's{random.randint(0, 5)}',
'cat_str_15': f's{random.randint(0, 15)}',
'cat_str_25': f's{random.randint(0, 25)}',
})
xp = hip.Experiment.from_iterable(data)
for param in ["cat_num_05", "cat_num_15", "cat_num_25"]:
xp.parameters_definition[param].type = hip.ValueType.CATEGORICAL
xp.colorby = 'cat_num_25'
return xp
def demo_long_names() -> hip.Experiment:
return hip.Experiment.from_iterable([
{
'some very very long name for a field': random.randint(0, 5),
'this one is also very long': random.randint(0, 10),
'another.long.one.but.with.dots': random.randint(0, 25),
}
for _ in range(100)
])
def demo_force_constant_pplot() -> hip.Experiment:
exp = hip.Experiment.from_iterable([
{'uid': 123, 'a': 1, 'b': 3},
{'uid': 345, 'a': 2, 'b': 3}
])
exp.parameters_definition["b"].force_range(0, 100)
return exp
def demo_first_value_nan() -> hip.Experiment:
return hip.Experiment.from_iterable([
{},
{'a': None},
{'a': 2},
{'a': 2.1},
{'a': 2.2},
{'a': 5.5},
{'a': math.nan},
])
def demo_weighted_rows() -> hip.Experiment:
experiment = hip.Experiment.from_iterable([
{'w': 1.0, 'a': 1, 'b': 1},
{'w': 2.0, 'a': 2, 'b': 1},
{'w': -2.0, 'a': 2, 'b': 1},
{'w': math.inf, 'a': 2, 'b': 2},
{'w': 'not_a_number', 'a': 2, 'b': 3},
{'w': None, 'a': 3, 'b': 3},
{'a': 4, 'b': 3},
])
experiment.weightcolumn = "w"
return experiment
def demo_3xcols() -> hip.Experiment:
xp = demo()
for i in range(2):
new_xp = demo()
for dp, new_dp in zip(xp.datapoints, new_xp.datapoints):
dp.values.update({
f"{k}{i}": v
for k, v in new_dp.values.items()
})
return xp
def demo_col_html() -> hip.Experiment:
COL1 = "<h1>col1</h1>"
COL2 = "col_2"
experiment = hip.Experiment.from_iterable([
{COL1: 1.0, COL2: 1},
{COL1: 2.0, COL2: 2},
{COL1: 3.0, COL2: 3},
])
experiment.parameters_definition[COL2].label_html = "col<sub>2</sub>"
return experiment
def demo_disable_table() -> hip.Experiment:
experiment = demo()
experiment.enabledDisplays.remove(hip.Displays.TABLE)
return experiment
def demo_big_floats() -> hip.Experiment:
return hip.Experiment.from_iterable(
{
'bigfloat': math.nan if i < 10 else 10 ** random.uniform(15, 32),
}
for i in range(100)
)
README_DEMOS: t.Dict[str, t.Callable[[], hip.Experiment]] = {
"demo": demo,
"demo_3xcols": demo_3xcols,
"demo_big": lambda: demo(1000),
"demo_change_column_properties": demo_change_column_properties,
"demo_basic_usage": demo_basic_usage,
"demo_line_xy": demo_line_xy,
"demo_bug_uid": demo_bug_uid,
"demo_force_scale": demo_force_scale,
"demo_distribution_cat": lambda: demo_distribution(axis="cat"),
"demo_distribution_num": lambda: demo_distribution(axis="numeric"),
"demo_distribution_num_100bins": lambda: demo_distribution(axis="numeric", nbins=100),
"demo_bool": demo_bool,
"demo_color_interpolate": demo_color_interpolate,
"demo_color_scheme_ylrd": demo_color_scheme_ylrd,
"demo_color_scheme_accent": demo_color_scheme_accent,
"demo_axis_style": demo_axis_style,
"demo_categorical": demo_categorical,
"demo_customize": demo_customize,
"demo_long_names": demo_long_names,
"demo_force_constant_pplot": demo_force_constant_pplot,
"demo_color_interpolate_inverse": demo_color_interpolate_inverse,
"demo_first_value_nan": demo_first_value_nan,
"demo_weighted_rows": demo_weighted_rows,
"demo_col_html": demo_col_html,
"demo_disable_table": demo_disable_table,
"demo_big_floats": demo_big_floats,
}
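# Hedged usage sketch (not part of the original demo registry): a minimal
# smoke test that builds every registered experiment and reports how many
# datapoints each one contains. It assumes only names defined in this file
# (README_DEMOS and the hip.Experiment.datapoints list used above).
if __name__ == "__main__":
    for demo_name, demo_fn in README_DEMOS.items():
        experiment = demo_fn()
        print(f"{demo_name}: {len(experiment.datapoints)} datapoints")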
| [
"random.uniform",
"random.choice",
"uuid.uuid4",
"random.random",
"time.time",
"random.randint",
"random.gauss"
] | [((2618, 2633), 'random.random', 'random.random', ([], {}), '()\n', (2631, 2633), False, 'import random\n'), ((1848, 1883), 'random.choice', 'random.choice', (['exp.datapoints[-10:]'], {}), '(exp.datapoints[-10:])\n', (1861, 1883), False, 'import random\n'), ((2749, 2770), 'random.uniform', 'random.uniform', (['(0)', '(10)'], {}), '(0, 10)\n', (2763, 2770), False, 'import random\n'), ((2839, 2878), 'random.choice', 'random.choice', (["['sgd', 'adam', 'adamw']"], {}), "(['sgd', 'adam', 'adamw'])\n", (2852, 2878), False, 'import random\n'), ((2917, 2957), 'random.choice', 'random.choice', (["['red', 'green', 'black']"], {}), "(['red', 'green', 'black'])\n", (2930, 2957), False, 'import random\n'), ((3067, 3087), 'random.uniform', 'random.uniform', (['(1)', '(5)'], {}), '(1, 5)\n', (3081, 3087), False, 'import random\n'), ((3129, 3149), 'random.uniform', 'random.uniform', (['(1)', '(5)'], {}), '(1, 5)\n', (3143, 3149), False, 'import random\n'), ((3429, 3444), 'random.random', 'random.random', ([], {}), '()\n', (3442, 3444), False, 'import random\n'), ((3486, 3506), 'random.uniform', 'random.uniform', (['(1)', '(5)'], {}), '(1, 5)\n', (3500, 3506), False, 'import random\n'), ((3555, 3601), 'random.choice', 'random.choice', (['[math.inf, -math.inf, math.nan]'], {}), '([math.inf, -math.inf, math.nan])\n', (3568, 3601), False, 'import random\n'), ((3788, 3810), 'random.uniform', 'random.uniform', (['(10)', '(90)'], {}), '(10, 90)\n', (3802, 3810), False, 'import random\n'), ((3939, 3961), 'random.uniform', 'random.uniform', (['(1)', '(100)'], {}), '(1, 100)\n', (3953, 3961), False, 'import random\n'), ((4751, 4785), 'random.choice', 'random.choice', (['xp.datapoints[-10:]'], {}), '(xp.datapoints[-10:])\n', (4764, 4785), False, 'import random\n'), ((2706, 2727), 'random.uniform', 'random.uniform', (['(-5)', '(0)'], {}), '(-5, 0)\n', (2720, 2727), False, 'import random\n'), ((3250, 3276), 'random.uniform', 'random.uniform', (['(-1.0)', '(10.0)'], {}), '(-1.0, 10.0)\n', (3264, 3276), False, 'import random\n'), ((3738, 3759), 'random.uniform', 'random.uniform', (['(-5)', '(0)'], {}), '(-5, 0)\n', (3752, 3759), False, 'import random\n'), ((3880, 3905), 'random.uniform', 'random.uniform', (['(-0.2)', '(0.2)'], {}), '(-0.2, 0.2)\n', (3894, 3905), False, 'import random\n'), ((6644, 6666), 'random.gauss', 'random.gauss', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (6656, 6666), False, 'import random\n'), ((7048, 7103), 'random.choice', 'random.choice', (["['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']"], {}), "(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'])\n", (7061, 7103), False, 'import random\n'), ((7124, 7148), 'random.uniform', 'random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (7138, 7148), False, 'import random\n'), ((8367, 8389), 'random.uniform', 'random.uniform', (['(0)', '(100)'], {}), '(0, 100)\n', (8381, 8389), False, 'import random\n'), ((8948, 8968), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (8962, 8968), False, 'import random\n'), ((8996, 9017), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (9010, 9017), False, 'import random\n'), ((9045, 9066), 'random.randint', 'random.randint', (['(0)', '(25)'], {}), '(0, 25)\n', (9059, 9066), False, 'import random\n'), ((9612, 9632), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (9626, 9632), False, 'import random\n'), ((9676, 9697), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (9690, 9697), False, 'import random\n'), ((9745, 9766), 
'random.randint', 'random.randint', (['(0)', '(25)'], {}), '(0, 25)\n', (9759, 9766), False, 'import random\n'), ((1765, 1786), 'random.uniform', 'random.uniform', (['(-5)', '(5)'], {}), '(-5, 5)\n', (1779, 1786), False, 'import random\n'), ((2792, 2804), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2802, 2804), False, 'import uuid\n'), ((3392, 3416), 'random.uniform', 'random.uniform', (['(100)', '(101)'], {}), '(100, 101)\n', (3406, 3416), False, 'import random\n'), ((3833, 3845), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3843, 3845), False, 'import uuid\n'), ((3992, 4003), 'time.time', 'time.time', ([], {}), '()\n', (4001, 4003), False, 'import time\n'), ((4601, 4616), 'random.random', 'random.random', ([], {}), '()\n', (4614, 4616), False, 'import random\n'), ((8277, 8297), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (8291, 8297), False, 'import random\n'), ((8419, 8440), 'random.uniform', 'random.uniform', (['(0)', '(10)'], {}), '(0, 10)\n', (8433, 8440), False, 'import random\n'), ((1718, 1739), 'random.uniform', 'random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (1732, 1739), False, 'import random\n'), ((3331, 3351), 'random.uniform', 'random.uniform', (['(1)', '(2)'], {}), '(1, 2)\n', (3345, 3351), False, 'import random\n'), ((9098, 9118), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (9112, 9118), False, 'import random\n'), ((9152, 9173), 'random.randint', 'random.randint', (['(0)', '(15)'], {}), '(0, 15)\n', (9166, 9173), False, 'import random\n'), ((9207, 9228), 'random.randint', 'random.randint', (['(0)', '(25)'], {}), '(0, 25)\n', (9221, 9228), False, 'import random\n'), ((11641, 11663), 'random.uniform', 'random.uniform', (['(15)', '(32)'], {}), '(15, 32)\n', (11655, 11663), False, 'import random\n'), ((4316, 4328), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4326, 4328), False, 'import uuid\n')] |
import logging
import groceries.api as groceries
import barcodescanner.scan as barcode
def main():
grocy = groceries.GrocyAPIClient()
while True:
scanner = barcode.Scan()
line = scanner.PollScanner()
        if line is not None:
response = grocy.consume_barcode(line)
logging.debug(response)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
main()
| [
"logging.basicConfig",
"barcodescanner.scan.Scan",
"groceries.api.GrocyAPIClient",
"logging.debug"
] | [((113, 139), 'groceries.api.GrocyAPIClient', 'groceries.GrocyAPIClient', ([], {}), '()\n', (137, 139), True, 'import groceries.api as groceries\n'), ((371, 411), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (390, 411), False, 'import logging\n'), ((174, 188), 'barcodescanner.scan.Scan', 'barcode.Scan', ([], {}), '()\n', (186, 188), True, 'import barcodescanner.scan as barcode\n'), ((314, 337), 'logging.debug', 'logging.debug', (['response'], {}), '(response)\n', (327, 337), False, 'import logging\n')] |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="easy-icm-runner",
version="1.0.6",
author="<NAME>",
author_email="<EMAIL>",
description="A wrapper for IBM ICMs Scheduler API Calls",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/equinoxfitness/easy-icm-runner/",
#packages=setuptools.find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
py_modules = ['icm_runner'],
install_requires=[
'requests',
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| [
"setuptools.setup"
] | [((88, 612), 'setuptools.setup', 'setuptools.setup', ([], {'name': '"""easy-icm-runner"""', 'version': '"""1.0.6"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'description': '"""A wrapper for IBM ICMs Scheduler API Calls"""', 'long_description': 'long_description', 'long_description_content_type': '"""text/markdown"""', 'url': '"""https://github.com/equinoxfitness/easy-icm-runner/"""', 'py_modules': "['icm_runner']", 'install_requires': "['requests']", 'classifiers': "['Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent']"}), "(name='easy-icm-runner', version='1.0.6', author='<NAME>',\n author_email='<EMAIL>', description=\n 'A wrapper for IBM ICMs Scheduler API Calls', long_description=\n long_description, long_description_content_type='text/markdown', url=\n 'https://github.com/equinoxfitness/easy-icm-runner/', py_modules=[\n 'icm_runner'], install_requires=['requests'], classifiers=[\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent'])\n", (104, 612), False, 'import setuptools\n')] |
from setuptools import find_packages, setup
setup(name='ActT',
version='0.6',
description='Active Testing',
url='',
author='',
author_email='none',
license='BSD',
packages=find_packages(),
install_requires=[
'numpy', 'pandas', 'matplotlib','scipy','scikit-learn','opencv-python',
'statswag','tensorflow'
],
zip_safe=True)
| [
"setuptools.find_packages"
] | [((215, 230), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (228, 230), False, 'from setuptools import find_packages, setup\n')] |
# deltat.py time difference calculation for sensor fusion
# Released under the MIT License (MIT)
# Copyright (c) 2018 <NAME>
# Provides TimeDiff function and DeltaT class.
# The following notes cover special cases. Where the device performing fusion
# is linked to the IMU and is running MicroPython no special treatment is
# needed.
# The special cases are:
# 1. Device connected to the IMU is linked to a separate platform doing fusion.
# 2. Either or both are not running MicroPython.
# If the device providing the vectors is not running on MicroPython the user
# must supply timestamps and a function capable of differencing these. The
# function is passed to the Fusion constructor and the timestamp is provided
# along with the vector, being the time when the vector was acquired.
# If the device providing the vectors is running MicroPython but fusion is
# being performed on a device which is not, the user must provide their own
# implementation of ticks_diff which accounts for MicroPython rollover and
# must supply the returned ticks_us() values as a timestamp.
# Under MicroPython TimeDiff(start, end) uses time.ticks_diff.
# A DeltaT instance, called with function call syntax, returns a time
# difference from the previous call as a float value. Units seconds.
# If running under MicroPython and no time differencing function is supplied
# to the Fusion constructor it uses time.ticks_us as its time source and a
# default timediff function using time.ticks_diff() with a division by 1e6.
# If time differencing function is supplied a timestamp must be passsed as an
# arg to instance calls of Fusion.update() or Fusion.update_nomag(). In the
# async version the user supplied read_coro() must return a timestamp with the
# vector.
# On 1st pass dt evidently can't be computed. A notional value of 100μs is
# returned. The Madgwick algorithm takes seconds to stabilise.
try:
import utime as time
except ImportError:
import time
is_micropython = hasattr(time, 'ticks_diff')
class DeltaT():
def __init__(self, timediff):
if timediff is None:
self.expect_ts = False
if is_micropython:
self.timediff = lambda start, end : time.ticks_diff(start, end)/1000000
else:
raise ValueError('You must define a timediff function')
else:
self.expect_ts = True
self.timediff = timediff
self.start_time = None
def __call__(self, ts):
if self.expect_ts:
if ts is None:
raise ValueError('Timestamp expected but not supplied.')
else:
if is_micropython:
ts = time.ticks_us()
else:
raise RuntimeError('Not MicroPython: provide timestamps and a timediff function')
# ts is now valid
if self.start_time is None: # 1st call: self.start_time is invalid
self.start_time = ts
return 0.0001 # 100μs notional delay. 1st reading is invalid in any case
dt = self.timediff(ts, self.start_time)
self.start_time = ts
return dt
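# Hedged usage sketch (not part of the original module): illustrates the call
# pattern described in the notes above. A timediff function is supplied
# explicitly and the timestamps are synthetic floats in seconds, so the sketch
# also runs outside MicroPython; in real use each timestamp would accompany a
# sensor vector.
def _deltat_usage_example():
    deltat = DeltaT(lambda start, end: start - end)  # user-supplied differencing
    for ts in (0.0, 0.01, 0.03):
        dt = deltat(ts)  # 100μs notional value on the first call, then real deltas
        print('dt =', dt)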
| [
"time.ticks_diff",
"time.ticks_us"
] | [((2667, 2682), 'time.ticks_us', 'time.ticks_us', ([], {}), '()\n', (2680, 2682), False, 'import time\n'), ((2203, 2230), 'time.ticks_diff', 'time.ticks_diff', (['start', 'end'], {}), '(start, end)\n', (2218, 2230), False, 'import time\n')] |
# -*- coding: utf-8 -*-
"""
Sony Colourspaces
=================
Defines the *Sony* colourspaces:
- :attr:`colour.models.RGB_COLOURSPACE_S_GAMUT`.
- :attr:`colour.models.RGB_COLOURSPACE_S_GAMUT3`.
- :attr:`colour.models.RGB_COLOURSPACE_S_GAMUT3_CINE`.
- :attr:`colour.models.RGB_COLOURSPACE_VENICE_S_GAMUT3`.
- :attr:`colour.models.RGB_COLOURSPACE_VENICE_S_GAMUT3_CINE`.
Notes
-----
- The *Venice S-Gamut3* and *Venice S-Gamut3.Cine* primaries and whitepoint
were derived with the following `Google Colab Notebook \
<https://colab.research.google.com/drive/1ZGTij7jT8eZRMPUkyWlv_x5ix5Q5twMB>`__.
References
----------
- :cite:`Gaggioni` : <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., & <NAME>. (n.d.). S-Log: A new LUT for digital production
mastering and interchange applications (Vol. 709, pp. 1-13).
http://pro.sony.com/bbsccms/assets/files/mkt/cinema/solutions/slog_manual.pdf
- :cite:`SonyCorporation` : Sony Corporation. (n.d.). S-Log Whitepaper (pp.
1-17). http://www.theodoropoulos.info/attachments/076_on%20S-Log.pdf
- :cite:`SonyCorporationd` : Sony Corporation. (n.d.). Technical Summary
for S-Gamut3.Cine/S-Log3 and S-Gamut3/S-Log3 (pp. 1-7).
http://community.sony.com/sony/attachments/sony/\
large-sensor-camera-F5-F55/12359/2/\
TechnicalSummary_for_S-Gamut3Cine_S-Gamut3_S-Log3_V1_00.pdf
- :cite:`SonyCorporatione` : Sony Corporation. (n.d.).
S-Gamut3_S-Gamut3Cine_Matrix.xlsx.
https://community.sony.com/sony/attachments/sony/\
large-sensor-camera-F5-F55/12359/3/S-Gamut3_S-Gamut3Cine_Matrix.xlsx
- :cite:`SonyElectronicsCorporation2020` : Sony Electronics Corporation.
(2020). IDT.Sony.Venice_SLog3_SGamut3.ctl. https://github.com/ampas/\
aces-dev/blob/710ecbe52c87ce9f4a1e02c8ddf7ea0d6b611cc8/transforms/ctl/idt/\
vendorSupplied/sony/IDT.Sony.Venice_SLog3_SGamut3.ctl
- :cite:`SonyElectronicsCorporation2020a` : Sony Electronics Corporation.
(2020). IDT.Sony.Venice_SLog3_SGamut3Cine.ctl. https://github.com/ampas/\
aces-dev/blob/710ecbe52c87ce9f4a1e02c8ddf7ea0d6b611cc8/transforms/ctl/idt/\
vendorSupplied/sony/IDT.Sony.Venice_SLog3_SGamut3Cine.ctl
"""
from __future__ import division, unicode_literals
import numpy as np
from colour.colorimetry import CCS_ILLUMINANTS
from colour.models.rgb import (RGB_Colourspace, log_encoding_SLog2,
log_decoding_SLog2, log_encoding_SLog3,
log_decoding_SLog3, normalised_primary_matrix)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'PRIMARIES_S_GAMUT', 'WHITEPOINT_NAME_S_GAMUT', 'CCS_WHITEPOINT_S_GAMUT',
'MATRIX_S_GAMUT_TO_XYZ', 'MATRIX_XYZ_TO_S_GAMUT',
'RGB_COLOURSPACE_S_GAMUT', 'PRIMARIES_S_GAMUT3',
'WHITEPOINT_NAME_S_GAMUT3', 'CCS_WHITEPOINT_S_GAMUT3',
'MATRIX_S_GAMUT3_TO_XYZ', 'MATRIX_XYZ_TO_S_GAMUT3',
'RGB_COLOURSPACE_S_GAMUT3', 'PRIMARIES_S_GAMUT3_CINE',
'WHITEPOINT_NAME_S_GAMUT3_CINE', 'CCS_WHITEPOINT_S_GAMUT3_CINE',
'MATRIX_S_GAMUT3_CINE_TO_XYZ', 'MATRIX_XYZ_TO_S_GAMUT3_CINE',
'RGB_COLOURSPACE_S_GAMUT3_CINE', 'PRIMARIES_VENICE_S_GAMUT3',
'WHITEPOINT_NAME_VENICE_S_GAMUT3', 'CCS_WHITEPOINT_VENICE_S_GAMUT3',
'MATRIX_VENICE_S_GAMUT3_TO_XYZ', 'MATRIX_XYZ_TO_VENICE_S_GAMUT3',
'RGB_COLOURSPACE_VENICE_S_GAMUT3', 'PRIMARIES_VENICE_S_GAMUT3_CINE',
'WHITEPOINT_NAME_VENICE_S_GAMUT3_CINE',
'CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE',
'MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ', 'MATRIX_XYZ_TO_VENICE_S_GAMUT3_CINE',
'RGB_COLOURSPACE_VENICE_S_GAMUT3_CINE'
]
PRIMARIES_S_GAMUT = np.array([
[0.7300, 0.2800],
[0.1400, 0.8550],
[0.1000, -0.0500],
])
"""
*S-Gamut* colourspace primaries.
PRIMARIES_S_GAMUT : ndarray, (3, 2)
"""
WHITEPOINT_NAME_S_GAMUT = 'D65'
"""
*S-Gamut* colourspace whitepoint name.
WHITEPOINT_NAME_S_GAMUT : unicode
"""
CCS_WHITEPOINT_S_GAMUT = (CCS_ILLUMINANTS[
'CIE 1931 2 Degree Standard Observer'][WHITEPOINT_NAME_S_GAMUT])
"""
*S-Gamut* colourspace whitepoint chromaticity coordinates.
CCS_WHITEPOINT_S_GAMUT : ndarray
"""
MATRIX_S_GAMUT_TO_XYZ = np.array([
[0.7064827132, 0.1288010498, 0.1151721641],
[0.2709796708, 0.7866064112, -0.0575860820],
[-0.0096778454, 0.0046000375, 1.0941355587],
])
"""
*S-Gamut* colourspace to *CIE XYZ* tristimulus values matrix.
MATRIX_S_GAMUT_TO_XYZ : array_like, (3, 3)
"""
MATRIX_XYZ_TO_S_GAMUT = np.array([
[1.5073998991, -0.2458221374, -0.1716116808],
[-0.5181517271, 1.3553912409, 0.1258786682],
[0.0155116982, -0.0078727714, 0.9119163656],
])
"""
*CIE XYZ* tristimulus values to *S-Gamut* colourspace matrix.
MATRIX_XYZ_TO_S_GAMUT : array_like, (3, 3)
"""
RGB_COLOURSPACE_S_GAMUT = RGB_Colourspace(
'S-Gamut',
PRIMARIES_S_GAMUT,
CCS_WHITEPOINT_S_GAMUT,
WHITEPOINT_NAME_S_GAMUT,
MATRIX_S_GAMUT_TO_XYZ,
MATRIX_XYZ_TO_S_GAMUT,
log_encoding_SLog2,
log_decoding_SLog2,
)
RGB_COLOURSPACE_S_GAMUT.__doc__ = """
*S-Gamut* colourspace.
References
----------
:cite:`Gaggioni`, :cite:`SonyCorporation`
RGB_COLOURSPACE_S_GAMUT : RGB_Colourspace
"""
PRIMARIES_S_GAMUT3 = PRIMARIES_S_GAMUT
"""
*S-Gamut3* colourspace primaries.
PRIMARIES_S_GAMUT3 : ndarray, (3, 2)
"""
WHITEPOINT_NAME_S_GAMUT3 = WHITEPOINT_NAME_S_GAMUT
"""
*S-Gamut3* colourspace whitepoint name.
WHITEPOINT_NAME_S_GAMUT3 : unicode
"""
CCS_WHITEPOINT_S_GAMUT3 = CCS_WHITEPOINT_S_GAMUT
"""
*S-Gamut3* colourspace whitepoint chromaticity coordinates.
CCS_WHITEPOINT_S_GAMUT3 : ndarray
"""
MATRIX_S_GAMUT3_TO_XYZ = MATRIX_S_GAMUT_TO_XYZ
"""
*S-Gamut3* colourspace to *CIE XYZ* tristimulus values matrix.
MATRIX_S_GAMUT3_TO_XYZ : array_like, (3, 3)
"""
MATRIX_XYZ_TO_S_GAMUT3 = MATRIX_XYZ_TO_S_GAMUT
"""
*CIE XYZ* tristimulus values to *S-Gamut3* colourspace matrix.
MATRIX_XYZ_TO_S_GAMUT3 : array_like, (3, 3)
"""
RGB_COLOURSPACE_S_GAMUT3 = RGB_Colourspace(
'S-Gamut3',
PRIMARIES_S_GAMUT3,
CCS_WHITEPOINT_S_GAMUT3,
WHITEPOINT_NAME_S_GAMUT3,
MATRIX_S_GAMUT3_TO_XYZ,
MATRIX_XYZ_TO_S_GAMUT3,
log_encoding_SLog3,
log_decoding_SLog3,
)
RGB_COLOURSPACE_S_GAMUT3.__doc__ = """
*S-Gamut3* colourspace.
References
----------
:cite:`SonyCorporationd`
RGB_COLOURSPACE_S_GAMUT3 : RGB_Colourspace
"""
PRIMARIES_S_GAMUT3_CINE = np.array([
[0.76600, 0.27500],
[0.22500, 0.80000],
[0.08900, -0.08700],
])
"""
*S-Gamut3.Cine* colourspace primaries.
PRIMARIES_S_GAMUT3_CINE : ndarray, (3, 2)
"""
WHITEPOINT_NAME_S_GAMUT3_CINE = WHITEPOINT_NAME_S_GAMUT
"""
*S-Gamut3.Cine* colourspace whitepoint name.
WHITEPOINT_NAME_S_GAMUT3_CINE : unicode
"""
CCS_WHITEPOINT_S_GAMUT3_CINE = CCS_WHITEPOINT_S_GAMUT
"""
*S-Gamut3.Cine* colourspace whitepoint chromaticity coordinates.
CCS_WHITEPOINT_S_GAMUT3_CINE : ndarray
"""
MATRIX_S_GAMUT3_CINE_TO_XYZ = np.array([
[0.5990839208, 0.2489255161, 0.1024464902],
[0.2150758201, 0.8850685017, -0.1001443219],
[-0.0320658495, -0.0276583907, 1.1487819910],
])
"""
*S-Gamut3.Cine* colourspace to *CIE XYZ* tristimulus values matrix.
MATRIX_S_GAMUT3_CINE_TO_XYZ : array_like, (3, 3)
"""
MATRIX_XYZ_TO_S_GAMUT3_CINE = np.array([
[1.8467789693, -0.5259861230, -0.2105452114],
[-0.4441532629, 1.2594429028, 0.1493999729],
[0.0408554212, 0.0156408893, 0.8682072487],
])
"""
*CIE XYZ* tristimulus values to *S-Gamut3.Cine* colourspace matrix.
MATRIX_XYZ_TO_S_GAMUT3_CINE : array_like, (3, 3)
"""
RGB_COLOURSPACE_S_GAMUT3_CINE = RGB_Colourspace(
'S-Gamut3.Cine',
PRIMARIES_S_GAMUT3_CINE,
CCS_WHITEPOINT_S_GAMUT3_CINE,
WHITEPOINT_NAME_S_GAMUT3_CINE,
MATRIX_S_GAMUT3_CINE_TO_XYZ,
MATRIX_XYZ_TO_S_GAMUT3_CINE,
log_encoding_SLog3,
log_decoding_SLog3,
)
RGB_COLOURSPACE_S_GAMUT3_CINE.__doc__ = """
*S-Gamut3.Cine* colourspace.
References
----------
:cite:`SonyCorporatione`
RGB_COLOURSPACE_S_GAMUT3_CINE : RGB_Colourspace
"""
PRIMARIES_VENICE_S_GAMUT3 = np.array([
[0.740464264304292, 0.279364374750660],
[0.089241145423286, 0.893809528608105],
[0.110488236673827, -0.052579333080476],
])
"""
*Venice S-Gamut3* colourspace primaries.
PRIMARIES_VENICE_S_GAMUT3 : ndarray, (3, 2)
"""
WHITEPOINT_NAME_VENICE_S_GAMUT3 = WHITEPOINT_NAME_S_GAMUT
"""
*Venice S-Gamut3* colourspace whitepoint name.
WHITEPOINT_NAME_VENICE_S_GAMUT3 : unicode
"""
CCS_WHITEPOINT_VENICE_S_GAMUT3 = CCS_WHITEPOINT_S_GAMUT
"""
*Venice S-Gamut3* colourspace whitepoint chromaticity coordinates.
CCS_WHITEPOINT_VENICE_S_GAMUT3 : ndarray
"""
MATRIX_VENICE_S_GAMUT3_TO_XYZ = normalised_primary_matrix(
PRIMARIES_VENICE_S_GAMUT3, CCS_WHITEPOINT_VENICE_S_GAMUT3)
"""
*Venice S-Gamut3* colourspace to *CIE XYZ* tristimulus values matrix.
MATRIX_VENICE_S_GAMUT3_TO_XYZ : array_like, (3, 3)
"""
MATRIX_XYZ_TO_VENICE_S_GAMUT3 = np.linalg.inv(MATRIX_VENICE_S_GAMUT3_TO_XYZ)
"""
*CIE XYZ* tristimulus values to *Venice S-Gamut3* colourspace matrix.
MATRIX_XYZ_TO_VENICE_S_GAMUT3 : array_like, (3, 3)
"""
RGB_COLOURSPACE_VENICE_S_GAMUT3 = RGB_Colourspace(
'Venice S-Gamut3',
PRIMARIES_VENICE_S_GAMUT3,
CCS_WHITEPOINT_VENICE_S_GAMUT3,
WHITEPOINT_NAME_VENICE_S_GAMUT3,
MATRIX_VENICE_S_GAMUT3_TO_XYZ,
MATRIX_XYZ_TO_VENICE_S_GAMUT3,
log_encoding_SLog3,
log_decoding_SLog3,
)
RGB_COLOURSPACE_VENICE_S_GAMUT3.__doc__ = """
*Venice S-Gamut3* colourspace.
References
----------
:cite:`SonyElectronicsCorporation2020`
RGB_COLOURSPACE_VENICE_S_GAMUT3 : RGB_Colourspace
"""
PRIMARIES_VENICE_S_GAMUT3_CINE = np.array([
[0.775901871567345, 0.274502392854799],
[0.188682902773355, 0.828684937020288],
[0.101337382499301, -0.089187517306263],
])
"""
*Venice S-Gamut3.Cine* colourspace primaries.
PRIMARIES_VENICE_S_GAMUT3_CINE : ndarray, (3, 2)
"""
WHITEPOINT_NAME_VENICE_S_GAMUT3_CINE = WHITEPOINT_NAME_S_GAMUT
"""
*Venice S-Gamut3.Cine* colourspace whitepoint name.
WHITEPOINT_NAME_VENICE_S_GAMUT3_CINE : unicode
"""
CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE = CCS_WHITEPOINT_S_GAMUT
"""
*Venice S-Gamut3.Cine* colourspace whitepoint chromaticity coordinates.
CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE : ndarray
"""
MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ = normalised_primary_matrix(
PRIMARIES_VENICE_S_GAMUT3_CINE, CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE)
"""
*Venice S-Gamut3.Cine* colourspace to *CIE XYZ* tristimulus values matrix.
MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ : array_like, (3, 3)
"""
MATRIX_XYZ_TO_VENICE_S_GAMUT3_CINE = np.linalg.inv(
MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ)
"""
*CIE XYZ* tristimulus values to *Venice S-Gamut3.Cine* colourspace matrix.
MATRIX_XYZ_TO_VENICE_S_GAMUT3_CINE : array_like, (3, 3)
"""
RGB_COLOURSPACE_VENICE_S_GAMUT3_CINE = RGB_Colourspace(
'Venice S-Gamut3.Cine',
PRIMARIES_VENICE_S_GAMUT3_CINE,
CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE,
WHITEPOINT_NAME_VENICE_S_GAMUT3_CINE,
MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ,
MATRIX_XYZ_TO_VENICE_S_GAMUT3_CINE,
log_encoding_SLog3,
log_decoding_SLog3,
)
RGB_COLOURSPACE_VENICE_S_GAMUT3_CINE.__doc__ = """
*Venice S-Gamut3.Cine* colourspace.
References
----------
:cite:`SonyElectronicsCorporation2020a`
RGB_COLOURSPACE_VENICE_S_GAMUT3_CINE : RGB_Colourspace
"""
| [
"numpy.array",
"colour.models.rgb.normalised_primary_matrix",
"colour.models.rgb.RGB_Colourspace",
"numpy.linalg.inv"
] | [((3760, 3813), 'numpy.array', 'np.array', (['[[0.73, 0.28], [0.14, 0.855], [0.1, -0.05]]'], {}), '([[0.73, 0.28], [0.14, 0.855], [0.1, -0.05]])\n', (3768, 3813), True, 'import numpy as np\n'), ((4273, 4421), 'numpy.array', 'np.array', (['[[0.7064827132, 0.1288010498, 0.1151721641], [0.2709796708, 0.7866064112, -\n 0.057586082], [-0.0096778454, 0.0046000375, 1.0941355587]]'], {}), '([[0.7064827132, 0.1288010498, 0.1151721641], [0.2709796708, \n 0.7866064112, -0.057586082], [-0.0096778454, 0.0046000375, 1.0941355587]])\n', (4281, 4421), True, 'import numpy as np\n'), ((4572, 4723), 'numpy.array', 'np.array', (['[[1.5073998991, -0.2458221374, -0.1716116808], [-0.5181517271, 1.3553912409,\n 0.1258786682], [0.0155116982, -0.0078727714, 0.9119163656]]'], {}), '([[1.5073998991, -0.2458221374, -0.1716116808], [-0.5181517271, \n 1.3553912409, 0.1258786682], [0.0155116982, -0.0078727714, 0.9119163656]])\n', (4580, 4723), True, 'import numpy as np\n'), ((4875, 5063), 'colour.models.rgb.RGB_Colourspace', 'RGB_Colourspace', (['"""S-Gamut"""', 'PRIMARIES_S_GAMUT', 'CCS_WHITEPOINT_S_GAMUT', 'WHITEPOINT_NAME_S_GAMUT', 'MATRIX_S_GAMUT_TO_XYZ', 'MATRIX_XYZ_TO_S_GAMUT', 'log_encoding_SLog2', 'log_decoding_SLog2'], {}), "('S-Gamut', PRIMARIES_S_GAMUT, CCS_WHITEPOINT_S_GAMUT,\n WHITEPOINT_NAME_S_GAMUT, MATRIX_S_GAMUT_TO_XYZ, MATRIX_XYZ_TO_S_GAMUT,\n log_encoding_SLog2, log_decoding_SLog2)\n", (4890, 5063), False, 'from colour.models.rgb import RGB_Colourspace, log_encoding_SLog2, log_decoding_SLog2, log_encoding_SLog3, log_decoding_SLog3, normalised_primary_matrix\n'), ((6029, 6223), 'colour.models.rgb.RGB_Colourspace', 'RGB_Colourspace', (['"""S-Gamut3"""', 'PRIMARIES_S_GAMUT3', 'CCS_WHITEPOINT_S_GAMUT3', 'WHITEPOINT_NAME_S_GAMUT3', 'MATRIX_S_GAMUT3_TO_XYZ', 'MATRIX_XYZ_TO_S_GAMUT3', 'log_encoding_SLog3', 'log_decoding_SLog3'], {}), "('S-Gamut3', PRIMARIES_S_GAMUT3, CCS_WHITEPOINT_S_GAMUT3,\n WHITEPOINT_NAME_S_GAMUT3, MATRIX_S_GAMUT3_TO_XYZ,\n MATRIX_XYZ_TO_S_GAMUT3, log_encoding_SLog3, log_decoding_SLog3)\n", (6044, 6223), False, 'from colour.models.rgb import RGB_Colourspace, log_encoding_SLog2, log_decoding_SLog2, log_encoding_SLog3, log_decoding_SLog3, normalised_primary_matrix\n'), ((6437, 6494), 'numpy.array', 'np.array', (['[[0.766, 0.275], [0.225, 0.8], [0.089, -0.087]]'], {}), '([[0.766, 0.275], [0.225, 0.8], [0.089, -0.087]])\n', (6445, 6494), True, 'import numpy as np\n'), ((6964, 7113), 'numpy.array', 'np.array', (['[[0.5990839208, 0.2489255161, 0.1024464902], [0.2150758201, 0.8850685017, -\n 0.1001443219], [-0.0320658495, -0.0276583907, 1.148781991]]'], {}), '([[0.5990839208, 0.2489255161, 0.1024464902], [0.2150758201, \n 0.8850685017, -0.1001443219], [-0.0320658495, -0.0276583907, 1.148781991]])\n', (6972, 7113), True, 'import numpy as np\n'), ((7282, 7431), 'numpy.array', 'np.array', (['[[1.8467789693, -0.525986123, -0.2105452114], [-0.4441532629, 1.2594429028,\n 0.1493999729], [0.0408554212, 0.0156408893, 0.8682072487]]'], {}), '([[1.8467789693, -0.525986123, -0.2105452114], [-0.4441532629, \n 1.2594429028, 0.1493999729], [0.0408554212, 0.0156408893, 0.8682072487]])\n', (7290, 7431), True, 'import numpy as np\n'), ((7602, 7830), 'colour.models.rgb.RGB_Colourspace', 'RGB_Colourspace', (['"""S-Gamut3.Cine"""', 'PRIMARIES_S_GAMUT3_CINE', 'CCS_WHITEPOINT_S_GAMUT3_CINE', 'WHITEPOINT_NAME_S_GAMUT3_CINE', 'MATRIX_S_GAMUT3_CINE_TO_XYZ', 'MATRIX_XYZ_TO_S_GAMUT3_CINE', 'log_encoding_SLog3', 'log_decoding_SLog3'], {}), "('S-Gamut3.Cine', PRIMARIES_S_GAMUT3_CINE,\n CCS_WHITEPOINT_S_GAMUT3_CINE, 
WHITEPOINT_NAME_S_GAMUT3_CINE,\n MATRIX_S_GAMUT3_CINE_TO_XYZ, MATRIX_XYZ_TO_S_GAMUT3_CINE,\n log_encoding_SLog3, log_decoding_SLog3)\n", (7617, 7830), False, 'from colour.models.rgb import RGB_Colourspace, log_encoding_SLog2, log_decoding_SLog2, log_encoding_SLog3, log_decoding_SLog3, normalised_primary_matrix\n'), ((8057, 8192), 'numpy.array', 'np.array', (['[[0.740464264304292, 0.27936437475066], [0.089241145423286, \n 0.893809528608105], [0.110488236673827, -0.052579333080476]]'], {}), '([[0.740464264304292, 0.27936437475066], [0.089241145423286, \n 0.893809528608105], [0.110488236673827, -0.052579333080476]])\n', (8065, 8192), True, 'import numpy as np\n'), ((8662, 8750), 'colour.models.rgb.normalised_primary_matrix', 'normalised_primary_matrix', (['PRIMARIES_VENICE_S_GAMUT3', 'CCS_WHITEPOINT_VENICE_S_GAMUT3'], {}), '(PRIMARIES_VENICE_S_GAMUT3,\n CCS_WHITEPOINT_VENICE_S_GAMUT3)\n', (8687, 8750), False, 'from colour.models.rgb import RGB_Colourspace, log_encoding_SLog2, log_decoding_SLog2, log_encoding_SLog3, log_decoding_SLog3, normalised_primary_matrix\n'), ((8915, 8959), 'numpy.linalg.inv', 'np.linalg.inv', (['MATRIX_VENICE_S_GAMUT3_TO_XYZ'], {}), '(MATRIX_VENICE_S_GAMUT3_TO_XYZ)\n', (8928, 8959), True, 'import numpy as np\n'), ((9125, 9365), 'colour.models.rgb.RGB_Colourspace', 'RGB_Colourspace', (['"""Venice S-Gamut3"""', 'PRIMARIES_VENICE_S_GAMUT3', 'CCS_WHITEPOINT_VENICE_S_GAMUT3', 'WHITEPOINT_NAME_VENICE_S_GAMUT3', 'MATRIX_VENICE_S_GAMUT3_TO_XYZ', 'MATRIX_XYZ_TO_VENICE_S_GAMUT3', 'log_encoding_SLog3', 'log_decoding_SLog3'], {}), "('Venice S-Gamut3', PRIMARIES_VENICE_S_GAMUT3,\n CCS_WHITEPOINT_VENICE_S_GAMUT3, WHITEPOINT_NAME_VENICE_S_GAMUT3,\n MATRIX_VENICE_S_GAMUT3_TO_XYZ, MATRIX_XYZ_TO_VENICE_S_GAMUT3,\n log_encoding_SLog3, log_decoding_SLog3)\n", (9140, 9365), False, 'from colour.models.rgb import RGB_Colourspace, log_encoding_SLog2, log_decoding_SLog2, log_encoding_SLog3, log_decoding_SLog3, normalised_primary_matrix\n'), ((9617, 9753), 'numpy.array', 'np.array', (['[[0.775901871567345, 0.274502392854799], [0.188682902773355, \n 0.828684937020288], [0.101337382499301, -0.089187517306263]]'], {}), '([[0.775901871567345, 0.274502392854799], [0.188682902773355, \n 0.828684937020288], [0.101337382499301, -0.089187517306263]])\n', (9625, 9753), True, 'import numpy as np\n'), ((10267, 10365), 'colour.models.rgb.normalised_primary_matrix', 'normalised_primary_matrix', (['PRIMARIES_VENICE_S_GAMUT3_CINE', 'CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE'], {}), '(PRIMARIES_VENICE_S_GAMUT3_CINE,\n CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE)\n', (10292, 10365), False, 'from colour.models.rgb import RGB_Colourspace, log_encoding_SLog2, log_decoding_SLog2, log_encoding_SLog3, log_decoding_SLog3, normalised_primary_matrix\n'), ((10545, 10594), 'numpy.linalg.inv', 'np.linalg.inv', (['MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ'], {}), '(MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ)\n', (10558, 10594), True, 'import numpy as np\n'), ((10780, 11054), 'colour.models.rgb.RGB_Colourspace', 'RGB_Colourspace', (['"""Venice S-Gamut3.Cine"""', 'PRIMARIES_VENICE_S_GAMUT3_CINE', 'CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE', 'WHITEPOINT_NAME_VENICE_S_GAMUT3_CINE', 'MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ', 'MATRIX_XYZ_TO_VENICE_S_GAMUT3_CINE', 'log_encoding_SLog3', 'log_decoding_SLog3'], {}), "('Venice S-Gamut3.Cine', PRIMARIES_VENICE_S_GAMUT3_CINE,\n CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE,\n WHITEPOINT_NAME_VENICE_S_GAMUT3_CINE,\n MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ, MATRIX_XYZ_TO_VENICE_S_GAMUT3_CINE,\n log_encoding_SLog3, log_decoding_SLog3)\n", 
(10795, 11054), False, 'from colour.models.rgb import RGB_Colourspace, log_encoding_SLog2, log_decoding_SLog2, log_encoding_SLog3, log_decoding_SLog3, normalised_primary_matrix\n')] |
# -*- coding: UTF-8 -*-
'''=================================================
@Project -> File :EWC -> network
@IDE :PyCharm
@Author :<NAME>
@Date :2021/6/23 20:28
@Desc :
=================================================='''
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, Conv2D,LeakyReLU,MaxPool2D,Flatten,Input
def fcnn():
    inputs = Input(shape=(784,), dtype='float32', name='input')
    # x = Dense(128, activation='relu')(inputs)
    # x = Dense(64, activation='relu')(x)
    # x = Dense(32, activation='relu')(x)
    x = Dense(256, activation='relu')(inputs)
    x = Dense(256, activation='relu')(x)
    output = Dense(10, activation='softmax')(x)
    return Model(inputs, output)
"tensorflow.keras.Model",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.Dense"
] | [((378, 425), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(784)', 'dtype': '"""float32"""', 'name': '"""input"""'}), "(shape=784, dtype='float32', name='input')\n", (383, 425), False, 'from tensorflow.keras.layers import Dense, Conv2D, LeakyReLU, MaxPool2D, Flatten, Input\n'), ((694, 714), 'tensorflow.keras.Model', 'Model', (['input', 'output'], {}), '(input, output)\n', (699, 714), False, 'from tensorflow.keras import Model\n'), ((560, 589), 'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (565, 589), False, 'from tensorflow.keras.layers import Dense, Conv2D, LeakyReLU, MaxPool2D, Flatten, Input\n'), ((604, 633), 'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (609, 633), False, 'from tensorflow.keras.layers import Dense, Conv2D, LeakyReLU, MaxPool2D, Flatten, Input\n'), ((649, 680), 'tensorflow.keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (654, 680), False, 'from tensorflow.keras.layers import Dense, Conv2D, LeakyReLU, MaxPool2D, Flatten, Input\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import os
import pprint
import unittest
import numpy as np
# pyre-fixme[21]: Could not find module `pytest`.
import pytest
import torch
from parameterized import parameterized
from reagent.core.types import RewardOptions
from reagent.gym.agents.agent import Agent
from reagent.gym.agents.post_step import train_with_replay_buffer_post_step
from reagent.gym.envs.union import Env__Union
from reagent.gym.runners.gymrunner import evaluate_for_n_episodes, run_episode
from reagent.gym.utils import build_normalizer, fill_replay_buffer
from reagent.model_managers.model_manager import ModelManager
from reagent.model_managers.union import ModelManager__Union
from reagent.replay_memory.circular_replay_buffer import ReplayBuffer
from reagent.tensorboardX import summary_writer_context
from reagent.test.base.horizon_test_base import HorizonTestBase
from torch.utils.tensorboard import SummaryWriter
try:
# Use internal runner or OSS otherwise
from reagent.runners.fb.fb_batch_runner import FbBatchRunner as BatchRunner
except ImportError:
from reagent.runners.oss_batch_runner import OssBatchRunner as BatchRunner
# for seeding the environment
SEED = 0
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
"""
Put on-policy gym tests here in the format (test name, path to yaml config).
Format path to be: "configs/<env_name>/<model_name>_<env_name>_online.yaml."
NOTE: These tests should ideally finish quickly (within 10 minutes) since they are
unit tests which are run many times.
"""
GYM_TESTS = [
("Discrete DQN Cartpole", "configs/cartpole/discrete_dqn_cartpole_online.yaml"),
("Discrete C51 Cartpole", "configs/cartpole/discrete_c51_cartpole_online.yaml"),
("Discrete QR Cartpole", "configs/cartpole/discrete_qr_cartpole_online.yaml"),
(
"Discrete DQN Open Gridworld",
"configs/open_gridworld/discrete_dqn_open_gridworld.yaml",
),
("SAC Pendulum", "configs/pendulum/sac_pendulum_online.yaml"),
("TD3 Pendulum", "configs/pendulum/td3_pendulum_online.yaml"),
("Parametric DQN Cartpole", "configs/cartpole/parametric_dqn_cartpole_online.yaml"),
(
"Parametric SARSA Cartpole",
"configs/cartpole/parametric_sarsa_cartpole_online.yaml",
),
(
"Sparse DQN Changing Arms",
"configs/sparse/discrete_dqn_changing_arms_online.yaml",
),
("SlateQ RecSim", "configs/recsim/slate_q_recsim_online.yaml"),
("PossibleActionsMask DQN", "configs/functionality/dqn_possible_actions_mask.yaml"),
]
curr_dir = os.path.dirname(__file__)
class TestGym(HorizonTestBase):
# pyre-fixme[16]: Module `parameterized` has no attribute `expand`.
@parameterized.expand(GYM_TESTS)
def test_gym_cpu(self, name: str, config_path: str):
logger.info(f"Starting {name} on CPU")
self.run_from_config(
run_test=run_test,
config_path=os.path.join(curr_dir, config_path),
use_gpu=False,
)
logger.info(f"{name} passes!")
# pyre-fixme[16]: Module `parameterized` has no attribute `expand`.
@parameterized.expand(GYM_TESTS)
@pytest.mark.serial
# pyre-fixme[56]: Argument `not torch.cuda.is_available()` to decorator factory
# `unittest.skipIf` could not be resolved in a global scope.
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_gym_gpu(self, name: str, config_path: str):
logger.info(f"Starting {name} on GPU")
self.run_from_config(
run_test=run_test,
config_path=os.path.join(curr_dir, config_path),
use_gpu=True,
)
logger.info(f"{name} passes!")
def run_test(
env: Env__Union,
model: ModelManager__Union,
replay_memory_size: int,
train_every_ts: int,
train_after_ts: int,
num_train_episodes: int,
passing_score_bar: float,
num_eval_episodes: int,
use_gpu: bool,
):
env = env.value
env.seed(SEED)
env.action_space.seed(SEED)
normalization = build_normalizer(env)
logger.info(f"Normalization is: \n{pprint.pformat(normalization)}")
manager: ModelManager = model.value
runner = BatchRunner(use_gpu, manager, RewardOptions(), normalization)
trainer = runner.initialize_trainer()
reporter = manager.get_reporter()
trainer.reporter = reporter
training_policy = manager.create_policy(trainer)
replay_buffer = ReplayBuffer(
replay_capacity=replay_memory_size, batch_size=trainer.minibatch_size
)
device = torch.device("cuda") if use_gpu else torch.device("cpu")
# first fill the replay buffer to burn_in
train_after_ts = max(train_after_ts, trainer.minibatch_size)
fill_replay_buffer(
env=env, replay_buffer=replay_buffer, desired_size=train_after_ts
)
post_step = train_with_replay_buffer_post_step(
replay_buffer=replay_buffer,
env=env,
trainer=trainer,
training_freq=train_every_ts,
batch_size=trainer.minibatch_size,
device=device,
)
agent = Agent.create_for_env(
env, policy=training_policy, post_transition_callback=post_step, device=device
)
writer = SummaryWriter()
with summary_writer_context(writer):
train_rewards = []
for i in range(num_train_episodes):
trajectory = run_episode(
env=env, agent=agent, mdp_id=i, max_steps=env.max_steps
)
ep_reward = trajectory.calculate_cumulative_reward()
train_rewards.append(ep_reward)
logger.info(
f"Finished training episode {i} (len {len(trajectory)})"
f" with reward {ep_reward}."
)
logger.info("============Train rewards=============")
logger.info(train_rewards)
logger.info(f"average: {np.mean(train_rewards)};\tmax: {np.max(train_rewards)}")
# Check whether the max score passed the score bar; we explore during training
# the return could be bad (leading to flakiness in C51 and QRDQN).
assert np.max(train_rewards) >= passing_score_bar, (
f"max reward ({np.max(train_rewards)})after training for "
f"{len(train_rewards)} episodes is less than < {passing_score_bar}.\n"
)
serving_policy = manager.create_serving_policy(normalization, trainer)
agent = Agent.create_for_env_with_serving_policy(env, serving_policy)
eval_rewards = evaluate_for_n_episodes(
n=num_eval_episodes, env=env, agent=agent, max_steps=env.max_steps
).squeeze(1)
logger.info("============Eval rewards==============")
logger.info(eval_rewards)
mean_eval = np.mean(eval_rewards)
logger.info(f"average: {mean_eval};\tmax: {np.max(eval_rewards)}")
assert (
mean_eval >= passing_score_bar
), f"Eval reward is {mean_eval}, less than < {passing_score_bar}.\n"
if __name__ == "__main__":
unittest.main()
| [
"logging.getLogger",
"reagent.gym.utils.build_normalizer",
"reagent.gym.utils.fill_replay_buffer",
"torch.cuda.is_available",
"reagent.replay_memory.circular_replay_buffer.ReplayBuffer",
"unittest.main",
"torch.utils.tensorboard.SummaryWriter",
"numpy.mean",
"reagent.tensorboardX.summary_writer_context",
"parameterized.parameterized.expand",
"reagent.core.types.RewardOptions",
"numpy.max",
"reagent.gym.agents.agent.Agent.create_for_env",
"reagent.gym.agents.agent.Agent.create_for_env_with_serving_policy",
"pprint.pformat",
"os.path.dirname",
"reagent.gym.agents.post_step.train_with_replay_buffer_post_step",
"torch.device",
"reagent.gym.runners.gymrunner.evaluate_for_n_episodes",
"reagent.gym.runners.gymrunner.run_episode",
"os.path.join"
] | [((1286, 1313), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1303, 1313), False, 'import logging\n'), ((2639, 2664), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2654, 2664), False, 'import os\n'), ((2776, 2807), 'parameterized.parameterized.expand', 'parameterized.expand', (['GYM_TESTS'], {}), '(GYM_TESTS)\n', (2796, 2807), False, 'from parameterized import parameterized\n'), ((3188, 3219), 'parameterized.parameterized.expand', 'parameterized.expand', (['GYM_TESTS'], {}), '(GYM_TESTS)\n', (3208, 3219), False, 'from parameterized import parameterized\n'), ((4117, 4138), 'reagent.gym.utils.build_normalizer', 'build_normalizer', (['env'], {}), '(env)\n', (4133, 4138), False, 'from reagent.gym.utils import build_normalizer, fill_replay_buffer\n'), ((4513, 4601), 'reagent.replay_memory.circular_replay_buffer.ReplayBuffer', 'ReplayBuffer', ([], {'replay_capacity': 'replay_memory_size', 'batch_size': 'trainer.minibatch_size'}), '(replay_capacity=replay_memory_size, batch_size=trainer.\n minibatch_size)\n', (4525, 4601), False, 'from reagent.replay_memory.circular_replay_buffer import ReplayBuffer\n'), ((4797, 4887), 'reagent.gym.utils.fill_replay_buffer', 'fill_replay_buffer', ([], {'env': 'env', 'replay_buffer': 'replay_buffer', 'desired_size': 'train_after_ts'}), '(env=env, replay_buffer=replay_buffer, desired_size=\n train_after_ts)\n', (4815, 4887), False, 'from reagent.gym.utils import build_normalizer, fill_replay_buffer\n'), ((4914, 5092), 'reagent.gym.agents.post_step.train_with_replay_buffer_post_step', 'train_with_replay_buffer_post_step', ([], {'replay_buffer': 'replay_buffer', 'env': 'env', 'trainer': 'trainer', 'training_freq': 'train_every_ts', 'batch_size': 'trainer.minibatch_size', 'device': 'device'}), '(replay_buffer=replay_buffer, env=env,\n trainer=trainer, training_freq=train_every_ts, batch_size=trainer.\n minibatch_size, device=device)\n', (4948, 5092), False, 'from reagent.gym.agents.post_step import train_with_replay_buffer_post_step\n'), ((5152, 5257), 'reagent.gym.agents.agent.Agent.create_for_env', 'Agent.create_for_env', (['env'], {'policy': 'training_policy', 'post_transition_callback': 'post_step', 'device': 'device'}), '(env, policy=training_policy, post_transition_callback=\n post_step, device=device)\n', (5172, 5257), False, 'from reagent.gym.agents.agent import Agent\n'), ((5281, 5296), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (5294, 5296), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((6426, 6487), 'reagent.gym.agents.agent.Agent.create_for_env_with_serving_policy', 'Agent.create_for_env_with_serving_policy', (['env', 'serving_policy'], {}), '(env, serving_policy)\n', (6466, 6487), False, 'from reagent.gym.agents.agent import Agent\n'), ((6730, 6751), 'numpy.mean', 'np.mean', (['eval_rewards'], {}), '(eval_rewards)\n', (6737, 6751), True, 'import numpy as np\n'), ((6981, 6996), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6994, 6996), False, 'import unittest\n'), ((4295, 4310), 'reagent.core.types.RewardOptions', 'RewardOptions', ([], {}), '()\n', (4308, 4310), False, 'from reagent.core.types import RewardOptions\n'), ((4625, 4645), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4637, 4645), False, 'import torch\n'), ((4662, 4681), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4674, 4681), False, 'import torch\n'), ((5306, 5336), 'reagent.tensorboardX.summary_writer_context', 
'summary_writer_context', (['writer'], {}), '(writer)\n', (5328, 5336), False, 'from reagent.tensorboardX import summary_writer_context\n'), ((6140, 6161), 'numpy.max', 'np.max', (['train_rewards'], {}), '(train_rewards)\n', (6146, 6161), True, 'import numpy as np\n'), ((3419, 3444), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3442, 3444), False, 'import torch\n'), ((5434, 5502), 'reagent.gym.runners.gymrunner.run_episode', 'run_episode', ([], {'env': 'env', 'agent': 'agent', 'mdp_id': 'i', 'max_steps': 'env.max_steps'}), '(env=env, agent=agent, mdp_id=i, max_steps=env.max_steps)\n', (5445, 5502), False, 'from reagent.gym.runners.gymrunner import evaluate_for_n_episodes, run_episode\n'), ((6209, 6230), 'numpy.max', 'np.max', (['train_rewards'], {}), '(train_rewards)\n', (6215, 6230), True, 'import numpy as np\n'), ((6508, 6603), 'reagent.gym.runners.gymrunner.evaluate_for_n_episodes', 'evaluate_for_n_episodes', ([], {'n': 'num_eval_episodes', 'env': 'env', 'agent': 'agent', 'max_steps': 'env.max_steps'}), '(n=num_eval_episodes, env=env, agent=agent,\n max_steps=env.max_steps)\n', (6531, 6603), False, 'from reagent.gym.runners.gymrunner import evaluate_for_n_episodes, run_episode\n'), ((2997, 3032), 'os.path.join', 'os.path.join', (['curr_dir', 'config_path'], {}), '(curr_dir, config_path)\n', (3009, 3032), False, 'import os\n'), ((3657, 3692), 'os.path.join', 'os.path.join', (['curr_dir', 'config_path'], {}), '(curr_dir, config_path)\n', (3669, 3692), False, 'import os\n'), ((4178, 4207), 'pprint.pformat', 'pprint.pformat', (['normalization'], {}), '(normalization)\n', (4192, 4207), False, 'import pprint\n'), ((5917, 5939), 'numpy.mean', 'np.mean', (['train_rewards'], {}), '(train_rewards)\n', (5924, 5939), True, 'import numpy as np\n'), ((5949, 5970), 'numpy.max', 'np.max', (['train_rewards'], {}), '(train_rewards)\n', (5955, 5970), True, 'import numpy as np\n'), ((6799, 6819), 'numpy.max', 'np.max', (['eval_rewards'], {}), '(eval_rewards)\n', (6805, 6819), True, 'import numpy as np\n')] |
from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(ErrorGroup)
admin.site.register(Error) | [
"django.contrib.admin.site.register"
] | [((84, 115), 'django.contrib.admin.site.register', 'admin.site.register', (['ErrorGroup'], {}), '(ErrorGroup)\n', (103, 115), False, 'from django.contrib import admin\n'), ((116, 142), 'django.contrib.admin.site.register', 'admin.site.register', (['Error'], {}), '(Error)\n', (135, 142), False, 'from django.contrib import admin\n')] |
from rest_framework import serializers
from .models import *
class CoordinatorSerializer(serializers.ModelSerializer):
# ModelID = serializers.CharField(max_length=100, required=True)
CourseID = serializers.CharField(max_length=100, required=True)
FName = serializers.CharField(max_length=100, required=False)
LName = serializers.CharField(max_length=100, required=False)
Phone = serializers.CharField(max_length=100, required=False)
Office = serializers.CharField(max_length=100, required=False)
Email = serializers.CharField(max_length=100, required=False)
def create(self, validated_data):
        # Once the request data has been validated, we can create a Coordinator instance in the database
return Coordinator.objects.create(
ModelID=validated_data.get('ModelID'),
CourseID=validated_data.get('CourseID'),
FName=validated_data.get('FName'),
LName=validated_data.get('LName'),
Phone=validated_data.get('Phone'),
Office=validated_data.get('Office'),
Email=validated_data.get('Email')
)
def update(self, instance, validated_data):
        # Once the request data has been validated, we can update the Coordinator instance in the database
instance.ModelID = validated_data.get('ModelID', instance.ModelID)
instance.CourseID = validated_data.get('CourseID', instance.CourseID)
instance.FName = validated_data.get('FName', instance.FName)
instance.LName = validated_data.get('LName', instance.LName)
instance.Phone = validated_data.get('Phone', instance.Phone)
instance.Office = validated_data.get('Office', instance.Office)
instance.Email = validated_data.get('Email', instance.Email)
instance.save()
return instance
class Meta:
model = Coordinator
fields = (
'ModelID',
'CourseID',
'FName',
'LName',
'Phone',
'Office',
'Email'
)
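# Hedged usage sketch (not part of the original module): shows how Django REST
# framework drives the create()/update() hooks defined above. "request_data"
# is a hypothetical payload dict, e.g. taken from request.data in a view.
def _coordinator_serializer_example(request_data):
    serializer = CoordinatorSerializer(data=request_data)
    serializer.is_valid(raise_exception=True)  # field validation
    return serializer.save()  # dispatches to create(), or update() when an instance is passed in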
class InfoSerializer(serializers.ModelSerializer):
# ModelID = serializers.CharField(max_length=100, required=True)
CourseID = serializers.CharField(max_length=100, required=True)
GradeNotes = serializers.CharField(max_length=5000, required=False)
Examination = serializers.CharField(max_length=5000, required=False)
CourseDescription = serializers.CharField(max_length=5000, required=False)
UseCalc = serializers.CharField(max_length=100, required=False)
def create(self, validated_data):
return Info.objects.create(
ModelID=validated_data.get('ModelID'),
CourseID=validated_data.get('CourseID'),
GradeNotes=validated_data.get('GradeNotes'),
Examination=validated_data.get('Examination'),
CourseDescription=validated_data.get('CourseDescription'),
UseCalc=validated_data.get('UseCalc')
)
def update(self, instance, validated_data):
instance.ModelID = validated_data.get('ModelID', instance.ModelID)
instance.CourseID = validated_data.get('CourseID', instance.CourseID)
instance.GradeNotes = validated_data.get('GradeNotes', instance.GradeNotes)
instance.Examination = validated_data.get('Examination', instance.Examination)
instance.CourseDescription = validated_data.get('CourseDescription', instance.CourseDescription)
instance.UseCalc = validated_data.get('UseCalc', instance.UseCalc)
instance.save()
return instance
class Meta:
model = Info
fields = (
'ModelID',
'CourseID',
'GradeNotes',
'Examination',
'CourseDescription',
'UseCalc'
)
class GradeDeterminationSerializer(serializers.ModelSerializer):
# ModelID = serializers.CharField(max_length=100, required=True)
CourseID = serializers.CharField(max_length=100, required=True)
Component = serializers.CharField(max_length=100, required=False)
OutcomeEvaluated = serializers.CharField(max_length=100, required=False)
Weight = serializers.IntegerField(required=False)
def create(self, validated_data):
        # Once the request data has been validated, we can create a GradeDetermination instance in the database
return GradeDetermination.objects.create(
ModelID=validated_data.get('ModelID'),
CourseID=validated_data.get('CourseID'),
Component=validated_data.get('Component'),
OutcomeEvaluated=validated_data.get('OutcomeEvaluated'),
Weight=validated_data.get('Weight'),
)
def update(self, instance, validated_data):
        # Once the request data has been validated, we can update the GradeDetermination instance in the database
instance.ModelID = validated_data.get('ModelID', instance.ModelID)
instance.CourseID = validated_data.get('CourseID', instance.CourseID)
instance.Component = validated_data.get('Component', instance.Component)
instance.OutcomeEvaluated = validated_data.get('OutcomeEvaluated', instance.OutcomeEvaluated)
instance.Weight = validated_data.get('Weight', instance.Weight)
instance.save()
return instance
class Meta:
model = GradeDetermination
fields = (
'ModelID',
'CourseID',
'Component',
'OutcomeEvaluated',
'Weight'
)
class OutcomeSerializer(serializers.ModelSerializer):
# ModelID = serializers.CharField(max_length=100, required=True)
CourseID = serializers.CharField(max_length=100, required=True)
OutcomeNum = serializers.IntegerField(required=False) # removed max_length=100
Description = serializers.CharField(max_length=500, required=False) # Changed max_length to 500
GraduateAttribute = serializers.CharField(max_length=100, required=False)
InstructionLvl = serializers.CharField(max_length=100, required=False)
def create(self, validated_data):
return Outcome.objects.create(
ModelID=validated_data.get('ModelID'),
CourseID=validated_data.get('CourseID'),
OutcomeNum=validated_data.get('OutcomeNum'),
Description=validated_data.get('Description'),
GraduateAttribute=validated_data.get('GraduateAttribute'),
InstructionLvl=validated_data.get('InstructionLvl'),
)
def update(self, instance, validated_data):
instance.ModelID = validated_data.get('ModelID', instance.ModelID)
instance.CourseID = validated_data.get('CourseID', instance.CourseID)
instance.OutcomeNum = validated_data.get('OutcomeNum', instance.OutcomeNum)
instance.Description = validated_data.get('Description', instance.Description)
instance.GraduateAttribute = validated_data.get('GraduateAttribute', instance.GraduateAttribute)
instance.InstructionLvl = validated_data.get('InstructionLvl', instance.InstructionLvl)
instance.save()
return instance
class Meta:
model = Outcome
fields = (
'ModelID',
'CourseID',
'OutcomeNum',
'Description',
'GraduateAttribute',
'InstructionLvl'
)
class TimetableSerializer(serializers.ModelSerializer):
# ModelID = serializers.CharField(max_length=100, required=True)
CourseID = serializers.CharField(max_length=100, required=True)
SectionNum = serializers.CharField(max_length=100, required=False)
Days = serializers.CharField(max_length=100, required=False)
Time = serializers.CharField(max_length=100, required=False)
Location = serializers.CharField(max_length=100, required=False)
def create(self, validated_data):
return Timetable.objects.create(
ModelID=validated_data.get('ModelID'),
CourseID=validated_data.get('CourseID'),
SectionNum=validated_data.get('SectionNum'),
Days=validated_data.get('Days'),
Time=validated_data.get('Time'),
Location=validated_data.get('Location'),
)
def update(self, instance, validated_data):
instance.ModelID = validated_data.get('ModelID', instance.ModelID)
instance.CourseID = validated_data.get('CourseID', instance.CourseID)
instance.SectionNum = validated_data.get('SectionNum', instance.SectionNum)
instance.Days = validated_data.get('Days', instance.Days)
instance.Time = validated_data.get('Time', instance.Time)
instance.Location = validated_data.get('Location', instance.Location)
instance.save()
return instance
class Meta:
model = Timetable
fields = (
'ModelID',
'CourseID',
'SectionNum',
'Days',
'Time',
'Location'
)
class GradeDistributionSerializer(serializers.ModelSerializer):
# ModelID = serializers.CharField(max_length=100, required=True)
CourseID = serializers.CharField(max_length=100, required=True)
LowerLimit = serializers.IntegerField(required=False) # removed max_length = 100
UpperLimit = serializers.IntegerField(required=False) # removed max_length = 100
LetterGrade = serializers.CharField(max_length=100, required=False)
def create(self, validated_data):
return GradeDistribution.objects.create(
ModelID=validated_data.get('ModelID'),
CourseID=validated_data.get('CourseID'),
LowerLimit=validated_data.get('LowerLimit'),
UpperLimit=validated_data.get('UpperLimit'),
LetterGrade=validated_data.get('LetterGrade'),
)
def update(self, instance, validated_data):
instance.ModelID = validated_data.get('ModelID', instance.ModelID)
instance.CourseID = validated_data.get('CourseID', instance.CourseID)
instance.LowerLimit = validated_data.get('LowerLimit', instance.LowerLimit)
instance.UpperLimit = validated_data.get('UpperLimit', instance.UpperLimit)
instance.LetterGrade = validated_data.get('LetterGrade', instance.LetterGrade)
instance.save()
return instance
class Meta:
model = GradeDistribution
fields = (
'ModelID',
'CourseID',
'LowerLimit',
'UpperLimit',
'LetterGrade'
)
class LectureSerializer(serializers.ModelSerializer):
# ModelID = serializers.CharField(max_length=100, required=True)
CourseID = serializers.CharField(max_length=100, required=True)
LectureNum = serializers.CharField(max_length=100, required=False)
FName = serializers.CharField(max_length=100, required=False)
LName = serializers.CharField(max_length=100, required=False)
Phone = serializers.CharField(max_length=100, required=False)
Office = serializers.CharField(max_length=100, required=False)
Email = serializers.CharField(max_length=100, required=False)
def create(self, validated_data):
return Lecture.objects.create(
ModelID=validated_data.get('ModelID'),
CourseID=validated_data.get('CourseID'),
LectureNum=validated_data.get('LectureNum'),
FName=validated_data.get('FName'),
LName=validated_data.get('LName'),
Phone=validated_data.get('Phone'),
Office=validated_data.get('Office'),
Email=validated_data.get('Email'),
)
def update(self, instance, validated_data):
instance.ModelID = validated_data.get('ModelID', instance.ModelID)
instance.CourseID = validated_data.get('CourseID', instance.CourseID)
instance.LectureNum = validated_data.get('LectureNum', instance.LectureNum)
instance.FName = validated_data.get('FName', instance.FName)
instance.LName = validated_data.get('LName', instance.LName)
instance.Phone = validated_data.get('Phone', instance.Phone)
instance.Office = validated_data.get('Office', instance.Office)
instance.Email = validated_data.get('Email', instance.Email)
instance.save()
return instance
class Meta:
model = Lecture
fields = (
'ModelID',
'CourseID',
'LectureNum',
'FName',
'LName',
'Phone',
'Office',
'Email'
)
class TutorialSerializer(serializers.ModelSerializer):
# ModelID = serializers.CharField(max_length=100, required=True)
CourseID = serializers.CharField(max_length=100, required=True)
TutorialNum = serializers.CharField(max_length=100, required=False) # Changed Tutorial Num to CharField
FName = serializers.CharField(max_length=100, required=False) # Changed FName to CharField
LName = serializers.CharField(max_length=100, required=False)
Phone = serializers.CharField(max_length=100, required=False)
Office = serializers.CharField(max_length=100, required=False)
Email = serializers.CharField(max_length=100, required=False)
def create(self, validated_data):
return Tutorial.objects.create(
ModelID=validated_data.get('ModelID'),
CourseID=validated_data.get('CourseID'),
TutorialNum=validated_data.get('TutorialNum'),
FName=validated_data.get('FName'),
LName=validated_data.get('LName'),
Phone=validated_data.get('Phone'),
Office=validated_data.get('Office'),
Email=validated_data.get('Email'),
)
def update(self, instance, validated_data):
instance.ModelID = validated_data.get('ModelID', instance.ModelID)
instance.CourseID = validated_data.get('CourseID', instance.CourseID)
instance.TutorialNum = validated_data.get('TutorialNum', instance.TutorialNum)
instance.FName = validated_data.get('FName', instance.FName)
instance.LName = validated_data.get('LName', instance.LName)
instance.Phone = validated_data.get('Phone', instance.Phone)
instance.Office = validated_data.get('Office', instance.Office)
instance.Email = validated_data.get('Email', instance.Email)
instance.save()
return instance
class Meta:
model = Tutorial
fields = (
'ModelID',
'CourseID',
'TutorialNum',
'FName',
'LName',
'Phone',
'Office',
'Email'
)
class CourseSerializer(serializers.ModelSerializer):
# ModelID = serializers.CharField(max_length=100, required=True)
CourseID = serializers.CharField(max_length=100, required=True)
CourseHours = serializers.CharField(max_length=100, required=False) # Changed CourseHours to CharField
CourseName = serializers.CharField(max_length=100, required=False) # Changed CourseName to CharField
CalenderRefrence = serializers.CharField(max_length=100, required=False)
AcademicCredit = serializers.IntegerField(required=False) # Changed AcademicCredit to IntegerField
DateCreated = serializers.CharField(max_length=100, required=False)
def create(self, validated_data):
return Course.objects.create(
ModelID=validated_data.get('ModelID'),
CourseID=validated_data.get('CourseID'),
CourseHours=validated_data.get('CourseHours'),
CourseName=validated_data.get('CourseName'),
CalenderRefrence=validated_data.get('CalenderRefrence'),
AcademicCredit=validated_data.get('AcademicCredit'),
DateCreated=validated_data.get('DateCreated'),
)
def update(self, instance, validated_data):
instance.ModelID = validated_data.get('ModelID', instance.ModelID)
instance.CourseID = validated_data.get('CourseID', instance.CourseID)
instance.CourseHours = validated_data.get('CourseHours', instance.CourseHours)
instance.CourseName = validated_data.get('CourseName', instance.CourseName)
instance.CalenderRefrence = validated_data.get('CalenderRefrence', instance.CalenderRefrence)
instance.AcademicCredit = validated_data.get('AcademicCredit', instance.AcademicCredit)
instance.DateCreated = validated_data.get('DateCreated', instance.DateCreated)
instance.save()
return instance
class Meta:
model = Course
fields = (
'ModelID',
'CourseID',
'CourseHours',
'CourseName',
'CalenderRefrence',
'AcademicCredit',
'DateCreated'
)
class TextbookSerializer(serializers.ModelSerializer):
# ModelID = serializers.CharField(max_length=100, required=True)
CourseID = serializers.CharField(max_length=100, required=True)
TITLE = serializers.CharField(max_length=100, required=False)
Publisher = serializers.CharField(max_length=100, required=False)
Author = serializers.CharField(max_length=100, required=False)
Edition = serializers.CharField(max_length=100, required=False)
type = serializers.CharField(max_length=100, required=False)
def create(self, validated_data):
return Textbook.objects.create(
ModelID=validated_data.get('ModelID'),
CourseID=validated_data.get('CourseID'),
TITLE=validated_data.get('TITLE'),
Publisher=validated_data.get('Publisher'),
Author=validated_data.get('Author'),
Edition=validated_data.get('Edition'),
type=validated_data.get('type'),
)
def update(self, instance, validated_data):
instance.ModelID = validated_data.get('ModelID', instance.ModelID)
instance.CourseID = validated_data.get('CourseID', instance.CourseID)
instance.TITLE = validated_data.get('TITLE', instance.TITLE)
instance.Publisher = validated_data.get('Publisher', instance.Publisher)
instance.Author = validated_data.get('Author', instance.Author)
instance.Edition = validated_data.get('Edition', instance.Edition)
instance.type = validated_data.get('type', instance.type)
instance.save()
return instance
class Meta:
model = Textbook
fields = (
'ModelID',
'CourseID',
'TITLE',
'Publisher',
'Author',
'Edition',
'type'
)
class AuWeightSerializer(serializers.ModelSerializer):
# ModelID = serializers.CharField(max_length=100, required=True)
CourseID = serializers.CharField(max_length=100, required=True)
Category = serializers.CharField(max_length=100, required=True)
AU = serializers.IntegerField(required=False)
def create(self, validated_data):
return AuWeight.objects.create(
ModelID=validated_data.get('ModelID'),
CourseID=validated_data.get('CourseID'),
Category=validated_data.get('Category'),
AU=validated_data.get('AU'),
)
def update(self, instance, validated_data):
instance.ModelID = validated_data.get('ModelID', instance.ModelID)
instance.CourseID = validated_data.get('CourseID', instance.CourseID)
instance.Category = validated_data.get('Category', instance.Category)
instance.AU = validated_data.get('AU', instance.AU)
instance.save()
return instance
class Meta:
model = AuWeight
fields = (
'ModelID',
'CourseID',
'Category',
'AU'
)
class ContentCategorySerializer(serializers.ModelSerializer):
# ModelID = serializers.CharField(max_length=100, required=True)
CourseID = serializers.CharField(max_length=100, required=True)
CategoryType = serializers.CharField(max_length=100, required=True)
Element = serializers.CharField(max_length=100, required=True)
def create(self, validated_data):
return ContentCategory.objects.create(
ModelID=validated_data.get('ModelID'),
CourseID=validated_data.get('CourseID'),
CategoryType=validated_data.get('CategoryType'),
Element=validated_data.get('Element'),
)
def update(self, instance, validated_data):
instance.ModelID = validated_data.get('ModelID', instance.ModelID)
instance.CourseID = validated_data.get('CourseID', instance.CourseID)
instance.CategoryType = validated_data.get('CategoryType', instance.CategoryType)
instance.Element = validated_data.get('Element', instance.Element)
instance.save()
return instance
class Meta:
model = ContentCategory
fields = (
'ModelID',
'CourseID',
'CategoryType',
'Element'
)
class LabSerializer(serializers.ModelSerializer):
# ModelID = serializers.CharField(max_length=100, required=True)
CourseID = serializers.CharField(max_length=100, required=True)
LabNum = serializers.CharField(max_length=100, required=True)
NumberOfLabs = serializers.IntegerField(required=False)
LabType = serializers.CharField(max_length=100, required=True)
SafetyExamined = serializers.CharField(max_length=100, required=True)
SafetyTaught = serializers.CharField(max_length=100, required=True)
FName = serializers.CharField(max_length=100, required=True)
LName = serializers.CharField(max_length=100, required=True)
Phone = serializers.CharField(max_length=100, required=True)
Office = serializers.CharField(max_length=100, required=True)
Email = serializers.CharField(max_length=100, required=True)
def create(self, validated_data):
return Lab.objects.create(
ModelID=validated_data.get('ModelID'),
CourseID=validated_data.get('CourseID'),
LabNum=validated_data.get('LabNum'),
NumberOfLabs=validated_data.get('NumberOfLabs'),
LabType=validated_data.get('LabType'),
SafetyExamined=validated_data.get('SafetyExamined'),
SafetyTaught=validated_data.get('SafetyTaught'),
FName=validated_data.get('FName'),
LName=validated_data.get('LName'),
Phone=validated_data.get('Phone'),
Office=validated_data.get('Office'),
Email=validated_data.get('Email'),
)
def update(self, instance, validated_data):
instance.ModelID = validated_data.get('ModelID', instance.ModelID)
instance.CourseID = validated_data.get('CourseID', instance.CourseID)
instance.LabNum = validated_data.get('LabNum', instance.LabNum)
instance.NumberOfLabs = validated_data.get('NumberOfLabs', instance.NumberOfLabs)
instance.LabType = validated_data.get('LabType', instance.LabType)
instance.SafetyExamined = validated_data.get('SafetyExamined', instance.SafetyExamined)
instance.SafetyTaught = validated_data.get('SafetyTaught', instance.SafetyTaught)
instance.FName = validated_data.get('FName', instance.FName)
instance.LName = validated_data.get('LName', instance.LName)
instance.Phone = validated_data.get('Phone', instance.Phone)
instance.Office = validated_data.get('Office', instance.Office)
instance.Email = validated_data.get('Email', instance.Email)
instance.save()
return instance
class Meta:
model = Lab
fields = (
'ModelID',
'CourseID',
'LabNum',
'NumberOfLabs',
'LabType',
'SafetyExamined',
'SafetyTaught',
'FName',
'LName',
'Phone',
'Office',
'Email'
)
class SectionSerializer(serializers.ModelSerializer):
# ModelID = serializers.CharField(max_length=100, required=True)
CourseID = serializers.CharField(max_length=100, required=True)
SectionNumber = serializers.CharField(max_length=100, required=False)
Students = serializers.IntegerField(required=False)
Hours = serializers.IntegerField(required=False)
type = serializers.CharField(max_length=100, required=True)
def create(self, validated_data):
return Section.objects.create(
ModelID=validated_data.get('ModelID'),
CourseID=validated_data.get('CourseID'),
SectionNumber=validated_data.get('SectionNumber'),
Students=validated_data.get('Students'),
Hours=validated_data.get('Hours'),
type=validated_data.get('type'),
)
def update(self, instance, validated_data):
instance.ModelID = validated_data.get('ModelID', instance.ModelID)
instance.CourseID = validated_data.get('CourseID', instance.CourseID)
instance.SectionNumber = validated_data.get('SectionNumber', instance.SectionNumber)
instance.Students = validated_data.get('Students', instance.Students)
instance.Hours = validated_data.get('Hours', instance.Hours)
instance.type = validated_data.get('type', instance.type)
instance.save()
return instance
class Meta:
model = Section
fields = (
'ModelID',
'CourseID',
'SectionNumber',
'Students',
'Hours',
'type'
)
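
# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): how a serializer such as
# SectionSerializer above is typically driven. The sample payload values and
# the helper name are invented for illustration; whether ModelID is required
# depends on the underlying model definition, which is not shown here.
# ---------------------------------------------------------------------------
def _example_section_roundtrip():
    """Validate a sample payload and persist it through SectionSerializer."""
    serializer = SectionSerializer(data={
        'ModelID': 'M1',
        'CourseID': 'ENSF409',
        'SectionNumber': '01',
        'Students': 120,
        'Hours': 3,
        'type': 'LEC',
    })
    if serializer.is_valid():
        # save() dispatches to the create()/update() methods defined above
        return serializer.save()
    return serializer.errors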
| [
"rest_framework.serializers.IntegerField",
"rest_framework.serializers.CharField"
] | [((206, 258), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (227, 258), False, 'from rest_framework import serializers\n'), ((271, 324), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (292, 324), False, 'from rest_framework import serializers\n'), ((337, 390), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (358, 390), False, 'from rest_framework import serializers\n'), ((403, 456), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (424, 456), False, 'from rest_framework import serializers\n'), ((470, 523), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (491, 523), False, 'from rest_framework import serializers\n'), ((536, 589), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (557, 589), False, 'from rest_framework import serializers\n'), ((2191, 2243), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (2212, 2243), False, 'from rest_framework import serializers\n'), ((2261, 2315), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(5000)', 'required': '(False)'}), '(max_length=5000, required=False)\n', (2282, 2315), False, 'from rest_framework import serializers\n'), ((2334, 2388), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(5000)', 'required': '(False)'}), '(max_length=5000, required=False)\n', (2355, 2388), False, 'from rest_framework import serializers\n'), ((2413, 2467), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(5000)', 'required': '(False)'}), '(max_length=5000, required=False)\n', (2434, 2467), False, 'from rest_framework import serializers\n'), ((2482, 2535), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (2503, 2535), False, 'from rest_framework import serializers\n'), ((3936, 3988), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (3957, 3988), False, 'from rest_framework import serializers\n'), ((4005, 4058), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (4026, 4058), False, 'from rest_framework import serializers\n'), ((4082, 4135), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (4103, 4135), False, 'from rest_framework import serializers\n'), ((4149, 4189), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'required': '(False)'}), '(required=False)\n', (4173, 4189), False, 'from rest_framework import serializers\n'), ((5625, 5677), 
'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (5646, 5677), False, 'from rest_framework import serializers\n'), ((5695, 5735), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'required': '(False)'}), '(required=False)\n', (5719, 5735), False, 'from rest_framework import serializers\n'), ((5780, 5833), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(500)', 'required': '(False)'}), '(max_length=500, required=False)\n', (5801, 5833), False, 'from rest_framework import serializers\n'), ((5887, 5940), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (5908, 5940), False, 'from rest_framework import serializers\n'), ((5962, 6015), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (5983, 6015), False, 'from rest_framework import serializers\n'), ((7456, 7508), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (7477, 7508), False, 'from rest_framework import serializers\n'), ((7526, 7579), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (7547, 7579), False, 'from rest_framework import serializers\n'), ((7591, 7644), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (7612, 7644), False, 'from rest_framework import serializers\n'), ((7656, 7709), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (7677, 7709), False, 'from rest_framework import serializers\n'), ((7725, 7778), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (7746, 7778), False, 'from rest_framework import serializers\n'), ((9075, 9127), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (9096, 9127), False, 'from rest_framework import serializers\n'), ((9145, 9185), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'required': '(False)'}), '(required=False)\n', (9169, 9185), False, 'from rest_framework import serializers\n'), ((9231, 9271), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'required': '(False)'}), '(required=False)\n', (9255, 9271), False, 'from rest_framework import serializers\n'), ((9318, 9371), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (9339, 9371), False, 'from rest_framework import serializers\n'), ((10597, 10649), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (10618, 10649), False, 'from rest_framework import serializers\n'), ((10667, 10720), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': 
'(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (10688, 10720), False, 'from rest_framework import serializers\n'), ((10733, 10786), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (10754, 10786), False, 'from rest_framework import serializers\n'), ((10799, 10852), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (10820, 10852), False, 'from rest_framework import serializers\n'), ((10865, 10918), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (10886, 10918), False, 'from rest_framework import serializers\n'), ((10932, 10985), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (10953, 10985), False, 'from rest_framework import serializers\n'), ((10998, 11051), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (11019, 11051), False, 'from rest_framework import serializers\n'), ((12609, 12661), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (12630, 12661), False, 'from rest_framework import serializers\n'), ((12680, 12733), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (12701, 12733), False, 'from rest_framework import serializers\n'), ((12783, 12836), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (12804, 12836), False, 'from rest_framework import serializers\n'), ((12879, 12932), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (12900, 12932), False, 'from rest_framework import serializers\n'), ((12945, 12998), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (12966, 12998), False, 'from rest_framework import serializers\n'), ((13012, 13065), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (13033, 13065), False, 'from rest_framework import serializers\n'), ((13078, 13131), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (13099, 13131), False, 'from rest_framework import serializers\n'), ((14695, 14747), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (14716, 14747), False, 'from rest_framework import serializers\n'), ((14766, 14819), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (14787, 14819), False, 'from rest_framework import serializers\n'), ((14873, 14926), 
'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (14894, 14926), False, 'from rest_framework import serializers\n'), ((14985, 15038), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (15006, 15038), False, 'from rest_framework import serializers\n'), ((15060, 15100), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'required': '(False)'}), '(required=False)\n', (15084, 15100), False, 'from rest_framework import serializers\n'), ((15161, 15214), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (15182, 15214), False, 'from rest_framework import serializers\n'), ((16819, 16871), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (16840, 16871), False, 'from rest_framework import serializers\n'), ((16884, 16937), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (16905, 16937), False, 'from rest_framework import serializers\n'), ((16954, 17007), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (16975, 17007), False, 'from rest_framework import serializers\n'), ((17021, 17074), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (17042, 17074), False, 'from rest_framework import serializers\n'), ((17089, 17142), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (17110, 17142), False, 'from rest_framework import serializers\n'), ((17154, 17207), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (17175, 17207), False, 'from rest_framework import serializers\n'), ((18630, 18682), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (18651, 18682), False, 'from rest_framework import serializers\n'), ((18698, 18750), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (18719, 18750), False, 'from rest_framework import serializers\n'), ((18760, 18800), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'required': '(False)'}), '(required=False)\n', (18784, 18800), False, 'from rest_framework import serializers\n'), ((19783, 19835), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (19804, 19835), False, 'from rest_framework import serializers\n'), ((19855, 19907), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (19876, 19907), False, 'from rest_framework import serializers\n'), ((19922, 19974), 
'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (19943, 19974), False, 'from rest_framework import serializers\n'), ((21013, 21065), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (21034, 21065), False, 'from rest_framework import serializers\n'), ((21079, 21131), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (21100, 21131), False, 'from rest_framework import serializers\n'), ((21151, 21191), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'required': '(False)'}), '(required=False)\n', (21175, 21191), False, 'from rest_framework import serializers\n'), ((21206, 21258), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (21227, 21258), False, 'from rest_framework import serializers\n'), ((21280, 21332), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (21301, 21332), False, 'from rest_framework import serializers\n'), ((21352, 21404), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (21373, 21404), False, 'from rest_framework import serializers\n'), ((21417, 21469), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (21438, 21469), False, 'from rest_framework import serializers\n'), ((21482, 21534), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (21503, 21534), False, 'from rest_framework import serializers\n'), ((21547, 21599), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (21568, 21599), False, 'from rest_framework import serializers\n'), ((21613, 21665), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (21634, 21665), False, 'from rest_framework import serializers\n'), ((21678, 21730), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (21699, 21730), False, 'from rest_framework import serializers\n'), ((23953, 24005), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (23974, 24005), False, 'from rest_framework import serializers\n'), ((24026, 24079), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (24047, 24079), False, 'from rest_framework import serializers\n'), ((24095, 24135), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'required': '(False)'}), '(required=False)\n', (24119, 24135), False, 'from rest_framework import serializers\n'), ((24148, 24188), 
'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'required': '(False)'}), '(required=False)\n', (24172, 24188), False, 'from rest_framework import serializers\n'), ((24200, 24252), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (24221, 24252), False, 'from rest_framework import serializers\n')] |
import cv2
import numpy as np
from keras.models import model_from_json
from keras.preprocessing.image import img_to_array
#load model
model = model_from_json(open("fer.json", "r").read()) #change the path according to your files
#load weights
model.load_weights('fer.h5') #change the path according to your files
detection_model_path="C:/Users/panur/.spyder-py3/FaceMaskDetection/cascadeH5.xml" #change the path according to your files
face_detection = cv2.CascadeClassifier(detection_model_path)
ret=1
flag=True
cap = cv2.VideoCapture(0) #default 0 for webcam
frameRate = cap.get(cv2.CAP_PROP_FPS) # frame rate of the capture (property id 30 is not the FPS property)
while(cap.isOpened()):
ret, fm=cap.read()
fm = cv2.resize(fm, (224, 224))
file = cv2.cvtColor(fm, cv2.COLOR_BGR2RGB)
orig_frame = file
frame = file
faces = face_detection.detectMultiScale(frame,scaleFactor=1.1,minNeighbors=5,minSize=(30,30),flags=cv2.CASCADE_SCALE_IMAGE)
if len(faces) :
faces = sorted(faces, reverse=True,key=lambda x: (x[2] - x[0]) * (x[3] - x[1]))[0]
(fX, fY, fW, fH) = faces
roi = frame[fY:fY + fH, fX:fX + fW]
        roi = cv2.resize(roi, (48, 48))
        roi = roi.astype("float") / 255.0   # normalize the face crop, not the whole frame
        roi = img_to_array(roi)
        roi = np.expand_dims(roi, axis=0)
        preds=model.predict_classes(roi)[0]
        if preds==0:
            print("Mask worn")
            test='Mask worn'
        elif preds==1:
            print("Danger: No Mask")
            test='Danger: No Mask'
        cv2.putText(fm,test, (fX-15, fY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
        cv2.rectangle(fm, (fX, fY), (fX + fW, fY + fH),(0, 0, 255), 2)
cv2.imshow("Live Video", fm)
k=cv2.waitKey(25) #Press ESC to stop/exit
if k == 27:
ret=0
break
print("closed")
cap.release()
cv2.destroyAllWindows() | [
"cv2.rectangle",
"keras.preprocessing.image.img_to_array",
"cv2.imshow",
"cv2.putText",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"numpy.expand_dims",
"cv2.CascadeClassifier",
"cv2.resize",
"cv2.waitKey"
] | [((472, 515), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['detection_model_path'], {}), '(detection_model_path)\n', (493, 515), False, 'import cv2\n'), ((564, 583), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (580, 583), False, 'import cv2\n'), ((1747, 1770), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1768, 1770), False, 'import cv2\n'), ((685, 711), 'cv2.resize', 'cv2.resize', (['fm', '(224, 224)'], {}), '(fm, (224, 224))\n', (695, 711), False, 'import cv2\n'), ((720, 755), 'cv2.cvtColor', 'cv2.cvtColor', (['fm', 'cv2.COLOR_BGR2RGB'], {}), '(fm, cv2.COLOR_BGR2RGB)\n', (732, 755), False, 'import cv2\n'), ((1596, 1624), 'cv2.imshow', 'cv2.imshow', (['"""Live Video"""', 'fm'], {}), "('Live Video', fm)\n", (1606, 1624), False, 'import cv2\n'), ((1629, 1644), 'cv2.waitKey', 'cv2.waitKey', (['(25)'], {}), '(25)\n', (1640, 1644), False, 'import cv2\n'), ((1101, 1129), 'cv2.resize', 'cv2.resize', (['roi', '(48, 48)', '(3)'], {}), '(roi, (48, 48), 3)\n', (1111, 1129), False, 'import cv2\n'), ((1184, 1201), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['roi'], {}), '(roi)\n', (1196, 1201), False, 'from keras.preprocessing.image import img_to_array\n'), ((1212, 1239), 'numpy.expand_dims', 'np.expand_dims', (['roi'], {'axis': '(0)'}), '(roi, axis=0)\n', (1226, 1239), True, 'import numpy as np\n'), ((1435, 1529), 'cv2.putText', 'cv2.putText', (['fm', 'test', '(fX - 15, fY - 10)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.45)', '(0, 0, 255)', '(2)'], {}), '(fm, test, (fX - 15, fY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (\n 0, 0, 255), 2)\n', (1446, 1529), False, 'import cv2\n'), ((1526, 1589), 'cv2.rectangle', 'cv2.rectangle', (['fm', '(fX, fY)', '(fX + fW, fY + fH)', '(0, 0, 255)', '(2)'], {}), '(fm, (fX, fY), (fX + fW, fY + fH), (0, 0, 255), 2)\n', (1539, 1589), False, 'import cv2\n')] |
#!/usr/bin/env python
import logging
from datetime import datetime
logging.basicConfig(level=logging.WARNING)
import os
import urllib2, base64, json
import dateutil.parser
def from_ISO8601( str_iso8601 ):
return dateutil.parser.parse(str_iso8601)
def to_ISO8601( timestamp ):
return timestamp.isoformat()
def convert_time_strings(toggl_dicts):
timestamp_fields = ['at',
'created_at',
'start',
'stop']
result = []
for tdict in toggl_dicts:
d = tdict
for tsf in timestamp_fields:
if tdict.has_key(tsf):
d[tsf] = from_ISO8601(tdict[tsf])
result.append(d)
return result
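
# Editor's note: convert_time_strings() above replaces the ISO-8601 strings in
# the 'at', 'created_at', 'start' and 'stop' keys of each Toggl dict with
# timezone-aware datetime objects (via dateutil), e.g. '2018-06-01T09:00:00+00:00'
# becomes datetime(2018, 6, 1, 9, 0, tzinfo=tzutc()).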
class Toggl:
def __init__(self, api_token=None):
self.log = logging.getLogger("Toggl")
self.log.setLevel(logging.DEBUG)
self.toggl_domain = "www.toggl.com"
self.toggl_api = "https://%s/api/v8/" % self.toggl_domain
self.report_api = "https://%s/reports/api/v2" % self.toggl_domain
self._api_token = api_token
# Search for an Toggl API token in a list of files
# No validation of the collected token
# TODO: encryption of tokenfiles could be nice
tokenfiles = [os.path.expanduser(f) for f in ['.toggltoken', '~/.toggltoken', '~/.togglapplet/.toggltoken']]
for tf in tokenfiles:
if os.path.exists( tf ):
try:
f = open(tf)
self._api_token = f.read().strip()
f.close()
except:
self.log.exception("Could not read token from " + tf)
self._api_token = None
if self._api_token: break
def send_request( self, api_call_url ):
''' Send a request or command to Toggl, retrieve and parse the json response.
returns a list of dictionary objects.
Throws an exception if the http response is not OK (200) or if no JSON can be decoded from the response.
'''
request = urllib2.Request( api_call_url )
self.log.debug("http request url = \'%s\'", request.get_full_url())
# username:password
# Use base64.standard_b64encode instead of replace...
user_pass = base64.encodestring('%s:%s' % (self._api_token, 'api_token')).replace('\n', '')
request.add_header("Authorization", "Basic %s" % user_pass)
opener = urllib2.build_opener(
urllib2.HTTPHandler(),
urllib2.HTTPSHandler(),
urllib2.ProxyHandler({'https': 'http://wwwcache.rl.ac.uk:8080'}))
urllib2.install_opener(opener)
result = urllib2.urlopen(request, timeout = 3.0) # with no data, this is a http GET.
self.log.debug("http request result: code=%s url=\'%s\'", result.getcode(), result.geturl())
js = json.load(result)
#self.log.debug("JSON raw result: %s" % json.dumps(js,sort_keys=True, indent=4, separators=(',', ': ')))
return js
def get_workspaces(self):
self.log.debug("get_workspaces()")
js = self.send_request(self.toggl_api + "workspaces")
js = convert_time_strings(js)
return js
def get_default_workspace(self):
self.log.debug("get_default_workspace()")
wid = self.get_user()['default_wid']
js = self.send_request(self.toggl_api + "workspaces/%s"%str(wid))
js = convert_time_strings([js['data']])
return js[0]
def get_default_workspace_id(self):
self.log.debug("get_default_workspace_id()")
ws = self.get_default_workspace()
self.log.debug(ws)
return ws['id']
def get_projects(self, wid=None):
self.log.debug("get_projects(wid=%s)"%str(wid))
if wid:
js = self.send_request(self.toggl_api + "workspaces/%s/projects"%str(wid))
else:
js = []
for w in self.get_workspaces():
js += self.send_request(self.toggl_api + "workspaces/%s/projects"%str(w['id']))
js = convert_time_strings(js)
return js
def get_current_entry(self):
'''get the currently active time entry'''
self.log.debug("get_current_entry()")
js = self.send_request(self.toggl_api + "time_entries/current")
self.log.debug( js )
js = convert_time_strings(js['data'])
return js
def get_range_entries(self, start_end=None):
'''Get a list of entries in a range (max 1000 entries).
If no start-end range is defined, the default is to return all entries
from the last 9 days.
start_end: tuple with start and end date'''
self.log.debug("get_range_entries()")
query = "time_entries"
if start_end:
start, end = start_end
            if isinstance(start, datetime):
                start = to_ISO8601(start)
            if isinstance(end, datetime):
                end = to_ISO8601(end)
query += "?start_date=%s&end_date=%s"%(start, end)
js = self.send_request(self.toggl_api + query)
js = convert_time_strings(js)
return js
def get_user(self):
self.log.debug("get_user()")
js = self.send_request(self.toggl_api + "me")
return js['data']
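
# ---------------------------------------------------------------------------
# Editor's sketch of typical usage of the Toggl class above. It assumes a
# valid API token can be found in one of the token files searched by
# __init__; the logger calls are only there so the output appears under the
# WARNING level configured at the top of this script.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    toggl = Toggl()
    log = logging.getLogger("Toggl.example")
    log.warning("default workspace id: %s", toggl.get_default_workspace_id())
    for entry in toggl.get_range_entries():
        log.warning("time entry: %s", entry)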
| [
"logging.basicConfig",
"logging.getLogger",
"os.path.exists",
"urllib2.urlopen",
"urllib2.ProxyHandler",
"urllib2.Request",
"urllib2.HTTPHandler",
"urllib2.install_opener",
"base64.encodestring",
"urllib2.HTTPSHandler",
"json.load",
"os.path.expanduser"
] | [((68, 110), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.WARNING'}), '(level=logging.WARNING)\n', (87, 110), False, 'import logging\n'), ((792, 818), 'logging.getLogger', 'logging.getLogger', (['"""Toggl"""'], {}), "('Toggl')\n", (809, 818), False, 'import logging\n'), ((2098, 2127), 'urllib2.Request', 'urllib2.Request', (['api_call_url'], {}), '(api_call_url)\n', (2113, 2127), False, 'import urllib2, base64, json\n'), ((2676, 2706), 'urllib2.install_opener', 'urllib2.install_opener', (['opener'], {}), '(opener)\n', (2698, 2706), False, 'import urllib2, base64, json\n'), ((2724, 2761), 'urllib2.urlopen', 'urllib2.urlopen', (['request'], {'timeout': '(3.0)'}), '(request, timeout=3.0)\n', (2739, 2761), False, 'import urllib2, base64, json\n'), ((2914, 2931), 'json.load', 'json.load', (['result'], {}), '(result)\n', (2923, 2931), False, 'import urllib2, base64, json\n'), ((1273, 1294), 'os.path.expanduser', 'os.path.expanduser', (['f'], {}), '(f)\n', (1291, 1294), False, 'import os\n'), ((1413, 1431), 'os.path.exists', 'os.path.exists', (['tf'], {}), '(tf)\n', (1427, 1431), False, 'import os\n'), ((2522, 2543), 'urllib2.HTTPHandler', 'urllib2.HTTPHandler', ([], {}), '()\n', (2541, 2543), False, 'import urllib2, base64, json\n'), ((2561, 2583), 'urllib2.HTTPSHandler', 'urllib2.HTTPSHandler', ([], {}), '()\n', (2581, 2583), False, 'import urllib2, base64, json\n'), ((2601, 2665), 'urllib2.ProxyHandler', 'urllib2.ProxyHandler', (["{'https': 'http://wwwcache.rl.ac.uk:8080'}"], {}), "({'https': 'http://wwwcache.rl.ac.uk:8080'})\n", (2621, 2665), False, 'import urllib2, base64, json\n'), ((2317, 2378), 'base64.encodestring', 'base64.encodestring', (["('%s:%s' % (self._api_token, 'api_token'))"], {}), "('%s:%s' % (self._api_token, 'api_token'))\n", (2336, 2378), False, 'import urllib2, base64, json\n')] |
from typing import Optional
import torch
from gs_divergence import gs_div
def symmetrized_gs_div(
input: torch.Tensor,
target: torch.Tensor,
alpha: float = -1,
lmd: float = 0.5,
reduction: Optional[str] = 'sum',
) -> torch.Tensor:
lhs = gs_div(input, target, alpha=alpha, lmd=lmd, reduction=reduction)
rhs = gs_div(target, input, alpha=alpha, lmd=lmd, reduction=reduction)
return (lhs + rhs) / 2
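
# ---------------------------------------------------------------------------
# Editor's sketch: calling the symmetrised divergence on two batches of
# categorical distributions. The softmax normalisation below is an assumption
# about the expected input format; gs_div's actual requirements are defined by
# the gs_divergence package and are not shown here.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    p = torch.softmax(torch.randn(4, 10), dim=-1)
    q = torch.softmax(torch.randn(4, 10), dim=-1)
    print(symmetrized_gs_div(p, q, alpha=-1, lmd=0.5, reduction='sum'))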
| [
"gs_divergence.gs_div"
] | [((264, 328), 'gs_divergence.gs_div', 'gs_div', (['input', 'target'], {'alpha': 'alpha', 'lmd': 'lmd', 'reduction': 'reduction'}), '(input, target, alpha=alpha, lmd=lmd, reduction=reduction)\n', (270, 328), False, 'from gs_divergence import gs_div\n'), ((339, 403), 'gs_divergence.gs_div', 'gs_div', (['target', 'input'], {'alpha': 'alpha', 'lmd': 'lmd', 'reduction': 'reduction'}), '(target, input, alpha=alpha, lmd=lmd, reduction=reduction)\n', (345, 403), False, 'from gs_divergence import gs_div\n')] |
'''
* Copyright (c) 2022, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
* By <NAME>
'''
import warnings
warnings.filterwarnings("ignore")
from models.vit import VisionTransformer, interpolate_pos_embed
from models.med import BertConfig, BertModel, BertLMHeadModel
from transformers import BertTokenizer
import torch
from torch import nn
import torch.nn.functional as F
import os
from urllib.parse import urlparse
from timm.models.hub import download_cached_file
class BLIP_Base(nn.Module):
def __init__(self,
med_config = 'configs/med_config.json',
image_size = 224,
vit = 'base',
vit_grad_ckpt = False,
vit_ckpt_layer = 0,
):
"""
Args:
med_config (str): path for the mixture of encoder-decoder model's configuration file
image_size (int): input image size
vit (str): model size of vision transformer
"""
super().__init__()
self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer)
self.tokenizer = init_tokenizer()
med_config = BertConfig.from_json_file(med_config)
med_config.encoder_width = vision_width
self.text_encoder = BertModel(config=med_config, add_pooling_layer=False)
def forward(self, image, caption, mode):
assert mode in ['image', 'text', 'multimodal'], "mode parameter must be image, text, or multimodal"
text = self.tokenizer(caption, return_tensors="pt").to(image.device)
if mode=='image':
# return image features
image_embeds = self.visual_encoder(image)
return image_embeds
elif mode=='text':
# return text features
text_output = self.text_encoder(text.input_ids, attention_mask = text.attention_mask,
return_dict = True, mode = 'text')
return text_output.last_hidden_state
elif mode=='multimodal':
# return multimodel features
image_embeds = self.visual_encoder(image)
image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device)
text.input_ids[:,0] = self.tokenizer.enc_token_id
output = self.text_encoder(text.input_ids,
attention_mask = text.attention_mask,
encoder_hidden_states = image_embeds,
encoder_attention_mask = image_atts,
return_dict = True,
)
return output.last_hidden_state
class BLIP_Decoder(nn.Module):
def __init__(self,
med_config = 'configs/med_config.json',
image_size = 384,
vit = 'base',
vit_grad_ckpt = False,
vit_ckpt_layer = 0,
prompt = 'a picture of ',
):
"""
Args:
med_config (str): path for the mixture of encoder-decoder model's configuration file
image_size (int): input image size
vit (str): model size of vision transformer
"""
super().__init__()
self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer)
self.tokenizer = init_tokenizer()
med_config = BertConfig.from_json_file(med_config)
med_config.encoder_width = vision_width
self.text_decoder = BertLMHeadModel(config=med_config)
self.prompt = prompt
self.prompt_length = len(self.tokenizer(self.prompt).input_ids)-1
def forward(self, image, caption):
image_embeds = self.visual_encoder(image)
image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device)
text = self.tokenizer(caption, padding='longest', truncation=True, max_length=40, return_tensors="pt").to(image.device)
text.input_ids[:,0] = self.tokenizer.bos_token_id
decoder_targets = text.input_ids.masked_fill(text.input_ids == self.tokenizer.pad_token_id, -100)
decoder_targets[:,:self.prompt_length] = -100
decoder_output = self.text_decoder(text.input_ids,
attention_mask = text.attention_mask,
encoder_hidden_states = image_embeds,
encoder_attention_mask = image_atts,
labels = decoder_targets,
return_dict = True,
)
loss_lm = decoder_output.loss
return loss_lm
def generate(self, image, sample=False, num_beams=5, max_length=30, min_length=10, top_p=0.9, repetition_penalty=1.0):
image_embeds = self.visual_encoder(image)
if not sample:
image_embeds = image_embeds.repeat_interleave(num_beams,dim=0)
image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device)
model_kwargs = {"encoder_hidden_states": image_embeds, "encoder_attention_mask":image_atts}
prompt = [self.prompt] * image.size(0)
input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids.to(image.device)
input_ids[:,0] = self.tokenizer.bos_token_id
input_ids = input_ids[:, :-1]
if sample:
#nucleus sampling
outputs = self.text_decoder.generate(input_ids=input_ids,
max_length=max_length,
min_length=min_length,
do_sample=True,
top_p=top_p,
num_return_sequences=3,
eos_token_id=self.tokenizer.sep_token_id,
pad_token_id=self.tokenizer.pad_token_id,
repetition_penalty=1.1,
**model_kwargs)
else:
#beam search
outputs = self.text_decoder.generate(input_ids=input_ids,
max_length=max_length,
min_length=min_length,
num_beams=num_beams,
num_return_sequences=3,
eos_token_id=self.tokenizer.sep_token_id,
pad_token_id=self.tokenizer.pad_token_id,
repetition_penalty=repetition_penalty,
**model_kwargs)
captions = []
for output in outputs:
caption = self.tokenizer.decode(output, skip_special_tokens=True)
captions.append(caption[len(self.prompt):])
return captions
def blip_decoder(pretrained='',**kwargs):
model = BLIP_Decoder(**kwargs)
if pretrained:
model,msg = load_checkpoint(model,pretrained)
assert(len(msg.missing_keys)==0)
return model
def blip_feature_extractor(pretrained='',**kwargs):
model = BLIP_Base(**kwargs)
if pretrained:
model,msg = load_checkpoint(model,pretrained)
assert(len(msg.missing_keys)==0)
return model
def init_tokenizer():
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
tokenizer.add_special_tokens({'bos_token':'[DEC]'})
tokenizer.add_special_tokens({'additional_special_tokens':['[ENC]']})
tokenizer.enc_token_id = tokenizer.additional_special_tokens_ids[0]
return tokenizer
def create_vit(vit, image_size, use_grad_checkpointing=False, ckpt_layer=0, drop_path_rate=0):
assert vit in ['base', 'large'], "vit parameter must be base or large"
if vit=='base':
vision_width = 768
visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=12,
num_heads=12, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer,
drop_path_rate=0 or drop_path_rate
)
elif vit=='large':
vision_width = 1024
visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=24,
num_heads=16, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer,
drop_path_rate=0.1 or drop_path_rate
)
return visual_encoder, vision_width
def is_url(url_or_filename):
parsed = urlparse(url_or_filename)
return parsed.scheme in ("http", "https")
def load_checkpoint(model,url_or_filename):
if is_url(url_or_filename):
cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True)
checkpoint = torch.load(cached_file, map_location='cpu')
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location='cpu')
else:
raise RuntimeError('checkpoint url or path is invalid')
state_dict = checkpoint['model']
state_dict['visual_encoder.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'],model.visual_encoder)
if 'visual_encoder_m.pos_embed' in model.state_dict().keys():
state_dict['visual_encoder_m.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'],
model.visual_encoder_m)
for key in model.state_dict().keys():
if key in state_dict.keys():
if state_dict[key].shape!=model.state_dict()[key].shape:
del state_dict[key]
msg = model.load_state_dict(state_dict,strict=False)
print('load checkpoint from %s'%url_or_filename)
return model,msg
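
# ---------------------------------------------------------------------------
# Editor's sketch of the end-to-end captioning flow built from the pieces
# above. The checkpoint path is a placeholder, and the caller is assumed to
# supply an image tensor already resized/normalised to match how the
# pretrained weights were produced (image_size defaults to 384 in BLIP_Decoder).
# ---------------------------------------------------------------------------
def example_caption(image_tensor, checkpoint='path/to/blip_checkpoint.pth'):
    """image_tensor: a (batch, 3, 384, 384) float tensor on the target device."""
    model = blip_decoder(pretrained=checkpoint, image_size=384, vit='base')
    model.eval()
    with torch.no_grad():
        captions = model.generate(image_tensor, sample=False, num_beams=5,
                                   max_length=30, min_length=10)
    return captions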
| [
"models.med.BertConfig.from_json_file",
"timm.models.hub.download_cached_file",
"urllib.parse.urlparse",
"models.vit.interpolate_pos_embed",
"torch.load",
"transformers.BertTokenizer.from_pretrained",
"os.path.isfile",
"models.vit.VisionTransformer",
"models.med.BertModel",
"models.med.BertLMHeadModel",
"warnings.filterwarnings"
] | [((258, 291), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (281, 291), False, 'import warnings\n'), ((8347, 8397), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""bert-base-uncased"""'], {}), "('bert-base-uncased')\n", (8376, 8397), False, 'from transformers import BertTokenizer\n'), ((9726, 9751), 'urllib.parse.urlparse', 'urlparse', (['url_or_filename'], {}), '(url_or_filename)\n', (9734, 9751), False, 'from urllib.parse import urlparse\n'), ((10324, 10412), 'models.vit.interpolate_pos_embed', 'interpolate_pos_embed', (["state_dict['visual_encoder.pos_embed']", 'model.visual_encoder'], {}), "(state_dict['visual_encoder.pos_embed'], model.\n visual_encoder)\n", (10345, 10412), False, 'from models.vit import VisionTransformer, interpolate_pos_embed\n'), ((1384, 1421), 'models.med.BertConfig.from_json_file', 'BertConfig.from_json_file', (['med_config'], {}), '(med_config)\n', (1409, 1421), False, 'from models.med import BertConfig, BertModel, BertLMHeadModel\n'), ((1498, 1551), 'models.med.BertModel', 'BertModel', ([], {'config': 'med_config', 'add_pooling_layer': '(False)'}), '(config=med_config, add_pooling_layer=False)\n', (1507, 1551), False, 'from models.med import BertConfig, BertModel, BertLMHeadModel\n'), ((3882, 3919), 'models.med.BertConfig.from_json_file', 'BertConfig.from_json_file', (['med_config'], {}), '(med_config)\n', (3907, 3919), False, 'from models.med import BertConfig, BertModel, BertLMHeadModel\n'), ((3996, 4030), 'models.med.BertLMHeadModel', 'BertLMHeadModel', ([], {'config': 'med_config'}), '(config=med_config)\n', (4011, 4030), False, 'from models.med import BertConfig, BertModel, BertLMHeadModel\n'), ((8883, 9104), 'models.vit.VisionTransformer', 'VisionTransformer', ([], {'img_size': 'image_size', 'patch_size': '(16)', 'embed_dim': 'vision_width', 'depth': '(12)', 'num_heads': '(12)', 'use_grad_checkpointing': 'use_grad_checkpointing', 'ckpt_layer': 'ckpt_layer', 'drop_path_rate': '(0 or drop_path_rate)'}), '(img_size=image_size, patch_size=16, embed_dim=\n vision_width, depth=12, num_heads=12, use_grad_checkpointing=\n use_grad_checkpointing, ckpt_layer=ckpt_layer, drop_path_rate=0 or\n drop_path_rate)\n', (8900, 9104), False, 'from models.vit import VisionTransformer, interpolate_pos_embed\n'), ((9897, 9967), 'timm.models.hub.download_cached_file', 'download_cached_file', (['url_or_filename'], {'check_hash': '(False)', 'progress': '(True)'}), '(url_or_filename, check_hash=False, progress=True)\n', (9917, 9967), False, 'from timm.models.hub import download_cached_file\n'), ((9989, 10032), 'torch.load', 'torch.load', (['cached_file'], {'map_location': '"""cpu"""'}), "(cached_file, map_location='cpu')\n", (9999, 10032), False, 'import torch\n'), ((10043, 10074), 'os.path.isfile', 'os.path.isfile', (['url_or_filename'], {}), '(url_or_filename)\n', (10057, 10074), False, 'import os\n'), ((10525, 10617), 'models.vit.interpolate_pos_embed', 'interpolate_pos_embed', (["state_dict['visual_encoder_m.pos_embed']", 'model.visual_encoder_m'], {}), "(state_dict['visual_encoder_m.pos_embed'], model.\n visual_encoder_m)\n", (10546, 10617), False, 'from models.vit import VisionTransformer, interpolate_pos_embed\n'), ((9300, 9523), 'models.vit.VisionTransformer', 'VisionTransformer', ([], {'img_size': 'image_size', 'patch_size': '(16)', 'embed_dim': 'vision_width', 'depth': '(24)', 'num_heads': '(16)', 'use_grad_checkpointing': 'use_grad_checkpointing', 'ckpt_layer': 'ckpt_layer', 
'drop_path_rate': '(0.1 or drop_path_rate)'}), '(img_size=image_size, patch_size=16, embed_dim=\n vision_width, depth=24, num_heads=16, use_grad_checkpointing=\n use_grad_checkpointing, ckpt_layer=ckpt_layer, drop_path_rate=0.1 or\n drop_path_rate)\n', (9317, 9523), False, 'from models.vit import VisionTransformer, interpolate_pos_embed\n'), ((10105, 10152), 'torch.load', 'torch.load', (['url_or_filename'], {'map_location': '"""cpu"""'}), "(url_or_filename, map_location='cpu')\n", (10115, 10152), False, 'import torch\n')] |
#! /usr/local/bin/python3
# -*- coding: UTF-8 -*-
# Crawl girl-picture posts (妹子图) and save the images locally
import urllib.request
import os
import random
def open_url(url):
request = urllib.request.Request(url)
request.add_header('User-Agent',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:60.0) Gecko/20100101 Firefox/60.0')
    # Add proxies to change the outgoing IP
iplist = ['172.16.17.32:53281', '192.168.3.11:80', '192.168.3.11:9797', ]
proxy_support = urllib.request.ProxyHandler({'http': random.choice(iplist)})
    opener = urllib.request.build_opener(proxy_support) # create the opener
    urllib.request.install_opener(opener) # install it globally
    # fetch the page
response = urllib.request.urlopen(request)
html = response.read()
return html
# Get the image page id
def get_page(url):
html = open_url(url).decode('utf-8')
a = html.find('current-comment-page') + 23
b = html.find(']',a)
print("图片id是:",html[a:b])
return html[a:b]
# Collect the image addresses on the page at url into a list and return it
def find_imgs(url):
html = open_url(url).decode('utf-8')
print("html内容:", html)
imgs_addrs = []
a = html.find('img src=')
    while a != -1: # found the substring
        print("Found substring a ('img src=')")
        b = html.find('.gif',a,a+255)
        if b != -1:
            print("Found substring b ('.gif')")
            imgs_addrs.append(html[a+9:b+4])
        else:
            print("Substring b ('.gif') not found")
b = a + 9
a = html.find('img src=',b)
return imgs_addrs
# Save the images
def save_imgs(folder,imgs_addrs):
print("folder", folder, "imgs_addrs", imgs_addrs)
for each in imgs_addrs:
filename = each.split('/')[-1]
with open(filename,'wb') as f:
img = open_url(each)
f.write(img)
# Download the images
def download_img(folder='Image',pages=10):
if os.path.exists(folder) == False:
os.mkdir(folder)
os.chdir(folder)
url = 'http://jandan.net/ooxx/'
page_num = int(get_page(url))
for i in range(pages):
page_num -= i
page_url = url + 'page-' + str(page_num) + '#comments'
print("页面链接是:",page_url)
# 图片列表
imgs_addrs = find_imgs(page_url)
save_imgs(folder,imgs_addrs)
if __name__ == '__main__':
download_img() | [
"os.chdir",
"os.path.exists",
"random.choice",
"os.mkdir"
] | [((1692, 1714), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (1706, 1714), False, 'import os\n'), ((1733, 1749), 'os.mkdir', 'os.mkdir', (['folder'], {}), '(folder)\n', (1741, 1749), False, 'import os\n'), ((1758, 1774), 'os.chdir', 'os.chdir', (['folder'], {}), '(folder)\n', (1766, 1774), False, 'import os\n'), ((471, 492), 'random.choice', 'random.choice', (['iplist'], {}), '(iplist)\n', (484, 492), False, 'import random\n')] |
# copyright (c) 2018 paddlepaddle authors. all rights reserved.
#
# licensed under the apache license, version 2.0 (the "license");
# you may not use this file except in compliance with the license.
# you may obtain a copy of the license at
#
# http://www.apache.org/licenses/license-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the license is distributed on an "as is" basis,
# without warranties or conditions of any kind, either express or implied.
# see the license for the specific language governing permissions and
# limitations under the license.
from __future__ import print_function
import os
import numpy as np
import random
import unittest
import logging
import warnings
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid import core
from paddle.fluid.optimizer import AdamOptimizer
from paddle.fluid.framework import IrGraph
from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware
from paddle.fluid.contrib.slim.quantization import OutScaleForTrainingPass, OutScaleForInferencePass, QuantizationTransformPass
from paddle.fluid.dygraph.container import Sequential
from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
from paddle.nn.layer import ReLU, LeakyReLU, Sigmoid, Softmax, PReLU
from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D
from paddle.fluid.dygraph.nn import Pool2D
from paddle.fluid.log_helper import get_logger
from paddle.fluid.dygraph import nn
paddle.enable_static()
os.environ["CPU_NUM"] = "1"
if core.is_compiled_with_cuda():
fluid.set_flags({"FLAGS_cudnn_deterministic": True})
_logger = get_logger(
__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')
def get_vaild_warning_num(warning, w):
num = 0
for i in range(len(w)):
if warning in str(w[i].message):
num += 1
return num
def StaticLenet(data, num_classes=10, classifier_activation='softmax'):
conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
conv1 = fluid.layers.conv2d(
data,
num_filters=6,
filter_size=3,
stride=1,
padding=1,
param_attr=conv2d_w1_attr,
bias_attr=False)
batch_norm1 = layers.batch_norm(conv1)
relu1 = layers.relu(batch_norm1)
pool1 = fluid.layers.pool2d(
relu1, pool_size=2, pool_type='max', pool_stride=2)
conv2 = fluid.layers.conv2d(
pool1,
num_filters=16,
filter_size=5,
stride=1,
padding=0,
param_attr=conv2d_w2_attr,
bias_attr=conv2d_b2_attr)
batch_norm2 = layers.batch_norm(conv2)
prelu1 = layers.prelu(batch_norm2, mode='all')
pool2 = fluid.layers.pool2d(
prelu1, pool_size=2, pool_type='max', pool_stride=2)
fc1 = fluid.layers.fc(input=pool2,
size=120,
param_attr=fc_w1_attr,
bias_attr=fc_b1_attr)
leaky_relu1 = layers.leaky_relu(fc1, alpha=0.01)
fc2 = fluid.layers.fc(input=leaky_relu1,
size=84,
param_attr=fc_w2_attr,
bias_attr=fc_b2_attr)
sigmoid1 = layers.sigmoid(fc2)
fc3 = fluid.layers.fc(input=sigmoid1,
size=num_classes,
param_attr=fc_w3_attr,
bias_attr=fc_b3_attr)
softmax1 = layers.softmax(fc3, use_cudnn=True)
return softmax1
class ImperativeLenet(fluid.dygraph.Layer):
def __init__(self, num_classes=10):
super(ImperativeLenet, self).__init__()
conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
self.features = Sequential(
Conv2D(
in_channels=1,
out_channels=6,
kernel_size=3,
stride=1,
padding=1,
weight_attr=conv2d_w1_attr,
bias_attr=False),
BatchNorm2D(6),
ReLU(),
Pool2D(
pool_size=2, pool_type='max', pool_stride=2),
Conv2D(
in_channels=6,
out_channels=16,
kernel_size=5,
stride=1,
padding=0,
weight_attr=conv2d_w2_attr,
bias_attr=conv2d_b2_attr),
BatchNorm2D(16),
PReLU(),
MaxPool2D(
kernel_size=2, stride=2))
self.fc = Sequential(
Linear(
in_features=400,
out_features=120,
weight_attr=fc_w1_attr,
bias_attr=fc_b1_attr),
LeakyReLU(),
Linear(
in_features=120,
out_features=84,
weight_attr=fc_w2_attr,
bias_attr=fc_b2_attr),
Sigmoid(),
Linear(
in_features=84,
out_features=num_classes,
weight_attr=fc_w3_attr,
bias_attr=fc_b3_attr),
Softmax())
def forward(self, inputs):
x = self.features(inputs)
x = fluid.layers.flatten(x, 1)
x = self.fc(x)
return x
class TestImperativeOutSclae(unittest.TestCase):
def test_out_scale_acc(self):
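        # Train the same LeNet dynamically (ImperativeQuantAware) and statically
        # (QuantizationTransformPass + OutScaleForTrainingPass), then check that
        # the losses match and that both exported inference models carry the same
        # out_threshold attributes.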
def _build_static_lenet(main, startup, is_test=False, seed=1000):
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
main.random_seed = seed
startup.random_seed = seed
img = fluid.layers.data(
name='image', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(
name='label', shape=[1], dtype='int64')
prediction = StaticLenet(img)
if not is_test:
loss = fluid.layers.cross_entropy(
input=prediction, label=label)
avg_loss = fluid.layers.mean(loss)
else:
avg_loss = prediction
return img, label, avg_loss
reader = paddle.batch(
paddle.dataset.mnist.test(), batch_size=32, drop_last=True)
weight_quantize_type = 'abs_max'
activation_quantize_type = 'moving_average_abs_max'
param_init_map = {}
seed = 1000
lr = 0.001
dynamic_out_scale_list = []
static_out_scale_list = []
# imperative train
_logger.info(
"--------------------------dynamic graph qat--------------------------"
)
imperative_out_scale = ImperativeQuantAware(
weight_quantize_type=weight_quantize_type,
activation_quantize_type=activation_quantize_type)
with fluid.dygraph.guard():
np.random.seed(seed)
fluid.default_main_program().random_seed = seed
fluid.default_startup_program().random_seed = seed
lenet = ImperativeLenet()
fixed_state = {}
for name, param in lenet.named_parameters():
p_shape = param.numpy().shape
p_value = param.numpy()
if name.endswith("bias"):
value = np.zeros_like(p_value).astype('float32')
else:
value = np.random.normal(
loc=0.0, scale=0.01, size=np.product(p_shape)).reshape(
p_shape).astype('float32')
fixed_state[name] = value
param_init_map[param.name] = value
lenet.set_dict(fixed_state)
imperative_out_scale.quantize(lenet)
adam = AdamOptimizer(
learning_rate=lr, parameter_list=lenet.parameters())
dynamic_loss_rec = []
lenet.train()
for batch_id, data in enumerate(reader()):
x_data = np.array([x[0].reshape(1, 28, 28)
for x in data]).astype('float32')
y_data = np.array(
[x[1] for x in data]).astype('int64').reshape(-1, 1)
img = fluid.dygraph.to_variable(x_data)
label = fluid.dygraph.to_variable(y_data)
out = lenet(img)
loss = fluid.layers.cross_entropy(out, label)
avg_loss = fluid.layers.mean(loss)
avg_loss.backward()
adam.minimize(avg_loss)
lenet.clear_gradients()
dynamic_loss_rec.append(avg_loss.numpy()[0])
if batch_id % 100 == 0:
_logger.info('{}: {}'.format('loss', avg_loss.numpy()))
lenet.eval()
param_save_path = "test_save_quantized_model/lenet.pdparams"
save_dict = lenet.state_dict()
paddle.save(save_dict, param_save_path)
path = "./dynamic_outscale_infer_model/lenet"
dynamic_save_dir = "./dynamic_outscale_infer_model"
imperative_out_scale.save_quantized_model(
layer=lenet,
path=path,
input_spec=[
paddle.static.InputSpec(
shape=[None, 1, 28, 28], dtype='float32')
])
_logger.info(
"--------------------------static graph qat--------------------------"
)
static_loss_rec = []
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
else:
place = core.CPUPlace()
exe = fluid.Executor(place)
main = fluid.Program()
infer = fluid.Program()
startup = fluid.Program()
static_img, static_label, static_loss = _build_static_lenet(
main, startup, False, seed)
infer_img, _, infer_pre = _build_static_lenet(infer, startup, True,
seed)
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
opt = AdamOptimizer(learning_rate=lr)
opt.minimize(static_loss)
scope = core.Scope()
with fluid.scope_guard(scope):
exe.run(startup)
for param in main.all_parameters():
if "batch_norm" in param.name:
param_name = param.name.replace("norm", "norm2d")
elif 'prelu' in param.name:
param_name = param.name.replace("prelu", 'p_re_lu')
else:
param_name = param.name
param_tensor = scope.var(param.name).get_tensor()
param_tensor.set(param_init_map[param_name], place)
main_graph = IrGraph(core.Graph(main.desc), for_test=False)
infer_graph = IrGraph(core.Graph(infer.desc), for_test=True)
transform_pass = QuantizationTransformPass(
scope=scope,
place=place,
activation_quantize_type=activation_quantize_type,
weight_quantize_type=weight_quantize_type,
quantizable_op_type=['conv2d', 'depthwise_conv2d', 'mul'])
transform_pass.apply(main_graph)
transform_pass.apply(infer_graph)
outscale_pass = OutScaleForTrainingPass(scope=scope, place=place)
outscale_pass.apply(main_graph)
build_strategy = fluid.BuildStrategy()
build_strategy.fuse_all_reduce_ops = False
binary = fluid.CompiledProgram(main_graph.graph).with_data_parallel(
loss_name=static_loss.name, build_strategy=build_strategy)
feeder = fluid.DataFeeder(
feed_list=[static_img, static_label], place=place)
with fluid.scope_guard(scope):
for batch_id, data in enumerate(reader()):
loss_v, = exe.run(binary,
feed=feeder.feed(data),
fetch_list=[static_loss])
static_loss_rec.append(loss_v[0])
if batch_id % 100 == 0:
_logger.info('{}: {}'.format('loss', loss_v))
scale_inference_pass = OutScaleForInferencePass(scope=scope)
scale_inference_pass.apply(infer_graph)
save_program = infer_graph.to_program()
static_save_dir = "./static_outscale_infer_model"
with fluid.scope_guard(scope):
fluid.io.save_inference_model(
dirname=static_save_dir,
feeded_var_names=[infer_img.name],
target_vars=[infer_pre],
executor=exe,
main_program=save_program,
model_filename="lenet" + INFER_MODEL_SUFFIX,
params_filename="lenet" + INFER_PARAMS_SUFFIX)
rtol = 1e-05
atol = 1e-08
for i, (loss_d,
loss_s) in enumerate(zip(dynamic_loss_rec, static_loss_rec)):
diff = np.abs(loss_d - loss_s)
if diff > (atol + rtol * np.abs(loss_s)):
_logger.info(
"diff({}) at {}, dynamic loss = {}, static loss = {}".
format(diff, i, loss_d, loss_s))
break
self.assertTrue(
np.allclose(
np.array(dynamic_loss_rec),
np.array(static_loss_rec),
rtol=rtol,
atol=atol,
equal_nan=True),
msg='Failed to do the imperative qat.')
# load dynamic model
[dynamic_inference_program, feed_target_names, fetch_targets] = (
fluid.io.load_inference_model(
dirname=dynamic_save_dir,
executor=exe,
model_filename="lenet" + INFER_MODEL_SUFFIX,
params_filename="lenet" + INFER_PARAMS_SUFFIX))
# load static model
[static_inference_program, feed_target_names, fetch_targets] = (
fluid.io.load_inference_model(
dirname=static_save_dir,
executor=exe,
model_filename="lenet" + INFER_MODEL_SUFFIX,
params_filename="lenet" + INFER_PARAMS_SUFFIX))
dynamic_ops = dynamic_inference_program.global_block().ops
static_ops = static_inference_program.global_block().ops
for op in dynamic_ops[:]:
if op.type == "flatten2" or 'fake' in op.type:
dynamic_ops.remove(op)
for op in static_ops[:]:
if 'fake' in op.type:
static_ops.remove(op)
op_count = 0
for i in range(len(dynamic_ops)):
if dynamic_ops[i].has_attr("out_threshold"):
op_count += 1
self.assertTrue(dynamic_ops[i].type == static_ops[i].type)
self.assertTrue(dynamic_ops[i].attr("out_threshold") ==
static_ops[i].attr("out_threshold"))
self.assertTrue(op_count == 13)
class TestSaveQuanztizedModelFromCheckPoint(unittest.TestCase):
def test_save_quantized_model(self):
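        # Rebuild the quantized LeNet from the previously saved checkpoint, export
        # it as an inference model and compare its ops and out_threshold values
        # against the static reference model.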
weight_quantize_type = 'abs_max'
activation_quantize_type = 'moving_average_abs_max'
load_param_path = "test_save_quantized_model/lenet.pdparams"
path = "./dynamic_outscale_infer_model_from_checkpoint/lenet"
dynamic_model_save_dir = "./dynamic_outscale_infer_model_from_checkpoint"
static_model_save_dir = "./static_outscale_infer_model"
imperative_out_scale = ImperativeQuantAware(
weight_quantize_type=weight_quantize_type,
activation_quantize_type=activation_quantize_type)
with fluid.dygraph.guard():
lenet = ImperativeLenet()
load_dict = paddle.load(load_param_path)
imperative_out_scale.quantize(lenet)
lenet.set_dict(load_dict)
imperative_out_scale.save_quantized_model(
layer=lenet,
path=path,
input_spec=[
paddle.static.InputSpec(
shape=[None, 1, 28, 28], dtype='float32')
])
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
else:
place = core.CPUPlace()
exe = fluid.Executor(place)
# load dynamic model
[dynamic_inference_program, feed_target_names, fetch_targets] = (
fluid.io.load_inference_model(
dirname=dynamic_model_save_dir,
executor=exe,
model_filename="lenet" + INFER_MODEL_SUFFIX,
params_filename="lenet" + INFER_PARAMS_SUFFIX))
# load static model
[static_inference_program, feed_target_names, fetch_targets] = (
fluid.io.load_inference_model(
dirname=static_model_save_dir,
executor=exe,
model_filename="lenet" + INFER_MODEL_SUFFIX,
params_filename="lenet" + INFER_PARAMS_SUFFIX))
dynamic_ops = dynamic_inference_program.global_block().ops
static_ops = static_inference_program.global_block().ops
for op in dynamic_ops[:]:
if op.type == "flatten2" or 'fake' in op.type:
dynamic_ops.remove(op)
for op in static_ops[:]:
if 'fake' in op.type:
static_ops.remove(op)
op_count = 0
for i in range(len(dynamic_ops)):
if dynamic_ops[i].has_attr("out_threshold"):
op_count += 1
self.assertTrue(dynamic_ops[i].type == static_ops[i].type)
self.assertTrue(dynamic_ops[i].attr("out_threshold") ==
static_ops[i].attr("out_threshold"))
self.assertTrue(op_count == 13)
class TestSaveQuantizedModel_Warning(unittest.TestCase):
def test_warning(self):
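        # Saving a quantized model whose layers carry no out_threshold attribute
        # should emit exactly one warning.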
path = "./dynamic_outscale_infer_model_with_warnings/lenet"
imperative_out_scale = ImperativeQuantAware()
with fluid.dygraph.guard():
lenet = ImperativeLenet()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
imperative_out_scale.save_quantized_model(
layer=lenet,
path=path,
input_spec=[
paddle.static.InputSpec(
shape=[None, 1, 28, 28], dtype='float32')
])
warning_message = "Warning: No Layer of the model while to be saved contains the out_threshold attribute, " \
"so the generated inference model would not contain the out_threshold."
num = get_vaild_warning_num(warning_message, w)
assert num == 1
if __name__ == '__main__':
unittest.main()
| [
"numpy.product",
"paddle.fluid.DataFeeder",
"paddle.nn.layer.ReLU",
"paddle.fluid.dygraph.guard",
"paddle.fluid.dygraph.to_variable",
"paddle.static.InputSpec",
"paddle.fluid.layers.cross_entropy",
"paddle.fluid.layers.data",
"numpy.array",
"paddle.fluid.Executor",
"paddle.fluid.log_helper.get_logger",
"paddle.fluid.core.Graph",
"unittest.main",
"paddle.dataset.mnist.test",
"paddle.fluid.layers.prelu",
"paddle.nn.layer.Sigmoid",
"paddle.fluid.default_startup_program",
"paddle.fluid.layers.mean",
"paddle.enable_static",
"paddle.fluid.default_main_program",
"paddle.fluid.layers.conv2d",
"numpy.random.seed",
"warnings.simplefilter",
"paddle.fluid.core.is_compiled_with_cuda",
"paddle.fluid.contrib.slim.quantization.ImperativeQuantAware",
"paddle.nn.MaxPool2D",
"paddle.fluid.contrib.slim.quantization.OutScaleForInferencePass",
"paddle.fluid.contrib.slim.quantization.QuantizationTransformPass",
"numpy.abs",
"paddle.nn.layer.PReLU",
"paddle.fluid.ParamAttr",
"paddle.fluid.Program",
"paddle.fluid.set_flags",
"paddle.nn.layer.LeakyReLU",
"paddle.fluid.BuildStrategy",
"paddle.fluid.layers.softmax",
"paddle.fluid.CompiledProgram",
"paddle.fluid.layers.sigmoid",
"paddle.fluid.layers.leaky_relu",
"paddle.fluid.io.save_inference_model",
"paddle.nn.Softmax",
"paddle.fluid.contrib.slim.quantization.OutScaleForTrainingPass",
"paddle.nn.Linear",
"paddle.fluid.layers.batch_norm",
"paddle.fluid.dygraph.nn.Pool2D",
"paddle.nn.BatchNorm2D",
"paddle.fluid.scope_guard",
"paddle.fluid.layers.flatten",
"paddle.fluid.layers.relu",
"paddle.fluid.core.Scope",
"paddle.fluid.io.load_inference_model",
"paddle.nn.Conv2D",
"paddle.fluid.optimizer.AdamOptimizer",
"paddle.fluid.layers.fc",
"warnings.catch_warnings",
"paddle.fluid.layers.pool2d",
"paddle.save",
"paddle.fluid.unique_name.guard",
"paddle.load",
"paddle.fluid.core.CUDAPlace",
"numpy.zeros_like",
"paddle.fluid.program_guard",
"paddle.fluid.core.CPUPlace"
] | [((1541, 1563), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (1561, 1563), False, 'import paddle\n'), ((1596, 1624), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (1622, 1624), False, 'from paddle.fluid import core\n'), ((1694, 1779), 'paddle.fluid.log_helper.get_logger', 'get_logger', (['__name__', 'logging.INFO'], {'fmt': '"""%(asctime)s-%(levelname)s: %(message)s"""'}), "(__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s'\n )\n", (1704, 1779), False, 'from paddle.fluid.log_helper import get_logger\n'), ((1630, 1682), 'paddle.fluid.set_flags', 'fluid.set_flags', (["{'FLAGS_cudnn_deterministic': True}"], {}), "({'FLAGS_cudnn_deterministic': True})\n", (1645, 1682), True, 'import paddle.fluid as fluid\n'), ((2033, 2067), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""conv2d_w_1"""'}), "(name='conv2d_w_1')\n", (2048, 2067), True, 'import paddle.fluid as fluid\n'), ((2089, 2123), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""conv2d_w_2"""'}), "(name='conv2d_w_2')\n", (2104, 2123), True, 'import paddle.fluid as fluid\n'), ((2141, 2171), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_w_1"""'}), "(name='fc_w_1')\n", (2156, 2171), True, 'import paddle.fluid as fluid\n'), ((2189, 2219), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_w_2"""'}), "(name='fc_w_2')\n", (2204, 2219), True, 'import paddle.fluid as fluid\n'), ((2237, 2267), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_w_3"""'}), "(name='fc_w_3')\n", (2252, 2267), True, 'import paddle.fluid as fluid\n'), ((2289, 2323), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""conv2d_b_2"""'}), "(name='conv2d_b_2')\n", (2304, 2323), True, 'import paddle.fluid as fluid\n'), ((2341, 2371), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_b_1"""'}), "(name='fc_b_1')\n", (2356, 2371), True, 'import paddle.fluid as fluid\n'), ((2389, 2419), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_b_2"""'}), "(name='fc_b_2')\n", (2404, 2419), True, 'import paddle.fluid as fluid\n'), ((2437, 2467), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_b_3"""'}), "(name='fc_b_3')\n", (2452, 2467), True, 'import paddle.fluid as fluid\n'), ((2480, 2604), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', (['data'], {'num_filters': '(6)', 'filter_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'param_attr': 'conv2d_w1_attr', 'bias_attr': '(False)'}), '(data, num_filters=6, filter_size=3, stride=1, padding=1,\n param_attr=conv2d_w1_attr, bias_attr=False)\n', (2499, 2604), True, 'import paddle.fluid as fluid\n'), ((2676, 2700), 'paddle.fluid.layers.batch_norm', 'layers.batch_norm', (['conv1'], {}), '(conv1)\n', (2693, 2700), True, 'import paddle.fluid.layers as layers\n'), ((2713, 2737), 'paddle.fluid.layers.relu', 'layers.relu', (['batch_norm1'], {}), '(batch_norm1)\n', (2724, 2737), True, 'import paddle.fluid.layers as layers\n'), ((2750, 2821), 'paddle.fluid.layers.pool2d', 'fluid.layers.pool2d', (['relu1'], {'pool_size': '(2)', 'pool_type': '"""max"""', 'pool_stride': '(2)'}), "(relu1, pool_size=2, pool_type='max', pool_stride=2)\n", (2769, 2821), True, 'import paddle.fluid as fluid\n'), ((2843, 2979), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', (['pool1'], {'num_filters': '(16)', 'filter_size': '(5)', 'stride': '(1)', 'padding': '(0)', 'param_attr': 'conv2d_w2_attr', 'bias_attr': 'conv2d_b2_attr'}), '(pool1, 
num_filters=16, filter_size=5, stride=1, padding\n =0, param_attr=conv2d_w2_attr, bias_attr=conv2d_b2_attr)\n', (2862, 2979), True, 'import paddle.fluid as fluid\n'), ((3050, 3074), 'paddle.fluid.layers.batch_norm', 'layers.batch_norm', (['conv2'], {}), '(conv2)\n', (3067, 3074), True, 'import paddle.fluid.layers as layers\n'), ((3088, 3125), 'paddle.fluid.layers.prelu', 'layers.prelu', (['batch_norm2'], {'mode': '"""all"""'}), "(batch_norm2, mode='all')\n", (3100, 3125), True, 'import paddle.fluid.layers as layers\n'), ((3138, 3210), 'paddle.fluid.layers.pool2d', 'fluid.layers.pool2d', (['prelu1'], {'pool_size': '(2)', 'pool_type': '"""max"""', 'pool_stride': '(2)'}), "(prelu1, pool_size=2, pool_type='max', pool_stride=2)\n", (3157, 3210), True, 'import paddle.fluid as fluid\n'), ((3231, 3319), 'paddle.fluid.layers.fc', 'fluid.layers.fc', ([], {'input': 'pool2', 'size': '(120)', 'param_attr': 'fc_w1_attr', 'bias_attr': 'fc_b1_attr'}), '(input=pool2, size=120, param_attr=fc_w1_attr, bias_attr=\n fc_b1_attr)\n', (3246, 3319), True, 'import paddle.fluid as fluid\n'), ((3411, 3445), 'paddle.fluid.layers.leaky_relu', 'layers.leaky_relu', (['fc1'], {'alpha': '(0.01)'}), '(fc1, alpha=0.01)\n', (3428, 3445), True, 'import paddle.fluid.layers as layers\n'), ((3456, 3548), 'paddle.fluid.layers.fc', 'fluid.layers.fc', ([], {'input': 'leaky_relu1', 'size': '(84)', 'param_attr': 'fc_w2_attr', 'bias_attr': 'fc_b2_attr'}), '(input=leaky_relu1, size=84, param_attr=fc_w2_attr,\n bias_attr=fc_b2_attr)\n', (3471, 3548), True, 'import paddle.fluid as fluid\n'), ((3638, 3657), 'paddle.fluid.layers.sigmoid', 'layers.sigmoid', (['fc2'], {}), '(fc2)\n', (3652, 3657), True, 'import paddle.fluid.layers as layers\n'), ((3668, 3766), 'paddle.fluid.layers.fc', 'fluid.layers.fc', ([], {'input': 'sigmoid1', 'size': 'num_classes', 'param_attr': 'fc_w3_attr', 'bias_attr': 'fc_b3_attr'}), '(input=sigmoid1, size=num_classes, param_attr=fc_w3_attr,\n bias_attr=fc_b3_attr)\n', (3683, 3766), True, 'import paddle.fluid as fluid\n'), ((3856, 3891), 'paddle.fluid.layers.softmax', 'layers.softmax', (['fc3'], {'use_cudnn': '(True)'}), '(fc3, use_cudnn=True)\n', (3870, 3891), True, 'import paddle.fluid.layers as layers\n'), ((19436, 19451), 'unittest.main', 'unittest.main', ([], {}), '()\n', (19449, 19451), False, 'import unittest\n'), ((4071, 4105), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""conv2d_w_1"""'}), "(name='conv2d_w_1')\n", (4086, 4105), True, 'import paddle.fluid as fluid\n'), ((4131, 4165), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""conv2d_w_2"""'}), "(name='conv2d_w_2')\n", (4146, 4165), True, 'import paddle.fluid as fluid\n'), ((4187, 4217), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_w_1"""'}), "(name='fc_w_1')\n", (4202, 4217), True, 'import paddle.fluid as fluid\n'), ((4239, 4269), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_w_2"""'}), "(name='fc_w_2')\n", (4254, 4269), True, 'import paddle.fluid as fluid\n'), ((4291, 4321), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_w_3"""'}), "(name='fc_w_3')\n", (4306, 4321), True, 'import paddle.fluid as fluid\n'), ((4347, 4381), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""conv2d_b_2"""'}), "(name='conv2d_b_2')\n", (4362, 4381), True, 'import paddle.fluid as fluid\n'), ((4403, 4433), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_b_1"""'}), "(name='fc_b_1')\n", (4418, 4433), True, 'import paddle.fluid as fluid\n'), ((4455, 4485), 
'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_b_2"""'}), "(name='fc_b_2')\n", (4470, 4485), True, 'import paddle.fluid as fluid\n'), ((4507, 4537), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_b_3"""'}), "(name='fc_b_3')\n", (4522, 4537), True, 'import paddle.fluid as fluid\n'), ((6004, 6030), 'paddle.fluid.layers.flatten', 'fluid.layers.flatten', (['x', '(1)'], {}), '(x, 1)\n', (6024, 6030), True, 'import paddle.fluid as fluid\n'), ((7545, 7663), 'paddle.fluid.contrib.slim.quantization.ImperativeQuantAware', 'ImperativeQuantAware', ([], {'weight_quantize_type': 'weight_quantize_type', 'activation_quantize_type': 'activation_quantize_type'}), '(weight_quantize_type=weight_quantize_type,\n activation_quantize_type=activation_quantize_type)\n', (7565, 7663), False, 'from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware\n'), ((9736, 9775), 'paddle.save', 'paddle.save', (['save_dict', 'param_save_path'], {}), '(save_dict, param_save_path)\n', (9747, 9775), False, 'import paddle\n'), ((10290, 10318), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (10316, 10318), False, 'from paddle.fluid import core\n'), ((10422, 10443), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (10436, 10443), True, 'import paddle.fluid as fluid\n'), ((10460, 10475), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (10473, 10475), True, 'import paddle.fluid as fluid\n'), ((10492, 10507), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (10505, 10507), True, 'import paddle.fluid as fluid\n'), ((10526, 10541), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (10539, 10541), True, 'import paddle.fluid as fluid\n'), ((10993, 11005), 'paddle.fluid.core.Scope', 'core.Scope', ([], {}), '()\n', (11003, 11005), False, 'from paddle.fluid import core\n'), ((11681, 11898), 'paddle.fluid.contrib.slim.quantization.QuantizationTransformPass', 'QuantizationTransformPass', ([], {'scope': 'scope', 'place': 'place', 'activation_quantize_type': 'activation_quantize_type', 'weight_quantize_type': 'weight_quantize_type', 'quantizable_op_type': "['conv2d', 'depthwise_conv2d', 'mul']"}), "(scope=scope, place=place,\n activation_quantize_type=activation_quantize_type, weight_quantize_type\n =weight_quantize_type, quantizable_op_type=['conv2d',\n 'depthwise_conv2d', 'mul'])\n", (11706, 11898), False, 'from paddle.fluid.contrib.slim.quantization import OutScaleForTrainingPass, OutScaleForInferencePass, QuantizationTransformPass\n'), ((12054, 12103), 'paddle.fluid.contrib.slim.quantization.OutScaleForTrainingPass', 'OutScaleForTrainingPass', ([], {'scope': 'scope', 'place': 'place'}), '(scope=scope, place=place)\n', (12077, 12103), False, 'from paddle.fluid.contrib.slim.quantization import OutScaleForTrainingPass, OutScaleForInferencePass, QuantizationTransformPass\n'), ((12169, 12190), 'paddle.fluid.BuildStrategy', 'fluid.BuildStrategy', ([], {}), '()\n', (12188, 12190), True, 'import paddle.fluid as fluid\n'), ((12408, 12475), 'paddle.fluid.DataFeeder', 'fluid.DataFeeder', ([], {'feed_list': '[static_img, static_label]', 'place': 'place'}), '(feed_list=[static_img, static_label], place=place)\n', (12424, 12475), True, 'import paddle.fluid as fluid\n'), ((12930, 12967), 'paddle.fluid.contrib.slim.quantization.OutScaleForInferencePass', 'OutScaleForInferencePass', ([], {'scope': 'scope'}), '(scope=scope)\n', (12954, 12967), False, 'from paddle.fluid.contrib.slim.quantization import 
OutScaleForTrainingPass, OutScaleForInferencePass, QuantizationTransformPass\n'), ((14350, 14519), 'paddle.fluid.io.load_inference_model', 'fluid.io.load_inference_model', ([], {'dirname': 'dynamic_save_dir', 'executor': 'exe', 'model_filename': "('lenet' + INFER_MODEL_SUFFIX)", 'params_filename': "('lenet' + INFER_PARAMS_SUFFIX)"}), "(dirname=dynamic_save_dir, executor=exe,\n model_filename='lenet' + INFER_MODEL_SUFFIX, params_filename='lenet' +\n INFER_PARAMS_SUFFIX)\n", (14379, 14519), True, 'import paddle.fluid as fluid\n'), ((14691, 14859), 'paddle.fluid.io.load_inference_model', 'fluid.io.load_inference_model', ([], {'dirname': 'static_save_dir', 'executor': 'exe', 'model_filename': "('lenet' + INFER_MODEL_SUFFIX)", 'params_filename': "('lenet' + INFER_PARAMS_SUFFIX)"}), "(dirname=static_save_dir, executor=exe,\n model_filename='lenet' + INFER_MODEL_SUFFIX, params_filename='lenet' +\n INFER_PARAMS_SUFFIX)\n", (14720, 14859), True, 'import paddle.fluid as fluid\n'), ((16223, 16341), 'paddle.fluid.contrib.slim.quantization.ImperativeQuantAware', 'ImperativeQuantAware', ([], {'weight_quantize_type': 'weight_quantize_type', 'activation_quantize_type': 'activation_quantize_type'}), '(weight_quantize_type=weight_quantize_type,\n activation_quantize_type=activation_quantize_type)\n', (16243, 16341), False, 'from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware\n'), ((16833, 16861), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (16859, 16861), False, 'from paddle.fluid import core\n'), ((16965, 16986), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (16979, 16986), True, 'import paddle.fluid as fluid\n'), ((17103, 17278), 'paddle.fluid.io.load_inference_model', 'fluid.io.load_inference_model', ([], {'dirname': 'dynamic_model_save_dir', 'executor': 'exe', 'model_filename': "('lenet' + INFER_MODEL_SUFFIX)", 'params_filename': "('lenet' + INFER_PARAMS_SUFFIX)"}), "(dirname=dynamic_model_save_dir, executor=exe,\n model_filename='lenet' + INFER_MODEL_SUFFIX, params_filename='lenet' +\n INFER_PARAMS_SUFFIX)\n", (17132, 17278), True, 'import paddle.fluid as fluid\n'), ((17450, 17624), 'paddle.fluid.io.load_inference_model', 'fluid.io.load_inference_model', ([], {'dirname': 'static_model_save_dir', 'executor': 'exe', 'model_filename': "('lenet' + INFER_MODEL_SUFFIX)", 'params_filename': "('lenet' + INFER_PARAMS_SUFFIX)"}), "(dirname=static_model_save_dir, executor=exe,\n model_filename='lenet' + INFER_MODEL_SUFFIX, params_filename='lenet' +\n INFER_PARAMS_SUFFIX)\n", (17479, 17624), True, 'import paddle.fluid as fluid\n'), ((18648, 18670), 'paddle.fluid.contrib.slim.quantization.ImperativeQuantAware', 'ImperativeQuantAware', ([], {}), '()\n', (18668, 18670), False, 'from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware\n'), ((4586, 4708), 'paddle.nn.Conv2D', 'Conv2D', ([], {'in_channels': '(1)', 'out_channels': '(6)', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'weight_attr': 'conv2d_w1_attr', 'bias_attr': '(False)'}), '(in_channels=1, out_channels=6, kernel_size=3, stride=1, padding=1,\n weight_attr=conv2d_w1_attr, bias_attr=False)\n', (4592, 4708), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((4831, 4845), 'paddle.nn.BatchNorm2D', 'BatchNorm2D', (['(6)'], {}), '(6)\n', (4842, 4845), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((4859, 4865), 'paddle.nn.layer.ReLU', 'ReLU', ([], {}), '()\n', (4863, 4865), 
False, 'from paddle.nn.layer import ReLU, LeakyReLU, Sigmoid, Softmax, PReLU\n'), ((4879, 4930), 'paddle.fluid.dygraph.nn.Pool2D', 'Pool2D', ([], {'pool_size': '(2)', 'pool_type': '"""max"""', 'pool_stride': '(2)'}), "(pool_size=2, pool_type='max', pool_stride=2)\n", (4885, 4930), False, 'from paddle.fluid.dygraph.nn import Pool2D\n'), ((4961, 5093), 'paddle.nn.Conv2D', 'Conv2D', ([], {'in_channels': '(6)', 'out_channels': '(16)', 'kernel_size': '(5)', 'stride': '(1)', 'padding': '(0)', 'weight_attr': 'conv2d_w2_attr', 'bias_attr': 'conv2d_b2_attr'}), '(in_channels=6, out_channels=16, kernel_size=5, stride=1, padding=0,\n weight_attr=conv2d_w2_attr, bias_attr=conv2d_b2_attr)\n', (4967, 5093), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((5216, 5231), 'paddle.nn.BatchNorm2D', 'BatchNorm2D', (['(16)'], {}), '(16)\n', (5227, 5231), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((5245, 5252), 'paddle.nn.layer.PReLU', 'PReLU', ([], {}), '()\n', (5250, 5252), False, 'from paddle.nn.layer import ReLU, LeakyReLU, Sigmoid, Softmax, PReLU\n'), ((5266, 5300), 'paddle.nn.MaxPool2D', 'MaxPool2D', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (5275, 5300), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((5362, 5454), 'paddle.nn.Linear', 'Linear', ([], {'in_features': '(400)', 'out_features': '(120)', 'weight_attr': 'fc_w1_attr', 'bias_attr': 'fc_b1_attr'}), '(in_features=400, out_features=120, weight_attr=fc_w1_attr, bias_attr\n =fc_b1_attr)\n', (5368, 5454), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((5528, 5539), 'paddle.nn.layer.LeakyReLU', 'LeakyReLU', ([], {}), '()\n', (5537, 5539), False, 'from paddle.nn.layer import ReLU, LeakyReLU, Sigmoid, Softmax, PReLU\n'), ((5553, 5644), 'paddle.nn.Linear', 'Linear', ([], {'in_features': '(120)', 'out_features': '(84)', 'weight_attr': 'fc_w2_attr', 'bias_attr': 'fc_b2_attr'}), '(in_features=120, out_features=84, weight_attr=fc_w2_attr, bias_attr=\n fc_b2_attr)\n', (5559, 5644), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((5718, 5727), 'paddle.nn.layer.Sigmoid', 'Sigmoid', ([], {}), '()\n', (5725, 5727), False, 'from paddle.nn.layer import ReLU, LeakyReLU, Sigmoid, Softmax, PReLU\n'), ((5741, 5839), 'paddle.nn.Linear', 'Linear', ([], {'in_features': '(84)', 'out_features': 'num_classes', 'weight_attr': 'fc_w3_attr', 'bias_attr': 'fc_b3_attr'}), '(in_features=84, out_features=num_classes, weight_attr=fc_w3_attr,\n bias_attr=fc_b3_attr)\n', (5747, 5839), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((5914, 5923), 'paddle.nn.Softmax', 'Softmax', ([], {}), '()\n', (5921, 5923), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((7071, 7098), 'paddle.dataset.mnist.test', 'paddle.dataset.mnist.test', ([], {}), '()\n', (7096, 7098), False, 'import paddle\n'), ((7699, 7720), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (7718, 7720), True, 'import paddle.fluid as fluid\n'), ((7734, 7754), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (7748, 7754), True, 'import numpy as np\n'), ((10340, 10357), 'paddle.fluid.core.CUDAPlace', 'core.CUDAPlace', (['(0)'], {}), '(0)\n', (10354, 10357), False, 'from paddle.fluid import core\n'), ((10392, 10407), 'paddle.fluid.core.CPUPlace', 'core.CPUPlace', ([], {}), '()\n', (10405, 10407), False, 
'from paddle.fluid import core\n'), ((10800, 10825), 'paddle.fluid.unique_name.guard', 'fluid.unique_name.guard', ([], {}), '()\n', (10823, 10825), True, 'import paddle.fluid as fluid\n'), ((11019, 11043), 'paddle.fluid.scope_guard', 'fluid.scope_guard', (['scope'], {}), '(scope)\n', (11036, 11043), True, 'import paddle.fluid as fluid\n'), ((11548, 11569), 'paddle.fluid.core.Graph', 'core.Graph', (['main.desc'], {}), '(main.desc)\n', (11558, 11569), False, 'from paddle.fluid import core\n'), ((11617, 11639), 'paddle.fluid.core.Graph', 'core.Graph', (['infer.desc'], {}), '(infer.desc)\n', (11627, 11639), False, 'from paddle.fluid import core\n'), ((12502, 12526), 'paddle.fluid.scope_guard', 'fluid.scope_guard', (['scope'], {}), '(scope)\n', (12519, 12526), True, 'import paddle.fluid as fluid\n'), ((13136, 13160), 'paddle.fluid.scope_guard', 'fluid.scope_guard', (['scope'], {}), '(scope)\n', (13153, 13160), True, 'import paddle.fluid as fluid\n'), ((13174, 13435), 'paddle.fluid.io.save_inference_model', 'fluid.io.save_inference_model', ([], {'dirname': 'static_save_dir', 'feeded_var_names': '[infer_img.name]', 'target_vars': '[infer_pre]', 'executor': 'exe', 'main_program': 'save_program', 'model_filename': "('lenet' + INFER_MODEL_SUFFIX)", 'params_filename': "('lenet' + INFER_PARAMS_SUFFIX)"}), "(dirname=static_save_dir, feeded_var_names=[\n infer_img.name], target_vars=[infer_pre], executor=exe, main_program=\n save_program, model_filename='lenet' + INFER_MODEL_SUFFIX,\n params_filename='lenet' + INFER_PARAMS_SUFFIX)\n", (13203, 13435), True, 'import paddle.fluid as fluid\n'), ((13699, 13722), 'numpy.abs', 'np.abs', (['(loss_d - loss_s)'], {}), '(loss_d - loss_s)\n', (13705, 13722), True, 'import numpy as np\n'), ((16377, 16398), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (16396, 16398), True, 'import paddle.fluid as fluid\n'), ((16462, 16490), 'paddle.load', 'paddle.load', (['load_param_path'], {}), '(load_param_path)\n', (16473, 16490), False, 'import paddle\n'), ((16883, 16900), 'paddle.fluid.core.CUDAPlace', 'core.CUDAPlace', (['(0)'], {}), '(0)\n', (16897, 16900), False, 'from paddle.fluid import core\n'), ((16935, 16950), 'paddle.fluid.core.CPUPlace', 'core.CPUPlace', ([], {}), '()\n', (16948, 16950), False, 'from paddle.fluid import core\n'), ((18684, 18705), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (18703, 18705), True, 'import paddle.fluid as fluid\n'), ((18759, 18795), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (18782, 18795), False, 'import warnings\n'), ((18814, 18845), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (18835, 18845), False, 'import warnings\n'), ((6247, 6272), 'paddle.fluid.unique_name.guard', 'fluid.unique_name.guard', ([], {}), '()\n', (6270, 6272), True, 'import paddle.fluid as fluid\n'), ((7767, 7795), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (7793, 7795), True, 'import paddle.fluid as fluid\n'), ((7827, 7858), 'paddle.fluid.default_startup_program', 'fluid.default_startup_program', ([], {}), '()\n', (7856, 7858), True, 'import paddle.fluid as fluid\n'), ((9061, 9094), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['x_data'], {}), '(x_data)\n', (9086, 9094), True, 'import paddle.fluid as fluid\n'), ((9119, 9152), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['y_data'], {}), '(y_data)\n', (9144, 9152), True, 'import 
paddle.fluid as fluid\n'), ((9210, 9248), 'paddle.fluid.layers.cross_entropy', 'fluid.layers.cross_entropy', (['out', 'label'], {}), '(out, label)\n', (9236, 9248), True, 'import paddle.fluid as fluid\n'), ((9276, 9299), 'paddle.fluid.layers.mean', 'fluid.layers.mean', (['loss'], {}), '(loss)\n', (9293, 9299), True, 'import paddle.fluid as fluid\n'), ((10844, 10878), 'paddle.fluid.program_guard', 'fluid.program_guard', (['main', 'startup'], {}), '(main, startup)\n', (10863, 10878), True, 'import paddle.fluid as fluid\n'), ((10902, 10933), 'paddle.fluid.optimizer.AdamOptimizer', 'AdamOptimizer', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (10915, 10933), False, 'from paddle.fluid.optimizer import AdamOptimizer\n'), ((12259, 12298), 'paddle.fluid.CompiledProgram', 'fluid.CompiledProgram', (['main_graph.graph'], {}), '(main_graph.graph)\n', (12280, 12298), True, 'import paddle.fluid as fluid\n'), ((14024, 14050), 'numpy.array', 'np.array', (['dynamic_loss_rec'], {}), '(dynamic_loss_rec)\n', (14032, 14050), True, 'import numpy as np\n'), ((14068, 14093), 'numpy.array', 'np.array', (['static_loss_rec'], {}), '(static_loss_rec)\n', (14076, 14093), True, 'import numpy as np\n'), ((6295, 6329), 'paddle.fluid.program_guard', 'fluid.program_guard', (['main', 'startup'], {}), '(main, startup)\n', (6314, 6329), True, 'import paddle.fluid as fluid\n'), ((6448, 6515), 'paddle.fluid.layers.data', 'fluid.layers.data', ([], {'name': '"""image"""', 'shape': '[1, 28, 28]', 'dtype': '"""float32"""'}), "(name='image', shape=[1, 28, 28], dtype='float32')\n", (6465, 6515), True, 'import paddle.fluid as fluid\n'), ((6569, 6626), 'paddle.fluid.layers.data', 'fluid.layers.data', ([], {'name': '"""label"""', 'shape': '[1]', 'dtype': '"""int64"""'}), "(name='label', shape=[1], dtype='int64')\n", (6586, 6626), True, 'import paddle.fluid as fluid\n'), ((10032, 10097), 'paddle.static.InputSpec', 'paddle.static.InputSpec', ([], {'shape': '[None, 1, 28, 28]', 'dtype': '"""float32"""'}), "(shape=[None, 1, 28, 28], dtype='float32')\n", (10055, 10097), False, 'import paddle\n'), ((16719, 16784), 'paddle.static.InputSpec', 'paddle.static.InputSpec', ([], {'shape': '[None, 1, 28, 28]', 'dtype': '"""float32"""'}), "(shape=[None, 1, 28, 28], dtype='float32')\n", (16742, 16784), False, 'import paddle\n'), ((6769, 6826), 'paddle.fluid.layers.cross_entropy', 'fluid.layers.cross_entropy', ([], {'input': 'prediction', 'label': 'label'}), '(input=prediction, label=label)\n', (6795, 6826), True, 'import paddle.fluid as fluid\n'), ((6891, 6914), 'paddle.fluid.layers.mean', 'fluid.layers.mean', (['loss'], {}), '(loss)\n', (6908, 6914), True, 'import paddle.fluid as fluid\n'), ((13760, 13774), 'numpy.abs', 'np.abs', (['loss_s'], {}), '(loss_s)\n', (13766, 13774), True, 'import numpy as np\n'), ((19006, 19071), 'paddle.static.InputSpec', 'paddle.static.InputSpec', ([], {'shape': '[None, 1, 28, 28]', 'dtype': '"""float32"""'}), "(shape=[None, 1, 28, 28], dtype='float32')\n", (19029, 19071), False, 'import paddle\n'), ((8158, 8180), 'numpy.zeros_like', 'np.zeros_like', (['p_value'], {}), '(p_value)\n', (8171, 8180), True, 'import numpy as np\n'), ((8955, 8985), 'numpy.array', 'np.array', (['[x[1] for x in data]'], {}), '([x[1] for x in data])\n', (8963, 8985), True, 'import numpy as np\n'), ((8317, 8336), 'numpy.product', 'np.product', (['p_shape'], {}), '(p_shape)\n', (8327, 8336), True, 'import numpy as np\n')] |
import os
import glob
import shutil
import yaml
from IPython import embed
import pytest
import numpy as np
from pypeit.par.util import parse_pypeit_file
from pypeit.pypeitsetup import PypeItSetup
from pypeit.tests.tstutils import dev_suite_required, data_path
from pypeit.metadata import PypeItMetaData
from pypeit.spectrographs.util import load_spectrograph
from pypeit.scripts.setup import Setup
def test_read_combid():
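    # Run the setup script on the shane_kast_blue test data and check that the
    # combination group IDs (comb_id) are read back correctly from the generated
    # pypeit file.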
# ------------------------------------------------------------------
# In case of failed tests
setup_dir = data_path('setup_files')
if os.path.isdir(setup_dir):
shutil.rmtree(setup_dir)
config_dir = data_path('shane_kast_blue_A')
if os.path.isdir(config_dir):
shutil.rmtree(config_dir)
# ------------------------------------------------------------------
# Generate the pypeit file with the comb_id
droot = data_path('b')
pargs = Setup.parse_args(['-r', droot, '-s', 'shane_kast_blue', '-c=all', '-b',
'--extension=fits.gz', '--output_path={:s}'.format(data_path(''))])
Setup.main(pargs)
shutil.rmtree(setup_dir)
pypeit_file = os.path.join(config_dir, 'shane_kast_blue_A.pypeit')
cfg_lines, data_files, frametype, usrdata, setups, _ = parse_pypeit_file(pypeit_file)
# Get the spectrograph
spectrograph = None
for l in cfg_lines:
if 'spectrograph' in l:
spectrograph = load_spectrograph(l.split(' ')[-1])
break
assert spectrograph is not None, 'Did not appropriately read spectrograph'
# Set the metadata
pmd = PypeItMetaData(spectrograph, spectrograph.default_pypeit_par(), files=data_files,
usrdata=usrdata, strict=False)
indx = pmd['filename'] == 'b27.fits.gz'
assert pmd['comb_id'][indx] == [1], 'Incorrect combination group ID'
assert pmd['comb_id'][np.where(~indx)[0]][0] == -1, 'Incorrect combination group ID'
shutil.rmtree(config_dir)
@dev_suite_required
def test_lris_red_multi_400():
file_list = glob.glob(os.path.join(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red',
'multi_400_8500_d560', '*.fits.gz'))
cfg_lines = ['[rdx]',
'spectrograph = keck_lris_red']
ps = PypeItSetup(file_list, cfg_lines=cfg_lines)
ps.build_fitstbl()
ps.get_frame_types(flag_unknown=True)
cfgs = ps.fitstbl.unique_configurations()
ps.fitstbl.set_configurations(cfgs)
ps.fitstbl.set_calibration_groups() #global_frames=['bias', 'dark'])
# Test
assert np.all(ps.fitstbl['setup'] == 'A')
@dev_suite_required
def test_lris_red_multi():
file_list = glob.glob(os.path.join(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red',
'multi*', '*.fits*'))
cfg_lines = ['[rdx]',
'spectrograph = keck_lris_red']
ps = PypeItSetup(file_list, cfg_lines=cfg_lines)
ps.build_fitstbl()
ps.get_frame_types(flag_unknown=True)
cfgs = ps.fitstbl.unique_configurations()
ps.fitstbl.set_configurations(cfgs)
ps.fitstbl.set_calibration_groups() #global_frames=['bias', 'dark'])
@dev_suite_required
def test_lris_red_multi_calib():
file_list = glob.glob(os.path.join(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red',
'multi_400_8500_d560', '*.fits.gz'))
cfg_lines = ['[rdx]',
'spectrograph = keck_lris_red']
ps = PypeItSetup(file_list, cfg_lines=cfg_lines)
ps.build_fitstbl()
ps.get_frame_types(flag_unknown=True)
cfgs = ps.fitstbl.unique_configurations()
ps.fitstbl.set_configurations(cfgs)
ps.fitstbl.set_calibration_groups() #global_frames=['bias', 'dark'])
cfile = data_path('test.calib')
ps.fitstbl.write_calib(cfile)
with open(cfile, 'r') as f:
calib = yaml.load(f, Loader=yaml.FullLoader)
assert np.array_equal(list(calib['A'].keys()), ['--', 1]), \
'Calibrations dictionary read incorrectly.'
os.remove(cfile)
@dev_suite_required
def test_lris_red_multi_run():
# Perform the setup
file_list = glob.glob(os.path.join(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red',
'multi*', '*.fits*'))
cfg_lines = ['[rdx]',
'spectrograph = keck_lris_red']
ps = PypeItSetup(file_list, cfg_lines=cfg_lines)
ps.run(setup_only=True)
# Test
#assert len(ps.setup_dict) == 2, 'Should find two setups'
assert len(ps.fitstbl) >= 40, 'Should find 40+ files'
arcs = ps.fitstbl['filename'][ps.fitstbl.find_frames('arc')]
assert len(arcs) >= 2, 'Should find two or more arcs'
assert 'r170320_2017.fits.gz' in arcs, \
'Should have identified r170320_2017.fits.gz as an arc'
assert 'r170816_0057.fits' in ps.fitstbl['filename'][ps.fitstbl.find_frames('science')], \
'Should have identified r170816_0057.fits as a science frame'
# Clean-up
#os.remove('keck_lris_red.lst')
#os.remove('keck_lris_red.setups')
os.remove('keck_lris_red.sorted')
@dev_suite_required
def test_lris_blue_pypeit_overwrite():
f = os.path.join(os.environ['PYPEIT_DEV'],
'pypeit_files/keck_lris_blue_long_400_3400_d560.pypeit')
assert os.path.isfile(f), 'Could not find pypeit file.'
cfg_lines, data_files, frametype, usrdata, setups, _ = parse_pypeit_file(f, file_check=False)
# Change the dev path
for i in range(len(data_files)):
path_list = data_files[i].split('/')
for j,p in enumerate(path_list):
if p == 'RAW_DATA':
break
data_files[i] = os.path.join(os.environ['PYPEIT_DEV'], '/'.join(path_list[j:]))
# Read the fits table with and without the user data
spectrograph = load_spectrograph('keck_lris_blue')
par = spectrograph.default_pypeit_par()
fitstbl = PypeItMetaData(spectrograph, par, files=data_files)
fitstbl_usr = PypeItMetaData(spectrograph, par, files=data_files, usrdata=usrdata)
assert fitstbl['target'][0] == 'unknown', 'Grating name changed in file header'
assert fitstbl_usr['target'][0] == 'test', 'Grating name changed in pypeit file'
assert fitstbl['target'][0] != fitstbl_usr['target'][0], \
'Fits header value and input pypeit file value expected to be different.'
| [
"pypeit.scripts.setup.Setup.main",
"numpy.where",
"os.path.join",
"yaml.load",
"pypeit.par.util.parse_pypeit_file",
"os.path.isfile",
"pypeit.tests.tstutils.data_path",
"os.path.isdir",
"pypeit.metadata.PypeItMetaData",
"shutil.rmtree",
"numpy.all",
"pypeit.spectrographs.util.load_spectrograph",
"pypeit.pypeitsetup.PypeItSetup",
"os.remove"
] | [((547, 571), 'pypeit.tests.tstutils.data_path', 'data_path', (['"""setup_files"""'], {}), "('setup_files')\n", (556, 571), False, 'from pypeit.tests.tstutils import dev_suite_required, data_path\n'), ((579, 603), 'os.path.isdir', 'os.path.isdir', (['setup_dir'], {}), '(setup_dir)\n', (592, 603), False, 'import os\n'), ((655, 685), 'pypeit.tests.tstutils.data_path', 'data_path', (['"""shane_kast_blue_A"""'], {}), "('shane_kast_blue_A')\n", (664, 685), False, 'from pypeit.tests.tstutils import dev_suite_required, data_path\n'), ((693, 718), 'os.path.isdir', 'os.path.isdir', (['config_dir'], {}), '(config_dir)\n', (706, 718), False, 'import os\n'), ((888, 902), 'pypeit.tests.tstutils.data_path', 'data_path', (['"""b"""'], {}), "('b')\n", (897, 902), False, 'from pypeit.tests.tstutils import dev_suite_required, data_path\n'), ((1088, 1105), 'pypeit.scripts.setup.Setup.main', 'Setup.main', (['pargs'], {}), '(pargs)\n', (1098, 1105), False, 'from pypeit.scripts.setup import Setup\n'), ((1110, 1134), 'shutil.rmtree', 'shutil.rmtree', (['setup_dir'], {}), '(setup_dir)\n', (1123, 1134), False, 'import shutil\n'), ((1154, 1206), 'os.path.join', 'os.path.join', (['config_dir', '"""shane_kast_blue_A.pypeit"""'], {}), "(config_dir, 'shane_kast_blue_A.pypeit')\n", (1166, 1206), False, 'import os\n'), ((1266, 1296), 'pypeit.par.util.parse_pypeit_file', 'parse_pypeit_file', (['pypeit_file'], {}), '(pypeit_file)\n', (1283, 1296), False, 'from pypeit.par.util import parse_pypeit_file\n'), ((1949, 1974), 'shutil.rmtree', 'shutil.rmtree', (['config_dir'], {}), '(config_dir)\n', (1962, 1974), False, 'import shutil\n'), ((2268, 2311), 'pypeit.pypeitsetup.PypeItSetup', 'PypeItSetup', (['file_list'], {'cfg_lines': 'cfg_lines'}), '(file_list, cfg_lines=cfg_lines)\n', (2279, 2311), False, 'from pypeit.pypeitsetup import PypeItSetup\n'), ((2558, 2592), 'numpy.all', 'np.all', (["(ps.fitstbl['setup'] == 'A')"], {}), "(ps.fitstbl['setup'] == 'A')\n", (2564, 2592), True, 'import numpy as np\n'), ((2868, 2911), 'pypeit.pypeitsetup.PypeItSetup', 'PypeItSetup', (['file_list'], {'cfg_lines': 'cfg_lines'}), '(file_list, cfg_lines=cfg_lines)\n', (2879, 2911), False, 'from pypeit.pypeitsetup import PypeItSetup\n'), ((3432, 3475), 'pypeit.pypeitsetup.PypeItSetup', 'PypeItSetup', (['file_list'], {'cfg_lines': 'cfg_lines'}), '(file_list, cfg_lines=cfg_lines)\n', (3443, 3475), False, 'from pypeit.pypeitsetup import PypeItSetup\n'), ((3713, 3736), 'pypeit.tests.tstutils.data_path', 'data_path', (['"""test.calib"""'], {}), "('test.calib')\n", (3722, 3736), False, 'from pypeit.tests.tstutils import dev_suite_required, data_path\n'), ((3984, 4000), 'os.remove', 'os.remove', (['cfile'], {}), '(cfile)\n', (3993, 4000), False, 'import os\n'), ((4304, 4347), 'pypeit.pypeitsetup.PypeItSetup', 'PypeItSetup', (['file_list'], {'cfg_lines': 'cfg_lines'}), '(file_list, cfg_lines=cfg_lines)\n', (4315, 4347), False, 'from pypeit.pypeitsetup import PypeItSetup\n'), ((5008, 5041), 'os.remove', 'os.remove', (['"""keck_lris_red.sorted"""'], {}), "('keck_lris_red.sorted')\n", (5017, 5041), False, 'import os\n'), ((5111, 5210), 'os.path.join', 'os.path.join', (["os.environ['PYPEIT_DEV']", '"""pypeit_files/keck_lris_blue_long_400_3400_d560.pypeit"""'], {}), "(os.environ['PYPEIT_DEV'],\n 'pypeit_files/keck_lris_blue_long_400_3400_d560.pypeit')\n", (5123, 5210), False, 'import os\n'), ((5239, 5256), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (5253, 5256), False, 'import os\n'), ((5356, 5394), 'pypeit.par.util.parse_pypeit_file', 
'parse_pypeit_file', (['f'], {'file_check': '(False)'}), '(f, file_check=False)\n', (5373, 5394), False, 'from pypeit.par.util import parse_pypeit_file\n'), ((5764, 5799), 'pypeit.spectrographs.util.load_spectrograph', 'load_spectrograph', (['"""keck_lris_blue"""'], {}), "('keck_lris_blue')\n", (5781, 5799), False, 'from pypeit.spectrographs.util import load_spectrograph\n'), ((5858, 5909), 'pypeit.metadata.PypeItMetaData', 'PypeItMetaData', (['spectrograph', 'par'], {'files': 'data_files'}), '(spectrograph, par, files=data_files)\n', (5872, 5909), False, 'from pypeit.metadata import PypeItMetaData\n'), ((5928, 5996), 'pypeit.metadata.PypeItMetaData', 'PypeItMetaData', (['spectrograph', 'par'], {'files': 'data_files', 'usrdata': 'usrdata'}), '(spectrograph, par, files=data_files, usrdata=usrdata)\n', (5942, 5996), False, 'from pypeit.metadata import PypeItMetaData\n'), ((613, 637), 'shutil.rmtree', 'shutil.rmtree', (['setup_dir'], {}), '(setup_dir)\n', (626, 637), False, 'import shutil\n'), ((728, 753), 'shutil.rmtree', 'shutil.rmtree', (['config_dir'], {}), '(config_dir)\n', (741, 753), False, 'import shutil\n'), ((2053, 2160), 'os.path.join', 'os.path.join', (["os.environ['PYPEIT_DEV']", '"""RAW_DATA"""', '"""keck_lris_red"""', '"""multi_400_8500_d560"""', '"""*.fits.gz"""'], {}), "(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red',\n 'multi_400_8500_d560', '*.fits.gz')\n", (2065, 2160), False, 'import os\n'), ((2668, 2760), 'os.path.join', 'os.path.join', (["os.environ['PYPEIT_DEV']", '"""RAW_DATA"""', '"""keck_lris_red"""', '"""multi*"""', '"""*.fits*"""'], {}), "(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red',\n 'multi*', '*.fits*')\n", (2680, 2760), False, 'import os\n'), ((3217, 3324), 'os.path.join', 'os.path.join', (["os.environ['PYPEIT_DEV']", '"""RAW_DATA"""', '"""keck_lris_red"""', '"""multi_400_8500_d560"""', '"""*.fits.gz"""'], {}), "(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red',\n 'multi_400_8500_d560', '*.fits.gz')\n", (3229, 3324), False, 'import os\n'), ((3820, 3856), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (3829, 3856), False, 'import yaml\n'), ((4104, 4196), 'os.path.join', 'os.path.join', (["os.environ['PYPEIT_DEV']", '"""RAW_DATA"""', '"""keck_lris_red"""', '"""multi*"""', '"""*.fits*"""'], {}), "(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red',\n 'multi*', '*.fits*')\n", (4116, 4196), False, 'import os\n'), ((1067, 1080), 'pypeit.tests.tstutils.data_path', 'data_path', (['""""""'], {}), "('')\n", (1076, 1080), False, 'from pypeit.tests.tstutils import dev_suite_required, data_path\n'), ((1881, 1896), 'numpy.where', 'np.where', (['(~indx)'], {}), '(~indx)\n', (1889, 1896), True, 'import numpy as np\n')] |
import numpy as np
import pickle
import os
def GenerateFeature_alpha(ligand_name, working_dir):
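    # Summarize the ligand's persistence barcodes (read from <ligand>_alpha.pkl)
    # into a fixed-length feature vector: for every element combination in
    # LIGELE, collect statistics (mean/std/max/min/sum/count) of the dimension-0,
    # -1 and -2 bars, padding missing element groups with zeros.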
Cut = 12.0
LIGELE = ['C','N','O','S','CN','CO','CS','NO','NS','OS','CCl','CBr','CP','CF','CNO','CNS','COS','NOS','CNOS','CNOSPFClBrI','H','CH','NH','OH','SH','CNH','COH','CSH','NOH','NSH','OSH','CNOH','CNSH','COSH','NOSH','CNOSH','CNOSPFClBrIH','CClH','CBrH','CPH','CFH']
Feature_i = []
pdb = ligand_name
    # open in binary mode so pickle.load also works under Python 3
    with open(working_dir+'/'+ligand_name+'_alpha.pkl', 'rb') as InFile:
        BarCollection = pickle.load(InFile)
for el in LIGELE:
if 'lig_'+el in BarCollection.keys():
Bars = BarCollection['lig_'+el]
Bar0Birth = []; Bar0Death = []; Bar1Birth = []; Bar1Death = []; Bar2Birth = []; Bar2Death = [];
for Bar in Bars:
if Bar[2] < Bar[1]:
continue
if Bar[2] > 12.0 and Bar[0] == 0: continue
if Bar[2] > 12.0 and Bar[0] > 0: Bar[2] = 12.0
if Bar[0] == 0:
Bar0Birth.append(Bar[1])
Bar0Death.append(Bar[2])
if Bar[0] == 1:
Bar1Birth.append(Bar[1])
Bar1Death.append(Bar[2])
if Bar[0] == 2:
Bar2Birth.append(Bar[1])
Bar2Death.append(Bar[2])
if len(Bar0Birth) > 0:
Bar0Birth = np.asarray(Bar0Birth, float)
Bar0Death = np.asarray(Bar0Death, float)
if len(Bar1Birth) > 0:
Bar1Birth = np.asarray(Bar1Birth, float)
Bar1Death = np.asarray(Bar1Death, float)
if len(Bar2Birth) > 0:
Bar2Birth = np.asarray(Bar2Birth, float)
Bar2Death = np.asarray(Bar2Death, float)
if len(Bar0Death) > 0:
Feature_i.append(np.mean(Bar0Death[:]))
Feature_i.append(np.std(Bar0Death[:]))
Feature_i.append(np.max(Bar0Death[:]))
Feature_i.append(np.min(Bar0Death[:]))
Feature_i.append(np.sum(Bar0Death[:]))
Feature_i.append(len(Bar0Death))
else:
Feature_i.extend([0.]*6)
if len(Bar1Death) > 0:
Feature_i.append(np.mean(Bar1Death[:] - Bar1Birth[:]))
Feature_i.append(np.std(Bar1Death[:] - Bar1Birth[:]))
Feature_i.append(np.max(Bar1Death[:] - Bar1Birth[:]))
Feature_i.append(np.min(Bar1Death[:] - Bar1Birth[:]))
Feature_i.append(np.sum(Bar1Death[:] - Bar1Birth[:]))
Feature_i.append(Bar1Birth[np.argmax(Bar1Death[:] - Bar1Birth[:])])
Feature_i.append(Bar1Death[np.argmax(Bar1Death[:] - Bar1Birth[:])])
Feature_i.append(np.mean(Bar1Birth[:]))
Feature_i.append(np.std(Bar1Birth[:]))
Feature_i.append(np.max(Bar1Birth[:]))
Feature_i.append(np.min(Bar1Birth[:]))
Feature_i.append(np.sum(Bar1Birth[:]))
Feature_i.append(np.mean(Bar1Death[:]))
Feature_i.append(np.std(Bar1Death[:]))
Feature_i.append(np.max(Bar1Death[:]))
Feature_i.append(np.min(Bar1Death[:]))
Feature_i.append(np.sum(Bar1Death[:]))
Feature_i.append(len(Bar1Death))
else:
Feature_i.extend([0.]*18)
if len(Bar2Death) > 0:
Feature_i.append(np.mean(Bar2Death[:] - Bar2Birth[:]))
Feature_i.append(np.std(Bar2Death[:] - Bar2Birth[:]))
Feature_i.append(np.max(Bar2Death[:] - Bar2Birth[:]))
Feature_i.append(np.min(Bar2Death[:] - Bar2Birth[:]))
Feature_i.append(np.sum(Bar2Death[:] - Bar2Birth[:]))
Feature_i.append(Bar2Birth[np.argmax(Bar2Death[:] - Bar2Birth[:])])
Feature_i.append(Bar2Death[np.argmax(Bar2Death[:] - Bar2Birth[:])])
Feature_i.append(np.mean(Bar2Birth[:]))
Feature_i.append(np.std(Bar2Birth[:]))
Feature_i.append(np.max(Bar2Birth[:]))
Feature_i.append(np.min(Bar2Birth[:]))
Feature_i.append(np.sum(Bar2Birth[:]))
Feature_i.append(np.mean(Bar2Death[:]))
Feature_i.append(np.std(Bar2Death[:]))
Feature_i.append(np.max(Bar2Death[:]))
Feature_i.append(np.min(Bar2Death[:]))
Feature_i.append(np.sum(Bar2Death[:]))
Feature_i.append(len(Bar2Death))
else:
Feature_i.extend([0.]*18)
else:
Feature_i.extend([0.]*42)
Feature_i = np.asarray(Feature_i, float)
    # np.save writes binary data, so open the output file in 'wb' mode
    outfile = open(working_dir+'/'+ligand_name+'_feature_alpha_handcrafted.npy', 'wb')
    np.save(outfile, Feature_i)
    outfile.close()
def GenerateFeature_level1(ligand_name, working_dir):
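    # Same barcode statistics as GenerateFeature_alpha, but read from the
    # per-element '*_level1.PH' text files, discarding bars shorter than `small`.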
small = 0.01
Feature_i = []
Cut = 12.0
LIGELE = ['C','N','O','S','CN','CO','CS','NO','NS','OS','CCl','CBr','CP','CF','CNO','CNS','COS','NOS','CNOS','CNOSPFClBrI','H','CH','NH','OH','SH','CNH','COH','CSH','NOH','NSH','OSH','CNOH','CNSH','COSH','NOSH','CNOSH','CNOSPFClBrIH','CClH','CBrH','CPH','CFH']
pdb = ligand_name
for el in LIGELE:
if os.path.exists(working_dir+'/'+ligand_name+'_'+el+'_level1.PH'):
InFile = open(working_dir+'/'+ligand_name+'_'+el+'_level1.PH')
lines = InFile.read().splitlines()
Bars = []
for line in lines:
a,b,c = line.split()
Bars.append([int(a), float(b), float(c)])
InFile.close()
Bar0Birth = []; Bar0Death = []; Bar1Birth = []; Bar1Death = []; Bar2Birth = []; Bar2Death = [];
for Bar in Bars:
if Bar[2] < Bar[1]:
continue
if Bar[2] > 12.0 and Bar[0] == 0: continue
if Bar[2] > 12.0 and Bar[0] > 0: Bar[2] = 12.0
if Bar[0] == 0 and Bar[2]-Bar[1] >= small:
Bar0Birth.append(Bar[1])
Bar0Death.append(Bar[2])
if Bar[0] == 1 and Bar[2]-Bar[1] >= small:
Bar1Birth.append(Bar[1])
Bar1Death.append(Bar[2])
if Bar[0] == 2 and Bar[2]-Bar[1] >= small:
Bar2Birth.append(Bar[1])
Bar2Death.append(Bar[2])
if len(Bar0Birth) > 0:
Bar0Birth = np.asarray(Bar0Birth, float)
Bar0Death = np.asarray(Bar0Death, float)
if len(Bar1Birth) > 0:
Bar1Birth = np.asarray(Bar1Birth, float)
Bar1Death = np.asarray(Bar1Death, float)
if len(Bar2Birth) > 0:
Bar2Birth = np.asarray(Bar2Birth, float)
Bar2Death = np.asarray(Bar2Death, float)
if len(Bar0Death) > 0:
Feature_i.append(np.mean(Bar0Death[:]))
Feature_i.append(np.std(Bar0Death[:]))
Feature_i.append(np.max(Bar0Death[:]))
Feature_i.append(np.min(Bar0Death[:]))
Feature_i.append(np.sum(Bar0Death[:]))
Feature_i.append(len(Bar0Death))
else:
Feature_i.extend([0.]*6)
if len(Bar1Death) > 0:
Feature_i.append(np.mean(Bar1Death[:] - Bar1Birth[:]))
Feature_i.append(np.std(Bar1Death[:] - Bar1Birth[:]))
Feature_i.append(np.max(Bar1Death[:] - Bar1Birth[:]))
Feature_i.append(np.min(Bar1Death[:] - Bar1Birth[:]))
Feature_i.append(np.sum(Bar1Death[:] - Bar1Birth[:]))
Feature_i.append(Bar1Birth[np.argmax(Bar1Death[:] - Bar1Birth[:])])
Feature_i.append(Bar1Death[np.argmax(Bar1Death[:] - Bar1Birth[:])])
Feature_i.append(np.mean(Bar1Birth[:]))
Feature_i.append(np.std(Bar1Birth[:]))
Feature_i.append(np.max(Bar1Birth[:]))
Feature_i.append(np.min(Bar1Birth[:]))
Feature_i.append(np.sum(Bar1Birth[:]))
Feature_i.append(np.mean(Bar1Death[:]))
Feature_i.append(np.std(Bar1Death[:]))
Feature_i.append(np.max(Bar1Death[:]))
Feature_i.append(np.min(Bar1Death[:]))
Feature_i.append(np.sum(Bar1Death[:]))
Feature_i.append(len(Bar1Death))
else:
Feature_i.extend([0.]*18)
if len(Bar2Death) > 0:
Feature_i.append(np.mean(Bar2Death[:] - Bar2Birth[:]))
Feature_i.append(np.std(Bar2Death[:] - Bar2Birth[:]))
Feature_i.append(np.max(Bar2Death[:] - Bar2Birth[:]))
Feature_i.append(np.min(Bar2Death[:] - Bar2Birth[:]))
Feature_i.append(np.sum(Bar2Death[:] - Bar2Birth[:]))
Feature_i.append(Bar2Birth[np.argmax(Bar2Death[:] - Bar2Birth[:])])
Feature_i.append(Bar2Death[np.argmax(Bar2Death[:] - Bar2Birth[:])])
Feature_i.append(np.mean(Bar2Birth[:]))
Feature_i.append(np.std(Bar2Birth[:]))
Feature_i.append(np.max(Bar2Birth[:]))
Feature_i.append(np.min(Bar2Birth[:]))
Feature_i.append(np.sum(Bar2Birth[:]))
Feature_i.append(np.mean(Bar2Death[:]))
Feature_i.append(np.std(Bar2Death[:]))
Feature_i.append(np.max(Bar2Death[:]))
Feature_i.append(np.min(Bar2Death[:]))
Feature_i.append(np.sum(Bar2Death[:]))
Feature_i.append(len(Bar2Death))
else:
Feature_i.extend([0.]*18)
else:
Feature_i.extend([0.]*42)
Feature_i = np.asarray(Feature_i, float)
    outfile = open(working_dir+'/'+ligand_name+'_feature_ligand_level1_handcrafted.npy', 'wb')
np.save(outfile, Feature_i)
outfile.close()
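# Minimal usage sketch (comment only; the ligand name and working directory are
# illustrative assumptions, not values from this file). The feature generators
# above read precomputed persistence files ('*_level1.PH', etc.) from
# `working_dir` and write a handcrafted feature vector back into it:
#
#   GenerateFeature_level1('1abc_ligand', '/tmp/ph_output')
#   # -> /tmp/ph_output/1abc_ligand_feature_ligand_level1_handcrafted.npy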
| [
"os.path.exists",
"numpy.mean",
"numpy.asarray",
"pickle.load",
"numpy.min",
"numpy.max",
"numpy.argmax",
"numpy.sum",
"numpy.std",
"numpy.save"
] | [((500, 519), 'pickle.load', 'pickle.load', (['InFile'], {}), '(InFile)\n', (511, 519), False, 'import pickle\n'), ((4686, 4714), 'numpy.asarray', 'np.asarray', (['Feature_i', 'float'], {}), '(Feature_i, float)\n', (4696, 4714), True, 'import numpy as np\n'), ((4806, 4833), 'numpy.save', 'np.save', (['outfile', 'Feature_i'], {}), '(outfile, Feature_i)\n', (4813, 4833), True, 'import numpy as np\n'), ((9780, 9808), 'numpy.asarray', 'np.asarray', (['Feature_i', 'float'], {}), '(Feature_i, float)\n', (9790, 9808), True, 'import numpy as np\n'), ((9908, 9935), 'numpy.save', 'np.save', (['outfile', 'Feature_i'], {}), '(outfile, Feature_i)\n', (9915, 9935), True, 'import numpy as np\n'), ((5283, 5356), 'os.path.exists', 'os.path.exists', (["(working_dir + '/' + ligand_name + '_' + el + '_level1.PH')"], {}), "(working_dir + '/' + ligand_name + '_' + el + '_level1.PH')\n", (5297, 5356), False, 'import os\n'), ((1385, 1413), 'numpy.asarray', 'np.asarray', (['Bar0Birth', 'float'], {}), '(Bar0Birth, float)\n', (1395, 1413), True, 'import numpy as np\n'), ((1442, 1470), 'numpy.asarray', 'np.asarray', (['Bar0Death', 'float'], {}), '(Bar0Death, float)\n', (1452, 1470), True, 'import numpy as np\n'), ((1534, 1562), 'numpy.asarray', 'np.asarray', (['Bar1Birth', 'float'], {}), '(Bar1Birth, float)\n', (1544, 1562), True, 'import numpy as np\n'), ((1591, 1619), 'numpy.asarray', 'np.asarray', (['Bar1Death', 'float'], {}), '(Bar1Death, float)\n', (1601, 1619), True, 'import numpy as np\n'), ((1683, 1711), 'numpy.asarray', 'np.asarray', (['Bar2Birth', 'float'], {}), '(Bar2Birth, float)\n', (1693, 1711), True, 'import numpy as np\n'), ((1740, 1768), 'numpy.asarray', 'np.asarray', (['Bar2Death', 'float'], {}), '(Bar2Death, float)\n', (1750, 1768), True, 'import numpy as np\n'), ((6479, 6507), 'numpy.asarray', 'np.asarray', (['Bar0Birth', 'float'], {}), '(Bar0Birth, float)\n', (6489, 6507), True, 'import numpy as np\n'), ((6536, 6564), 'numpy.asarray', 'np.asarray', (['Bar0Death', 'float'], {}), '(Bar0Death, float)\n', (6546, 6564), True, 'import numpy as np\n'), ((6628, 6656), 'numpy.asarray', 'np.asarray', (['Bar1Birth', 'float'], {}), '(Bar1Birth, float)\n', (6638, 6656), True, 'import numpy as np\n'), ((6685, 6713), 'numpy.asarray', 'np.asarray', (['Bar1Death', 'float'], {}), '(Bar1Death, float)\n', (6695, 6713), True, 'import numpy as np\n'), ((6777, 6805), 'numpy.asarray', 'np.asarray', (['Bar2Birth', 'float'], {}), '(Bar2Birth, float)\n', (6787, 6805), True, 'import numpy as np\n'), ((6834, 6862), 'numpy.asarray', 'np.asarray', (['Bar2Death', 'float'], {}), '(Bar2Death, float)\n', (6844, 6862), True, 'import numpy as np\n'), ((1837, 1858), 'numpy.mean', 'np.mean', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (1844, 1858), True, 'import numpy as np\n'), ((1893, 1913), 'numpy.std', 'np.std', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (1899, 1913), True, 'import numpy as np\n'), ((1948, 1968), 'numpy.max', 'np.max', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (1954, 1968), True, 'import numpy as np\n'), ((2003, 2023), 'numpy.min', 'np.min', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (2009, 2023), True, 'import numpy as np\n'), ((2058, 2078), 'numpy.sum', 'np.sum', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (2064, 2078), True, 'import numpy as np\n'), ((2256, 2292), 'numpy.mean', 'np.mean', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (2263, 2292), True, 'import numpy as np\n'), ((2327, 2362), 'numpy.std', 'np.std', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - 
Bar1Birth[:])\n', (2333, 2362), True, 'import numpy as np\n'), ((2397, 2432), 'numpy.max', 'np.max', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (2403, 2432), True, 'import numpy as np\n'), ((2467, 2502), 'numpy.min', 'np.min', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (2473, 2502), True, 'import numpy as np\n'), ((2537, 2572), 'numpy.sum', 'np.sum', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (2543, 2572), True, 'import numpy as np\n'), ((2775, 2796), 'numpy.mean', 'np.mean', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (2782, 2796), True, 'import numpy as np\n'), ((2831, 2851), 'numpy.std', 'np.std', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (2837, 2851), True, 'import numpy as np\n'), ((2886, 2906), 'numpy.max', 'np.max', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (2892, 2906), True, 'import numpy as np\n'), ((2941, 2961), 'numpy.min', 'np.min', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (2947, 2961), True, 'import numpy as np\n'), ((2996, 3016), 'numpy.sum', 'np.sum', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (3002, 3016), True, 'import numpy as np\n'), ((3051, 3072), 'numpy.mean', 'np.mean', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (3058, 3072), True, 'import numpy as np\n'), ((3107, 3127), 'numpy.std', 'np.std', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (3113, 3127), True, 'import numpy as np\n'), ((3162, 3182), 'numpy.max', 'np.max', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (3168, 3182), True, 'import numpy as np\n'), ((3217, 3237), 'numpy.min', 'np.min', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (3223, 3237), True, 'import numpy as np\n'), ((3272, 3292), 'numpy.sum', 'np.sum', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (3278, 3292), True, 'import numpy as np\n'), ((3471, 3507), 'numpy.mean', 'np.mean', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (3478, 3507), True, 'import numpy as np\n'), ((3542, 3577), 'numpy.std', 'np.std', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (3548, 3577), True, 'import numpy as np\n'), ((3612, 3647), 'numpy.max', 'np.max', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (3618, 3647), True, 'import numpy as np\n'), ((3682, 3717), 'numpy.min', 'np.min', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (3688, 3717), True, 'import numpy as np\n'), ((3752, 3787), 'numpy.sum', 'np.sum', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (3758, 3787), True, 'import numpy as np\n'), ((3990, 4011), 'numpy.mean', 'np.mean', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (3997, 4011), True, 'import numpy as np\n'), ((4046, 4066), 'numpy.std', 'np.std', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (4052, 4066), True, 'import numpy as np\n'), ((4101, 4121), 'numpy.max', 'np.max', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (4107, 4121), True, 'import numpy as np\n'), ((4156, 4176), 'numpy.min', 'np.min', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (4162, 4176), True, 'import numpy as np\n'), ((4211, 4231), 'numpy.sum', 'np.sum', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (4217, 4231), True, 'import numpy as np\n'), ((4266, 4287), 'numpy.mean', 'np.mean', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (4273, 4287), True, 'import numpy as np\n'), ((4322, 4342), 'numpy.std', 'np.std', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (4328, 4342), True, 'import numpy as np\n'), ((4377, 4397), 'numpy.max', 'np.max', 
(['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (4383, 4397), True, 'import numpy as np\n'), ((4432, 4452), 'numpy.min', 'np.min', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (4438, 4452), True, 'import numpy as np\n'), ((4487, 4507), 'numpy.sum', 'np.sum', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (4493, 4507), True, 'import numpy as np\n'), ((6931, 6952), 'numpy.mean', 'np.mean', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (6938, 6952), True, 'import numpy as np\n'), ((6987, 7007), 'numpy.std', 'np.std', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (6993, 7007), True, 'import numpy as np\n'), ((7042, 7062), 'numpy.max', 'np.max', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (7048, 7062), True, 'import numpy as np\n'), ((7097, 7117), 'numpy.min', 'np.min', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (7103, 7117), True, 'import numpy as np\n'), ((7152, 7172), 'numpy.sum', 'np.sum', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (7158, 7172), True, 'import numpy as np\n'), ((7350, 7386), 'numpy.mean', 'np.mean', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (7357, 7386), True, 'import numpy as np\n'), ((7421, 7456), 'numpy.std', 'np.std', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (7427, 7456), True, 'import numpy as np\n'), ((7491, 7526), 'numpy.max', 'np.max', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (7497, 7526), True, 'import numpy as np\n'), ((7561, 7596), 'numpy.min', 'np.min', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (7567, 7596), True, 'import numpy as np\n'), ((7631, 7666), 'numpy.sum', 'np.sum', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (7637, 7666), True, 'import numpy as np\n'), ((7869, 7890), 'numpy.mean', 'np.mean', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (7876, 7890), True, 'import numpy as np\n'), ((7925, 7945), 'numpy.std', 'np.std', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (7931, 7945), True, 'import numpy as np\n'), ((7980, 8000), 'numpy.max', 'np.max', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (7986, 8000), True, 'import numpy as np\n'), ((8035, 8055), 'numpy.min', 'np.min', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (8041, 8055), True, 'import numpy as np\n'), ((8090, 8110), 'numpy.sum', 'np.sum', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (8096, 8110), True, 'import numpy as np\n'), ((8145, 8166), 'numpy.mean', 'np.mean', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (8152, 8166), True, 'import numpy as np\n'), ((8201, 8221), 'numpy.std', 'np.std', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (8207, 8221), True, 'import numpy as np\n'), ((8256, 8276), 'numpy.max', 'np.max', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (8262, 8276), True, 'import numpy as np\n'), ((8311, 8331), 'numpy.min', 'np.min', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (8317, 8331), True, 'import numpy as np\n'), ((8366, 8386), 'numpy.sum', 'np.sum', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (8372, 8386), True, 'import numpy as np\n'), ((8565, 8601), 'numpy.mean', 'np.mean', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (8572, 8601), True, 'import numpy as np\n'), ((8636, 8671), 'numpy.std', 'np.std', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (8642, 8671), True, 'import numpy as np\n'), ((8706, 8741), 'numpy.max', 'np.max', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (8712, 8741), True, 'import numpy as np\n'), ((8776, 8811), 
'numpy.min', 'np.min', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (8782, 8811), True, 'import numpy as np\n'), ((8846, 8881), 'numpy.sum', 'np.sum', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (8852, 8881), True, 'import numpy as np\n'), ((9084, 9105), 'numpy.mean', 'np.mean', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (9091, 9105), True, 'import numpy as np\n'), ((9140, 9160), 'numpy.std', 'np.std', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (9146, 9160), True, 'import numpy as np\n'), ((9195, 9215), 'numpy.max', 'np.max', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (9201, 9215), True, 'import numpy as np\n'), ((9250, 9270), 'numpy.min', 'np.min', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (9256, 9270), True, 'import numpy as np\n'), ((9305, 9325), 'numpy.sum', 'np.sum', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (9311, 9325), True, 'import numpy as np\n'), ((9360, 9381), 'numpy.mean', 'np.mean', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (9367, 9381), True, 'import numpy as np\n'), ((9416, 9436), 'numpy.std', 'np.std', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (9422, 9436), True, 'import numpy as np\n'), ((9471, 9491), 'numpy.max', 'np.max', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (9477, 9491), True, 'import numpy as np\n'), ((9526, 9546), 'numpy.min', 'np.min', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (9532, 9546), True, 'import numpy as np\n'), ((9581, 9601), 'numpy.sum', 'np.sum', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (9587, 9601), True, 'import numpy as np\n'), ((2617, 2655), 'numpy.argmax', 'np.argmax', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (2626, 2655), True, 'import numpy as np\n'), ((2701, 2739), 'numpy.argmax', 'np.argmax', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (2710, 2739), True, 'import numpy as np\n'), ((3832, 3870), 'numpy.argmax', 'np.argmax', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (3841, 3870), True, 'import numpy as np\n'), ((3916, 3954), 'numpy.argmax', 'np.argmax', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (3925, 3954), True, 'import numpy as np\n'), ((7711, 7749), 'numpy.argmax', 'np.argmax', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (7720, 7749), True, 'import numpy as np\n'), ((7795, 7833), 'numpy.argmax', 'np.argmax', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (7804, 7833), True, 'import numpy as np\n'), ((8926, 8964), 'numpy.argmax', 'np.argmax', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (8935, 8964), True, 'import numpy as np\n'), ((9010, 9048), 'numpy.argmax', 'np.argmax', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (9019, 9048), True, 'import numpy as np\n')] |
import tensorflow as tf
def _is_tensor(x):
"""Returns `True` if `x` is a symbolic tensor-like object.
From http://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/image_ops_impl.py
Args:
x: A python object to check.
Returns:
`True` if `x` is a `tf.Tensor` or `tf.Variable`, otherwise `False`.
"""
return isinstance(x, (tf.Tensor, tf.Variable))
def _ImageDimensions(image, rank):
"""Returns the dimensions of an image tensor.
From http://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/image_ops_impl.py
Args:
image: A rank-D Tensor. For 3-D of shape: `[height, width, channels]`.
rank: The expected rank of the image
Returns:
    A list corresponding to the dimensions of the input image. Dimensions that
    are statically known are Python integers, otherwise they are integer scalar
    tensors.
"""
if image.get_shape().is_fully_defined():
return image.get_shape().as_list()
else:
static_shape = image.get_shape().with_rank(rank).as_list()
dynamic_shape = tf.unstack(tf.shape(image), rank)
return [
s if s is not None else d for s, d in zip(static_shape, dynamic_shape)
]
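# Comment-only illustration of _ImageDimensions (shapes are assumptions chosen
# for the example; this module targets the TF1 graph API used throughout):
#
#   _ImageDimensions(tf.zeros([2, 4, 4, 3]), rank=4)
#   # -> [2, 4, 4, 3]                       (fully static shape -> Python ints)
#   x = tf.placeholder(tf.float32, [None, 4, 4, 3])
#   _ImageDimensions(x, rank=4)
#   # -> [<scalar int32 Tensor>, 4, 4, 3]   (unknown batch dim -> scalar tensor)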
def _CheckAtLeast4DImage(image, require_static=True):
"""Assert that we are working with properly shaped image.
(modified) From http://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/image_ops_impl.py
Args:
image: >= 4-D Tensor of size [*, height, width, depth, channels]
require_static: If `True`, requires that all dimensions of `image` are
known and non-zero.
Raises:
ValueError: if image.shape is not a [>= 4] vector.
Returns:
An empty list, if `image` has fully defined dimensions. Otherwise, a list
containing an assert op is returned.
"""
try:
if image.get_shape().ndims is None:
image_shape = image.get_shape().with_rank(4)
else:
image_shape = image.get_shape().with_rank_at_least(4)
except ValueError:
raise ValueError("'image' must be at least four-dimensional.")
if require_static and not image_shape.is_fully_defined():
raise ValueError('\'image\' must be fully defined.')
if any(x == 0 for x in image_shape):
raise ValueError(
'all dims of \'image.shape\' must be > 0: %s' % image_shape)
if not image_shape.is_fully_defined():
return [
tf.assert_positive(
tf.shape(image),
['all dims of "image.shape " must be > 0.'])
]
else:
return []
def uniform(*args, **kwargs):
return tf.random.uniform(*args, **kwargs)
def pad(*args, **kwargs):
return tf.pad(*args, **kwargs)
def top_k(*args, **kwargs):
return tf.math.top_k(*args, **kwargs)
def non_max_suppression_overlaps(*args, **kwargs):
return tf.image.non_max_suppression_overlaps(*args, **kwargs)
def gather_nd(*args, **kwargs):
return tf.gather_nd(*args, **kwargs)
def clip_by_value(*args, **kwargs):
return tf.clip_by_value(*args, **kwargs)
def meshgrid(*args, **kwargs):
return tf.meshgrid(*args, **kwargs)
def map_fn(*args, **kwargs):
return tf.map_fn(*args, **kwargs)
def where(*args, **kwargs):
return tf.where(*args, **kwargs)
def crop_to_bounding_box_3d(image, box, target_size):
    '''Crops a 3D image to a specified bounding box. When the box is smaller
    than 'target_size', the surroundings of the image are padded (approximately
    evenly) with zeros. A 'box' of size 0 is allowed.
    Args:
        image: 5-D Tensor of shape '[batch, height, width, depth, channels]' or
            4-D Tensor of shape '[height, width, depth, channels]'
        box: 1-D Tensor of shape '[6,]' representing the cropped area.
        target_size: The ultimate bounding box size.
    Returns:
        if 'image' was 5-D, a 5-D float Tensor of shape '[batch_size] + target_size + [channels]'
        if 'image' was 4-D, a 4-D float Tensor of shape 'target_size + [channels]'
'''
with tf.name_scope(None, 'crop_to_bounding_box_3d', [image]):
image = tf.convert_to_tensor(image, name='image')
is_batch = True
image_shape = image.get_shape()
if image_shape.ndims == 4:
is_batch = False
image = tf.expand_dims(image, 0)
elif image_shape.ndims is None:
is_batch = False
image = tf.expand_dims(image, 0)
image.set_shape([None] * 5)
elif image_shape.ndims != 5:
raise ValueError('\'image\' must have either 4 or 5 dimensions.')
assert_ops = _CheckAtLeast4DImage(image, require_static=False)
        # Never mind the real meaning of height/width/depth; the names simply mimic the TensorFlow API's writing convention.
batch, height, width, depth, channels = _ImageDimensions(image, rank=5)
# print('crop_to_bounding_box_3d height:',height)
box_size = box[1::2] - box[::2]
assert_ops.append(tf.assert_greater_equal([height, width, depth], box[1::2], ['The remote corner of box must not exceed image boundaries.']))
assert_ops.append(tf.assert_non_negative(box[::2], ['The near corner of box must be non negative.']))
assert_ops.append(tf.assert_non_negative(box_size, ['The box size should be non negative.']))
assert_ops.append(tf.assert_greater_equal(target_size, box_size, ['The target size should be not less than box size. ']))
with tf.control_dependencies(assert_ops):
          image = image
          # NOTE: the line above is a no-op, so assert_ops are not actually
          # enforced; tf.identity(image) (or the commented-out
          # tf.with_dependencies(assert_ops, image)) would force them to run.
cropped = tf.slice(
image, tf.stack([0, box[0], box[2], box[4], 0]),
tf.stack([-1, box_size[0], box_size[1], box_size[2] , -1])
)
def _max(x, y):
if _is_tensor(x) or _is_tensor(y):
return tf.maximum(x, y)
else:
return max(x, y)
padding_offsets = _max((target_size - box_size) // 2, 0)
after_padding_size = target_size - padding_offsets - box_size
paddings = tf.reshape(
tf.stack([
0, 0, padding_offsets[0], after_padding_size[0],
padding_offsets[1], after_padding_size[1], # noqa: E131
padding_offsets[2], after_padding_size[2], 0, 0 # noqa: E131
]), [5, 2])
padded = tf.pad(cropped, paddings)
result_shape = [
None if _is_tensor(i) else i
for i in [batch, target_size[0], target_size[1], target_size[2], channels]
]
padded.set_shape(result_shape)
if not is_batch:
padded = tf.squeeze(padded, axis=[0])
return padded
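# Usage sketch (comment only; all shapes and coordinates are illustrative
# assumptions). `box` is laid out as [h_begin, h_end, w_begin, w_end, d_begin,
# d_end], matching the slicing above:
#
#   vol = tf.placeholder(tf.float32, [None, 64, 64, 64, 1])
#   box = tf.constant([10, 30, 10, 30, 10, 30])
#   crop = crop_to_bounding_box_3d(vol, box, [32, 32, 32])
#   # -> a [None, 32, 32, 32, 1] tensor: the 20x20x20 crop, zero-padded
#   #    (approximately evenly) out to the 32x32x32 target size.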
| [
"tensorflow.meshgrid",
"tensorflow.shape",
"tensorflow.pad",
"tensorflow.assert_greater_equal",
"tensorflow.control_dependencies",
"tensorflow.assert_non_negative",
"tensorflow.image.non_max_suppression_overlaps",
"tensorflow.clip_by_value",
"tensorflow.maximum",
"tensorflow.convert_to_tensor",
"tensorflow.math.top_k",
"tensorflow.stack",
"tensorflow.random.uniform",
"tensorflow.where",
"tensorflow.expand_dims",
"tensorflow.name_scope",
"tensorflow.map_fn",
"tensorflow.gather_nd",
"tensorflow.squeeze"
] | [((2734, 2768), 'tensorflow.random.uniform', 'tf.random.uniform', (['*args'], {}), '(*args, **kwargs)\n', (2751, 2768), True, 'import tensorflow as tf\n'), ((2808, 2831), 'tensorflow.pad', 'tf.pad', (['*args'], {}), '(*args, **kwargs)\n', (2814, 2831), True, 'import tensorflow as tf\n'), ((2873, 2903), 'tensorflow.math.top_k', 'tf.math.top_k', (['*args'], {}), '(*args, **kwargs)\n', (2886, 2903), True, 'import tensorflow as tf\n'), ((2968, 3022), 'tensorflow.image.non_max_suppression_overlaps', 'tf.image.non_max_suppression_overlaps', (['*args'], {}), '(*args, **kwargs)\n', (3005, 3022), True, 'import tensorflow as tf\n'), ((3068, 3097), 'tensorflow.gather_nd', 'tf.gather_nd', (['*args'], {}), '(*args, **kwargs)\n', (3080, 3097), True, 'import tensorflow as tf\n'), ((3147, 3180), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['*args'], {}), '(*args, **kwargs)\n', (3163, 3180), True, 'import tensorflow as tf\n'), ((3225, 3253), 'tensorflow.meshgrid', 'tf.meshgrid', (['*args'], {}), '(*args, **kwargs)\n', (3236, 3253), True, 'import tensorflow as tf\n'), ((3296, 3322), 'tensorflow.map_fn', 'tf.map_fn', (['*args'], {}), '(*args, **kwargs)\n', (3305, 3322), True, 'import tensorflow as tf\n'), ((3364, 3389), 'tensorflow.where', 'tf.where', (['*args'], {}), '(*args, **kwargs)\n', (3372, 3389), True, 'import tensorflow as tf\n'), ((4163, 4218), 'tensorflow.name_scope', 'tf.name_scope', (['None', '"""crop_to_bounding_box_3d"""', '[image]'], {}), "(None, 'crop_to_bounding_box_3d', [image])\n", (4176, 4218), True, 'import tensorflow as tf\n'), ((4236, 4277), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['image'], {'name': '"""image"""'}), "(image, name='image')\n", (4256, 4277), True, 'import tensorflow as tf\n'), ((6513, 6538), 'tensorflow.pad', 'tf.pad', (['cropped', 'paddings'], {}), '(cropped, paddings)\n', (6519, 6538), True, 'import tensorflow as tf\n'), ((1142, 1157), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (1150, 1157), True, 'import tensorflow as tf\n'), ((4427, 4451), 'tensorflow.expand_dims', 'tf.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (4441, 4451), True, 'import tensorflow as tf\n'), ((5132, 5259), 'tensorflow.assert_greater_equal', 'tf.assert_greater_equal', (['[height, width, depth]', 'box[1::2]', "['The remote corner of box must not exceed image boundaries.']"], {}), "([height, width, depth], box[1::2], [\n 'The remote corner of box must not exceed image boundaries.'])\n", (5155, 5259), True, 'import tensorflow as tf\n'), ((5282, 5369), 'tensorflow.assert_non_negative', 'tf.assert_non_negative', (['box[::2]', "['The near corner of box must be non negative.']"], {}), "(box[::2], [\n 'The near corner of box must be non negative.'])\n", (5304, 5369), True, 'import tensorflow as tf\n'), ((5392, 5466), 'tensorflow.assert_non_negative', 'tf.assert_non_negative', (['box_size', "['The box size should be non negative.']"], {}), "(box_size, ['The box size should be non negative.'])\n", (5414, 5466), True, 'import tensorflow as tf\n'), ((5494, 5601), 'tensorflow.assert_greater_equal', 'tf.assert_greater_equal', (['target_size', 'box_size', "['The target size should be not less than box size. ']"], {}), "(target_size, box_size, [\n 'The target size should be not less than box size. 
'])\n", (5517, 5601), True, 'import tensorflow as tf\n'), ((5612, 5647), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['assert_ops'], {}), '(assert_ops)\n', (5635, 5647), True, 'import tensorflow as tf\n'), ((5772, 5812), 'tensorflow.stack', 'tf.stack', (['[0, box[0], box[2], box[4], 0]'], {}), '([0, box[0], box[2], box[4], 0])\n', (5780, 5812), True, 'import tensorflow as tf\n'), ((5826, 5883), 'tensorflow.stack', 'tf.stack', (['[-1, box_size[0], box_size[1], box_size[2], -1]'], {}), '([-1, box_size[0], box_size[1], box_size[2], -1])\n', (5834, 5883), True, 'import tensorflow as tf\n'), ((6237, 6398), 'tensorflow.stack', 'tf.stack', (['[0, 0, padding_offsets[0], after_padding_size[0], padding_offsets[1],\n after_padding_size[1], padding_offsets[2], after_padding_size[2], 0, 0]'], {}), '([0, 0, padding_offsets[0], after_padding_size[0], padding_offsets[\n 1], after_padding_size[1], padding_offsets[2], after_padding_size[2], 0, 0]\n )\n', (6245, 6398), True, 'import tensorflow as tf\n'), ((6789, 6817), 'tensorflow.squeeze', 'tf.squeeze', (['padded'], {'axis': '[0]'}), '(padded, axis=[0])\n', (6799, 6817), True, 'import tensorflow as tf\n'), ((2575, 2590), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (2583, 2590), True, 'import tensorflow as tf\n'), ((4541, 4565), 'tensorflow.expand_dims', 'tf.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (4555, 4565), True, 'import tensorflow as tf\n'), ((5990, 6006), 'tensorflow.maximum', 'tf.maximum', (['x', 'y'], {}), '(x, y)\n', (6000, 6006), True, 'import tensorflow as tf\n')] |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tensor_util tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import numpy as np
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.contrib.framework.python.ops import variables as variables_lib2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
class LocalVariabletest(test.TestCase):
def test_local_variable(self):
with self.cached_session() as sess:
self.assertEquals([], variables_lib.local_variables())
value0 = 42
variables_lib2.local_variable(value0)
value1 = 43
variables_lib2.local_variable(value1)
variables = variables_lib.local_variables()
self.assertEquals(2, len(variables))
self.assertRaises(errors_impl.OpError, sess.run, variables)
variables_lib.variables_initializer(variables).run()
self.assertAllEqual(set([value0, value1]), set(sess.run(variables)))
class ReduceSumNTest(test.TestCase):
def test_reduce_sum_n(self):
with self.cached_session():
a = constant_op.constant(1)
b = constant_op.constant([2])
c = constant_op.constant([[3, 4], [5, 6]])
self.assertEqual(21, tensor_util.reduce_sum_n([a, b, c]).eval())
class AssertScalarIntTest(test.TestCase):
def test_assert_scalar_int(self):
tensor_util.assert_scalar_int(constant_op.constant(3, dtype=dtypes.int32))
tensor_util.assert_scalar_int(constant_op.constant(3, dtype=dtypes.int64))
tensor_util.assert_scalar_int(3)
with self.assertRaisesRegexp(ValueError, "Expected integer"):
tensor_util.assert_scalar_int(
constant_op.constant(
3, dtype=dtypes.float32))
with self.assertRaisesRegexp(ValueError, "Expected scalar"):
tensor_util.assert_scalar_int(
constant_op.constant(
[3, 4], dtype=dtypes.int32))
class WithShapeTest(test.TestCase):
def _assert_with_shape(self, tensor, expected_value, expected_shape,
unexpected_shapes):
for unexpected_shape in unexpected_shapes:
self.assertRaises(ValueError, tensor_util.with_shape, unexpected_shape,
tensor)
pattern = (
r"\[Wrong shape for %s \[expected\] \[actual\].\] \[%s\] \[%s\]" %
(tensor.name, " ".join([str(dim) for dim in unexpected_shape]),
" ".join([str(dim) for dim in expected_shape])))
self.assertRaisesRegexp(errors_impl.OpError,
re.compile(pattern),
tensor_util.with_shape(
constant_op.constant(unexpected_shape),
tensor).eval)
expected_placeholder = array_ops.placeholder(dtypes.float32)
self.assertRaisesRegexp(errors_impl.OpError,
re.compile(pattern),
tensor_util.with_same_shape(expected_placeholder,
tensor).eval,
{expected_placeholder: np.ones(unexpected_shape)})
self.assertIs(tensor, tensor_util.with_shape(expected_shape, tensor))
self.assertIs(
tensor,
tensor_util.with_same_shape(
constant_op.constant(
1, shape=expected_shape), tensor))
tensor_with_shape = tensor_util.with_shape(
constant_op.constant(expected_shape), tensor)
np.testing.assert_array_equal(expected_value, tensor_with_shape.eval())
tensor_with_same_shape = tensor_util.with_same_shape(expected_placeholder,
tensor)
np.testing.assert_array_equal(expected_value,
tensor_with_same_shape.eval({
expected_placeholder:
np.ones(expected_shape)
}))
def test_with_shape_invalid_expected_shape(self):
with self.cached_session():
self.assertRaisesRegexp(ValueError, "Invalid rank",
tensor_util.with_shape, [[1], [2]],
constant_op.constant(1.0))
def test_with_shape_invalid_type(self):
with self.cached_session():
self.assertRaisesRegexp(ValueError, "Invalid dtype",
tensor_util.with_shape, [1.1],
constant_op.constant([1.0]))
self.assertRaisesRegexp(ValueError, "Invalid dtype",
tensor_util.with_shape,
np.array([1.1]), constant_op.constant(1.0))
self.assertRaisesRegexp(ValueError, "Invalid dtype",
tensor_util.with_shape,
constant_op.constant(np.array([1.1])),
constant_op.constant(1.0))
def test_with_shape_0(self):
with self.cached_session():
value = 42
shape = [0]
unexpected_shapes = [[1], [2], [1, 1]]
self._assert_with_shape(
constant_op.constant(
value, shape=shape),
value,
shape,
unexpected_shapes)
def test_with_shape_1(self):
with self.cached_session():
value = [42]
shape = [1]
unexpected_shapes = [[0], [2], [1, 1]]
self._assert_with_shape(
constant_op.constant(
value, shape=shape),
value,
shape,
unexpected_shapes)
def test_with_shape_2(self):
with self.cached_session():
value = [42, 43]
shape = [2]
unexpected_shapes = [[0], [1], [2, 1]]
self._assert_with_shape(
constant_op.constant(
value, shape=shape),
value,
shape,
unexpected_shapes)
def test_with_shape_2x2(self):
with self.cached_session():
value = [[42, 43], [44, 45]]
shape = [2, 2]
unexpected_shapes = [[0], [1], [2, 1]]
self._assert_with_shape(
constant_op.constant(
value, shape=shape),
value,
shape,
unexpected_shapes)
def test_with_shape_2x2_with_partial_expected_shape(self):
with self.cached_session():
value = [[42, 43], [44, 45]]
actual_shape = [2, 2]
tensor = constant_op.constant(value, shape=actual_shape)
partial_expected_shape = tensor_shape.TensorShape([None, 2])
# Won't raise any exception here:
tensor_with_shape = tensor_util.with_shape(partial_expected_shape, tensor)
np.testing.assert_array_equal(value, tensor_with_shape.eval())
def test_with_shape_none(self):
with self.cached_session():
tensor_no_shape = array_ops.placeholder(dtypes.float32)
compatible_shape = [2, 2]
with_present_2x2 = tensor_util.with_shape(compatible_shape,
tensor_no_shape)
self.assertEquals(compatible_shape, with_present_2x2.get_shape().dims)
with_future_2x2 = tensor_util.with_shape(
constant_op.constant(compatible_shape), tensor_no_shape)
array_2x2 = [[42.0, 43.0], [44.0, 45.0]]
for tensor_2x2 in [with_present_2x2, with_future_2x2]:
np.testing.assert_array_equal(array_2x2,
tensor_2x2.eval({
tensor_no_shape: array_2x2
}))
self.assertRaisesRegexp(errors_impl.OpError, "Wrong shape",
tensor_2x2.eval,
{tensor_no_shape: [42.0, 43.0]})
self.assertRaisesRegexp(errors_impl.OpError, "Wrong shape",
tensor_2x2.eval, {tensor_no_shape: [42.0]})
def test_with_shape_partial(self):
with self.cached_session():
tensor_partial_shape = array_ops.placeholder(dtypes.float32)
tensor_partial_shape.set_shape([None, 2])
for incompatible_shape in [[0], [1]]:
self.assertRaisesRegexp(
ValueError, "Shapes must be equal rank, but are 2 and 1",
tensor_util.with_shape, incompatible_shape, tensor_partial_shape)
for incompatible_shape in [[1, 2, 1]]:
self.assertRaisesRegexp(ValueError, "Dimensions must be equal",
tensor_util.with_shape, incompatible_shape,
tensor_partial_shape)
for incompatible_shape in [[2, 1]]:
self.assertRaisesRegexp(
ValueError,
r"Dimension 1 in both shapes must be equal, but are 2 and 1. "
r"Shapes are \[\?,2\] and \[2,1\].",
tensor_util.with_shape, incompatible_shape, tensor_partial_shape)
compatible_shape = [2, 2]
with_present_2x2 = tensor_util.with_shape(compatible_shape,
tensor_partial_shape)
self.assertEquals(compatible_shape, with_present_2x2.get_shape().dims)
with_future_2x2 = tensor_util.with_shape(
constant_op.constant(compatible_shape), tensor_partial_shape)
array_2x2 = [[42.0, 43.0], [44.0, 45.0]]
for tensor_2x2 in [with_present_2x2, with_future_2x2]:
np.testing.assert_array_equal(array_2x2,
tensor_2x2.eval({
tensor_partial_shape: array_2x2
}))
self.assertRaises(ValueError, tensor_2x2.eval,
{tensor_partial_shape: [42.0, 43.0]})
self.assertRaises(ValueError, tensor_2x2.eval,
{tensor_partial_shape: [42.0]})
class RemoveSqueezableDimensionsTest(test.TestCase):
def testRemoveSqueezableDimensions(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=False,
labels_have_static_shape=False,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=False,
labels_have_static_shape=False,
labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_staticLabel(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=False,
labels_have_static_shape=True,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_staticLabel_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=False,
labels_have_static_shape=True,
labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_extraPredictionDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=True,
labels_have_static_shape=False,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_extraPredictionDim_staticLabel(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=True,
labels_have_static_shape=True,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_staticPrediction(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=False,
labels_have_static_shape=False,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_staticPrediction_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=False,
labels_have_static_shape=False,
labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_static(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=False,
labels_have_static_shape=True,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_static_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=False,
labels_have_static_shape=True,
labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_staticPrediction_extraPredictionDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=True,
labels_have_static_shape=False,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_static_extraPredictionDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=True,
labels_have_static_shape=True,
labels_have_extra_dim=False)
# TODO(ptucker): Replace this with parameterized test.
def _testRemoveSqueezableDimensions(self, predictions_have_static_shape,
predictions_have_extra_dim,
labels_have_static_shape,
labels_have_extra_dim):
assert not (predictions_have_extra_dim and labels_have_extra_dim)
predictions_value = (0, 1, 1, 0, 0, 1, 0)
labels_value = (0, 0, 1, 1, 0, 0, 0)
input_predictions_value = ([[p] for p in predictions_value] if
predictions_have_extra_dim else
predictions_value)
input_labels_value = ([[l] for l in labels_value] if labels_have_extra_dim
else labels_value)
with ops.Graph().as_default() as g:
feed_dict = {}
if predictions_have_static_shape:
predictions = constant_op.constant(
input_predictions_value, dtype=dtypes.int32)
else:
predictions = array_ops.placeholder(
dtype=dtypes.int32, name="predictions")
feed_dict[predictions] = input_predictions_value
if labels_have_static_shape:
labels = constant_op.constant(input_labels_value, dtype=dtypes.int32)
else:
labels = array_ops.placeholder(dtype=dtypes.int32, name="labels")
feed_dict[labels] = input_labels_value
squeezed_predictions, squeezed_labels = (
tensor_util.remove_squeezable_dimensions(predictions, labels))
with self.session(g):
variables_lib.local_variables_initializer().run()
self.assertAllClose(
predictions_value, squeezed_predictions.eval(feed_dict=feed_dict))
self.assertAllClose(
labels_value, squeezed_labels.eval(feed_dict=feed_dict))
if __name__ == "__main__":
test.main()
| [
"tensorflow.contrib.framework.python.ops.variables.local_variable",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.contrib.framework.python.framework.tensor_util.reduce_sum_n",
"numpy.ones",
"re.compile",
"tensorflow.python.ops.variables.local_variables_initializer",
"tensorflow.python.ops.variables.local_variables",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.contrib.framework.python.framework.tensor_util.remove_squeezable_dimensions",
"tensorflow.contrib.framework.python.framework.tensor_util.assert_scalar_int",
"tensorflow.contrib.framework.python.framework.tensor_util.with_same_shape",
"numpy.array",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.framework.ops.Graph",
"tensorflow.contrib.framework.python.framework.tensor_util.with_shape",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.variables.variables_initializer"
] | [((16270, 16281), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (16279, 16281), False, 'from tensorflow.python.platform import test\n'), ((2684, 2716), 'tensorflow.contrib.framework.python.framework.tensor_util.assert_scalar_int', 'tensor_util.assert_scalar_int', (['(3)'], {}), '(3)\n', (2713, 2716), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((4772, 4829), 'tensorflow.contrib.framework.python.framework.tensor_util.with_same_shape', 'tensor_util.with_same_shape', (['expected_placeholder', 'tensor'], {}), '(expected_placeholder, tensor)\n', (4799, 4829), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((1730, 1767), 'tensorflow.contrib.framework.python.ops.variables.local_variable', 'variables_lib2.local_variable', (['value0'], {}), '(value0)\n', (1759, 1767), True, 'from tensorflow.contrib.framework.python.ops import variables as variables_lib2\n'), ((1794, 1831), 'tensorflow.contrib.framework.python.ops.variables.local_variable', 'variables_lib2.local_variable', (['value1'], {}), '(value1)\n', (1823, 1831), True, 'from tensorflow.contrib.framework.python.ops import variables as variables_lib2\n'), ((1851, 1882), 'tensorflow.python.ops.variables.local_variables', 'variables_lib.local_variables', ([], {}), '()\n', (1880, 1882), True, 'from tensorflow.python.ops import variables as variables_lib\n'), ((2250, 2273), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1)'], {}), '(1)\n', (2270, 2273), False, 'from tensorflow.python.framework import constant_op\n'), ((2285, 2310), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[2]'], {}), '([2])\n', (2305, 2310), False, 'from tensorflow.python.framework import constant_op\n'), ((2322, 2360), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[[3, 4], [5, 6]]'], {}), '([[3, 4], [5, 6]])\n', (2342, 2360), False, 'from tensorflow.python.framework import constant_op\n'), ((2554, 2597), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(3)'], {'dtype': 'dtypes.int32'}), '(3, dtype=dtypes.int32)\n', (2574, 2597), False, 'from tensorflow.python.framework import constant_op\n'), ((2634, 2677), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(3)'], {'dtype': 'dtypes.int64'}), '(3, dtype=dtypes.int64)\n', (2654, 2677), False, 'from tensorflow.python.framework import constant_op\n'), ((3944, 3981), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.float32'], {}), '(dtypes.float32)\n', (3965, 3981), False, 'from tensorflow.python.ops import array_ops\n'), ((4351, 4397), 'tensorflow.contrib.framework.python.framework.tensor_util.with_shape', 'tensor_util.with_shape', (['expected_shape', 'tensor'], {}), '(expected_shape, tensor)\n', (4373, 4397), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((4619, 4655), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['expected_shape'], {}), '(expected_shape)\n', (4639, 4655), False, 'from tensorflow.python.framework import constant_op\n'), ((7613, 7660), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['value'], {'shape': 'actual_shape'}), '(value, shape=actual_shape)\n', (7633, 7660), False, 'from tensorflow.python.framework import constant_op\n'), ((7693, 7728), 'tensorflow.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', 
(['[None, 2]'], {}), '([None, 2])\n', (7717, 7728), False, 'from tensorflow.python.framework import tensor_shape\n'), ((7797, 7851), 'tensorflow.contrib.framework.python.framework.tensor_util.with_shape', 'tensor_util.with_shape', (['partial_expected_shape', 'tensor'], {}), '(partial_expected_shape, tensor)\n', (7819, 7851), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((8017, 8054), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.float32'], {}), '(dtypes.float32)\n', (8038, 8054), False, 'from tensorflow.python.ops import array_ops\n'), ((8116, 8173), 'tensorflow.contrib.framework.python.framework.tensor_util.with_shape', 'tensor_util.with_shape', (['compatible_shape', 'tensor_no_shape'], {}), '(compatible_shape, tensor_no_shape)\n', (8138, 8173), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((9184, 9221), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.float32'], {}), '(dtypes.float32)\n', (9205, 9221), False, 'from tensorflow.python.ops import array_ops\n'), ((10121, 10183), 'tensorflow.contrib.framework.python.framework.tensor_util.with_shape', 'tensor_util.with_shape', (['compatible_shape', 'tensor_partial_shape'], {}), '(compatible_shape, tensor_partial_shape)\n', (10143, 10183), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((15874, 15935), 'tensorflow.contrib.framework.python.framework.tensor_util.remove_squeezable_dimensions', 'tensor_util.remove_squeezable_dimensions', (['predictions', 'labels'], {}), '(predictions, labels)\n', (15914, 15935), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((1671, 1702), 'tensorflow.python.ops.variables.local_variables', 'variables_lib.local_variables', ([], {}), '()\n', (1700, 1702), True, 'from tensorflow.python.ops import variables as variables_lib\n'), ((2833, 2878), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(3)'], {'dtype': 'dtypes.float32'}), '(3, dtype=dtypes.float32)\n', (2853, 2878), False, 'from tensorflow.python.framework import constant_op\n'), ((3011, 3059), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[3, 4]'], {'dtype': 'dtypes.int32'}), '([3, 4], dtype=dtypes.int32)\n', (3031, 3059), False, 'from tensorflow.python.framework import constant_op\n'), ((3714, 3733), 're.compile', 're.compile', (['pattern'], {}), '(pattern)\n', (3724, 3733), False, 'import re\n'), ((4065, 4084), 're.compile', 're.compile', (['pattern'], {}), '(pattern)\n', (4075, 4084), False, 'import re\n'), ((4487, 4532), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1)'], {'shape': 'expected_shape'}), '(1, shape=expected_shape)\n', (4507, 4532), False, 'from tensorflow.python.framework import constant_op\n'), ((5416, 5441), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {}), '(1.0)\n', (5436, 5441), False, 'from tensorflow.python.framework import constant_op\n'), ((5674, 5701), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[1.0]'], {}), '([1.0])\n', (5694, 5701), False, 'from tensorflow.python.framework import constant_op\n'), ((5849, 5864), 'numpy.array', 'np.array', (['[1.1]'], {}), '([1.1])\n', (5857, 5864), True, 'import numpy as np\n'), ((5866, 5891), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {}), '(1.0)\n', (5886, 5891), 
False, 'from tensorflow.python.framework import constant_op\n'), ((6109, 6134), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {}), '(1.0)\n', (6129, 6134), False, 'from tensorflow.python.framework import constant_op\n'), ((6329, 6369), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['value'], {'shape': 'shape'}), '(value, shape=shape)\n', (6349, 6369), False, 'from tensorflow.python.framework import constant_op\n'), ((6648, 6688), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['value'], {'shape': 'shape'}), '(value, shape=shape)\n', (6668, 6688), False, 'from tensorflow.python.framework import constant_op\n'), ((6971, 7011), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['value'], {'shape': 'shape'}), '(value, shape=shape)\n', (6991, 7011), False, 'from tensorflow.python.framework import constant_op\n'), ((7311, 7351), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['value'], {'shape': 'shape'}), '(value, shape=shape)\n', (7331, 7351), False, 'from tensorflow.python.framework import constant_op\n'), ((8361, 8399), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['compatible_shape'], {}), '(compatible_shape)\n', (8381, 8399), False, 'from tensorflow.python.framework import constant_op\n'), ((10371, 10409), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['compatible_shape'], {}), '(compatible_shape)\n', (10391, 10409), False, 'from tensorflow.python.framework import constant_op\n'), ((15311, 15376), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['input_predictions_value'], {'dtype': 'dtypes.int32'}), '(input_predictions_value, dtype=dtypes.int32)\n', (15331, 15376), False, 'from tensorflow.python.framework import constant_op\n'), ((15427, 15488), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ([], {'dtype': 'dtypes.int32', 'name': '"""predictions"""'}), "(dtype=dtypes.int32, name='predictions')\n", (15448, 15488), False, 'from tensorflow.python.ops import array_ops\n'), ((15615, 15675), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['input_labels_value'], {'dtype': 'dtypes.int32'}), '(input_labels_value, dtype=dtypes.int32)\n', (15635, 15675), False, 'from tensorflow.python.framework import constant_op\n'), ((15707, 15763), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ([], {'dtype': 'dtypes.int32', 'name': '"""labels"""'}), "(dtype=dtypes.int32, name='labels')\n", (15728, 15763), False, 'from tensorflow.python.ops import array_ops\n'), ((2001, 2047), 'tensorflow.python.ops.variables.variables_initializer', 'variables_lib.variables_initializer', (['variables'], {}), '(variables)\n', (2036, 2047), True, 'from tensorflow.python.ops import variables as variables_lib\n'), ((4117, 4174), 'tensorflow.contrib.framework.python.framework.tensor_util.with_same_shape', 'tensor_util.with_same_shape', (['expected_placeholder', 'tensor'], {}), '(expected_placeholder, tensor)\n', (4144, 4174), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((4294, 4319), 'numpy.ones', 'np.ones', (['unexpected_shape'], {}), '(unexpected_shape)\n', (4301, 4319), True, 'import numpy as np\n'), ((5108, 5131), 'numpy.ones', 'np.ones', (['expected_shape'], {}), '(expected_shape)\n', (5115, 5131), True, 'import numpy as np\n'), ((6060, 6075), 'numpy.array', 
'np.array', (['[1.1]'], {}), '([1.1])\n', (6068, 6075), True, 'import numpy as np\n'), ((15194, 15205), 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), '()\n', (15203, 15205), False, 'from tensorflow.python.framework import ops\n'), ((2389, 2424), 'tensorflow.contrib.framework.python.framework.tensor_util.reduce_sum_n', 'tensor_util.reduce_sum_n', (['[a, b, c]'], {}), '([a, b, c])\n', (2413, 2424), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((3825, 3863), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['unexpected_shape'], {}), '(unexpected_shape)\n', (3845, 3863), False, 'from tensorflow.python.framework import constant_op\n'), ((15975, 16018), 'tensorflow.python.ops.variables.local_variables_initializer', 'variables_lib.local_variables_initializer', ([], {}), '()\n', (16016, 16018), True, 'from tensorflow.python.ops import variables as variables_lib\n')] |
#!/usr/bin/env python3
# Copyright 2021 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""Simple script for updating musl from external git repo.
The upstream sources, along with our local changes, live at:
https://github.com/emscripten-core/musl
To update musl first make sure all changes from the emscripten repo
are present in the `emscripten` branch of the above repo. Then run
`git merge v<musl_version>` to pull in the latest musl changes from
a given musl version. Once any merge conflicts are resolved, those
changes can then be copied back into emscripten using this script.
"""
import os
import sys
import shutil
import subprocess
script_dir = os.path.abspath(os.path.dirname(__file__))
local_src = os.path.join(script_dir, 'libc', 'musl')
exclude_dirs = (
# Top level directories we don't include
'tools', 'obj', 'lib', 'crt', 'musl', 'compat',
# Parts of src we don't build
'malloc',
# Arch-specific code we don't use
'arm', 'x32', 'sh', 'i386', 'x86_64', 'aarch64', 'riscv64',
's390x', 'mips', 'mips64', 'mipsn32', 'powerpc', 'powerpc64',
'm68k', 'microblaze', 'or1k', 'generic')
musl_dir = os.path.abspath(sys.argv[1])
def should_ignore(name):
return name in exclude_dirs or name[0] == '.'
def ignore(dirname, contents):
return [c for c in contents if should_ignore(c)]
def main():
assert os.path.exists(musl_dir)
# Remove old version
shutil.rmtree(local_src)
# Copy new version into place
shutil.copytree(musl_dir, local_src, ignore=ignore)
if __name__ == '__main__':
main()
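# Example invocation (comment only; the script name and checkout path are
# illustrative assumptions). sys.argv[1] must point at a musl source tree; the
# script wipes the local copy under <script_dir>/libc/musl and re-copies it,
# skipping the directories listed in `exclude_dirs`:
#
#   python3 update_musl.py ~/src/musl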
| [
"os.path.exists",
"os.path.join",
"shutil.copytree",
"os.path.dirname",
"shutil.rmtree",
"os.path.abspath"
] | [((902, 942), 'os.path.join', 'os.path.join', (['script_dir', '"""libc"""', '"""musl"""'], {}), "(script_dir, 'libc', 'musl')\n", (914, 942), False, 'import os\n'), ((1315, 1343), 'os.path.abspath', 'os.path.abspath', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (1330, 1343), False, 'import os\n'), ((863, 888), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (878, 888), False, 'import os\n'), ((1526, 1550), 'os.path.exists', 'os.path.exists', (['musl_dir'], {}), '(musl_dir)\n', (1540, 1550), False, 'import os\n'), ((1577, 1601), 'shutil.rmtree', 'shutil.rmtree', (['local_src'], {}), '(local_src)\n', (1590, 1601), False, 'import shutil\n'), ((1637, 1688), 'shutil.copytree', 'shutil.copytree', (['musl_dir', 'local_src'], {'ignore': 'ignore'}), '(musl_dir, local_src, ignore=ignore)\n', (1652, 1688), False, 'import shutil\n')] |
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
# Used in the exec() call below.
from scout_apm.core.monkey import monkeypatch_method, unpatch_method # noqa: F401
from scout_apm.core.tracked_request import TrackedRequest # noqa: F401
logger = logging.getLogger(__name__)
class Instrument(object):
PYMONGO_METHODS = [
"aggregate",
"bulk_write",
"count",
"create_index",
"create_indexes",
"delete_many",
"delete_one",
"distinct",
"drop",
"drop_index",
"drop_indexes",
"ensure_index",
"find_and_modify",
"find_one",
"find_one_and_delete",
"find_one_and_replace",
"find_one_and_update",
"group",
"inline_map_reduce",
"insert",
"insert_many",
"insert_one",
"map_reduce",
"reindex",
"remove",
"rename",
"replace_one",
"save",
"update",
"update_many",
"update_one",
]
def __init__(self):
self.installed = False
def installable(self):
try:
from pymongo.collection import Collection # noqa: F401
except ImportError:
logger.info("Unable to import for PyMongo instruments")
return False
if self.installed:
logger.warn("PyMongo Instruments are already installed.")
return False
return True
def install(self):
if not self.installable():
logger.info("PyMongo instruments are not installable. Skipping.")
return False
self.installed = True
try:
from pymongo.collection import Collection # noqa: F401
# There is no way the import can fail if self.installable() succeeded.
except ImportError: # pragma: no cover
logger.info(
"Unable to import for PyMongo instruments. Instrument install failed."
)
return False
for method_str in self.__class__.PYMONGO_METHODS:
try:
code_str = """
@monkeypatch_method(Collection)
def {method_str}(original, self, *args, **kwargs):
    tr = TrackedRequest.instance()
    name = '/'.join(['MongoDB', self.name, '{camel_name}'])
    span = tr.start_span(operation=name, ignore_children=True)
    span.tag('name', self.name)
    try:
        return original(*args, **kwargs)
    finally:
        tr.stop_span()
""".format(
method_str=method_str,
camel_name="".join(c.title() for c in method_str.split("_")),
)
exec(code_str)
logger.info("Instrumented PyMongo Collection.%s", method_str)
except Exception as e:
logger.warn(
"Unable to instrument for PyMongo Collection.%s: %r", method_str, e
)
return False
return True
def uninstall(self):
if not self.installed:
logger.info("PyMongo instruments are not installed. Skipping.")
return False
self.installed = False
from pymongo.collection import Collection
for method_str in self.__class__.PYMONGO_METHODS:
unpatch_method(Collection, method_str)
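# Usage sketch (comment only; how the scout_apm agent normally drives this
# class is not shown in this file, so the call sequence is illustrative):
#
#   instrument = Instrument()
#   instrument.install()     # monkeypatches the Collection methods listed above
#   ...                      # pymongo calls now record "MongoDB/<coll>/<Op>" spans
#   instrument.uninstall()   # unpatch_method restores the original methods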
| [
"logging.getLogger",
"scout_apm.core.monkey.unpatch_method"
] | [((298, 325), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (315, 325), False, 'import logging\n'), ((3319, 3357), 'scout_apm.core.monkey.unpatch_method', 'unpatch_method', (['Collection', 'method_str'], {}), '(Collection, method_str)\n', (3333, 3357), False, 'from scout_apm.core.monkey import monkeypatch_method, unpatch_method\n')] |
from abc import ABC, abstractmethod
import collections
import pandas as pd
from autoscalingsim.utils.error_check import ErrorChecker
class Correlator(ABC):
_Registry = {}
@abstractmethod
def _compute_correlation(self, metrics_vals_1 : pd.Series, metrics_vals_2 : pd.Series, lag : int):
pass
def __init__(self, config : dict):
history_buffer_size_raw = ErrorChecker.key_check_and_load('history_buffer_size', config, self.__class__.__name__)
history_buffer_size_value = ErrorChecker.key_check_and_load('value', history_buffer_size_raw, self.__class__.__name__)
history_buffer_size_unit = ErrorChecker.key_check_and_load('unit', history_buffer_size_raw, self.__class__.__name__)
self.history_buffer_size = pd.Timedelta(history_buffer_size_value, unit = history_buffer_size_unit)
max_time_lag_raw = ErrorChecker.key_check_and_load('max_time_lag', config, self.__class__.__name__)
max_time_lag_value = ErrorChecker.key_check_and_load('value', max_time_lag_raw, self.__class__.__name__)
max_time_lag_unit = ErrorChecker.key_check_and_load('unit', max_time_lag_raw, self.__class__.__name__)
self.max_time_lag = pd.Timedelta(max_time_lag_value, unit = max_time_lag_unit)
self.associated_service_metric_vals = pd.DataFrame()
self.other_service_metric_vals = collections.defaultdict(pd.DataFrame)
def _update_data(self, associated_service_metric_vals : pd.DataFrame, other_service_metric_vals : pd.DataFrame):
if len(self.associated_service_metric_vals.index) > 0:
self.associated_service_metric_vals = self.associated_service_metric_vals.append(associated_service_metric_vals[associated_service_metric_vals.index > max(self.associated_service_metric_vals.index)])
else:
self.associated_service_metric_vals = self.associated_service_metric_vals.append(associated_service_metric_vals)
if self.associated_service_metric_vals.shape[0] > 0:
self.associated_service_metric_vals = self.associated_service_metric_vals[self.associated_service_metric_vals.index >= max(self.associated_service_metric_vals.index) - self.history_buffer_size]
for service_name, metric_vals in other_service_metric_vals.items():
if len(self.other_service_metric_vals[service_name].index) > 0:
self.other_service_metric_vals[service_name] = self.other_service_metric_vals[service_name].append(metric_vals[metric_vals.index > max(self.other_service_metric_vals[service_name].index)])
else:
self.other_service_metric_vals[service_name] = self.other_service_metric_vals[service_name].append(metric_vals)
if self.other_service_metric_vals[service_name].shape[0] > 0:
self.other_service_metric_vals[service_name] = self.other_service_metric_vals[service_name][self.other_service_metric_vals[service_name].index >= max(self.other_service_metric_vals[service_name].index) - self.history_buffer_size]
def get_lagged_correlation(self, associated_service_metric_vals : pd.DataFrame, other_service_metric_vals : pd.DataFrame) -> dict:
self._update_data(associated_service_metric_vals, other_service_metric_vals)
min_resolution = self._get_minimal_resolution()
max_lag = self.max_time_lag // min_resolution
lags_range = range(-max_lag, max_lag)
lags_per_service = dict()
for service_name, metric_vals in self.other_service_metric_vals.items():
other_service_metric_vals_resampled = metric_vals.resample(min_resolution).mean()
associated_service_metric_vals_resampled = self.associated_service_metric_vals.resample(min_resolution).mean()
common_len = min(associated_service_metric_vals_resampled.shape[0], other_service_metric_vals_resampled.shape[0])
associated_service_metric_vals_inp = associated_service_metric_vals_resampled['value'][-common_len:]
other_service_metric_vals_inp = other_service_metric_vals_resampled['value'][-common_len:]
if associated_service_metric_vals_inp.shape == other_service_metric_vals_inp.shape:
corr_raw = { lag : self._compute_correlation(associated_service_metric_vals_inp, other_service_metric_vals_inp, lag) for lag in lags_range }
corr_pruned = { lag : corr for lag, corr in corr_raw.items() if not corr is None}
if len(corr_pruned) > 0:
linear_correlation_df = pd.DataFrame({'lags': list(corr_pruned.keys()), 'correlation': list(corr_pruned.values())}).set_index('lags')
lags_per_service[service_name] = { 'lag': int(linear_correlation_df.correlation.idxmax()) * min_resolution, 'correlation': linear_correlation_df.correlation.max() }
return lags_per_service
def _get_minimal_resolution(self):
minimas_to_consider = [pd.Timedelta(1, unit = 's')]
for service_name, metric_vals in self.other_service_metric_vals.items():
if metric_vals.shape[0] > 0:
other_service_metric_vals_min_resolution = min(metric_vals.index.to_series().diff()[1:])
if not other_service_metric_vals_min_resolution is pd.NaT: minimas_to_consider.append(other_service_metric_vals_min_resolution)
associated_service_metric_vals_min_resolution = min(self.associated_service_metric_vals.index.to_series().diff()[1:])
if not associated_service_metric_vals_min_resolution is pd.NaT: minimas_to_consider.append(associated_service_metric_vals_min_resolution)
return min(minimas_to_consider)
@classmethod
def register(cls, name : str):
def decorator(correlator_class):
cls._Registry[name] = correlator_class
return correlator_class
return decorator
@classmethod
def get(cls, name : str):
if not name in cls._Registry:
raise ValueError(f'An attempt to use a non-existent {cls.__name__} {name}')
return cls._Registry[name]
from .correlators import *
| [
"pandas.DataFrame",
"pandas.Timedelta",
"collections.defaultdict",
"autoscalingsim.utils.error_check.ErrorChecker.key_check_and_load"
] | [((391, 483), 'autoscalingsim.utils.error_check.ErrorChecker.key_check_and_load', 'ErrorChecker.key_check_and_load', (['"""history_buffer_size"""', 'config', 'self.__class__.__name__'], {}), "('history_buffer_size', config, self.\n __class__.__name__)\n", (422, 483), False, 'from autoscalingsim.utils.error_check import ErrorChecker\n'), ((515, 610), 'autoscalingsim.utils.error_check.ErrorChecker.key_check_and_load', 'ErrorChecker.key_check_and_load', (['"""value"""', 'history_buffer_size_raw', 'self.__class__.__name__'], {}), "('value', history_buffer_size_raw, self.\n __class__.__name__)\n", (546, 610), False, 'from autoscalingsim.utils.error_check import ErrorChecker\n'), ((641, 735), 'autoscalingsim.utils.error_check.ErrorChecker.key_check_and_load', 'ErrorChecker.key_check_and_load', (['"""unit"""', 'history_buffer_size_raw', 'self.__class__.__name__'], {}), "('unit', history_buffer_size_raw, self.\n __class__.__name__)\n", (672, 735), False, 'from autoscalingsim.utils.error_check import ErrorChecker\n'), ((766, 836), 'pandas.Timedelta', 'pd.Timedelta', (['history_buffer_size_value'], {'unit': 'history_buffer_size_unit'}), '(history_buffer_size_value, unit=history_buffer_size_unit)\n', (778, 836), True, 'import pandas as pd\n'), ((867, 952), 'autoscalingsim.utils.error_check.ErrorChecker.key_check_and_load', 'ErrorChecker.key_check_and_load', (['"""max_time_lag"""', 'config', 'self.__class__.__name__'], {}), "('max_time_lag', config, self.__class__.__name__\n )\n", (898, 952), False, 'from autoscalingsim.utils.error_check import ErrorChecker\n'), ((977, 1065), 'autoscalingsim.utils.error_check.ErrorChecker.key_check_and_load', 'ErrorChecker.key_check_and_load', (['"""value"""', 'max_time_lag_raw', 'self.__class__.__name__'], {}), "('value', max_time_lag_raw, self.__class__.\n __name__)\n", (1008, 1065), False, 'from autoscalingsim.utils.error_check import ErrorChecker\n'), ((1089, 1176), 'autoscalingsim.utils.error_check.ErrorChecker.key_check_and_load', 'ErrorChecker.key_check_and_load', (['"""unit"""', 'max_time_lag_raw', 'self.__class__.__name__'], {}), "('unit', max_time_lag_raw, self.__class__.\n __name__)\n", (1120, 1176), False, 'from autoscalingsim.utils.error_check import ErrorChecker\n'), ((1200, 1256), 'pandas.Timedelta', 'pd.Timedelta', (['max_time_lag_value'], {'unit': 'max_time_lag_unit'}), '(max_time_lag_value, unit=max_time_lag_unit)\n', (1212, 1256), True, 'import pandas as pd\n'), ((1306, 1320), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1318, 1320), True, 'import pandas as pd\n'), ((1362, 1399), 'collections.defaultdict', 'collections.defaultdict', (['pd.DataFrame'], {}), '(pd.DataFrame)\n', (1385, 1399), False, 'import collections\n'), ((4915, 4940), 'pandas.Timedelta', 'pd.Timedelta', (['(1)'], {'unit': '"""s"""'}), "(1, unit='s')\n", (4927, 4940), True, 'import pandas as pd\n')] |
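# Editor's note: a hedged sketch of how a concrete correlator could be registered and
# constructed. The name "pearson" and the class body are illustrative, not part of the
# package; the config keys mirror what __init__ reads (value + unit pairs).
@Correlator.register('pearson')
class PearsonCorrelator(Correlator):
    def _compute_correlation(self, metrics_vals_1 : pd.Series, metrics_vals_2 : pd.Series, lag : int):
        return metrics_vals_1.corr(metrics_vals_2.shift(lag))
correlator_config = {
    'history_buffer_size': {'value': 10, 'unit': 'm'},
    'max_time_lag': {'value': 30, 'unit': 's'},
}
correlator = Correlator.get('pearson')(correlator_config)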
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File : LC-0035-Search-Insert-Position.py
@Author : [YuweiYin](https://github.com/YuweiYin)
@Date : 2022-01-01
=================================================================="""
import sys
import time
from typing import List
"""
LeetCode - 0035 - (Easy) - Search Insert Position
https://leetcode.com/problems/search-insert-position/
Description:
Given a sorted array of distinct integers and a target value, return the index if the target is found.
If not, return the index where it would be if it were inserted in order.
Requirement:
You must write an algorithm with O(log n) runtime complexity.
Example 1:
Input: nums = [1,3,5,6], target = 5
Output: 2
Example 2:
Input: nums = [1,3,5,6], target = 2
Output: 1
Example 3:
Input: nums = [1,3,5,6], target = 7
Output: 4
Constraints:
1 <= nums.length <= 10^4
-10^4 <= nums[i] <= 10^4
nums contains distinct values sorted in ascending order.
-10^4 <= target <= 10^4
"""
class Solution:
def searchInsert(self, nums: List[int], target: int) -> int:
# exception case
if not isinstance(nums, list) or len(nums) == 0:
return 0
# main method: (loop) binary search of sorted list
return self._searchInsert(nums, target)
def _searchInsert(self, nums: List[int], target: int) -> int:
start_index, end_index = 0, len(nums) - 1
insert_index = 0
while start_index <= end_index:
cur_index = (end_index + start_index) >> 1 # current cursor
cur_num = nums[cur_index] # cache variable
if start_index == end_index: # border case: must decide the insert position now
return start_index if (target <= cur_num) else (start_index + 1)
if cur_num == target: # 1. hit the target
return cur_index
elif cur_num < target: # 2. go right
start_index = cur_index + 1 # change interval
insert_index = start_index # adjust the possible insert index
else: # 3. go left
end_index = cur_index - 1 # change interval
insert_index = cur_index # adjust the possible insert index
return insert_index
def main():
# Example 1: Output: 2
# nums = [1, 3, 5, 6]
# target = 5
# Example 2: Output: 1
# nums = [1, 3, 5, 6]
# target = 2
# Example 3: Output: 4
# nums = [1,3,5,6]
# target = 7
# Example 4: Output: 0
# nums = [1, 3, 5, 6]
# target = 0
# Example 5: Output: 0
nums = [1, 3]
target = 0
# init instance
solution = Solution()
# run & time
start = time.process_time()
ans = solution.searchInsert(nums, target)
end = time.process_time()
# show answer
print('\nAnswer:')
print(ans)
# show time consumption
print('Running Time: %.5f ms' % ((end - start) * 1000))
if __name__ == "__main__":
sys.exit(main())
| [
"time.process_time"
] | [((2847, 2866), 'time.process_time', 'time.process_time', ([], {}), '()\n', (2864, 2866), False, 'import time\n'), ((2923, 2942), 'time.process_time', 'time.process_time', ([], {}), '()\n', (2940, 2942), False, 'import time\n')] |
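# Editor's note: a quick cross-check of the solution above, not part of the original
# file. For a sorted list of distinct integers, the required insert position is exactly
# bisect.bisect_left, so the two must agree on every target.
import bisect
def _cross_check():
    nums = [1, 3, 5, 6]
    solution = Solution()
    for target in range(-1, 9):
        assert solution.searchInsert(nums, target) == bisect.bisect_left(nums, target)
_cross_check()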
import requests
# Context-Aware Service REST endpoint for map info on the lab controller.
url = 'https://192.168.3.11/api/contextaware/v1/maps/info/DevNetCampus/DevNetBuilding/DevNetZone'
# HTTP Basic auth header: base64 of the credentials; a single '=' is the valid padding here.
headers = {'Authorization': 'Basic bGVhcm5pbmc6bGVhcm5pbmc='}
# verify=False skips TLS certificate validation for the lab's self-signed certificate.
response = requests.get(url, headers=headers, verify=False)
responseString = response.text
print(responseString)
| [
"requests.get"
] | [((188, 236), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 'verify': '(False)'}), '(url, headers=headers, verify=False)\n', (200, 236), False, 'import requests\n')] |
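# Editor's note: an equivalent call that lets requests build the Basic auth header
# itself (the credentials are assumed to be the pair encoded above, learning/learning).
response = requests.get(url, auth=('learning', 'learning'), verify=False)
print(response.text)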
""" Code for wrapping the motion primitive action in an object. """
from __future__ import division
from __future__ import absolute_import
import attr
import numpy as np
from bc_gym_planning_env.utilities.serialize import Serializable
@attr.s(cmp=False)
class Action(Serializable):
""" Object representing an 'action' - a motion primitive to execute in the environment """
VERSION = 1
command = attr.ib(type=np.ndarray)
@classmethod
def from_cmds(cls, wanted_linear_velocity_of_baselink, wanted_front_wheel_angle):
return cls(command=np.array([wanted_linear_velocity_of_baselink, wanted_front_wheel_angle]))
def __eq__(self, other):
if not isinstance(other, Action):
return False
if (self.command != other.command).any():
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
| [
"numpy.array",
"attr.s",
"attr.ib"
] | [((240, 257), 'attr.s', 'attr.s', ([], {'cmp': '(False)'}), '(cmp=False)\n', (246, 257), False, 'import attr\n'), ((411, 435), 'attr.ib', 'attr.ib', ([], {'type': 'np.ndarray'}), '(type=np.ndarray)\n', (418, 435), False, 'import attr\n'), ((567, 639), 'numpy.array', 'np.array', (['[wanted_linear_velocity_of_baselink, wanted_front_wheel_angle]'], {}), '([wanted_linear_velocity_of_baselink, wanted_front_wheel_angle])\n', (575, 639), True, 'import numpy as np\n')] |
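# Editor's note: a small usage sketch for the Action wrapper above (illustrative only).
# Both constructors wrap the same command array, and equality is element-wise on it.
a = Action.from_cmds(wanted_linear_velocity_of_baselink=0.5, wanted_front_wheel_angle=0.1)
b = Action(command=np.array([0.5, 0.1]))
assert a == b
assert a != Action(command=np.array([0.5, 0.2]))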
import matplotlib.pyplot as plt
import pandas as pd
import math
import numpy as np
from scipy import stats
import seaborn as sns
data = pd.read_csv("data/500-4.txt", sep="\t")
# example1 = data[data["SIM_TIME"] == 500]
simulations = 500
simtimes = [5, 50, 150, 500, 1000]
# for i in [1, 2, 4]:
# data = pd.read_csv(f"data/500-{i}.txt", sep="\t")
# example = data[data["SIM_TIME"] == simtime]
rhos = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.975]
print("DONE")
print("\n START MEAN, STDEV, CONF INT")
data = pd.read_csv(f"data/500-2.txt", sep="\t")
example = data[data["SIM_TIME"] == 150]
example1 = data[data["SIM_TIME"] == 500]
ex = example[example['RHO'] == 0.1]['AVG_WAIT']
ex2 = example1[example1['RHO'] == 0.1]['AVG_WAIT']
ex_9 = example[example['RHO'] == 0.9]['AVG_WAIT']
ex2_9 = example1[example1['RHO'] == 0.9]['AVG_WAIT']
print("\nMEAN 150, 500, rho 0.1, rho 0.9")
print(ex.mean(), ex2.mean())
print(ex_9.mean(), ex2_9.mean())
print("\nSTDEV 150, 500, rho 0.1, rho 0.9")
print(ex.std(), ex2.std())
print(ex_9.std(), ex2_9.std())
fig = plt.figure(facecolor='w')
ax = fig.add_subplot(111, facecolor='whitesmoke', axisbelow=True)
ax.hist(ex_9, bins = 100, alpha=0.8, color = 'cornflowerblue', label="Simtime=150")
ax.hist(ex2_9, bins = 100, alpha = 0.5, color='springgreen', label="Simtime=500")
# sns.displot(ex_9,)
# sns.displot(ex2_9)
ax.set_xlabel('Mean waiting time / time unit', fontsize=12)
ax.set_ylabel('Density', fontsize=12)
ax.set_title('Distribution mean waiting time', fontsize = 14)
ax.yaxis.set_tick_params(length=0)
ax.xaxis.set_tick_params(length=0)
ax.grid(b=True, which='major', c='w', lw=2, ls='-')
legend = ax.legend()
legend.get_frame().set_alpha(0.5)
for spine in ('top', 'right', 'bottom', 'left'):
ax.spines[spine].set_visible(False)
plt.savefig("plots/histogram-150-500-01.png", dpi=300)
plt.show()
| [
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"matplotlib.pyplot.show"
] | [((137, 176), 'pandas.read_csv', 'pd.read_csv', (['"""data/500-4.txt"""'], {'sep': '"""\t"""'}), "('data/500-4.txt', sep='\\t')\n", (148, 176), True, 'import pandas as pd\n'), ((531, 571), 'pandas.read_csv', 'pd.read_csv', (['f"""data/500-2.txt"""'], {'sep': '"""\t"""'}), "(f'data/500-2.txt', sep='\\t')\n", (542, 571), True, 'import pandas as pd\n'), ((1070, 1095), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'facecolor': '"""w"""'}), "(facecolor='w')\n", (1080, 1095), True, 'import matplotlib.pyplot as plt\n'), ((1797, 1851), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plots/histogram-150-500-01.png"""'], {'dpi': '(300)'}), "('plots/histogram-150-500-01.png', dpi=300)\n", (1808, 1851), True, 'import matplotlib.pyplot as plt\n'), ((1852, 1862), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1860, 1862), True, 'import matplotlib.pyplot as plt\n')] |
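# Editor's note: the script above prints "CONF INT" but never computes one. A minimal
# sketch of a 95% confidence interval for the mean waiting time using scipy.stats
# (already imported there), assuming the per-run means in e.g. ex_9 are i.i.d. samples.
def mean_confidence_interval(samples, confidence=0.95):
    samples = np.asarray(samples, dtype=float)
    mean = samples.mean()
    half_width = stats.sem(samples) * stats.t.ppf((1 + confidence) / 2, len(samples) - 1)
    return mean, mean - half_width, mean + half_width
print(mean_confidence_interval(ex_9))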
import os
import logging
import yaml
from schema import Use, Schema, SchemaError, Optional
class InvalidConfig(Exception):
pass
class MissingConfig(Exception):
pass
default_config = {
'logging': 30,
'migrate_from_0_3_2': True
}
schema = Schema({
'stellar_url': Use(str),
'url': Use(str),
'project_name': Use(str),
'tracked_databases': [Use(str)],
Optional('logging'): int,
Optional('migrate_from_0_3_2'): bool
})
def get_config_path():
current_directory = os.getcwd()
while True:
try:
with open(
os.path.join(current_directory, 'stellar.yaml'),
'rb'
) as fp:
return os.path.join(current_directory, 'stellar.yaml')
except IOError:
pass
current_directory = os.path.abspath(
os.path.join(current_directory, '..')
)
if current_directory == '/':
return None
def load_config():
config = {}
stellar_config_env = os.getenv('STELLAR_CONFIG')
if stellar_config_env:
if os.path.exists(stellar_config_env):
config = yaml.safe_load(open(stellar_config_env))
else:
current_directory = os.getcwd()
while True:
try:
with open(
os.path.join(current_directory, 'stellar.yaml'),
'rb'
) as fp:
config = yaml.safe_load(fp)
break
except IOError:
pass
if current_directory == '/':
break
current_directory = os.path.abspath(
os.path.join(current_directory, '..')
)
if not config:
raise MissingConfig()
for k, v in default_config.items():
if k not in config:
config[k] = v
try:
return schema.validate(config)
except SchemaError as e:
raise InvalidConfig(e)
def save_config(config):
logging.getLogger(__name__).debug('save_config()')
with open(get_config_path(), "w") as fp:
yaml.dump(config, fp)
| [
"logging.getLogger",
"os.path.exists",
"schema.Optional",
"os.getenv",
"schema.Use",
"yaml.dump",
"os.path.join",
"os.getcwd",
"yaml.safe_load"
] | [((508, 519), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (517, 519), False, 'import os\n'), ((1020, 1047), 'os.getenv', 'os.getenv', (['"""STELLAR_CONFIG"""'], {}), "('STELLAR_CONFIG')\n", (1029, 1047), False, 'import os\n'), ((389, 408), 'schema.Optional', 'Optional', (['"""logging"""'], {}), "('logging')\n", (397, 408), False, 'from schema import Use, Schema, SchemaError, Optional\n'), ((419, 449), 'schema.Optional', 'Optional', (['"""migrate_from_0_3_2"""'], {}), "('migrate_from_0_3_2')\n", (427, 449), False, 'from schema import Use, Schema, SchemaError, Optional\n'), ((287, 295), 'schema.Use', 'Use', (['str'], {}), '(str)\n', (290, 295), False, 'from schema import Use, Schema, SchemaError, Optional\n'), ((308, 316), 'schema.Use', 'Use', (['str'], {}), '(str)\n', (311, 316), False, 'from schema import Use, Schema, SchemaError, Optional\n'), ((338, 346), 'schema.Use', 'Use', (['str'], {}), '(str)\n', (341, 346), False, 'from schema import Use, Schema, SchemaError, Optional\n'), ((1086, 1120), 'os.path.exists', 'os.path.exists', (['stellar_config_env'], {}), '(stellar_config_env)\n', (1100, 1120), False, 'import os\n'), ((1222, 1233), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1231, 1233), False, 'import os\n'), ((2111, 2132), 'yaml.dump', 'yaml.dump', (['config', 'fp'], {}), '(config, fp)\n', (2120, 2132), False, 'import yaml\n'), ((374, 382), 'schema.Use', 'Use', (['str'], {}), '(str)\n', (377, 382), False, 'from schema import Use, Schema, SchemaError, Optional\n'), ((849, 886), 'os.path.join', 'os.path.join', (['current_directory', '""".."""'], {}), "(current_directory, '..')\n", (861, 886), False, 'import os\n'), ((2007, 2034), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2024, 2034), False, 'import logging\n'), ((702, 749), 'os.path.join', 'os.path.join', (['current_directory', '"""stellar.yaml"""'], {}), "(current_directory, 'stellar.yaml')\n", (714, 749), False, 'import os\n'), ((1670, 1707), 'os.path.join', 'os.path.join', (['current_directory', '""".."""'], {}), "(current_directory, '..')\n", (1682, 1707), False, 'import os\n'), ((588, 635), 'os.path.join', 'os.path.join', (['current_directory', '"""stellar.yaml"""'], {}), "(current_directory, 'stellar.yaml')\n", (600, 635), False, 'import os\n'), ((1446, 1464), 'yaml.safe_load', 'yaml.safe_load', (['fp'], {}), '(fp)\n', (1460, 1464), False, 'import yaml\n'), ((1318, 1365), 'os.path.join', 'os.path.join', (['current_directory', '"""stellar.yaml"""'], {}), "(current_directory, 'stellar.yaml')\n", (1330, 1365), False, 'import os\n')] |
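# Editor's note: a minimal sketch of a config dict that satisfies the Schema above.
# All values are made-up placeholders, not a real project; the optional keys are
# filled in from default_config by load_config() before validation.
example_config = {
    'stellar_url': 'postgresql://localhost:5432/stellar_data',
    'url': 'postgresql://localhost:5432/mydb',
    'project_name': 'myproject',
    'tracked_databases': ['mydb'],
}
validated = schema.validate({**example_config, **default_config})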
#!/usr/bin/env python3
# Copyright 2021 <NAME> University (<NAME>)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import os
import random
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-d", help="downloads directory", type=str, default="downloads")
args = parser.parse_args()
tsv_path = "%s/line_index.tsv" % args.d
with open(tsv_path, "r") as inf:
tsv_lines = inf.readlines()
tsv_lines = [line.strip() for line in tsv_lines]
spk2utt = {}
utt2text = {}
for line in tsv_lines:
l_list = line.split("\t")
fid = l_list[0]
spk = l_list[0].split("_")[1]
text = l_list[1]
path = "%s/%s.wav" % (args.d, fid)
if os.path.exists(path):
utt2text[fid] = text
if spk in spk2utt:
spk2utt[spk].append(fid)
else:
spk2utt[spk] = [fid]
spks = sorted(list(spk2utt.keys()))
num_fids = 0
num_test_spks = 0
for spk in spks:
num_test_spks += 1
fids = sorted(list(set(spk2utt[spk])))
num_fids += len(fids)
if num_fids >= 2000:
break
num_test_spks = 2
test_spks = spks[:num_test_spks]
train_dev_spks = spks[num_test_spks:]
random.Random(0).shuffle(train_dev_spks)
num_train = int(len(train_dev_spks) * 0.9)
train_spks = train_dev_spks[:num_train]
dev_spks = train_dev_spks[num_train:]
spks_by_phase = {"train": train_spks, "dev": dev_spks, "test": test_spks}
flac_dir = "%s" % args.d
sr = 16000
for phase in spks_by_phase:
spks = spks_by_phase[phase]
text_strs = []
wav_scp_strs = []
spk2utt_strs = []
num_fids = 0
for spk in spks:
fids = sorted(list(set(spk2utt[spk])))
num_fids += len(fids)
if phase == "test" and num_fids > 2000:
curr_num_fids = num_fids - 2000
random.Random(1).shuffle(fids)
fids = fids[:curr_num_fids]
utts = [spk + "-" + f for f in fids]
utts_str = " ".join(utts)
spk2utt_strs.append("%s %s" % (spk, utts_str))
for fid, utt in zip(fids, utts):
cmd = "ffmpeg -i %s/%s.wav -f wav -ar %d -ab 16 -ac 1 - |" % (
flac_dir,
fid,
sr,
)
text_strs.append("%s %s" % (utt, utt2text[fid]))
wav_scp_strs.append("%s %s" % (utt, cmd))
phase_dir = "data/marathi_%s" % phase
if not os.path.exists(phase_dir):
os.makedirs(phase_dir)
text_strs = sorted(text_strs)
wav_scp_strs = sorted(wav_scp_strs)
spk2utt_strs = sorted(spk2utt_strs)
with open(os.path.join(phase_dir, "text"), "w+") as ouf:
for s in text_strs:
ouf.write("%s\n" % s)
with open(os.path.join(phase_dir, "wav.scp"), "w+") as ouf:
for s in wav_scp_strs:
ouf.write("%s\n" % s)
with open(os.path.join(phase_dir, "spk2utt"), "w+") as ouf:
for s in spk2utt_strs:
ouf.write("%s\n" % s)
| [
"os.path.exists",
"argparse.ArgumentParser",
"os.makedirs",
"random.Random",
"os.path.join"
] | [((212, 237), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (235, 237), False, 'import argparse\n'), ((768, 788), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (782, 788), False, 'import os\n'), ((1308, 1324), 'random.Random', 'random.Random', (['(0)'], {}), '(0)\n', (1321, 1324), False, 'import random\n'), ((2621, 2646), 'os.path.exists', 'os.path.exists', (['phase_dir'], {}), '(phase_dir)\n', (2635, 2646), False, 'import os\n'), ((2660, 2682), 'os.makedirs', 'os.makedirs', (['phase_dir'], {}), '(phase_dir)\n', (2671, 2682), False, 'import os\n'), ((2829, 2860), 'os.path.join', 'os.path.join', (['phase_dir', '"""text"""'], {}), "(phase_dir, 'text')\n", (2841, 2860), False, 'import os\n'), ((2964, 2998), 'os.path.join', 'os.path.join', (['phase_dir', '"""wav.scp"""'], {}), "(phase_dir, 'wav.scp')\n", (2976, 2998), False, 'import os\n'), ((3105, 3139), 'os.path.join', 'os.path.join', (['phase_dir', '"""spk2utt"""'], {}), "(phase_dir, 'spk2utt')\n", (3117, 3139), False, 'import os\n'), ((1995, 2011), 'random.Random', 'random.Random', (['(1)'], {}), '(1)\n', (2008, 2011), False, 'import random\n')] |
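# Editor's note: an illustrative sanity check, not part of the original script. The
# files written above follow Kaldi-style conventions -- one "<utt-id> ..." entry per
# line in text and wav.scp, and "<spk> <utt-id> ..." lines in spk2utt -- so every
# utterance listed in spk2utt should also appear in the other two files.
def _ids(path, from_spk2utt=False):
    with open(path) as f:
        rows = [line.strip().split() for line in f if line.strip()]
    return {u for r in rows for u in r[1:]} if from_spk2utt else {r[0] for r in rows}
for _phase in ("train", "dev", "test"):
    _dir = "data/marathi_%s" % _phase
    _utts = _ids("%s/spk2utt" % _dir, from_spk2utt=True)
    assert _utts == _ids("%s/text" % _dir) == _ids("%s/wav.scp" % _dir)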
import geojson
import pytest
from napari_geojson import write_shapes
ellipse = [[[0, 0], [0, 5], [5, 5], [5, 0]], "ellipse", "Polygon"]
line = [[[0, 0], [5, 5]], "line", "LineString"]
polygon = [[[0, 0], [5, 5], [0, 10]], "polygon", "Polygon"]
polyline = [[[0, 0], [5, 5], [0, 10]], "path", "LineString"]
rectangle = [[[0, 0], [0, 5], [5, 5], [5, 0]], "rectangle", "Polygon"]
sample_shapes = [ellipse, line, polygon, polyline, rectangle]
sample_shapes_ids = ["ellipse", "line", "polygon", "polyline", "rectangle"]
@pytest.mark.parametrize(
"coords,shape_type,expected", sample_shapes, ids=sample_shapes_ids
)
def test_write_each_shape(
make_napari_viewer, tmp_path, coords, shape_type, expected
): # noqa E501
"""Writer writes a shapes layer as GeoJSON."""
fname = str(tmp_path / "sample.geojson")
viewer = make_napari_viewer()
shapes_layer = viewer.add_shapes(coords, shape_type=shape_type)
# shape was written
assert len(shapes_layer.data) == 1
data, meta, _ = shapes_layer.as_layer_data_tuple()
write_shapes(fname, data, meta)
# read back
with open(fname) as fp:
collection = geojson.load(fp)
geom = collection["geometries"][0]
assert geom.type == expected
| [
"pytest.mark.parametrize",
"geojson.load",
"napari_geojson.write_shapes"
] | [((520, 616), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""coords,shape_type,expected"""', 'sample_shapes'], {'ids': 'sample_shapes_ids'}), "('coords,shape_type,expected', sample_shapes, ids=\n sample_shapes_ids)\n", (543, 616), False, 'import pytest\n'), ((1045, 1076), 'napari_geojson.write_shapes', 'write_shapes', (['fname', 'data', 'meta'], {}), '(fname, data, meta)\n', (1057, 1076), False, 'from napari_geojson import write_shapes\n'), ((1143, 1159), 'geojson.load', 'geojson.load', (['fp'], {}), '(fp)\n', (1155, 1159), False, 'import geojson\n')] |
#!/usr/bin/env python3
from collections import defaultdict
import numpy as np
from pgmpy.base import UndirectedGraph
from pgmpy.factors import factor_product
class ClusterGraph(UndirectedGraph):
r"""
Base class for representing Cluster Graph.
Cluster graph is an undirected graph which is associated with a subset of variables. The graph contains undirected
edges that connects clusters whose scopes have a non-empty intersection.
Formally, a cluster graph is :math:`\mathcal{U}` for a set of factors :math:`\Phi` over :math:`\mathcal{X}` is an
undirected graph, each of whose nodes :math:`i` is associated with a subset :math:`C_i \subseteq X`. A cluster
graph must be family-preserving - each factor :math:`\phi \in \Phi` must be associated with a cluster C, denoted
:math:`\alpha(\phi)`, such that :math:`Scope[\phi] \subseteq C_i`. Each edge between a pair of clusters :math:`C_i`
and :math:`C_j` is associated with a sepset :math:`S_{i,j} \subseteq C_i \cap C_j`.
Parameters
----------
data: input graph
Data to initialize graph. If data=None (default) an empty graph is created. The data is an edge list
Examples
--------
Create an empty ClusterGraph with no nodes and no edges
>>> from pgmpy.models import ClusterGraph
>>> G = ClusterGraph()
G can be grown by adding clique nodes.
**Nodes:**
Add a tuple (or list or set) of nodes as single clique node.
>>> G.add_node(('a', 'b', 'c'))
>>> G.add_nodes_from([('a', 'b'), ('a', 'b', 'c')])
**Edges:**
G can also be grown by adding edges.
>>> G.add_edge(('a', 'b', 'c'), ('a', 'b'))
or a list of edges
>>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
... (('a', 'b', 'c'), ('a', 'c'))])
"""
def __init__(self, ebunch=None):
super(ClusterGraph, self).__init__()
if ebunch:
self.add_edges_from(ebunch)
self.factors = []
def add_node(self, node, **kwargs):
"""
Add a single node to the cluster graph.
Parameters
----------
node: node
A node should be a collection of nodes forming a clique. It can be
a list, set or tuple of nodes
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> G = ClusterGraph()
>>> G.add_node(('a', 'b', 'c'))
"""
if not isinstance(node, (list, set, tuple)):
raise TypeError(
"Node can only be a list, set or tuple of nodes forming a clique"
)
node = tuple(node)
super(ClusterGraph, self).add_node(node, **kwargs)
def add_nodes_from(self, nodes, **kwargs):
"""
Add multiple nodes to the cluster graph.
Parameters
----------
nodes: iterable container
A container of nodes (list, dict, set, etc.).
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> G = ClusterGraph()
>>> G.add_nodes_from([('a', 'b'), ('a', 'b', 'c')])
"""
for node in nodes:
self.add_node(node, **kwargs)
def add_edge(self, u, v, **kwargs):
"""
Add an edge between two clique nodes.
Parameters
----------
u, v: nodes
Nodes can be any list or set or tuple of nodes forming a clique.
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> G = ClusterGraph()
>>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
>>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
... (('a', 'b', 'c'), ('a', 'c'))])
"""
set_u = set(u)
set_v = set(v)
if set_u.isdisjoint(set_v):
raise ValueError("No sepset found between these two edges.")
super(ClusterGraph, self).add_edge(u, v)
def add_factors(self, *factors):
"""
Associate a factor to the graph.
See factors class for the order of potential values
Parameters
----------
*factor: pgmpy.factors.factors object
A factor object on any subset of the variables of the model which
is to be associated with the model.
Returns
-------
None
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> student = ClusterGraph()
>>> student.add_node(('Alice', 'Bob'))
>>> factor = DiscreteFactor(['Alice', 'Bob'], cardinality=[3, 2],
... values=np.random.rand(6))
>>> student.add_factors(factor)
"""
for factor in factors:
factor_scope = set(factor.scope())
nodes = [set(node) for node in self.nodes()]
if factor_scope not in nodes:
raise ValueError(
"Factors defined on clusters of variable not" "present in model"
)
self.factors.append(factor)
def get_factors(self, node=None):
"""
        Return the factors that have been added to the graph so far.
        If node is not None, return only the factor corresponding to the
        given node.
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> G = ClusterGraph()
>>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
>>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
... (('a', 'b', 'c'), ('a', 'c'))])
>>> phi1 = DiscreteFactor(['a', 'b', 'c'], [2, 2, 2], np.random.rand(8))
>>> phi2 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
>>> phi3 = DiscreteFactor(['a', 'c'], [2, 2], np.random.rand(4))
>>> G.add_factors(phi1, phi2, phi3)
>>> G.get_factors()
>>> G.get_factors(node=('a', 'b', 'c'))
"""
if node is None:
return self.factors
else:
nodes = [set(n) for n in self.nodes()]
if set(node) not in nodes:
raise ValueError("Node not present in Cluster Graph")
factors = filter(lambda x: set(x.scope()) == set(node), self.factors)
return next(factors)
def remove_factors(self, *factors):
"""
Removes the given factors from the added factors.
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> student = ClusterGraph()
>>> factor = DiscreteFactor(['Alice', 'Bob'], cardinality=[2, 2],
        ...                       values=np.random.rand(4))
>>> student.add_factors(factor)
>>> student.remove_factors(factor)
"""
for factor in factors:
self.factors.remove(factor)
def get_cardinality(self, node=None):
"""
Returns the cardinality of the node
Parameters
----------
node: any hashable python object (optional)
The node whose cardinality we want. If node is not specified returns a
dictionary with the given variable as keys and their respective cardinality
as values.
Returns
-------
int or dict : If node is specified returns the cardinality of the node.
If node is not specified returns a dictionary with the given
variable as keys and their respective cardinality as values.
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> student = ClusterGraph()
>>> factor = DiscreteFactor(['Alice', 'Bob'], cardinality=[2, 2],
... values=np.random.rand(4))
>>> student.add_node(('Alice', 'Bob'))
>>> student.add_factors(factor)
>>> student.get_cardinality()
defaultdict(<class 'int'>, {'Bob': 2, 'Alice': 2})
>>> student.get_cardinality(node='Alice')
2
"""
if node:
for factor in self.factors:
for variable, cardinality in zip(factor.scope(), factor.cardinality):
if node == variable:
return cardinality
else:
cardinalities = defaultdict(int)
for factor in self.factors:
for variable, cardinality in zip(factor.scope(), factor.cardinality):
cardinalities[variable] = cardinality
return cardinalities
def get_partition_function(self):
r"""
Returns the partition function for a given undirected graph.
A partition function is defined as
.. math:: \sum_{X}(\prod_{i=1}^{m} \phi_i)
where m is the number of factors present in the graph
and X are all the random variables present.
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> G = ClusterGraph()
>>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
>>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
... (('a', 'b', 'c'), ('a', 'c'))])
>>> phi1 = DiscreteFactor(['a', 'b', 'c'], [2, 2, 2], np.random.rand(8))
>>> phi2 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
>>> phi3 = DiscreteFactor(['a', 'c'], [2, 2], np.random.rand(4))
>>> G.add_factors(phi1, phi2, phi3)
>>> G.get_partition_function()
"""
if self.check_model():
factor = self.factors[0]
factor = factor_product(
factor, *[self.factors[i] for i in range(1, len(self.factors))]
)
return np.sum(factor.values)
def check_model(self):
"""
Check the model for various errors. This method checks for the following
errors.
* Checks if factors are defined for all the cliques or not.
        * The check for the running intersection property is not done explicitly
          here, as it is done in the add_edges method.
        * Checks if cardinality information for all the variables is available or not. If
          not, it raises an error.
* Check if cardinality of random variable remains same across all the
factors.
Returns
-------
check: boolean
True if all the checks are passed
"""
for clique in self.nodes():
factors = filter(lambda x: set(x.scope()) == set(clique), self.factors)
if not any(factors):
raise ValueError("Factors for all the cliques or clusters not defined.")
cardinalities = self.get_cardinality()
if len(set((x for clique in self.nodes() for x in clique))) != len(
cardinalities
):
raise ValueError("Factors for all the variables not defined.")
for factor in self.factors:
for variable, cardinality in zip(factor.scope(), factor.cardinality):
if cardinalities[variable] != cardinality:
raise ValueError(
"Cardinality of variable {var} not matching among factors".format(
var=variable
)
)
return True
def copy(self):
"""
Returns a copy of ClusterGraph.
Returns
-------
ClusterGraph: copy of ClusterGraph
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> G = ClusterGraph()
>>> G.add_nodes_from([('a', 'b'), ('b', 'c')])
>>> G.add_edge(('a', 'b'), ('b', 'c'))
>>> phi1 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
>>> phi2 = DiscreteFactor(['b', 'c'], [2, 2], np.random.rand(4))
>>> G.add_factors(phi1, phi2)
>>> graph_copy = G.copy()
>>> graph_copy.factors
[<DiscreteFactor representing phi(a:2, b:2) at 0xb71b19cc>,
<DiscreteFactor representing phi(b:2, c:2) at 0xb4eaf3ac>]
>>> graph_copy.edges()
[(('a', 'b'), ('b', 'c'))]
>>> graph_copy.nodes()
[('a', 'b'), ('b', 'c')]
"""
copy = ClusterGraph(self.edges())
if self.factors:
factors_copy = [factor.copy() for factor in self.factors]
copy.add_factors(*factors_copy)
return copy
| [
"numpy.sum",
"collections.defaultdict"
] | [((8516, 8532), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (8527, 8532), False, 'from collections import defaultdict\n'), ((9995, 10016), 'numpy.sum', 'np.sum', (['factor.values'], {}), '(factor.values)\n', (10001, 10016), True, 'import numpy as np\n')] |
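# Editor's note: an end-to-end usage sketch assembled from the docstring examples in
# the class above (random factor values, so the printed numbers are illustrative).
if __name__ == "__main__":
    from pgmpy.factors.discrete import DiscreteFactor
    G = ClusterGraph()
    G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
    G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')), (('a', 'b', 'c'), ('a', 'c'))])
    phi1 = DiscreteFactor(['a', 'b', 'c'], [2, 2, 2], np.random.rand(8))
    phi2 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
    phi3 = DiscreteFactor(['a', 'c'], [2, 2], np.random.rand(4))
    G.add_factors(phi1, phi2, phi3)
    assert G.check_model()
    print(G.get_cardinality())          # defaultdict with {'a': 2, 'b': 2, 'c': 2}
    print(G.get_partition_function())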
from PySide6.QtWidgets import QListWidgetItem
from yapsy.IPlugin import IPlugin
class Plugin(IPlugin):
def __init__(self):
IPlugin.__init__(self)
def activate(self):
IPlugin.activate(self)
return
def deactivate(self):
IPlugin.deactivate(self)
def set_current_window(self, editor):
self.editor_ = editor
self.ctx.register_command('commands_list', self.show_commands_window, None,
False)
self.ctx.bind_key('Alt+X', 'commands_list')
def show_commands_window(self, ctx):
self.commands_ = ctx.get_commands()
self.content_window_ = cw = ctx.create_list_content_window()
self.list_widget_ = l = cw.list_widget_
self.text_edit_ = t = cw.text_edit_
self.list_items_ = []
f_c = self.ctx.get_theme_def_color('default', 'foreground')
b_c = self.ctx.get_theme_def_color('default', 'background')
for cmd in self.commands_:
item = QListWidgetItem(cmd, l)
item.setForeground(f_c)
item.setBackground(b_c)
self.list_items_.append(item)
t.returnPressed.connect(self.execute_command)
l.itemDoubleClicked[QListWidgetItem].connect(self.execute_command)
self.content_window_.select_first_visible_item()
cw.show()
def execute_command(self):
self.item_double_clicked(self.list_widget_.currentItem())
def item_double_clicked(self, item):
self.ctx.run_command(item.text())
| [
"PySide6.QtWidgets.QListWidgetItem",
"yapsy.IPlugin.IPlugin.__init__",
"yapsy.IPlugin.IPlugin.activate",
"yapsy.IPlugin.IPlugin.deactivate"
] | [((141, 163), 'yapsy.IPlugin.IPlugin.__init__', 'IPlugin.__init__', (['self'], {}), '(self)\n', (157, 163), False, 'from yapsy.IPlugin import IPlugin\n'), ((194, 216), 'yapsy.IPlugin.IPlugin.activate', 'IPlugin.activate', (['self'], {}), '(self)\n', (210, 216), False, 'from yapsy.IPlugin import IPlugin\n'), ((261, 285), 'yapsy.IPlugin.IPlugin.deactivate', 'IPlugin.deactivate', (['self'], {}), '(self)\n', (279, 285), False, 'from yapsy.IPlugin import IPlugin\n'), ((974, 997), 'PySide6.QtWidgets.QListWidgetItem', 'QListWidgetItem', (['cmd', 'l'], {}), '(cmd, l)\n', (989, 997), False, 'from PySide6.QtWidgets import QListWidgetItem\n')] |
"""Helpers for the Broadlink remote."""
from base64 import b64decode
from homeassistant.helpers import config_validation as cv
def decode_packet(value):
"""Decode a data packet given for a Broadlink remote."""
value = cv.string(value)
extra = len(value) % 4
if extra > 0:
value = value + ("=" * (4 - extra))
return b64decode(value)
def format_mac(mac):
"""Format a MAC address."""
return ":".join([format(octet, "02x") for octet in mac])
| [
"base64.b64decode",
"homeassistant.helpers.config_validation.string"
] | [((229, 245), 'homeassistant.helpers.config_validation.string', 'cv.string', (['value'], {}), '(value)\n', (238, 245), True, 'from homeassistant.helpers import config_validation as cv\n'), ((346, 362), 'base64.b64decode', 'b64decode', (['value'], {}), '(value)\n', (355, 362), False, 'from base64 import b64decode\n')] |
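# Editor's note: a small usage sketch for the helpers above (illustrative values).
# decode_packet restores any missing base64 '=' padding before decoding, and
# format_mac accepts any iterable of integer octets, e.g. a bytes object.
from base64 import b64encode
_raw = bytes([0x26, 0x00, 0x48, 0x00])
_unpadded = b64encode(_raw).decode().rstrip("=")   # simulate a packet stored without padding
assert decode_packet(_unpadded) == _raw
assert format_mac(bytes([0x34, 0xEA, 0x34, 0x12, 0x34, 0x56])) == "34:ea:34:12:34:56"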