code | apis | extract_api
---|---|---
stringlengths 22 to 1.05M | sequencelengths 1 to 3.31k | stringlengths 75 to 3.25M
# Copyright 2020 <NAME>. All rights reserved
# Created on Tue Feb 11 12:29:35 2020
# Author: <NAME>, Purdue University
#
#
# The original code came with the following disclaimer:
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Zhi Huang be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
#
import numpy as np
def fisher_discriminant(H, label):
'''
Parameters
----------
H : Real-valued matrix with columns indicating samples.
label : Class indices.
Returns
-------
E_D : Real scalar value indicating fisher discriminant.
Notes
-----
    This Fisher discriminant corresponds to equations (3a, b) in
    https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3495075
    label is first sorted in ascending order and the same ordering is applied to
    both label and H; otherwise the denominator would be computed incorrectly.
    (A small usage sketch follows this sample.)
References
----------
.. [1] <NAME>, <NAME>, Amari SI. A new discriminant NMF algorithm and its
application to the extraction of subtle emotional differences in speech.
Cognitive neurodynamics. 2012 Dec 1;6(6):525-35.
'''
order = np.argsort(label)
H = H[:,order]
label = label[order]
numerator, denominator = 0, 0
mu_rkn = np.zeros((H.shape[0], 0))
mu_r_all = 1/H.shape[1] * np.sum(H, axis = 1)
for k in np.unique(label):
N_k = np.sum(k == label)
mu_rk_block = np.zeros((0, N_k))
for r in range(H.shape[0]):
mu_r = mu_r_all[r]
mu_rk = 1/N_k * np.sum(H[r, k == label])
mu_rk_block = np.concatenate((mu_rk_block, np.array([mu_rk] * N_k).reshape(1,N_k)), axis = 0)
numerator += N_k * (mu_rk - mu_r) ** 2
mu_rkn = np.concatenate((mu_rkn, mu_rk_block), axis = 1)
denominator = np.sum((H - mu_rkn)**2)
E_D = numerator / denominator
return E_D | [
"numpy.sum",
"numpy.unique",
"numpy.zeros",
"numpy.argsort",
"numpy.array",
"numpy.concatenate"
] | [((1662, 1679), 'numpy.argsort', 'np.argsort', (['label'], {}), '(label)\n', (1672, 1679), True, 'import numpy as np\n'), ((1771, 1796), 'numpy.zeros', 'np.zeros', (['(H.shape[0], 0)'], {}), '((H.shape[0], 0))\n', (1779, 1796), True, 'import numpy as np\n'), ((1860, 1876), 'numpy.unique', 'np.unique', (['label'], {}), '(label)\n', (1869, 1876), True, 'import numpy as np\n'), ((2312, 2337), 'numpy.sum', 'np.sum', (['((H - mu_rkn) ** 2)'], {}), '((H - mu_rkn) ** 2)\n', (2318, 2337), True, 'import numpy as np\n'), ((1827, 1844), 'numpy.sum', 'np.sum', (['H'], {'axis': '(1)'}), '(H, axis=1)\n', (1833, 1844), True, 'import numpy as np\n'), ((1892, 1910), 'numpy.sum', 'np.sum', (['(k == label)'], {}), '(k == label)\n', (1898, 1910), True, 'import numpy as np\n'), ((1933, 1951), 'numpy.zeros', 'np.zeros', (['(0, N_k)'], {}), '((0, N_k))\n', (1941, 1951), True, 'import numpy as np\n'), ((2246, 2291), 'numpy.concatenate', 'np.concatenate', (['(mu_rkn, mu_rk_block)'], {'axis': '(1)'}), '((mu_rkn, mu_rk_block), axis=1)\n', (2260, 2291), True, 'import numpy as np\n'), ((2047, 2071), 'numpy.sum', 'np.sum', (['H[r, k == label]'], {}), '(H[r, k == label])\n', (2053, 2071), True, 'import numpy as np\n'), ((2127, 2150), 'numpy.array', 'np.array', (['([mu_rk] * N_k)'], {}), '([mu_rk] * N_k)\n', (2135, 2150), True, 'import numpy as np\n')] |
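A minimal usage sketch for the fisher_discriminant sample above. The import path fisher_nmf is an assumption (the original file name is not given), and the data is random, made up purely for illustration.

# Hypothetical usage of fisher_discriminant; the module name below is assumed.
import numpy as np
from fisher_nmf import fisher_discriminant  # assumed module name for the sample above

rng = np.random.RandomState(0)
# 4 rows (components) x 30 columns (samples), three classes with shifted means
H = np.hstack([rng.normal(loc=c, size=(4, 10)) for c in (0.0, 2.0, 4.0)])
label = np.repeat([0, 1, 2], 10)
E_D = fisher_discriminant(H, label)
print(E_D)  # larger values indicate stronger between-class separation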
import os
import sys
sys.path.append('.')
sys.path.append('../')
import numpy as np
import cv2
import tensorflow as tf
import matplotlib.pyplot as plt
from dataset.violence import Violence
from dataset.tianchi_guangdong_defect import TianChiGuangdongDefect
if __name__ == '__main__':
config = {
"output shape" : [224, 224, 3],
"mil" : False,
"use cache" : True,
"one hot" : True,
"show warning" : True
}
dataset = TianChiGuangdongDefect(config)
indices = dataset.get_image_indices('trainval')
print(len(indices))
img_list = []
for ind in indices:
img, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised')
# print(label)
dataset.time1 = 0.0
dataset.count = 0
print("")
print("")
print("round 2")
print("")
print("")
for ind in indices:
img, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised')
# print(label)
# if img is not None:
# plt.figure(0)
# plt.clf()
# plt.imshow(img)
# plt.pause(1)
config = {
"output shape" : [224, 224, 3],
}
dataset = TianChiGuangdongDefect(config)
indices = dataset.get_image_indices('trainval')
# for ind in indices:
# img_bag, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised')
# print(label)
# if img_bag is not None:
# plt.figure(0)
# plt.clf()
# row = 4
# col = int(len(img_bag) / row)
# print(len(img_bag), row, col)
# for i in range(row):
# for j in range(col):
# plt.subplot(row, col, i * col+j+1)
# plt.imshow(img_bag[i*col+j])
# plt.pause(3)
| [
"sys.path.append",
"dataset.tianchi_guangdong_defect.TianChiGuangdongDefect"
] | [((22, 42), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (37, 42), False, 'import sys\n'), ((43, 65), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (58, 65), False, 'import sys\n'), ((445, 475), 'dataset.tianchi_guangdong_defect.TianChiGuangdongDefect', 'TianChiGuangdongDefect', (['config'], {}), '(config)\n', (467, 475), False, 'from dataset.tianchi_guangdong_defect import TianChiGuangdongDefect\n'), ((1079, 1109), 'dataset.tianchi_guangdong_defect.TianChiGuangdongDefect', 'TianChiGuangdongDefect', (['config'], {}), '(config)\n', (1101, 1109), False, 'from dataset.tianchi_guangdong_defect import TianChiGuangdongDefect\n')] |
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from .models import Poll, PollOption, PollParticipant, PollTranslation
class PollTranslationInlineAdmin(admin.StackedInline):
verbose_name = _(u'poll translation')
verbose_name_plural = _(u'poll translations')
model = PollTranslation
max_num = len(settings.LANGUAGES)
extra = 1
class PollOptionInlineAdmin(admin.StackedInline):
verbose_name = _(u'poll option')
verbose_name_plural = _(u'poll options')
model = PollOption
extra = 1
class PollAdmin(admin.ModelAdmin):
inlines = [PollTranslationInlineAdmin, PollOptionInlineAdmin]
list_display = ['__unicode__', 'lan']
class PollParticipantAdmin(admin.ModelAdmin):
model = PollParticipant
readonly_fields = ['poll', 'user', 'option']
admin.site.register(Poll, PollAdmin)
admin.site.register(PollParticipant, PollParticipantAdmin)
| [
"django.utils.translation.ugettext_lazy",
"django.contrib.admin.site.register"
] | [((891, 927), 'django.contrib.admin.site.register', 'admin.site.register', (['Poll', 'PollAdmin'], {}), '(Poll, PollAdmin)\n', (910, 927), False, 'from django.contrib import admin\n'), ((928, 986), 'django.contrib.admin.site.register', 'admin.site.register', (['PollParticipant', 'PollParticipantAdmin'], {}), '(PollParticipant, PollParticipantAdmin)\n', (947, 986), False, 'from django.contrib import admin\n'), ((295, 317), 'django.utils.translation.ugettext_lazy', '_', (['u"""poll translation"""'], {}), "(u'poll translation')\n", (296, 317), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((344, 367), 'django.utils.translation.ugettext_lazy', '_', (['u"""poll translations"""'], {}), "(u'poll translations')\n", (345, 367), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((519, 536), 'django.utils.translation.ugettext_lazy', '_', (['u"""poll option"""'], {}), "(u'poll option')\n", (520, 536), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((563, 581), 'django.utils.translation.ugettext_lazy', '_', (['u"""poll options"""'], {}), "(u'poll options')\n", (564, 581), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
from PyInstaller.utils.hooks import collect_data_files
datas = collect_data_files('cli', include_py_files=True) | [
"PyInstaller.utils.hooks.collect_data_files"
] | [((64, 112), 'PyInstaller.utils.hooks.collect_data_files', 'collect_data_files', (['"""cli"""'], {'include_py_files': '(True)'}), "('cli', include_py_files=True)\n", (82, 112), False, 'from PyInstaller.utils.hooks import collect_data_files\n')] |
# Generated by Django 2.1.1 on 2019-02-09 01:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('login', '0004_user_settings'),
]
operations = [
migrations.CreateModel(
name='Log',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.DateTimeField(auto_now_add=True, verbose_name='操作时间')),
('obj', models.TextField(blank=True, null=True, verbose_name='操作对象')),
('result', models.TextField(blank=True, null=True, verbose_name='操作结果')),
('operator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='login.User', verbose_name='操作人')),
],
options={
'verbose_name': '操作记录',
'verbose_name_plural': '操作记录',
'ordering': ['-time'],
},
),
]
| [
"django.db.models.ForeignKey",
"django.db.models.DateTimeField",
"django.db.models.TextField",
"django.db.models.AutoField"
] | [((373, 466), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (389, 466), False, 'from django.db import migrations, models\n'), ((490, 550), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""操作时间"""'}), "(auto_now_add=True, verbose_name='操作时间')\n", (510, 550), False, 'from django.db import migrations, models\n'), ((577, 637), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""操作对象"""'}), "(blank=True, null=True, verbose_name='操作对象')\n", (593, 637), False, 'from django.db import migrations, models\n'), ((667, 727), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""操作结果"""'}), "(blank=True, null=True, verbose_name='操作结果')\n", (683, 727), False, 'from django.db import migrations, models\n'), ((759, 887), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""login.User"""', 'verbose_name': '"""操作人"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, to='login.User', verbose_name='操作人')\n", (776, 887), False, 'from django.db import migrations, models\n')] |
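For reference, a model along the following lines would generate the migration above. This is a sketch reconstructed from the migration's field list, not the project's actual models.py; the string reference 'login.User' mirrors the to='login.User' in the migration.

# Hypothetical models.py sketch reconstructed from the migration fields above.
from django.db import models


class Log(models.Model):
    time = models.DateTimeField(auto_now_add=True, verbose_name='操作时间')
    obj = models.TextField(blank=True, null=True, verbose_name='操作对象')
    result = models.TextField(blank=True, null=True, verbose_name='操作结果')
    operator = models.ForeignKey('login.User', blank=True, null=True,
                                 on_delete=models.SET_NULL, verbose_name='操作人')

    class Meta:
        verbose_name = '操作记录'
        verbose_name_plural = '操作记录'
        ordering = ['-time']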
import os
from sklearn.metrics.pairwise import pairwise_distances_argmin
from chatterbot import ChatBot
from chatterbot.trainers import ListTrainer
from chatbot import *
from utils import *
import tensorflow as tf
class ThreadRanker(object):
def __init__(self, paths):
self.word_embeddings, self.embeddings_dim = load_embeddings(paths['WORD_EMBEDDINGS'])
self.thread_embeddings_folder = paths['THREAD_EMBEDDINGS_FOLDER']
def __load_embeddings_by_tag(self, tag_name):
embeddings_path = os.path.join(self.thread_embeddings_folder, tag_name + ".pkl")
thread_ids, thread_embeddings = unpickle_file(embeddings_path)
return thread_ids, thread_embeddings
def get_best_thread(self, question, tag_name):
""" Returns id of the most similar thread for the question.
The search is performed across the threads with a given tag.
"""
thread_ids, thread_embeddings = self.__load_embeddings_by_tag(tag_name)
# HINT: you have already implemented a similar routine in the 3rd assignment.
question_vec = question_to_vec(question, self.word_embeddings, self.embeddings_dim)
best_thread = pairwise_distances_argmin(
X=question_vec.reshape(1, -1),
Y=thread_embeddings,
metric='cosine'
)
return thread_ids[best_thread[0]]
class DialogueManager(object):
def __init__(self, paths):
print("Loading resources...")
self.create_chitchat_bot()
def create_chitchat_bot(self):
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
self.model = Seq2SeqModel(vocab_size=len(word2id),
embeddings_size=300,
hidden_size=128,
max_iter=20,
start_symbol_id=word2id['[^]'],
end_symbol_id=word2id['[$]'],
padding_symbol_id=word2id['[#]'])
saver = tf.train.Saver()
saver.restore(self.sess, 'checkpoints/model_four_691')
def generate_answer(self, question):
# Pass question to chitchat_bot to generate a response.
response = self.model.get_response(self.sess, question)
return response
| [
"tensorflow.ConfigProto",
"tensorflow.GPUOptions",
"os.path.join",
"tensorflow.train.Saver"
] | [((521, 583), 'os.path.join', 'os.path.join', (['self.thread_embeddings_folder', "(tag_name + '.pkl')"], {}), "(self.thread_embeddings_folder, tag_name + '.pkl')\n", (533, 583), False, 'import os\n'), ((1568, 1618), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': '(0.1)'}), '(per_process_gpu_memory_fraction=0.1)\n', (1581, 1618), True, 'import tensorflow as tf\n'), ((2097, 2113), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2111, 2113), True, 'import tensorflow as tf\n'), ((1658, 1697), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (1672, 1697), True, 'import tensorflow as tf\n')] |
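A small standalone sketch of the ranking step in get_best_thread above: scikit-learn's pairwise_distances_argmin picks, for the question vector, the index of the closest thread embedding under the cosine metric. The toy ids and vectors here are made up for illustration.

# Toy illustration of the cosine nearest-neighbour lookup used in get_best_thread.
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances_argmin

thread_ids = np.array([101, 202, 303])          # made-up thread ids
thread_embeddings = np.array([[1.0, 0.0],
                              [0.0, 1.0],
                              [0.7, 0.7]])
question_vec = np.array([0.9, 0.1])

best = pairwise_distances_argmin(X=question_vec.reshape(1, -1),
                                 Y=thread_embeddings,
                                 metric='cosine')
print(thread_ids[best[0]])                 # -> 101, the closest embedding in cosine distance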
import unittest
from poker.card import Card
class CardTest(unittest.TestCase):
def test_has_rank(self):
card = Card(rank = "Queen", suit = "Hearts")
self.assertEqual(card.rank, "Queen")
def test_has_suit(self):
card = Card(rank = "2", suit = "Clubs")
self.assertEqual(card.suit, "Clubs")
def test_knows_its_rank_index(self):
card = Card(rank = "Jack", suit = "Hearts")
self.assertEqual(card.rank_index, 9)
def test_has_string_representation_with_rank_and_suit(self):
card = Card("5", "Diamonds")
self.assertEqual(str(card), "5 of Diamonds")
def test_has_technical_representation(self):
card = Card("5", "Diamonds")
self.assertEqual(repr(card), "Card('5', 'Diamonds')")
def test_card_has_four_possible_suit_options(self):
self.assertEqual(
Card.SUITS,
("Hearts", "Clubs", "Spades", "Diamonds")
)
def test_card_has_thirteen_possible_rank_options(self):
self.assertEqual(
Card.RANKS,
(
"2", "3", "4", "5", "6", "7", "8", "9", "10",
"Jack", "Queen", "King", "Ace"
)
)
def test_card_only_allows_for_valid_rank(self):
with self.assertRaises(ValueError):
Card(rank = "Two", suit = "Hearts")
def test_card_only_allows_for_valid_suit(self):
with self.assertRaises(ValueError):
Card(rank = "2", suit = "Dots")
def test_can_create_standard_52_cards(self):
cards = Card.create_standard_52_cards()
self.assertEqual(len(cards), 52)
self.assertEqual(
cards[0],
Card(rank = "2", suit = "Hearts")
)
self.assertEqual(
cards[-1],
Card(rank = "Ace", suit = "Diamonds")
)
def test_figures_out_if_two_cards_are_equal(self):
self.assertEqual(
Card(rank = "2", suit = "Hearts"),
Card(rank = "2", suit = "Hearts")
)
def test_card_can_sort_itself_with_another_one(self):
queen_of_spades = Card(rank = "Queen", suit = "Spades")
king_of_spades = Card(rank = "King", suit = "Spades")
evaluation = queen_of_spades < king_of_spades
self.assertEqual(
evaluation,
True,
"The sort algorithm is not sorting the lower card first"
)
def test_sorts_cards(self):
two_of_spades = Card(rank = "2", suit = "Spades")
five_of_diamonds = Card(rank = "5", suit = "Diamonds")
five_of_hearts = Card(rank = "5", suit = "Hearts")
eight_of_hearts = Card(rank = "8", suit = "Hearts")
ace_of_clubs = Card(rank = "Ace", suit = "Clubs")
unsorted_cards = [
five_of_hearts,
five_of_diamonds,
two_of_spades,
ace_of_clubs,
eight_of_hearts
]
unsorted_cards.sort()
self.assertEqual(
unsorted_cards,
[
two_of_spades,
five_of_diamonds,
five_of_hearts,
eight_of_hearts,
ace_of_clubs
]
)
| [
"poker.card.Card",
"poker.card.Card.create_standard_52_cards"
] | [((124, 157), 'poker.card.Card', 'Card', ([], {'rank': '"""Queen"""', 'suit': '"""Hearts"""'}), "(rank='Queen', suit='Hearts')\n", (128, 157), False, 'from poker.card import Card\n'), ((252, 280), 'poker.card.Card', 'Card', ([], {'rank': '"""2"""', 'suit': '"""Clubs"""'}), "(rank='2', suit='Clubs')\n", (256, 280), False, 'from poker.card import Card\n'), ((387, 419), 'poker.card.Card', 'Card', ([], {'rank': '"""Jack"""', 'suit': '"""Hearts"""'}), "(rank='Jack', suit='Hearts')\n", (391, 419), False, 'from poker.card import Card\n'), ((550, 571), 'poker.card.Card', 'Card', (['"""5"""', '"""Diamonds"""'], {}), "('5', 'Diamonds')\n", (554, 571), False, 'from poker.card import Card\n'), ((690, 711), 'poker.card.Card', 'Card', (['"""5"""', '"""Diamonds"""'], {}), "('5', 'Diamonds')\n", (694, 711), False, 'from poker.card import Card\n'), ((1555, 1586), 'poker.card.Card.create_standard_52_cards', 'Card.create_standard_52_cards', ([], {}), '()\n', (1584, 1586), False, 'from poker.card import Card\n'), ((2113, 2146), 'poker.card.Card', 'Card', ([], {'rank': '"""Queen"""', 'suit': '"""Spades"""'}), "(rank='Queen', suit='Spades')\n", (2117, 2146), False, 'from poker.card import Card\n'), ((2176, 2208), 'poker.card.Card', 'Card', ([], {'rank': '"""King"""', 'suit': '"""Spades"""'}), "(rank='King', suit='Spades')\n", (2180, 2208), False, 'from poker.card import Card\n'), ((2471, 2500), 'poker.card.Card', 'Card', ([], {'rank': '"""2"""', 'suit': '"""Spades"""'}), "(rank='2', suit='Spades')\n", (2475, 2500), False, 'from poker.card import Card\n'), ((2532, 2563), 'poker.card.Card', 'Card', ([], {'rank': '"""5"""', 'suit': '"""Diamonds"""'}), "(rank='5', suit='Diamonds')\n", (2536, 2563), False, 'from poker.card import Card\n'), ((2593, 2622), 'poker.card.Card', 'Card', ([], {'rank': '"""5"""', 'suit': '"""Hearts"""'}), "(rank='5', suit='Hearts')\n", (2597, 2622), False, 'from poker.card import Card\n'), ((2653, 2682), 'poker.card.Card', 'Card', ([], {'rank': '"""8"""', 'suit': '"""Hearts"""'}), "(rank='8', suit='Hearts')\n", (2657, 2682), False, 'from poker.card import Card\n'), ((2710, 2740), 'poker.card.Card', 'Card', ([], {'rank': '"""Ace"""', 'suit': '"""Clubs"""'}), "(rank='Ace', suit='Clubs')\n", (2714, 2740), False, 'from poker.card import Card\n'), ((1312, 1343), 'poker.card.Card', 'Card', ([], {'rank': '"""Two"""', 'suit': '"""Hearts"""'}), "(rank='Two', suit='Hearts')\n", (1316, 1343), False, 'from poker.card import Card\n'), ((1457, 1484), 'poker.card.Card', 'Card', ([], {'rank': '"""2"""', 'suit': '"""Dots"""'}), "(rank='2', suit='Dots')\n", (1461, 1484), False, 'from poker.card import Card\n'), ((1689, 1718), 'poker.card.Card', 'Card', ([], {'rank': '"""2"""', 'suit': '"""Hearts"""'}), "(rank='2', suit='Hearts')\n", (1693, 1718), False, 'from poker.card import Card\n'), ((1795, 1828), 'poker.card.Card', 'Card', ([], {'rank': '"""Ace"""', 'suit': '"""Diamonds"""'}), "(rank='Ace', suit='Diamonds')\n", (1799, 1828), False, 'from poker.card import Card\n'), ((1937, 1966), 'poker.card.Card', 'Card', ([], {'rank': '"""2"""', 'suit': '"""Hearts"""'}), "(rank='2', suit='Hearts')\n", (1941, 1966), False, 'from poker.card import Card\n'), ((1984, 2013), 'poker.card.Card', 'Card', ([], {'rank': '"""2"""', 'suit': '"""Hearts"""'}), "(rank='2', suit='Hearts')\n", (1988, 2013), False, 'from poker.card import Card\n')] |
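The tests above pin down the poker.card.Card interface. A minimal sketch that would satisfy them could look like the following; it is inferred from the tests, not the project's real implementation, and the alphabetical suit tie-break in __lt__ is a guess chosen so the sorting test's expected order (5 of Diamonds before 5 of Hearts) holds.

# Minimal Card sketch inferred from the tests above; not the project's actual code.
class Card:
    SUITS = ("Hearts", "Clubs", "Spades", "Diamonds")
    RANKS = ("2", "3", "4", "5", "6", "7", "8", "9", "10",
             "Jack", "Queen", "King", "Ace")

    def __init__(self, rank, suit):
        if rank not in self.RANKS:
            raise ValueError(f"Invalid rank: {rank}")
        if suit not in self.SUITS:
            raise ValueError(f"Invalid suit: {suit}")
        self.rank = rank
        self.suit = suit
        self.rank_index = self.RANKS.index(rank)

    @classmethod
    def create_standard_52_cards(cls):
        return [cls(rank=rank, suit=suit) for suit in cls.SUITS for rank in cls.RANKS]

    def __str__(self):
        return f"{self.rank} of {self.suit}"

    def __repr__(self):
        return f"Card('{self.rank}', '{self.suit}')"

    def __eq__(self, other):
        return self.rank == other.rank and self.suit == other.suit

    def __lt__(self, other):
        # Rank first; alphabetical suit as a tie-break (an assumption, see note above).
        return (self.rank_index, self.suit) < (other.rank_index, other.suit)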
from random import randint
import datetime
import pymysql
import cgi
def getConnection():
return pymysql.connect(host='localhost',
user='root',
password='<PASSWORD>',
db='BookFetch')
def newBook():
Title = input("Enter the title of the new book: ")
ISBN = input("Enter the isbn of the new book: ")
ISBN13 = input ("Enter the isbn 13: ")
DPublished = input("Enter date published: ")
Quantity = input("Enter quantity: ")
Publisher = input("Enter publisher: ")
Edition = input("Edition: ")
Language = input("Language: ")
Category = input("Category: ")
Author = input("Author: ")
connection = getConnection()
connection.autocommit(True)
try:
with connection.cursor() as cursor:
sql = ("insert into BookDetails values (\"" + Title + "\", " + ISBN
+ ", " + ISBN13 + ", \"" + DPublished + "\", " + Quantity
+ ", \"" + Publisher + "\", " + Edition + ", \"" + Language
+ "\", \"" + Category + "\", \"" + Author + "\");")
cursor.execute(sql)
finally:
connection.close()
def newUniversity():
Name = input("Enter the name of the university: ")
RFName = input("First name of the representative: ")
RLName = input("Last name of the representative: ")
Street = input("Street: ")
City = input("City: ")
State = input("State: ")
Country = input("Country: ")
connection = getConnection()
connection.autocommit(True)
try:
with connection.cursor() as cursor:
sql = ("insert into Universities values(\"" + Name
+ "\", \"" + RFName + "\", \"" + RLName + "\", \""
+ Street + "\", \"" + City + "\", \"" + State
+ "\", \"" + Country + "\");")
cursor.execute(sql)
finally:
connection.close()
def newDepartment():
UniversityName = input("Enter the name of the university: ")
DeptName = input("Enter the name of the department: ")
connection = getConnection()
connection.autocommit(True)
try:
with connection.cursor() as cursor:
sql = ("insert into Departments values(\"" + UniversityName
+ "\", \"" + DeptName + "\");")
cursor.execute(sql)
finally:
connection.close()
def newCourses():
CourseName = input("Enter the name of the course: ")
UniversityName = input("Enter the name of the university: ")
DeptName = input("Enter the name of the department: ")
connection = getConnection()
connection.autocommit(True)
try:
with connection.cursor() as cursor:
sql = ("insert into Courses values(\"" + CourseName
+ "\", \"" + UniversityName + "\", \"" + DeptName + "\");")
cursor.execute(sql)
finally:
connection.close()
def newBAssociation():
print("All of these are foreign key constraints: ")
CourseName = input("Enter the name of the course: ")
UniversityName = input("Enter the name of the university: ")
ISBN = input("Enter the isbn of the book: ")
connection = getConnection()
connection.autocommit(True)
try:
with connection.cursor() as cursor:
sql = ("insert into CourseReq values(" + ISBN
+ ", \"" + CourseName + "\", \"" + UniversityName + "\");")
cursor.execute(sql)
finally:
connection.close()
def adminModuleMain():
welcomeMsg = ("---------------------\nAdmin Module\n---------------------")
mainOptionsMsg = (
"""Here are your options:
1) Create a new book with inventory
2) Create a new university
3) Create a new department
4) Create a new courses
5) Create a new book associations
6) Return
7) Quit
Enter [1-7]: """)
invalidInputMsg = "Invalid input, please enter a valid input."
print(welcomeMsg)
userInput = int(input(mainOptionsMsg))
print("\n")
while(userInput < 1 or userInput > 7):
print(invalidInputMsg)
userInput = int(input(mainOptionsMsg))
print("\n")
if (userInput == 1):
newBook()
elif (userInput == 2):
newUniversity()
elif (userInput == 3):
newDepartment()
elif (userInput == 4):
newCourses()
elif (userInput == 5):
newBAssociation()
elif (userInput == 6):
return
elif (userInput == 7):
quit()
adminModuleMain() | [
"pymysql.connect"
] | [((102, 192), 'pymysql.connect', 'pymysql.connect', ([], {'host': '"""localhost"""', 'user': '"""root"""', 'password': '"""<PASSWORD>"""', 'db': '"""BookFetch"""'}), "(host='localhost', user='root', password='<PASSWORD>', db=\n 'BookFetch')\n", (117, 192), False, 'import pymysql\n')] |
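The helpers above assemble SQL by string concatenation, which is brittle and open to SQL injection. pymysql's cursor.execute accepts %s placeholders with a parameter tuple, so a safer variant of newBook's insert could look like this sketch (same BookDetails table and column order as assumed by the code above).

# Sketch of a parameterized insert with pymysql, mirroring newBook above.
def insert_book(connection, title, isbn, isbn13, date_published, quantity,
                publisher, edition, language, category, author):
    sql = ("INSERT INTO BookDetails "
           "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
    with connection.cursor() as cursor:
        # pymysql escapes the parameters; no manual quoting needed
        cursor.execute(sql, (title, isbn, isbn13, date_published, quantity,
                             publisher, edition, language, category, author))
    connection.commit()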
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
from django.views.generic import DetailView, ListView, RedirectView, UpdateView
from allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter
from allauth.socialaccount.providers.github.views import GitHubOAuth2Adapter
from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter
from allauth.socialaccount.providers.instagram.views import InstagramOAuth2Adapter
from allauth.socialaccount.providers.twitter.views import TwitterOAuthAdapter
from rest_auth.registration.views import SocialLoginView
from rest_auth.social_serializers import TwitterLoginSerializer
User = get_user_model()
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
slug_field = "username"
slug_url_kwarg = "username"
user_detail_view = UserDetailView.as_view()
class UserListView(LoginRequiredMixin, ListView):
model = User
slug_field = "username"
slug_url_kwarg = "username"
user_list_view = UserListView.as_view()
class UserUpdateView(LoginRequiredMixin, UpdateView):
model = User
fields = ["name"]
def get_success_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
def get_object(self):
return User.objects.get(username=self.request.user.username)
user_update_view = UserUpdateView.as_view()
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
user_redirect_view = UserRedirectView.as_view()
# Social Apps
class FacebookLogin(SocialLoginView):
adapter_class = FacebookOAuth2Adapter
class GitHubLogin(SocialLoginView):
adapter_class = GitHubOAuth2Adapter
class GoogleLogin(SocialLoginView):
adapter_class = GoogleOAuth2Adapter
class InstagramLogin(SocialLoginView):
adapter_class = InstagramOAuth2Adapter
class TwitterLogin(SocialLoginView):
serializer_class = TwitterLoginSerializer
adapter_class = TwitterOAuthAdapter
| [
"django.urls.reverse",
"django.contrib.auth.get_user_model"
] | [((743, 759), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (757, 759), False, 'from django.contrib.auth import get_user_model\n'), ((1255, 1327), 'django.urls.reverse', 'reverse', (['"""users:detail"""'], {'kwargs': "{'username': self.request.user.username}"}), "('users:detail', kwargs={'username': self.request.user.username})\n", (1262, 1327), False, 'from django.urls import reverse\n'), ((1601, 1673), 'django.urls.reverse', 'reverse', (['"""users:detail"""'], {'kwargs': "{'username': self.request.user.username}"}), "('users:detail', kwargs={'username': self.request.user.username})\n", (1608, 1673), False, 'from django.urls import reverse\n')] |
import random
import string
import other_info
import re
from dicttoxml import dicttoxml
def make_csrf_state(size):
''' Makes a CSRF state by randomly choosing uppercase letters and digits '''
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in xrange(size))
def valid_item_name(item_name):
    ''' Test item name for bad words or format etc.
    - not fully implemented for this project '''
if len(item_name) > 50:
return False;
return True
def valid_item_description(item_description):
    ''' Test item description for bad words or format etc.
    - not fully implemented for this project '''
if len(item_description) > 1000:
return False
return True
def allowed_file(filename):
''' Checks if an image file has the right extension '''
ALLOWED_EXTENSIONS = set(['jpg', 'jpeg', 'gif', "png"])
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
def valid_link(link):
    ''' Checks if the link provided is a valid link format
    - not fully implemented for this project'''
    if not link:
        return False
    # Rough URL pattern: optional scheme, domain name, TLD, optional path.
    pat = re.compile(r"^(https?:\/\/)?([\da-z\.-]+)\.([a-z\.]{2,6})([\/\w \.-]*)*\/?$")
    return pat.match(link) is not None
def test_new_item(item_dict):
''' takes an item dictionary and checks if all fields are properly filled
returns: tuple (Bool: Success, string: Error) '''
# for now just test the surface things
category = item_dict.get("category")
if not category in other_info.item_categories:
return (False, "Category is invalid")
name = item_dict.get("name")
if not name or not valid_item_name(name) == True:
return (False, "Name not valid")
description = item_dict.get("description")
if not valid_item_description(description) == True:
return (False, "Description not valid")
link = item_dict.get("link")
if link and not valid_link(item_dict["link"]):
return (False, "Link is not valid")
return (True, None)
def test_item_prop(item_dict):
''' Tests all the properties passed in the item_dict
and checks if they are valid for updating the item '''
my_valid_vars = ['name', 'category', 'description', 'link']
for kw in item_dict:
if kw not in my_valid_vars:
return (False, "You are trying to update a property that doesn't exist: %s"%kw)
if kw == 'name' and not valid_item_name(item_dict[kw]):
return (False, "Name not valid")
if kw == 'description' and not valid_item_description(item_dict[kw]):
return (False, "Description not valid")
if kw == 'category' and item_dict[kw] not in other_info.item_categories:
return (False, "Category not valid")
if kw == 'link' and not valid_link(item_dict[kw]):
return (False, "Link not valid")
return (True, None)
def remove_special_characters(my_string):
return ''.join(e for e in my_string if e.isalnum()).lower()
def get_cat_regex():
return "(?i)"+'|'.join(other_info.item_categories)
def makexml(my_dict):
return dicttoxml(my_dict)
| [
"dicttoxml.dicttoxml",
"random.choice",
"re.compile"
] | [((1118, 1216), 're.compile', 're.compile', (['"""/^(https?:\\\\/\\\\/)?([\\\\da-z\\\\.-]+)\\\\.([a-z\\\\.]{2,6})([\\\\/\\\\w \\\\.-]*)*\\\\/?$/"""'], {}), "(\n '/^(https?:\\\\/\\\\/)?([\\\\da-z\\\\.-]+)\\\\.([a-z\\\\.]{2,6})([\\\\/\\\\w \\\\.-]*)*\\\\/?$/'\n )\n", (1128, 1216), False, 'import re\n'), ((3145, 3163), 'dicttoxml.dicttoxml', 'dicttoxml', (['my_dict'], {}), '(my_dict)\n', (3154, 3163), False, 'from dicttoxml import dicttoxml\n'), ((217, 270), 'random.choice', 'random.choice', (['(string.ascii_uppercase + string.digits)'], {}), '(string.ascii_uppercase + string.digits)\n', (230, 270), False, 'import random\n')] |
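make_csrf_state above draws from random, which is not a cryptographically secure source. For a value used as CSRF state, the standard-library secrets module (Python 3.6+) is the usual substitute; the sketch below is an alternative, not a drop-in change to the original Python 2-style code (note the xrange above).

# Alternative CSRF-state generator using the cryptographically secure secrets module.
import secrets
import string


def make_csrf_state_secure(size):
    ''' Like make_csrf_state, but drawn from a CSPRNG. '''
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(secrets.choice(alphabet) for _ in range(size))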
import numpy as np
import tensorflow as tf
import tensorflow.contrib.distributions as tfd
from integrators import ODERK4, SDEEM
from kernels import OperatorKernel
from gpflow import transforms
from param import Param
float_type = tf.float64
jitter0 = 1e-6
class NPODE:
def __init__(self,Z0,U0,sn0,kern,jitter=jitter0,
summ=False,whiten=True,fix_Z=False,fix_U=False,fix_sn=False):
""" Constructor for the NPODE model
Args:
Z0: Numpy matrix of initial inducing points of size MxD, M being the
number of inducing points.
U0: Numpy matrix of initial inducing vectors of size MxD, M being the
number of inducing points.
            sn0: Numpy vector of size 1xD for the initial noise variance
kern: Kernel object for GP interpolation
jitter: Float of jitter level
whiten: Boolean. Currently we perform the optimization only in the
white domain
summ: Boolean for Tensorflow summary
fix_Z: Boolean - whether inducing locations are fixed or optimized
fix_U: Boolean - whether inducing vectors are fixed or optimized
fix_sn: Boolean - whether noise variance is fixed or optimized
"""
self.name = 'npode'
self.whiten = whiten
self.kern = kern
self.jitter = jitter
with tf.name_scope("NPDE"):
Z = Param(Z0,
name = "Z",
summ = False,
fixed = fix_Z)
U = Param(U0,
name = "U",
summ = False,
fixed = fix_U)
sn = Param(np.array(sn0),
name = "sn",
summ = summ,
fixed = fix_sn,
transform = transforms.Log1pe())
self.Z = Z()
self.U = U()
self.sn = sn()
self.D = U.shape[1]
self.integrator = ODERK4(self)
self.fix_Z = fix_Z
self.fix_sn = fix_sn
self.fix_U = fix_U
def f(self,X,t=[0]):
""" Implements GP interpolation to compute the value of the differential
function at location(s) X.
Args:
X: TxD tensor of input locations, T is the number of locations.
Returns:
TxD tensor of differential function (GP conditional) computed on
input locations
"""
U = self.U
Z = self.Z
kern = self.kern
N = tf.shape(X)[0]
M = tf.shape(Z)[0]
D = tf.shape(Z)[1] # dim of state
if kern.ktype == "id":
Kzz = kern.K(Z) + tf.eye(M, dtype=float_type) * self.jitter
else:
Kzz = kern.K(Z) + tf.eye(M*D, dtype=float_type) * self.jitter
Lz = tf.cholesky(Kzz)
Kzx = kern.K(Z, X)
A = tf.matrix_triangular_solve(Lz, Kzx, lower=True)
if not self.whiten:
A = tf.matrix_triangular_solve(tf.transpose(Lz), A, lower=False)
f = tf.matmul(A, U, transpose_a=True)
# transformation for "id - rbf" kernel
if not kern.ktype == "id" and not kern.ktype == "kr" :
f = tf.reshape(f,[N,D])
return f
def build_prior(self):
if self.kern.ktype == "id" or self.kern.ktype == "kr":
if self.whiten:
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(self.U[:,0]))
else:
mvn = tfd.MultivariateNormalFullCovariance(
loc=tf.zeros_like(self.U[:,0]),
covariance_matrix=self.kern.K(self.Z,self.Z))
probs = tf.add_n([mvn.log_prob(self.U[:,d]) for d in range(self.kern.ndims)])
else:
if self.whiten:
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(self.U))
else:
mvn = tfd.MultivariateNormalFullCovariance(
loc=tf.zeros_like(self.U),
covariance_matrix=self.kern.K(self.Z,self.Z))
probs = tf.reduce_sum(mvn.log_prob(tf.squeeze(self.U)))
return probs
def forward(self,x0,ts):
return self.integrator.forward(x0=x0,ts=ts)
def predict(self,x0,t):
""" Computes the integral and returns the path
Args:
x0: Python/numpy array of initial value
t: Python/numpy array of time points the integral is evaluated at
Returns:
ODE solution computed at t, tensor of size [len(t),len(x0)]
"""
x0 = np.asarray(x0,dtype=np.float64).reshape((1,-1))
t = [t]
integrator = ODERK4(self)
path = integrator.forward(x0,t)
path = path[0]
return path
def Kzz(self):
kern = self.kern
Z = self.Z
M = tf.shape(Z)[0]
D = tf.shape(Z)[1] # dim of state
if kern.ktype == "id":
Kzz = kern.K(Z) + tf.eye(M, dtype=float_type) * self.jitter
else:
Kzz = kern.K(Z) + tf.eye(M*D, dtype=float_type) * self.jitter
return Kzz
def U(self):
U = self.U
if self.whiten:
Lz = tf.cholesky(self.Kzz())
U = tf.matmul(Lz,U)
return U
def __str__(self):
rep = 'noise variance: ' + str(self.sn.eval()) + \
'\nsignal variance: ' + str(self.kern.sf.eval()) + \
'\nlengthscales: ' + str(self.kern.ell.eval())
return rep
class NPSDE(NPODE):
def __init__(self,Z0,U0,sn0,kern,diffus,s=1,jitter=jitter0,
summ=False,whiten=True,fix_Z=False,fix_U=False,fix_sn=False):
""" Constructor for the NPSDE model
Args:
Z0: Numpy matrix of initial inducing points of size MxD, M being the
number of inducing points.
U0: Numpy matrix of initial inducing vectors of size MxD, M being the
number of inducing points.
            sn0: Numpy vector of size 1xD for the initial noise variance
kern: Kernel object for GP interpolation
diffus: BrownianMotion object for diffusion GP interpolation
            s: Integer parameterizing how much denser the integration points are
jitter: Float of jitter level
summ: Boolean for Tensorflow summary
whiten: Boolean. Currently we perform the optimization only in the
white domain
fix_Z: Boolean - whether inducing locations are fixed or optimized
fix_U: Boolean - whether inducing vectors are fixed or optimized
fix_sn: Boolean - whether noise variance is fixed or optimized
"""
super().__init__(Z0,U0,sn0,kern,jitter=jitter,
summ=summ,whiten=whiten,fix_Z=fix_Z,fix_U=fix_U,fix_sn=fix_sn)
self.name = 'npsde'
self.diffus = diffus
self.integrator = SDEEM(self)
def build_prior(self):
pf = super().build_prior()
pg = self.diffus.build_prior()
return pf + pg
def g(self,ts,Nw=1):
return self.diffus.g(ts=ts,Nw=Nw)
def forward(self,x0,ts,Nw=1):
return self.integrator.forward(x0=x0,ts=ts,Nw=Nw)
def sample(self,x0,t,Nw):
""" Draws random samples from a learned SDE system
Args:
Nw: Integer number of samples
x0: Python/numpy array of initial value
t: Python/numpy array of time points the integral is evaluated at
Returns:
Tensor of size [Nw,len(t),len(x0)] storing samples
"""
# returns (Nw, len(t), D)
x0 = np.asarray(x0,dtype=np.float64).reshape((1,-1))
t = [t]
path = self.integrator.forward(x0,t,Nw)
path = path[0]
return path
def __str__(self):
return super().__str__() + self.diffus.__str__()
class BrownianMotion:
def __init__(self,sf0,ell0,Z0,U0,whiten=False,summ=False,
fix_ell=True,fix_sf=True,fix_Z=True,fix_U=False):
with tf.name_scope('Brownian'):
Zg = Param(Z0,
name = "Z",
summ = False,
fixed = fix_Z)
Ug = Param(U0,
name = "U",
summ = False,
fixed = fix_U)
self.kern = OperatorKernel(sf0=sf0,
ell0=ell0,
ktype="id",
name='Kernel',
summ=summ,
fix_ell=fix_ell,
fix_sf=fix_sf)
self.Zg = Zg()
self.Ug = Ug()
self.jitter = 1e-6
self.whiten = whiten
self.fix_Z = fix_Z
self.fix_U = fix_U
def g(self,X,t):
""" generates state dependent brownian motion
Args:
X: current states (in rows)
t: current time (used if diffusion depends on time)
Returns:
A tensor of the same shape as X
"""
Ug = self.Ug
Zg = self.Zg
kern = self.kern
if not kern.ktype == "id":
raise NotImplementedError()
M = tf.shape(Zg)[0]
D = tf.shape(X)[1]
if kern.ktype == "id":
Kzz = kern.K(Zg) + tf.eye(M, dtype=float_type) * self.jitter
else:
Kzz = kern.K(Zg) + tf.eye(M*D, dtype=float_type) * self.jitter
Lz = tf.cholesky(Kzz)
Kzx = kern.K(Zg, X)
A = tf.matrix_triangular_solve(Lz, Kzx, lower=True)
if not self.whiten:
A = tf.matrix_triangular_solve(tf.transpose(Lz), A, lower=False)
g = tf.matmul(A, Ug, transpose_a=True)
dw = tf.random_normal(tf.shape(X),dtype=float_type)
return g*dw
def __str__(self):
rep = '\ndiff signal variance: ' + str(self.kern.sf.eval()) + \
'\ndiff lengthscales: ' + str(self.kern.ell.eval())
return rep
def build_prior(self):
if self.whiten:
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(self.Ug))
else:
mvn = tfd.MultivariateNormalFullCovariance(
loc=tf.zeros_like(self.Ug),
covariance_matrix=self.kern.K(self.Zg,self.Zg))
return tf.reduce_sum(mvn.log_prob(self.Ug)) | [
"gpflow.transforms.Log1pe",
"kernels.OperatorKernel",
"numpy.asarray",
"tensorflow.reshape",
"integrators.SDEEM",
"tensorflow.eye",
"tensorflow.zeros_like",
"tensorflow.cholesky",
"tensorflow.transpose",
"tensorflow.matmul",
"tensorflow.shape",
"numpy.array",
"tensorflow.squeeze",
"tensorflow.matrix_triangular_solve",
"tensorflow.name_scope",
"integrators.ODERK4",
"param.Param"
] | [((2065, 2077), 'integrators.ODERK4', 'ODERK4', (['self'], {}), '(self)\n', (2071, 2077), False, 'from integrators import ODERK4, SDEEM\n'), ((2893, 2909), 'tensorflow.cholesky', 'tf.cholesky', (['Kzz'], {}), '(Kzz)\n', (2904, 2909), True, 'import tensorflow as tf\n'), ((2951, 2998), 'tensorflow.matrix_triangular_solve', 'tf.matrix_triangular_solve', (['Lz', 'Kzx'], {'lower': '(True)'}), '(Lz, Kzx, lower=True)\n', (2977, 2998), True, 'import tensorflow as tf\n'), ((3118, 3151), 'tensorflow.matmul', 'tf.matmul', (['A', 'U'], {'transpose_a': '(True)'}), '(A, U, transpose_a=True)\n', (3127, 3151), True, 'import tensorflow as tf\n'), ((4815, 4827), 'integrators.ODERK4', 'ODERK4', (['self'], {}), '(self)\n', (4821, 4827), False, 'from integrators import ODERK4, SDEEM\n'), ((7073, 7084), 'integrators.SDEEM', 'SDEEM', (['self'], {}), '(self)\n', (7078, 7084), False, 'from integrators import ODERK4, SDEEM\n'), ((9605, 9621), 'tensorflow.cholesky', 'tf.cholesky', (['Kzz'], {}), '(Kzz)\n', (9616, 9621), True, 'import tensorflow as tf\n'), ((9664, 9711), 'tensorflow.matrix_triangular_solve', 'tf.matrix_triangular_solve', (['Lz', 'Kzx'], {'lower': '(True)'}), '(Lz, Kzx, lower=True)\n', (9690, 9711), True, 'import tensorflow as tf\n'), ((9831, 9865), 'tensorflow.matmul', 'tf.matmul', (['A', 'Ug'], {'transpose_a': '(True)'}), '(A, Ug, transpose_a=True)\n', (9840, 9865), True, 'import tensorflow as tf\n'), ((1411, 1432), 'tensorflow.name_scope', 'tf.name_scope', (['"""NPDE"""'], {}), "('NPDE')\n", (1424, 1432), True, 'import tensorflow as tf\n'), ((1450, 1494), 'param.Param', 'Param', (['Z0'], {'name': '"""Z"""', 'summ': '(False)', 'fixed': 'fix_Z'}), "(Z0, name='Z', summ=False, fixed=fix_Z)\n", (1455, 1494), False, 'from param import Param\n'), ((1586, 1630), 'param.Param', 'Param', (['U0'], {'name': '"""U"""', 'summ': '(False)', 'fixed': 'fix_U'}), "(U0, name='U', summ=False, fixed=fix_U)\n", (1591, 1630), False, 'from param import Param\n'), ((2604, 2615), 'tensorflow.shape', 'tf.shape', (['X'], {}), '(X)\n', (2612, 2615), True, 'import tensorflow as tf\n'), ((2631, 2642), 'tensorflow.shape', 'tf.shape', (['Z'], {}), '(Z)\n', (2639, 2642), True, 'import tensorflow as tf\n'), ((2658, 2669), 'tensorflow.shape', 'tf.shape', (['Z'], {}), '(Z)\n', (2666, 2669), True, 'import tensorflow as tf\n'), ((3279, 3300), 'tensorflow.reshape', 'tf.reshape', (['f', '[N, D]'], {}), '(f, [N, D])\n', (3289, 3300), True, 'import tensorflow as tf\n'), ((4987, 4998), 'tensorflow.shape', 'tf.shape', (['Z'], {}), '(Z)\n', (4995, 4998), True, 'import tensorflow as tf\n'), ((5014, 5025), 'tensorflow.shape', 'tf.shape', (['Z'], {}), '(Z)\n', (5022, 5025), True, 'import tensorflow as tf\n'), ((5372, 5388), 'tensorflow.matmul', 'tf.matmul', (['Lz', 'U'], {}), '(Lz, U)\n', (5381, 5388), True, 'import tensorflow as tf\n'), ((8209, 8234), 'tensorflow.name_scope', 'tf.name_scope', (['"""Brownian"""'], {}), "('Brownian')\n", (8222, 8234), True, 'import tensorflow as tf\n'), ((8253, 8297), 'param.Param', 'Param', (['Z0'], {'name': '"""Z"""', 'summ': '(False)', 'fixed': 'fix_Z'}), "(Z0, name='Z', summ=False, fixed=fix_Z)\n", (8258, 8297), False, 'from param import Param\n'), ((8390, 8434), 'param.Param', 'Param', (['U0'], {'name': '"""U"""', 'summ': '(False)', 'fixed': 'fix_U'}), "(U0, name='U', summ=False, fixed=fix_U)\n", (8395, 8434), False, 'from param import Param\n'), ((8537, 8645), 'kernels.OperatorKernel', 'OperatorKernel', ([], {'sf0': 'sf0', 'ell0': 'ell0', 'ktype': '"""id"""', 'name': '"""Kernel"""', 'summ': 'summ', 
'fix_ell': 'fix_ell', 'fix_sf': 'fix_sf'}), "(sf0=sf0, ell0=ell0, ktype='id', name='Kernel', summ=summ,\n fix_ell=fix_ell, fix_sf=fix_sf)\n", (8551, 8645), False, 'from kernels import OperatorKernel\n'), ((9355, 9367), 'tensorflow.shape', 'tf.shape', (['Zg'], {}), '(Zg)\n', (9363, 9367), True, 'import tensorflow as tf\n'), ((9383, 9394), 'tensorflow.shape', 'tf.shape', (['X'], {}), '(X)\n', (9391, 9394), True, 'import tensorflow as tf\n'), ((9896, 9907), 'tensorflow.shape', 'tf.shape', (['X'], {}), '(X)\n', (9904, 9907), True, 'import tensorflow as tf\n'), ((1761, 1774), 'numpy.array', 'np.array', (['sn0'], {}), '(sn0)\n', (1769, 1774), True, 'import numpy as np\n'), ((3071, 3087), 'tensorflow.transpose', 'tf.transpose', (['Lz'], {}), '(Lz)\n', (3083, 3087), True, 'import tensorflow as tf\n'), ((4730, 4762), 'numpy.asarray', 'np.asarray', (['x0'], {'dtype': 'np.float64'}), '(x0, dtype=np.float64)\n', (4740, 4762), True, 'import numpy as np\n'), ((7807, 7839), 'numpy.asarray', 'np.asarray', (['x0'], {'dtype': 'np.float64'}), '(x0, dtype=np.float64)\n', (7817, 7839), True, 'import numpy as np\n'), ((9784, 9800), 'tensorflow.transpose', 'tf.transpose', (['Lz'], {}), '(Lz)\n', (9796, 9800), True, 'import tensorflow as tf\n'), ((1922, 1941), 'gpflow.transforms.Log1pe', 'transforms.Log1pe', ([], {}), '()\n', (1939, 1941), False, 'from gpflow import transforms\n'), ((2750, 2777), 'tensorflow.eye', 'tf.eye', (['M'], {'dtype': 'float_type'}), '(M, dtype=float_type)\n', (2756, 2777), True, 'import tensorflow as tf\n'), ((2836, 2867), 'tensorflow.eye', 'tf.eye', (['(M * D)'], {'dtype': 'float_type'}), '(M * D, dtype=float_type)\n', (2842, 2867), True, 'import tensorflow as tf\n'), ((4251, 4269), 'tensorflow.squeeze', 'tf.squeeze', (['self.U'], {}), '(self.U)\n', (4261, 4269), True, 'import tensorflow as tf\n'), ((5105, 5132), 'tensorflow.eye', 'tf.eye', (['M'], {'dtype': 'float_type'}), '(M, dtype=float_type)\n', (5111, 5132), True, 'import tensorflow as tf\n'), ((5191, 5222), 'tensorflow.eye', 'tf.eye', (['(M * D)'], {'dtype': 'float_type'}), '(M * D, dtype=float_type)\n', (5197, 5222), True, 'import tensorflow as tf\n'), ((9461, 9488), 'tensorflow.eye', 'tf.eye', (['M'], {'dtype': 'float_type'}), '(M, dtype=float_type)\n', (9467, 9488), True, 'import tensorflow as tf\n'), ((9548, 9579), 'tensorflow.eye', 'tf.eye', (['(M * D)'], {'dtype': 'float_type'}), '(M * D, dtype=float_type)\n', (9554, 9579), True, 'import tensorflow as tf\n'), ((10259, 10281), 'tensorflow.zeros_like', 'tf.zeros_like', (['self.Ug'], {}), '(self.Ug)\n', (10272, 10281), True, 'import tensorflow as tf\n'), ((10381, 10403), 'tensorflow.zeros_like', 'tf.zeros_like', (['self.Ug'], {}), '(self.Ug)\n', (10394, 10403), True, 'import tensorflow as tf\n'), ((3518, 3545), 'tensorflow.zeros_like', 'tf.zeros_like', (['self.U[:, 0]'], {}), '(self.U[:, 0])\n', (3531, 3545), True, 'import tensorflow as tf\n'), ((3656, 3683), 'tensorflow.zeros_like', 'tf.zeros_like', (['self.U[:, 0]'], {}), '(self.U[:, 0])\n', (3669, 3683), True, 'import tensorflow as tf\n'), ((3974, 3995), 'tensorflow.zeros_like', 'tf.zeros_like', (['self.U'], {}), '(self.U)\n', (3987, 3995), True, 'import tensorflow as tf\n'), ((4107, 4128), 'tensorflow.zeros_like', 'tf.zeros_like', (['self.U'], {}), '(self.U)\n', (4120, 4128), True, 'import tensorflow as tf\n')] |
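The f method above computes a GP conditional mean from inducing points via a Cholesky factor and triangular solves. A small NumPy/SciPy sketch of the same linear algebra for the non-whitened case, with a made-up RBF kernel and random inducing values, may make it easier to follow; in the whitened parameterization the second triangular solve is skipped, exactly as in the code.

# NumPy sketch of the GP conditional mean used in NPODE.f (non-whitened case).
import numpy as np
from scipy.linalg import cholesky, solve_triangular


def rbf(A, B, sf=1.0, ell=0.5):
    # Squared-exponential kernel; stands in for OperatorKernel with ktype="id".
    d2 = ((A[:, None, :] - B[None, :, :]) ** 2).sum(-1)
    return sf ** 2 * np.exp(-0.5 * d2 / ell ** 2)


rng = np.random.RandomState(0)
Z = rng.rand(8, 2)        # M=8 inducing locations, D=2 state dimensions
U = rng.randn(8, 2)       # inducing vectors, one per inducing location
X = rng.rand(5, 2)        # query states

Lz = cholesky(rbf(Z, Z) + 1e-6 * np.eye(8), lower=True)   # jittered Cholesky of Kzz
A = solve_triangular(Lz, rbf(Z, X), lower=True)           # Lz^{-1} Kzx
A = solve_triangular(Lz.T, A, lower=False)                # Kzz^{-1} Kzx (skipped when whitened)
f = A.T @ U                                            # 5x2 vector field evaluated at X
print(f.shape)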
"""
Some methods for kinetics.
"""
import carla
import numpy as np
import math
def get_speed(vehicle):
"""
    Get speed considering only the 2D (x, y) velocity.
"""
vel = vehicle.get_velocity()
return math.sqrt(vel.x ** 2 + vel.y ** 2) # + vel.z ** 2)
def set_vehicle_speed(vehicle, speed: float):
"""
Set vehicle to a target speed.
    The velocity vector coincides with the vehicle's x-axis.
    :param speed: target speed in m/s
    """
    # set an initial speed for the ego vehicle
transform = vehicle.get_transform()
# transform matrix from actor coord system to world system
trans_matrix = get_transform_matrix(transform) # actor2world
# target velocity in local coordinate system, in m/s
target_vel = np.array([[speed], [0.], [0.]])
# target velocity in world coordinate system
target_vel_world = np.dot(trans_matrix, target_vel)
target_vel_world = np.squeeze(target_vel_world)
# in carla.Vector3D
target_velocity = carla.Vector3D(
x=target_vel_world[0],
y=target_vel_world[1],
z=target_vel_world[2],
)
#
vehicle.set_target_velocity(target_velocity)
def angle_reg(angle):
"""
    Regularize an angle into a fixed bound.
    The default range is [-pi, pi].
"""
while True:
if -np.pi <= angle <= np.pi:
return angle
if angle < -np.pi:
angle += 2 * np.pi
else:
angle -= 2 * np.pi
def get_transform_matrix(transform: carla.Transform):
"""
    Get and parse the transformation matrix of the given transform.
    The matrix maps from the actor coordinate system to the world coordinate system.
:param transform:
:return trans_matrix: transform matrix in ndarray
"""
# original trans matrix in list
_T = transform.get_matrix()
# transform matrix from Actor system to world system
trans_matrix = np.array([[_T[0][0], _T[0][1], _T[0][2]],
[_T[1][0], _T[1][1], _T[1][2]],
[_T[2][0], _T[2][1], _T[2][2]]])
return trans_matrix
def get_inverse_transform_matrix(transform: carla.Transform):
"""
    Get the inverse transform matrix from a carla.Transform.
    The inverse transform maps from the world coordinate system to the actor coordinate system.
"""
_T = transform.get_inverse_matrix()
# transform matrix from Actor system to world system
inverse_trans_matrix = np.array([[_T[0][0], _T[0][1], _T[0][2]],
[_T[1][0], _T[1][1], _T[1][2]],
[_T[2][0], _T[2][1], _T[2][2]]])
return inverse_trans_matrix
def vector2array(vector: carla.Vector3D):
"""
Transform carla.Vector3D instance to ndarray
"""
array = np.array([vector.x, vector.y, vector.z])
return array
def get_vehicle_kinetic(vehicle: carla.Vehicle):
"""
todo unfinished
Get kinetics of ego vehicle.
todo use a class to encapsulate all methods about getting kinetics
"""
kinetic_dict = {}
transform = vehicle.get_transform()
vehicle.get_acceleration()
vehicle.get_angular_velocity()
def get_distance_along_route(wmap, route, target_location):
"""
Calculate the distance of the given location along the route
Note: If the location is not along the route, the route length will be returned
:param wmap: carla.Map of current world
:param route: list of tuples, (carla.Transform, RoadOption)
:param target_location:
"""
covered_distance = 0
prev_position = None
found = False
# Don't use the input location, use the corresponding wp as location
target_location_from_wp = wmap.get_waypoint(target_location).transform.location
for trans, _ in route:
# input route is transform
position = trans.location
location = target_location_from_wp
# Don't perform any calculations for the first route point
if not prev_position:
prev_position = position
continue
# Calculate distance between previous and current route point
interval_length_squared = ((prev_position.x - position.x) ** 2) + ((prev_position.y - position.y) ** 2)
distance_squared = ((location.x - prev_position.x) ** 2) + ((location.y - prev_position.y) ** 2)
# Close to the current position? Stop calculation
if distance_squared < 1.0:
break
if distance_squared < 400 and not distance_squared < interval_length_squared:
# Check if a neighbor lane is closer to the route
# Do this only in a close distance to correct route interval, otherwise the computation load is too high
starting_wp = wmap.get_waypoint(location)
wp = starting_wp.get_left_lane()
while wp is not None:
new_location = wp.transform.location
new_distance_squared = ((new_location.x - prev_position.x) ** 2) + (
(new_location.y - prev_position.y) ** 2)
if np.sign(starting_wp.lane_id) != np.sign(wp.lane_id):
break
if new_distance_squared < distance_squared:
distance_squared = new_distance_squared
location = new_location
else:
break
wp = wp.get_left_lane()
wp = starting_wp.get_right_lane()
while wp is not None:
new_location = wp.transform.location
new_distance_squared = ((new_location.x - prev_position.x) ** 2) + (
(new_location.y - prev_position.y) ** 2)
if np.sign(starting_wp.lane_id) != np.sign(wp.lane_id):
break
if new_distance_squared < distance_squared:
distance_squared = new_distance_squared
location = new_location
else:
break
wp = wp.get_right_lane()
if distance_squared < interval_length_squared:
# The location could be inside the current route interval, if route/lane ids match
# Note: This assumes a sufficiently small route interval
# An alternative is to compare orientations, however, this also does not work for
# long route intervals
curr_wp = wmap.get_waypoint(position)
prev_wp = wmap.get_waypoint(prev_position)
wp = wmap.get_waypoint(location)
if prev_wp and curr_wp and wp:
if wp.road_id == prev_wp.road_id or wp.road_id == curr_wp.road_id:
# Roads match, now compare the sign of the lane ids
if (np.sign(wp.lane_id) == np.sign(prev_wp.lane_id) or
np.sign(wp.lane_id) == np.sign(curr_wp.lane_id)):
# The location is within the current route interval
covered_distance += math.sqrt(distance_squared)
found = True
break
covered_distance += math.sqrt(interval_length_squared)
prev_position = position
return covered_distance, found
| [
"math.sqrt",
"numpy.array",
"numpy.sign",
"numpy.squeeze",
"numpy.dot",
"carla.Vector3D"
] | [((211, 245), 'math.sqrt', 'math.sqrt', (['(vel.x ** 2 + vel.y ** 2)'], {}), '(vel.x ** 2 + vel.y ** 2)\n', (220, 245), False, 'import math\n'), ((718, 751), 'numpy.array', 'np.array', (['[[speed], [0.0], [0.0]]'], {}), '([[speed], [0.0], [0.0]])\n', (726, 751), True, 'import numpy as np\n'), ((822, 854), 'numpy.dot', 'np.dot', (['trans_matrix', 'target_vel'], {}), '(trans_matrix, target_vel)\n', (828, 854), True, 'import numpy as np\n'), ((878, 906), 'numpy.squeeze', 'np.squeeze', (['target_vel_world'], {}), '(target_vel_world)\n', (888, 906), True, 'import numpy as np\n'), ((953, 1041), 'carla.Vector3D', 'carla.Vector3D', ([], {'x': 'target_vel_world[0]', 'y': 'target_vel_world[1]', 'z': 'target_vel_world[2]'}), '(x=target_vel_world[0], y=target_vel_world[1], z=\n target_vel_world[2])\n', (967, 1041), False, 'import carla\n'), ((1834, 1945), 'numpy.array', 'np.array', (['[[_T[0][0], _T[0][1], _T[0][2]], [_T[1][0], _T[1][1], _T[1][2]], [_T[2][0],\n _T[2][1], _T[2][2]]]'], {}), '([[_T[0][0], _T[0][1], _T[0][2]], [_T[1][0], _T[1][1], _T[1][2]], [\n _T[2][0], _T[2][1], _T[2][2]]])\n', (1842, 1945), True, 'import numpy as np\n'), ((2366, 2477), 'numpy.array', 'np.array', (['[[_T[0][0], _T[0][1], _T[0][2]], [_T[1][0], _T[1][1], _T[1][2]], [_T[2][0],\n _T[2][1], _T[2][2]]]'], {}), '([[_T[0][0], _T[0][1], _T[0][2]], [_T[1][0], _T[1][1], _T[1][2]], [\n _T[2][0], _T[2][1], _T[2][2]]])\n', (2374, 2477), True, 'import numpy as np\n'), ((2702, 2742), 'numpy.array', 'np.array', (['[vector.x, vector.y, vector.z]'], {}), '([vector.x, vector.y, vector.z])\n', (2710, 2742), True, 'import numpy as np\n'), ((7055, 7089), 'math.sqrt', 'math.sqrt', (['interval_length_squared'], {}), '(interval_length_squared)\n', (7064, 7089), False, 'import math\n'), ((4992, 5020), 'numpy.sign', 'np.sign', (['starting_wp.lane_id'], {}), '(starting_wp.lane_id)\n', (4999, 5020), True, 'import numpy as np\n'), ((5024, 5043), 'numpy.sign', 'np.sign', (['wp.lane_id'], {}), '(wp.lane_id)\n', (5031, 5043), True, 'import numpy as np\n'), ((5625, 5653), 'numpy.sign', 'np.sign', (['starting_wp.lane_id'], {}), '(starting_wp.lane_id)\n', (5632, 5653), True, 'import numpy as np\n'), ((5657, 5676), 'numpy.sign', 'np.sign', (['wp.lane_id'], {}), '(wp.lane_id)\n', (5664, 5676), True, 'import numpy as np\n'), ((6931, 6958), 'math.sqrt', 'math.sqrt', (['distance_squared'], {}), '(distance_squared)\n', (6940, 6958), False, 'import math\n'), ((6682, 6701), 'numpy.sign', 'np.sign', (['wp.lane_id'], {}), '(wp.lane_id)\n', (6689, 6701), True, 'import numpy as np\n'), ((6705, 6729), 'numpy.sign', 'np.sign', (['prev_wp.lane_id'], {}), '(prev_wp.lane_id)\n', (6712, 6729), True, 'import numpy as np\n'), ((6761, 6780), 'numpy.sign', 'np.sign', (['wp.lane_id'], {}), '(wp.lane_id)\n', (6768, 6780), True, 'import numpy as np\n'), ((6784, 6808), 'numpy.sign', 'np.sign', (['curr_wp.lane_id'], {}), '(curr_wp.lane_id)\n', (6791, 6808), True, 'import numpy as np\n')] |
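set_vehicle_speed above rotates a body-frame velocity (speed along the vehicle's x-axis) into the world frame with the actor's transform matrix. Below is a CARLA-free NumPy sketch of the same idea with a plain yaw rotation; the 30-degree heading is arbitrary.

# NumPy sketch: rotate a body-frame velocity into the world frame (yaw-only rotation).
import math
import numpy as np

yaw = math.radians(30.0)                       # arbitrary heading, for illustration only
rot = np.array([[math.cos(yaw), -math.sin(yaw), 0.0],
                [math.sin(yaw),  math.cos(yaw), 0.0],
                [0.0,            0.0,           1.0]])

speed = 5.0                                    # m/s along the vehicle's x-axis
vel_local = np.array([[speed], [0.0], [0.0]])      # same layout as in set_vehicle_speed
vel_world = rot @ vel_local                    # plays the role of trans_matrix there
print(np.squeeze(vel_world))                   # approximately [4.33, 2.5, 0.0]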
import MFRC522
import RPi.GPIO as GPIO
class SimpleMFRC522:
READER = None;
TAG = { 'id' : None, 'text' : ''};
KEY = [0xFF,0xFF,0xFF,0xFF,0xFF,0xFF]
def __init__(self):
self.READER = MFRC522.MFRC522()
def read(self):
tag = self.read_no_block()
while not tag:
tag = self.read_no_block()
return tag
def read_no_block(self):
(status, TagType) = self.READER.MFRC522_Request(self.READER.PICC_REQIDL)
if status != self.READER.MI_OK:
return None
(status, uid) = self.READER.MFRC522_Anticoll()
if status != self.READER.MI_OK:
return None
self.TAG['id'] = self.uid_to_num(uid)
self.READER.MFRC522_SelectTag(uid)
status = self.READER.MFRC522_Auth(self.READER.PICC_AUTHENT1A, 8, self.KEY, uid)
if status == self.READER.MI_OK:
text = self.READER.MFRC522_Read(8)
if text:
self.TAG['text'] = ''.join(chr(i) for i in text)
self.READER.MFRC522_StopCrypto1()
return self.TAG
def write(self, sector, text):
tag = self.write_no_block(8, text)
while not tag:
tag = self.write_no_block(8, text)
return tag
def write_no_block(self, sector, text):
(status, TagType) = self.READER.MFRC522_Request(self.READER.PICC_REQIDL)
if status != self.READER.MI_OK:
return None
(status, uid) = self.READER.MFRC522_Anticoll()
if status != self.READER.MI_OK:
return None
self.TAG['id'] = self.uid_to_num(uid)
self.READER.MFRC522_SelectTag(uid)
status = self.READER.MFRC522_Auth(self.READER.PICC_AUTHENT1A, 8, self.KEY, uid)
self.READER.MFRC522_Read(8)
if status == self.READER.MI_OK:
data = bytearray()
data.extend(text.ljust(16))
self.READER.MFRC522_Write(8, data)
text = self.READER.MFRC522_Read(8)
if text:
self.TAG['text'] = ''.join(chr(i) for i in text)
self.READER.MFRC522_StopCrypto1()
return self.TAG
def uid_to_num(self, uid):
n = 0
for i in range(0, 5):
n = n * 256 + uid[i]
return n
| [
"MFRC522.MFRC522"
] | [((211, 228), 'MFRC522.MFRC522', 'MFRC522.MFRC522', ([], {}), '()\n', (226, 228), False, 'import MFRC522\n')] |
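A typical usage sketch for the SimpleMFRC522 wrapper above. It only runs on a Raspberry Pi with an MFRC522 reader wired up and the MFRC522 library installed; the import path for the class is an assumption, and GPIO.cleanup() relies on the underlying library having configured the GPIO pins.

# Usage sketch; needs a Raspberry Pi with an MFRC522 reader attached.
import RPi.GPIO as GPIO
from SimpleMFRC522 import SimpleMFRC522  # assumed module/file name for the class above

reader = SimpleMFRC522()
try:
    print("Hold a tag near the reader...")
    tag = reader.read()                   # blocks until a tag is present
    print("id:", tag['id'], "text:", tag['text'])
    reader.write(8, "hello tag")          # writes up to 16 characters to block 8
finally:
    GPIO.cleanup()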
import os
import glob
import json
import logging as lg
from pathlib import Path
from datetime import date, datetime
import yaml
import requests
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.dates import date2num, DateFormatter
import matplotlib.transforms as transforms
from jinja2 import Environment, FileSystemLoader
from jhu_handler import melt_data, get_jhu_stats, get_india_stats_from_jhu
from mohfw_handler import mohfw_data_to_df, add_lat_lon, get_mohfw_stats, extract_clean_df
from chloropleth import make_chloropleth_json
from clean import add_clean_state_data
lg.basicConfig(level=lg.DEBUG, format=("[%(asctime)s] [%(levelname)8s] %(filename)s - %(message)s"), datefmt="%d-%b-%Y %I:%M:%S %p")#, filename='log.txt', filemode='a+'
template_loader = FileSystemLoader('./templates')
template_env = Environment(loader=template_loader)
TEMPLATE = "template.html"
template = template_env.get_template(TEMPLATE)
sns.set(style="ticks")
sns.set_context("paper", rc={"font.size":8,"axes.titlesize":9,"axes.labelsize":10,"lines.linewidth": 1.5,'lines.markersize':3})#paper,talk,notebook
fig, ax = plt.subplots()
covid_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid-data', 'csse_covid_19_data', 'csse_covid_19_time_series')
cases_path = os.path.join(covid_data_path, 'time_series_covid19_confirmed_global.csv')
recoveries_path = os.path.join(covid_data_path, 'time_series_covid19_recovered_global.csv')
deaths_path = os.path.join(covid_data_path, 'time_series_covid19_deaths_global.csv')
Path(os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'timeseries_records')).mkdir(parents=True, exist_ok=True)
Path(os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'statewise_distribution')).mkdir(parents=True, exist_ok=True)
mohfw_data_df = mohfw_data_to_df()
table_df = extract_clean_df(mohfw_data_df)
table_df = add_lat_lon(table_df)
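# Pipeline (as suggested by the helper names): pull the MoHFW statewise table into a
# DataFrame, tidy it, then attach lat/lon columns so the statewise CSV written below
# can drive the map/chloropleth step.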
#print("Table DF")
#print(table_df)
if not table_df.empty:
table_df.to_csv(f'./datasets/statewise_distribution/{str(date.today())}.csv', sep=',', encoding='utf-8', index=False)
else:
lg.warning("Failed to write statewise distribution file. Map will use old file even though new data is available")
in_cases_df, in_recoveries_df, in_deaths_df = get_india_stats_from_jhu(cases_path, recoveries_path, deaths_path)
# Transforming data to a format lineplot likes
final_df = melt_data(in_cases_df, in_deaths_df, in_recoveries_df)
final_df.to_csv(f'./datasets/timeseries_records/cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False)
## Using data that is larger
live_cases = in_cases_df
live_recoveries = in_recoveries_df
live_deaths = in_deaths_df
date_today_str = date.today().strftime("%-m/%-d/%y")
print(f"Today's date is = {date_today_str}")
date_today = date.today()
print(date_today)
#check date in index
live_cases_latest_date = live_cases.columns[-1]
live_recoveries_latest_date = live_recoveries.columns[-1]
live_deaths_latest_date = live_deaths.columns[-1]
#get today's stats from mohfw
mohfw_stats = get_mohfw_stats(table_df)
print(mohfw_stats)
#compare dates
live_cases_latest_date = datetime.strptime(live_cases_latest_date, "%m/%d/%y").date()
live_recoveries_latest_date = datetime.strptime(live_recoveries_latest_date, "%m/%d/%y").date()
live_deaths_latest_date = datetime.strptime(live_deaths_latest_date, "%m/%d/%y").date()
print(live_cases_latest_date, live_recoveries_latest_date, live_deaths_latest_date)
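# Keep whichever source reports the larger (i.e. fresher) figure: if MoHFW is ahead of
# the latest JHU column, append a new column for today or overwrite the most recent one.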
if date_today > live_cases_latest_date:
if mohfw_stats['in_stats']['cases'] > int(live_cases.iloc[:,-1:].iloc[0]):
print(mohfw_stats['in_stats']['cases'], int(live_cases.iloc[:,-1:].iloc[0]))
live_cases[date_today_str] = mohfw_stats['in_stats']['cases']# new column in live with mohfw value
elif date_today == live_cases_latest_date:
if mohfw_stats['in_stats']['cases'] > int(live_cases.iloc[:,-1:].iloc[0]):
        live_cases.iloc[0, -1] = mohfw_stats['in_stats']['cases']  # avoid chained assignment on a slice copy
if date_today > live_recoveries_latest_date:
print(mohfw_stats['in_stats']['recovered'], int(live_recoveries.iloc[:,-1:].iloc[0]))
if mohfw_stats['in_stats']['recovered'] > int(live_recoveries.iloc[:,-1:].iloc[0]):
live_recoveries[date_today_str] = mohfw_stats['in_stats']['recovered']
elif date_today == live_recoveries_latest_date:
if mohfw_stats['in_stats']['recovered'] > int(live_recoveries.iloc[:,-1:].iloc[0]):
        live_recoveries.iloc[0, -1] = mohfw_stats['in_stats']['recovered']  # avoid chained assignment on a slice copy
if date_today > live_deaths_latest_date:
if mohfw_stats['in_stats']['deaths'] > int(live_deaths.iloc[:,-1:].iloc[0]):
live_deaths[date_today_str] = mohfw_stats['in_stats']['deaths']
elif date_today == live_deaths_latest_date:
if mohfw_stats['in_stats']['deaths'] > int(live_deaths.iloc[:,-1:].iloc[0]):
        live_deaths.iloc[0, -1] = mohfw_stats['in_stats']['deaths']  # avoid chained assignment on a slice copy
print(live_cases)
print(live_deaths)
print(live_recoveries)
plot_df = melt_data(live_cases, live_deaths, live_recoveries)
#plot_df['index'] = plot_df['index'].apply(lambda x: datetime.strptime(x, '%m/%d/%y'))
plot_df.to_csv(f'./datasets/timeseries_records/live_cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False)
jhu_df = melt_data(in_cases_df, in_deaths_df, in_recoveries_df)
#jhu_df['index'] = jhu_df['index'].apply(lambda x: datetime.strptime(x, '%m/%d/%y'))
jhu_df.to_csv(f'./datasets/timeseries_records/cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False)
# Make plot
ax = plt.axes()
kwargs = {'markeredgewidth': 0.25}
sns.lineplot(x='index', y='value', hue='category', hue_order=['cases', 'recoveries', 'deaths'], style='category', palette={'cases': 'Red', 'recoveries': 'Green', 'deaths': 'Gray'}, dashes=False, data=plot_df, markers={'deaths': 'X', 'recoveries': 'd', 'cases': 'o'}, ax=ax, **kwargs)
# Draw horizontal lines at max values
cases_max = int(plot_df['value'].where(plot_df['category'] == 'cases').max())
deaths_max = int(plot_df['value'].where(plot_df['category'] == 'deaths').max())
recoveries_max = int(plot_df['value'].where(plot_df['category'] == 'recoveries').max())
ax.axhline(cases_max, ls='dotted', linewidth=0.5)
ax.axhline(deaths_max, ls='dotted', linewidth=0.5)
ax.axhline(recoveries_max, ls='dotted', linewidth=0.5)
#'-', '--', '-.', ':', 'None', ' ', '', 'solid', 'dashed', 'dashdot', 'dotted'
plt.title('COVID-19 Cases, Recoveries & Deaths Graph')
ax.set(xlabel='Time ->', ylabel='Cases / Deaths')
ax.xaxis.label.set_visible(False)
ax.yaxis.label.set_visible(False)
ax.legend(labels=['Confirmed Cases', 'Recoveries', 'Deaths'], frameon=False)#loc='upper left'
myFmt = DateFormatter("%d %b") #myFmt = DateFormatter("%d %b %y")
ax.xaxis.set_major_formatter(myFmt)
#ax.set(xticks=final_df['index'].values)
ax.grid(color='#f3f3f3', linestyle=':', linewidth=0.5)##cdcdcd #f3f3f3 #D3D3D3
ratio = 0.5
ax.set_aspect(1.0/ax.get_data_ratio()*ratio)
plt.xticks(fontsize=5, rotation=0)#, ha='right')
#plt.yticks(fontsize=6)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
#ax.spines['left'].set_edgecolor('gray')
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.tick_params(axis="x", direction='in', length=3, width=0.5)
ax.get_yaxis().set_visible(False)
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['left'].set_linewidth(0.5)
#trans = transforms.blended_transform_factory(ax.get_yticklabels()[0].get_transform(), ax.transData)
#ax.text(0, cases_max, color="red", s=cases_max, transform=trans, ha="right", va="center")
#ax.text(0, deaths_max, color="red", s=deaths_max, transform=trans, ha="right", va="center")
ax.text(0.01, cases_max, cases_max, color="red", transform=ax.get_yaxis_transform(), ha="left", va="bottom")
ax.text(0.01, deaths_max, deaths_max, color="red", transform=ax.get_yaxis_transform(), ha="left", va="bottom")
ax.text(0.01, recoveries_max, recoveries_max, color="green", transform=ax.get_yaxis_transform(), ha="left", va="bottom")
#ax.annotate(cases_max, [ax.get_xticks()[-1], cases_max], va='bottom', ha='right', color='red')
#ax.annotate(deaths_max, [ax.get_xticks()[-1], deaths_max], va='bottom', ha='left', color='red')
xt = ax.get_xticks().tolist()
last_x_tick = date2num(plot_df['index'].values[-1])
if xt[-1] > last_x_tick:
xt.pop(-1)
else:
if abs(xt[-1] - last_x_tick) < (xt[1] - xt[0])/2:
xt.pop(-1)
#xt = np.append(xt, last_x_tick)
xt.append(last_x_tick)
#xtl = xt.tolist()
ax.set_xticks(xt)
ax.axvline(last_x_tick, ls='dotted', linewidth=0.5)
plt.savefig("graph.svg", format='svg', dpi=1200, bbox_inches='tight')
#plt.show()
# Make index.html
# accquire latest statistics
covid_daily_reports_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid-data', 'csse_covid_19_data', 'csse_covid_19_daily_reports')
jhu_stats = get_jhu_stats(covid_daily_reports_path)
#Compare JHU Stats with MoHFW stats for india
if mohfw_stats['in_stats']['cases'] > jhu_stats['in_stats']['cases']:
in_cases_greater = mohfw_stats['in_stats']['cases']
else:
in_cases_greater = jhu_stats['in_stats']['cases']
if mohfw_stats['in_stats']['deaths'] > jhu_stats['in_stats']['deaths']:
in_deaths_greater = mohfw_stats['in_stats']['deaths']
else:
in_deaths_greater = jhu_stats['in_stats']['deaths']
if mohfw_stats['in_stats']['recovered'] > jhu_stats['in_stats']['recovered']:
in_recovered_greater = mohfw_stats['in_stats']['recovered']
else:
in_recovered_greater = jhu_stats['in_stats']['recovered']
#world stats
w_confirmed = jhu_stats['w_stats']['cases']
w_deaths = jhu_stats['w_stats']['deaths']
w_recovered = jhu_stats['w_stats']['recovered']
## read resource yaml
with open('resources.yaml') as fs:
resources = yaml.load(fs, yaml.SafeLoader)
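# resources.yaml is expected to provide at least the top-level sections referenced
# below: 'SAFETY & PREVENTION', 'Virus & the Disease', 'Fads, Fake News & Scams'
# and 'Miscellaneous'.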
# add clean datasets
state_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets')
print("adding clean datasets")
add_clean_state_data(state_data_path)
#clean_state_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'clean_daily_statewise_distribution')
#map_json = make_chloropleth_json(clean_state_data_path)
# Get ready to pass data to template
stats_dict = {'w_cases': w_confirmed, 'w_deaths': w_deaths, 'w_recovered': w_recovered, 'i_cases': in_cases_greater, 'i_deaths': in_deaths_greater , 'i_recovered': in_recovered_greater}
commit_info_dict = {'current_time': datetime.now().strftime("%B %d, %Y at %I:%M %p"), 'commit_sha': os.environ['GITHUB_SHA']}
state_info = {'link': f"https://github.com/armsp/covid19.in/blob/master/datasets/statewise_distribution/{str(date.today())}.csv"}
namespace = {'statistics': stats_dict, 'safety_resources': resources['SAFETY & PREVENTION'], 'about': resources['Virus & the Disease'], 'fakes': resources['Fads, Fake News & Scams'], 'misc': resources['Miscellaneous'], 'commit_info': commit_info_dict, 'state_info': state_info}
#,'c_map': map_json
rendered_html = template.render(**namespace)
with open("index.html", "w+") as f:
f.write(rendered_html) | [
"matplotlib.pyplot.title",
"seaborn.lineplot",
"mohfw_handler.mohfw_data_to_df",
"yaml.load",
"matplotlib.pyplot.axes",
"matplotlib.dates.date2num",
"os.path.join",
"logging.warning",
"mohfw_handler.get_mohfw_stats",
"jinja2.FileSystemLoader",
"jinja2.Environment",
"matplotlib.dates.DateFormatter",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"mohfw_handler.add_lat_lon",
"seaborn.set",
"seaborn.set_context",
"datetime.datetime.now",
"datetime.date.today",
"datetime.datetime.strptime",
"jhu_handler.get_india_stats_from_jhu",
"logging.basicConfig",
"jhu_handler.melt_data",
"clean.add_clean_state_data",
"mohfw_handler.extract_clean_df",
"jhu_handler.get_jhu_stats",
"matplotlib.pyplot.savefig"
] | [((636, 776), 'logging.basicConfig', 'lg.basicConfig', ([], {'level': 'lg.DEBUG', 'format': '"""[%(asctime)s] [%(levelname)8s] %(filename)s - %(message)s"""', 'datefmt': '"""%d-%b-%Y %I:%M:%S %p"""'}), "(level=lg.DEBUG, format=\n '[%(asctime)s] [%(levelname)8s] %(filename)s - %(message)s', datefmt=\n '%d-%b-%Y %I:%M:%S %p')\n", (650, 776), True, 'import logging as lg\n'), ((823, 854), 'jinja2.FileSystemLoader', 'FileSystemLoader', (['"""./templates"""'], {}), "('./templates')\n", (839, 854), False, 'from jinja2 import Environment, FileSystemLoader\n'), ((870, 905), 'jinja2.Environment', 'Environment', ([], {'loader': 'template_loader'}), '(loader=template_loader)\n', (881, 905), False, 'from jinja2 import Environment, FileSystemLoader\n'), ((981, 1003), 'seaborn.set', 'sns.set', ([], {'style': '"""ticks"""'}), "(style='ticks')\n", (988, 1003), True, 'import seaborn as sns\n'), ((1004, 1143), 'seaborn.set_context', 'sns.set_context', (['"""paper"""'], {'rc': "{'font.size': 8, 'axes.titlesize': 9, 'axes.labelsize': 10,\n 'lines.linewidth': 1.5, 'lines.markersize': 3}"}), "('paper', rc={'font.size': 8, 'axes.titlesize': 9,\n 'axes.labelsize': 10, 'lines.linewidth': 1.5, 'lines.markersize': 3})\n", (1019, 1143), True, 'import seaborn as sns\n'), ((1162, 1176), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1174, 1176), True, 'import matplotlib.pyplot as plt\n'), ((1196, 1309), 'os.path.join', 'os.path.join', (["os.environ['GITHUB_WORKSPACE']", '"""covid-data"""', '"""csse_covid_19_data"""', '"""csse_covid_19_time_series"""'], {}), "(os.environ['GITHUB_WORKSPACE'], 'covid-data',\n 'csse_covid_19_data', 'csse_covid_19_time_series')\n", (1208, 1309), False, 'import os\n'), ((1320, 1393), 'os.path.join', 'os.path.join', (['covid_data_path', '"""time_series_covid19_confirmed_global.csv"""'], {}), "(covid_data_path, 'time_series_covid19_confirmed_global.csv')\n", (1332, 1393), False, 'import os\n'), ((1412, 1485), 'os.path.join', 'os.path.join', (['covid_data_path', '"""time_series_covid19_recovered_global.csv"""'], {}), "(covid_data_path, 'time_series_covid19_recovered_global.csv')\n", (1424, 1485), False, 'import os\n'), ((1500, 1570), 'os.path.join', 'os.path.join', (['covid_data_path', '"""time_series_covid19_deaths_global.csv"""'], {}), "(covid_data_path, 'time_series_covid19_deaths_global.csv')\n", (1512, 1570), False, 'import os\n'), ((1861, 1879), 'mohfw_handler.mohfw_data_to_df', 'mohfw_data_to_df', ([], {}), '()\n', (1877, 1879), False, 'from mohfw_handler import mohfw_data_to_df, add_lat_lon, get_mohfw_stats, extract_clean_df\n'), ((1891, 1922), 'mohfw_handler.extract_clean_df', 'extract_clean_df', (['mohfw_data_df'], {}), '(mohfw_data_df)\n', (1907, 1922), False, 'from mohfw_handler import mohfw_data_to_df, add_lat_lon, get_mohfw_stats, extract_clean_df\n'), ((1934, 1955), 'mohfw_handler.add_lat_lon', 'add_lat_lon', (['table_df'], {}), '(table_df)\n', (1945, 1955), False, 'from mohfw_handler import mohfw_data_to_df, add_lat_lon, get_mohfw_stats, extract_clean_df\n'), ((2309, 2375), 'jhu_handler.get_india_stats_from_jhu', 'get_india_stats_from_jhu', (['cases_path', 'recoveries_path', 'deaths_path'], {}), '(cases_path, recoveries_path, deaths_path)\n', (2333, 2375), False, 'from jhu_handler import melt_data, get_jhu_stats, get_india_stats_from_jhu\n'), ((2434, 2488), 'jhu_handler.melt_data', 'melt_data', (['in_cases_df', 'in_deaths_df', 'in_recoveries_df'], {}), '(in_cases_df, in_deaths_df, in_recoveries_df)\n', (2443, 2488), False, 'from jhu_handler import melt_data, 
get_jhu_stats, get_india_stats_from_jhu\n'), ((2847, 2859), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2857, 2859), False, 'from datetime import date, datetime\n'), ((3099, 3124), 'mohfw_handler.get_mohfw_stats', 'get_mohfw_stats', (['table_df'], {}), '(table_df)\n', (3114, 3124), False, 'from mohfw_handler import mohfw_data_to_df, add_lat_lon, get_mohfw_stats, extract_clean_df\n'), ((5008, 5059), 'jhu_handler.melt_data', 'melt_data', (['live_cases', 'live_deaths', 'live_recoveries'], {}), '(live_cases, live_deaths, live_recoveries)\n', (5017, 5059), False, 'from jhu_handler import melt_data, get_jhu_stats, get_india_stats_from_jhu\n'), ((5290, 5344), 'jhu_handler.melt_data', 'melt_data', (['in_cases_df', 'in_deaths_df', 'in_recoveries_df'], {}), '(in_cases_df, in_deaths_df, in_recoveries_df)\n', (5299, 5344), False, 'from jhu_handler import melt_data, get_jhu_stats, get_india_stats_from_jhu\n'), ((5575, 5585), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (5583, 5585), True, 'import matplotlib.pyplot as plt\n'), ((5621, 5916), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""index"""', 'y': '"""value"""', 'hue': '"""category"""', 'hue_order': "['cases', 'recoveries', 'deaths']", 'style': '"""category"""', 'palette': "{'cases': 'Red', 'recoveries': 'Green', 'deaths': 'Gray'}", 'dashes': '(False)', 'data': 'plot_df', 'markers': "{'deaths': 'X', 'recoveries': 'd', 'cases': 'o'}", 'ax': 'ax'}), "(x='index', y='value', hue='category', hue_order=['cases',\n 'recoveries', 'deaths'], style='category', palette={'cases': 'Red',\n 'recoveries': 'Green', 'deaths': 'Gray'}, dashes=False, data=plot_df,\n markers={'deaths': 'X', 'recoveries': 'd', 'cases': 'o'}, ax=ax, **kwargs)\n", (5633, 5916), True, 'import seaborn as sns\n'), ((6425, 6479), 'matplotlib.pyplot.title', 'plt.title', (['"""COVID-19 Cases, Recoveries & Deaths Graph"""'], {}), "('COVID-19 Cases, Recoveries & Deaths Graph')\n", (6434, 6479), True, 'import matplotlib.pyplot as plt\n'), ((6700, 6722), 'matplotlib.dates.DateFormatter', 'DateFormatter', (['"""%d %b"""'], {}), "('%d %b')\n", (6713, 6722), False, 'from matplotlib.dates import date2num, DateFormatter\n'), ((6971, 7005), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(5)', 'rotation': '(0)'}), '(fontsize=5, rotation=0)\n', (6981, 7005), True, 'import matplotlib.pyplot as plt\n'), ((8252, 8289), 'matplotlib.dates.date2num', 'date2num', (["plot_df['index'].values[-1]"], {}), "(plot_df['index'].values[-1])\n", (8260, 8289), False, 'from matplotlib.dates import date2num, DateFormatter\n'), ((8555, 8624), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""graph.svg"""'], {'format': '"""svg"""', 'dpi': '(1200)', 'bbox_inches': '"""tight"""'}), "('graph.svg', format='svg', dpi=1200, bbox_inches='tight')\n", (8566, 8624), True, 'import matplotlib.pyplot as plt\n'), ((8712, 8827), 'os.path.join', 'os.path.join', (["os.environ['GITHUB_WORKSPACE']", '"""covid-data"""', '"""csse_covid_19_data"""', '"""csse_covid_19_daily_reports"""'], {}), "(os.environ['GITHUB_WORKSPACE'], 'covid-data',\n 'csse_covid_19_data', 'csse_covid_19_daily_reports')\n", (8724, 8827), False, 'import os\n'), ((8836, 8875), 'jhu_handler.get_jhu_stats', 'get_jhu_stats', (['covid_daily_reports_path'], {}), '(covid_daily_reports_path)\n', (8849, 8875), False, 'from jhu_handler import melt_data, get_jhu_stats, get_india_stats_from_jhu\n'), ((9805, 9875), 'os.path.join', 'os.path.join', (["os.environ['GITHUB_WORKSPACE']", '"""covid19-in"""', '"""datasets"""'], {}), 
"(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets')\n", (9817, 9875), False, 'import os\n'), ((9907, 9944), 'clean.add_clean_state_data', 'add_clean_state_data', (['state_data_path'], {}), '(state_data_path)\n', (9927, 9944), False, 'from clean import add_clean_state_data\n'), ((2147, 2271), 'logging.warning', 'lg.warning', (['"""Failed to write statewise distribution file. Map will use old file even though new data is available"""'], {}), "(\n 'Failed to write statewise distribution file. Map will use old file even though new data is available'\n )\n", (2157, 2271), True, 'import logging as lg\n'), ((9734, 9764), 'yaml.load', 'yaml.load', (['fs', 'yaml.SafeLoader'], {}), '(fs, yaml.SafeLoader)\n', (9743, 9764), False, 'import yaml\n'), ((2753, 2765), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2763, 2765), False, 'from datetime import date, datetime\n'), ((3184, 3237), 'datetime.datetime.strptime', 'datetime.strptime', (['live_cases_latest_date', '"""%m/%d/%y"""'], {}), "(live_cases_latest_date, '%m/%d/%y')\n", (3201, 3237), False, 'from datetime import date, datetime\n'), ((3275, 3333), 'datetime.datetime.strptime', 'datetime.strptime', (['live_recoveries_latest_date', '"""%m/%d/%y"""'], {}), "(live_recoveries_latest_date, '%m/%d/%y')\n", (3292, 3333), False, 'from datetime import date, datetime\n'), ((3367, 3421), 'datetime.datetime.strptime', 'datetime.strptime', (['live_deaths_latest_date', '"""%m/%d/%y"""'], {}), "(live_deaths_latest_date, '%m/%d/%y')\n", (3384, 3421), False, 'from datetime import date, datetime\n'), ((1577, 1673), 'os.path.join', 'os.path.join', (["os.environ['GITHUB_WORKSPACE']", '"""covid19-in"""', '"""datasets"""', '"""timeseries_records"""'], {}), "(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets',\n 'timeseries_records')\n", (1589, 1673), False, 'import os\n'), ((1711, 1811), 'os.path.join', 'os.path.join', (["os.environ['GITHUB_WORKSPACE']", '"""covid19-in"""', '"""datasets"""', '"""statewise_distribution"""'], {}), "(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets',\n 'statewise_distribution')\n", (1723, 1811), False, 'import os\n'), ((10398, 10412), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10410, 10412), False, 'from datetime import date, datetime\n'), ((10598, 10610), 'datetime.date.today', 'date.today', ([], {}), '()\n', (10608, 10610), False, 'from datetime import date, datetime\n'), ((2076, 2088), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2086, 2088), False, 'from datetime import date, datetime\n')] |
import requests
from requests.compat import urljoin
import json
import os
import datetime
# https://www.covid19api.dev/#intro
# creating a static dictionary for all the month in the 3 letter format. as this is the only
# sure way of getting it correct without having to do a lot of date parsing.
months = {
1: "jan",
2: "feb",
3: "mar",
4: "apr",
5: "may",
6: "jun",
7: "jul",
8: "aug",
9: "sep",
10: "oct",
11: "nov",
12: "dec"
}
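# Illustrative usage: months[3] -> "mar"; get_global_daily_report() interpolates this
# three-letter form (together with the year) into the covid19api.dev endpoint template.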
# create a global variable for the bearer token.
# what is a bearer token? in simple words, it is a token we get when we authenticate with the server. when we send it back with each later request (in the Authorization header), the server knows who is making the call and whether that caller may use the endpoint.
bearer_token = ""
api_def = None
def read_api_def():
# we store this API definition in a file just so that whenever there is a change to the API we don't have to touch the code. We can just
# change the API endpoint, and the code should work the same way.
global api_def
# read this API definition from the file in the config folder, and then store it for later use.
api_def_file_path = os.path.join(
os.path.dirname(__file__), "config", "api-def.json")
with open(api_def_file_path, "r") as f:
api_def = json.load(f)
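# Illustrative shape of config/api-def.json, inferred from the keys read below; the
# values are placeholders, not real endpoints or credentials:
# {
#   "root_url": "<API root URL>",
#   "username": "<username>",
#   "password": "<password>",
#   "api_defs": {
#     "gen_token": "<token-generation endpoint>",
#     "global_daily_reports": "<daily-report endpoint containing {mon} and {yyyy}>"
#   }
# }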
def generate_token(force=False):
# The covid 19 tracking API we want to use requires us to authenticate with some form of username and password. To this request,
# the API returns a bearer token, which in simpler terms is a way for it to know who is making a request and if that person can
# use that endpoint. It also helps keep a track of the number of requests a user has made and also manage telemetry.
global bearer_token
# the token that the server sends us has a lifetime of ~55 hours. Hence, we don't need to regenerate it. We can just store the token
# and load it the next time we bring up our script. However, it is to be noted that once 55 hours are up, we need to regenerate the token.
# you can write some code trivially by storing the date and time the token was generated on in the json file itself, and then using it with the code below
# to check if the token present is valid or not. If it is not, then you can refresh it. See a simple example below
token_file_path = os.path.join(
os.path.dirname(__file__), "config", "token.json")
# check if we have a valid token already in the file
if force == False and os.path.exists(token_file_path):
with open(token_file_path) as token_file:
# is the time difference between now and the date time the token was fetched > 50 hours? if no, then continue using this token
token_details = json.load(token_file)
token_load_dt_tm = datetime.datetime.strptime(
token_details["timestamp"], "%m/%d/%Y, %H:%M:%S")
            if (datetime.datetime.now() - token_load_dt_tm).total_seconds() < (50 * 3600):  # .seconds alone wraps at 24h and would never reach the 50h threshold
return
# okay we either need to fetch a token from scratch or need a new one since the old one expired
auth_params = {
"username": api_def["username"],
"password": api_def["password"]
}
auth_response = requests.post(
url=urljoin(api_def["root_url"], api_def["api_defs"]["gen_token"]), data=auth_params)
# please take a look at REST response codes - https://developer.mozilla.org/en-US/docs/Web/HTTP/Status
if auth_response.status_code == 200:
bearer_token = json.loads(
auth_response.content.decode("utf-8"))["Document"]
auth_token = {
"token": bearer_token,
"timestamp": datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
}
with open(token_file_path, "w") as f:
json.dump(auth_token, f, indent=4)
else:
print("A problem occurred. Code: {}, Message: {}".format(
auth_response.status_code, auth_response.content.decode("utf-8")))
raise Exception("Problem with auth")
def get_global_daily_report(month: int, year: int):
# get today's date information
today = datetime.datetime.today()
# check if the month is valid
if month not in months.keys():
raise Exception(
"Invalid month range! please choose a month range between 1-12")
# check if the date range supplied actually makes sense. Covid data is tabulated from Jan 2020 till today.
if year < 2020 or year > today.year or (year == today.year and month > today.month):
raise Exception(
"Invalid date range! No valid data prior to Jan 2020 or in the future. Please choose a month and year between and including, Jan 2020 and current month and year")
    # connect to the server to get the data. we also need to send the bearer token in the Authorization header so the API accepts the request.
api_req_param = api_def["api_defs"]["global_daily_reports"].format(
mon=str(months[month]), yyyy=year)
auth_token = {
"Authorization": "Bearer {0}".format(bearer_token)
}
stats_response = requests.get(
url=urljoin(api_def["root_url"], api_req_param), headers=auth_token)
return (
stats_response.status_code,
stats_response.content.decode("utf-8")
)
def init():
read_api_def()
generate_token()
def main(mon=0, year=0):
init()
mon = datetime.datetime.today().month if mon == 0 else mon
year = datetime.datetime.today().year if year == 0 else year
response = get_global_daily_report(mon, year)
if response[0] == 200:
with open(os.path.join(os.path.dirname(__file__), "data", "data_{}{}.json".format(mon, year)), "w") as f:
json.dump(json.loads(response[1]), f, indent=4)
if __name__ == "__main__":
main()
| [
"json.dump",
"json.load",
"datetime.datetime.today",
"json.loads",
"os.path.dirname",
"os.path.exists",
"requests.compat.urljoin",
"datetime.datetime.strptime",
"datetime.datetime.now"
] | [((4043, 4068), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (4066, 4068), False, 'import datetime\n'), ((1103, 1128), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1118, 1128), False, 'import os\n'), ((1218, 1230), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1227, 1230), False, 'import json\n'), ((2279, 2304), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2294, 2304), False, 'import os\n'), ((2413, 2444), 'os.path.exists', 'os.path.exists', (['token_file_path'], {}), '(token_file_path)\n', (2427, 2444), False, 'import os\n'), ((2663, 2684), 'json.load', 'json.load', (['token_file'], {}), '(token_file)\n', (2672, 2684), False, 'import json\n'), ((2716, 2792), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["token_details['timestamp']", '"""%m/%d/%Y, %H:%M:%S"""'], {}), "(token_details['timestamp'], '%m/%d/%Y, %H:%M:%S')\n", (2742, 2792), False, 'import datetime\n'), ((3173, 3235), 'requests.compat.urljoin', 'urljoin', (["api_def['root_url']", "api_def['api_defs']['gen_token']"], {}), "(api_def['root_url'], api_def['api_defs']['gen_token'])\n", (3180, 3235), False, 'from requests.compat import urljoin\n'), ((3707, 3741), 'json.dump', 'json.dump', (['auth_token', 'f'], {'indent': '(4)'}), '(auth_token, f, indent=4)\n', (3716, 3741), False, 'import json\n'), ((4955, 4998), 'requests.compat.urljoin', 'urljoin', (["api_def['root_url']", 'api_req_param'], {}), "(api_def['root_url'], api_req_param)\n", (4962, 4998), False, 'from requests.compat import urljoin\n'), ((5225, 5250), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (5248, 5250), False, 'import datetime\n'), ((5289, 5314), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (5312, 5314), False, 'import datetime\n'), ((5556, 5579), 'json.loads', 'json.loads', (['response[1]'], {}), '(response[1])\n', (5566, 5579), False, 'import json\n'), ((3584, 3607), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3605, 3607), False, 'import datetime\n'), ((5451, 5476), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5466, 5476), False, 'import os\n'), ((2827, 2850), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2848, 2850), False, 'import datetime\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 <NAME> <<EMAIL>>
__version__ = '0.2.5a'
__date__ = '2016-08-11'
__author__ = '<NAME> <<EMAIL>>'
__license__ = 'MIT'
import pprint
from hornet import *
from hornet.symbols import (
side, left, right, wing, segment, segments, section, sections, point,
Side, Id, S, Ss, W
)
def make_wing(db):
db.tell(
wing(Side, wing(side(Side), Ss)) <<
segments(Side, Ss),
segments(Side, segments(Ss)) <<
findall(segment(Id, S), segment(Side, Id) & sections(Id, S), Ss),
sections(Id, sections(Ss)) <<
findall(section(S), section(Id, S), Ss),
segment(left, 1),
segment(left, 2),
segment(right, 3),
segment(right, 4),
section(1, [point(1, 2), point(3, 4)]),
section(1, [point(5, 6), point(7, 8)]),
section(2, [point(2, 3), point(4, 5)]),
section(2, [point(6, 7), point(8, 9)]),
section(3, [point(11, 12), point(13, 14)]),
section(3, [point(15, 16), point(17, 18)]),
section(4, [point(12, 13), point(14, 15)]),
section(4, [point(16, 17), point(18, 19)]),
)
def ask_wing(db, side):
for subst in db.ask(wing(side, W)):
pprint.pprint(subst[W])
db = Database()
make_wing(db)
ask_wing(db, left)
ask_wing(db, right)
| [
"hornet.symbols.point",
"hornet.symbols.section",
"hornet.symbols.segment",
"hornet.symbols.segments",
"hornet.symbols.side",
"pprint.pprint",
"hornet.symbols.sections",
"hornet.symbols.wing"
] | [((685, 701), 'hornet.symbols.segment', 'segment', (['left', '(1)'], {}), '(left, 1)\n', (692, 701), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((711, 727), 'hornet.symbols.segment', 'segment', (['left', '(2)'], {}), '(left, 2)\n', (718, 727), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((737, 754), 'hornet.symbols.segment', 'segment', (['right', '(3)'], {}), '(right, 3)\n', (744, 754), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((764, 781), 'hornet.symbols.segment', 'segment', (['right', '(4)'], {}), '(right, 4)\n', (771, 781), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((1240, 1253), 'hornet.symbols.wing', 'wing', (['side', 'W'], {}), '(side, W)\n', (1244, 1253), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((1264, 1287), 'pprint.pprint', 'pprint.pprint', (['subst[W]'], {}), '(subst[W])\n', (1277, 1287), False, 'import pprint\n'), ((445, 463), 'hornet.symbols.segments', 'segments', (['Side', 'Ss'], {}), '(Side, Ss)\n', (453, 463), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((489, 501), 'hornet.symbols.segments', 'segments', (['Ss'], {}), '(Ss)\n', (497, 501), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((526, 540), 'hornet.symbols.segment', 'segment', (['Id', 'S'], {}), '(Id, S)\n', (533, 540), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((606, 618), 'hornet.symbols.sections', 'sections', (['Ss'], {}), '(Ss)\n', (614, 618), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((643, 653), 'hornet.symbols.section', 'section', (['S'], {}), '(S)\n', (650, 653), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((655, 669), 'hornet.symbols.section', 'section', (['Id', 'S'], {}), '(Id, S)\n', (662, 669), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((804, 815), 'hornet.symbols.point', 'point', (['(1)', '(2)'], {}), '(1, 2)\n', (809, 815), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((817, 828), 'hornet.symbols.point', 'point', (['(3)', '(4)'], {}), '(3, 4)\n', (822, 828), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((852, 863), 'hornet.symbols.point', 'point', (['(5)', '(6)'], {}), '(5, 6)\n', (857, 863), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((865, 876), 'hornet.symbols.point', 'point', (['(7)', '(8)'], {}), '(7, 8)\n', (870, 876), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((900, 911), 'hornet.symbols.point', 'point', (['(2)', '(3)'], {}), 
'(2, 3)\n', (905, 911), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((913, 924), 'hornet.symbols.point', 'point', (['(4)', '(5)'], {}), '(4, 5)\n', (918, 924), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((948, 959), 'hornet.symbols.point', 'point', (['(6)', '(7)'], {}), '(6, 7)\n', (953, 959), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((961, 972), 'hornet.symbols.point', 'point', (['(8)', '(9)'], {}), '(8, 9)\n', (966, 972), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((996, 1009), 'hornet.symbols.point', 'point', (['(11)', '(12)'], {}), '(11, 12)\n', (1001, 1009), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((1011, 1024), 'hornet.symbols.point', 'point', (['(13)', '(14)'], {}), '(13, 14)\n', (1016, 1024), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((1048, 1061), 'hornet.symbols.point', 'point', (['(15)', '(16)'], {}), '(15, 16)\n', (1053, 1061), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((1063, 1076), 'hornet.symbols.point', 'point', (['(17)', '(18)'], {}), '(17, 18)\n', (1068, 1076), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((1100, 1113), 'hornet.symbols.point', 'point', (['(12)', '(13)'], {}), '(12, 13)\n', (1105, 1113), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((1115, 1128), 'hornet.symbols.point', 'point', (['(14)', '(15)'], {}), '(14, 15)\n', (1120, 1128), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((1152, 1165), 'hornet.symbols.point', 'point', (['(16)', '(17)'], {}), '(16, 17)\n', (1157, 1165), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((1167, 1180), 'hornet.symbols.point', 'point', (['(18)', '(19)'], {}), '(18, 19)\n', (1172, 1180), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((413, 423), 'hornet.symbols.side', 'side', (['Side'], {}), '(Side)\n', (417, 423), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((542, 559), 'hornet.symbols.segment', 'segment', (['Side', 'Id'], {}), '(Side, Id)\n', (549, 559), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n'), ((562, 577), 'hornet.symbols.sections', 'sections', (['Id', 'S'], {}), '(Id, S)\n', (570, 577), False, 'from hornet.symbols import side, left, right, wing, segment, segments, section, sections, point, Side, Id, S, Ss, W\n')] |
import unittest
import random_util
class MyTestCase(unittest.TestCase):
def test_visualize_results(self):
column_width = 20
print("Generated id:".rjust(column_width, ' ') + random_util.generate_id())
print("Generated uuid:".rjust(column_width, ' ') + random_util.generate_uuid())
print("Generated token:".rjust(column_width, ' ') + random_util.generate_token())
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"random_util.generate_token",
"random_util.generate_uuid",
"random_util.generate_id"
] | [((433, 448), 'unittest.main', 'unittest.main', ([], {}), '()\n', (446, 448), False, 'import unittest\n'), ((195, 220), 'random_util.generate_id', 'random_util.generate_id', ([], {}), '()\n', (218, 220), False, 'import random_util\n'), ((281, 308), 'random_util.generate_uuid', 'random_util.generate_uuid', ([], {}), '()\n', (306, 308), False, 'import random_util\n'), ((370, 398), 'random_util.generate_token', 'random_util.generate_token', ([], {}), '()\n', (396, 398), False, 'import random_util\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A common training and evaluation runner to allow for easy and consistent model creation and evaluation
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2019, <NAME>"
__license__ = "Creative Commons Attribution-ShareAlike 4.0 International License"
__version__ = "1.0"
import pandas as pd
from collections import Counter
from skopt import BayesSearchCV
from sklearn.base import clone
from sklearn.externals.joblib import Parallel, delayed
from sklearn.model_selection import StratifiedKFold, train_test_split
from sklearn.utils import shuffle
from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier
def crossfold_classifier(estimator, transformer, x_train, y_train, train_index, test_index,
record_predict_proba, verbose, fit_increment, warm_start, max_iters, random_state):
"""
    This method allows for training to be done using the joblib parallelism in scikit-learn. Each fold fits a
    clone of the estimator (optionally in increments) and returns the fold's metrics from
    Evaluator.evaluate_classifier_fold. Overall a hacky method to allow for incremental training; it really
    needs to be refactored into a cleaner form.
"""
if hasattr(x_train, 'iloc'):
x_fold_train, x_fold_test = x_train.iloc[train_index], x_train.iloc[test_index]
else:
x_fold_train, x_fold_test = x_train[train_index], x_train[test_index]
if hasattr(y_train, 'iloc'):
y_fold_train, y_fold_test = y_train.iloc[train_index], y_train.iloc[test_index]
else:
y_fold_train, y_fold_test = y_train[train_index], y_train[test_index]
if fit_increment is not None:
if max_iters is not None:
for iter in range(max_iters):
x_fold_train, y_fold_train = shuffle(x_fold_train, y_fold_train, random_state=random_state)
batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose)
else:
batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose)
else:
if transformer is not None:
x_fold_train = transformer.transform(x_fold_train)
estimator.fit(x_fold_train, y_fold_train)
y_fold_test_predict = batch_predict(estimator, x_fold_test, transformer=transformer, verbose=False)
fold_predict_frame = EvaluationFrame(y_fold_test, y_fold_test_predict)
fold_predict_proba_frame = None
if record_predict_proba:
y_fold_test_predict_proba = batch_predict_proba(estimator, x_fold_test, transformer=transformer, verbose=False)
fold_predict_proba_frame = EvaluationFrame(y_fold_test, y_fold_test_predict_proba)
return Evaluator.evaluate_classifier_fold(fold_predict_frame, fold_predict_proba_frame)
class Runner:
"""
    The runner supports bare estimator fitting and search-based fitting. By default it will make use of a
    BayesSearchCV to perform hyperparameter tuning. Ensures everything is cleanly logged, evaluated, and pickled.
"""
def __init__(
self,
name,
df,
target,
estimator,
hyper_parameters=None):
self.name = name
self.df = df
self.target = target
self.estimator = estimator
self.hyper_parameters = hyper_parameters
self.trained_estimator = None
def run_classification_experiment(
self,
sample=None,
random_state=None,
test_size=0.20,
multiclass=False,
record_predict_proba=False,
sampling=None,
cv=5,
verbose=True,
transformer=None,
fit_increment=None,
warm_start=False,
max_iters=None,
n_jobs=-1):
use_project_path()
logger = Logger('%s.txt' % self.name)
evaluator = Evaluator(logger)
data_frame = self.df
if sample is not None:
data_frame = data_frame.sample(n=sample, random_state=random_state)
x_train, x_test, y_train, y_test = train_test_split(data_frame, data_frame[self.target], test_size=test_size)
if transformer is not None:
logger.time_log('Fitting Transformer...')
transformer.fit(x_train)
logger.time_log('Transformer Fit Complete.\n')
if sampling is not None:
logger.time_log('Starting Data Re-Sampling...')
logger.log('Original Training Shape is %s' % Counter(y_train))
x_new, y_new = sampling.fit_resample(x_train, y_train)
logger.log('Balanced Training Shape is %s' % Counter(y_new))
if hasattr(x_train, 'columns'):
x_new = pd.DataFrame(x_new, columns=x_train.columns)
x_train, y_train = x_new, y_new
logger.time_log('Re-Sampling Complete.\n')
logger.time_log('Shuffling Re-Sampled Data.\n')
x_train, y_train = shuffle(x_train, y_train, random_state=random_state)
logger.time_log('Shuffling Complete.\n')
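        # Shuffle after re-sampling: re-samplers typically append the synthetic/duplicated
        # minority examples at the end, which would bias the incremental (batched) fits below
        # if left in that order.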
if self.hyper_parameters is not None:
self.estimator.set_params(**self.hyper_parameters.params)
if cv is not None:
kfold = StratifiedKFold(n_splits=cv, random_state=random_state)
logger.time_log('Cross Validating Model...')
fold_scores = Parallel(n_jobs=n_jobs, verbose=3)(
delayed(crossfold_classifier)(
clone(self.estimator),
transformer,
x_train, y_train,
train_index, test_index,
record_predict_proba, verbose,
fit_increment, warm_start, max_iters, random_state
)
for train_index, test_index in kfold.split(x_train, y_train)
)
logger.time_log('Cross Validation Complete.\n')
logger.time_log('Training Model...')
if fit_increment is not None:
if max_iters is not None:
for iter in range(max_iters):
x_iter_train, y_iter_train = shuffle(x_train, y_train, random_state=random_state)
batch_fit_classifier(self.estimator, x_iter_train, y_iter_train, transformer=transformer, increment=fit_increment, verbose=verbose)
else:
batch_fit_classifier(self.estimator, x_train, y_train, transformer=transformer, increment=fit_increment, verbose=verbose)
else:
if transformer is not None:
x_train_transformed = transformer.transform(x_train)
self.estimator.fit(x_train_transformed, y_train)
else:
self.estimator.fit(x_train, y_train)
logger.time_log('Training Complete.\n')
logger.time_log('Testing Training Partition...')
y_train_predict = batch_predict(self.estimator, x_train, transformer=transformer, verbose=verbose)
logger.time_log('Testing Complete.\n')
train_evaluation_frame = EvaluationFrame(y_train, y_train_predict)
logger.time_log('Testing Holdout Partition...')
y_test_predict = batch_predict(self.estimator, x_test, transformer=transformer, verbose=verbose)
logger.time_log('Testing Complete.\n')
test_evaluation_frame = EvaluationFrame(y_test, y_test_predict)
test_evaluation_frame.save('%s_predict.p' % self.name)
test_proba_evaluation_frame = None
if record_predict_proba:
logger.time_log('Testing Holdout Partition (probability)...')
y_test_predict_proba = batch_predict_proba(self.estimator, x_test, transformer=transformer, verbose=verbose)
test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba)
test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name)
logger.time_log('Testing Complete.\n')
if cv is not None:
evaluator.evaluate_fold_scores(fold_scores)
evaluator.evaluate_classifier_result(
self.estimator,
test_evaluation_frame,
train=train_evaluation_frame,
test_proba=test_proba_evaluation_frame,
multiclass=multiclass
)
logger.close()
if self.hyper_parameters is not None:
self.hyper_parameters.save('%s_params.p' % self.name)
self.trained_estimator = self.estimator
def run_classification_search_experiment(
self,
scoring,
sample=None,
random_state=None,
test_size=0.20,
n_jobs=-1,
n_iter=2,
cv=5,
verbose=3,
multiclass=False,
record_predict_proba=False,
sampling=None):
use_project_path()
logger = Logger('%s.txt' % self.name)
search = BayesSearchCV(
self.estimator,
self.hyper_parameters.search_space,
n_jobs=n_jobs,
n_iter=n_iter,
cv=cv,
verbose=verbose,
scoring=scoring,
return_train_score=True
)
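        # BayesSearchCV runs n_iter rounds of Bayesian optimisation over the declared search
        # space, scoring each candidate with cv-fold cross-validation using `scoring`.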
data_frame = self.df
if sample is not None:
data_frame = data_frame.sample(n=sample, random_state=random_state)
x_train, x_test, y_train, y_test = train_test_split(data_frame, data_frame[self.target], test_size=test_size)
if sampling is not None:
logger.time_log('Starting Data Re-Sampling...')
logger.log('Original Training Shape is %s' % Counter(y_train))
x_new, y_new = sampling.fit_resample(x_train, y_train)
logger.log('Balanced Training Shape is %s' % Counter(y_new))
if hasattr(x_train, 'columns'):
x_new = pd.DataFrame(x_new, columns=x_train.columns)
x_train, y_train = x_new, y_new
logger.time_log('Re-Sampling Complete.\n')
logger.time_log('Shuffling Re-Sampled Data.\n')
x_train, y_train = shuffle(x_train, y_train, random_state=random_state)
logger.time_log('Shuffling Complete.\n')
logger.time_log('Starting HyperParameter Search...')
results = search.fit(x_train, y_train)
logger.time_log('Search Complete.\n')
logger.time_log('Testing Training Partition...')
y_train_predict = batch_predict(results.best_estimator_, x_train)
logger.time_log('Testing Complete.\n')
train_evaluation_frame = EvaluationFrame(y_train, y_train_predict)
logger.time_log('Testing Holdout Partition...')
y_test_predict = batch_predict(results.best_estimator_, x_test)
logger.time_log('Testing Complete.\n')
test_evaluation_frame = EvaluationFrame(y_test, y_test_predict)
test_evaluation_frame.save('%s_predict.p' % self.name)
test_proba_evaluation_frame = None
if record_predict_proba:
logger.time_log('Testing Holdout Partition (probability)...')
y_test_predict_proba = batch_predict_proba(results.best_estimator_, x_test)
test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba)
test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name)
logger.time_log('Testing Complete.\n')
evaluator = Evaluator(logger)
evaluator.evaluate_classifier_result(
results,
test_evaluation_frame,
train=train_evaluation_frame,
test_proba=test_proba_evaluation_frame,
multiclass=multiclass
)
logger.close()
self.hyper_parameters.params = results.best_params_
self.hyper_parameters.save('%s_params.p' % self.name)
self.trained_estimator = results.best_estimator_
| [
"pandas.DataFrame",
"utility.batch_predict_proba",
"utility.Evaluator",
"sklearn.base.clone",
"utility.use_project_path",
"sklearn.externals.joblib.Parallel",
"utility.Evaluator.evaluate_classifier_fold",
"sklearn.externals.joblib.delayed",
"sklearn.model_selection.train_test_split",
"skopt.BayesSearchCV",
"utility.EvaluationFrame",
"sklearn.model_selection.StratifiedKFold",
"utility.Logger",
"collections.Counter",
"sklearn.utils.shuffle",
"utility.batch_predict",
"utility.batch_fit_classifier"
] | [((2308, 2385), 'utility.batch_predict', 'batch_predict', (['estimator', 'x_fold_test'], {'transformer': 'transformer', 'verbose': '(False)'}), '(estimator, x_fold_test, transformer=transformer, verbose=False)\n', (2321, 2385), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((2411, 2460), 'utility.EvaluationFrame', 'EvaluationFrame', (['y_fold_test', 'y_fold_test_predict'], {}), '(y_fold_test, y_fold_test_predict)\n', (2426, 2460), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((2750, 2835), 'utility.Evaluator.evaluate_classifier_fold', 'Evaluator.evaluate_classifier_fold', (['fold_predict_frame', 'fold_predict_proba_frame'], {}), '(fold_predict_frame, fold_predict_proba_frame\n )\n', (2784, 2835), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((2563, 2650), 'utility.batch_predict_proba', 'batch_predict_proba', (['estimator', 'x_fold_test'], {'transformer': 'transformer', 'verbose': '(False)'}), '(estimator, x_fold_test, transformer=transformer,\n verbose=False)\n', (2582, 2650), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((2682, 2737), 'utility.EvaluationFrame', 'EvaluationFrame', (['y_fold_test', 'y_fold_test_predict_proba'], {}), '(y_fold_test, y_fold_test_predict_proba)\n', (2697, 2737), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((3873, 3891), 'utility.use_project_path', 'use_project_path', ([], {}), '()\n', (3889, 3891), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((3910, 3938), 'utility.Logger', 'Logger', (["('%s.txt' % self.name)"], {}), "('%s.txt' % self.name)\n", (3916, 3938), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((3959, 3976), 'utility.Evaluator', 'Evaluator', (['logger'], {}), '(logger)\n', (3968, 3976), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((4163, 4237), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data_frame', 'data_frame[self.target]'], {'test_size': 'test_size'}), '(data_frame, data_frame[self.target], test_size=test_size)\n', (4179, 4237), False, 'from sklearn.model_selection import StratifiedKFold, train_test_split\n'), ((6949, 7034), 'utility.batch_predict', 'batch_predict', (['self.estimator', 'x_train'], {'transformer': 'transformer', 'verbose': 'verbose'}), '(self.estimator, x_train, transformer=transformer, verbose=verbose\n )\n', (6962, 7034), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((7111, 7152), 'utility.EvaluationFrame', 'EvaluationFrame', (['y_train', 'y_train_predict'], {}), '(y_train, y_train_predict)\n', (7126, 7152), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((7235, 7314), 'utility.batch_predict', 'batch_predict', (['self.estimator', 
'x_test'], {'transformer': 'transformer', 'verbose': 'verbose'}), '(self.estimator, x_test, transformer=transformer, verbose=verbose)\n', (7248, 7314), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((7395, 7434), 'utility.EvaluationFrame', 'EvaluationFrame', (['y_test', 'y_test_predict'], {}), '(y_test, y_test_predict)\n', (7410, 7434), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((8868, 8886), 'utility.use_project_path', 'use_project_path', ([], {}), '()\n', (8884, 8886), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((8905, 8933), 'utility.Logger', 'Logger', (["('%s.txt' % self.name)"], {}), "('%s.txt' % self.name)\n", (8911, 8933), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((8952, 9122), 'skopt.BayesSearchCV', 'BayesSearchCV', (['self.estimator', 'self.hyper_parameters.search_space'], {'n_jobs': 'n_jobs', 'n_iter': 'n_iter', 'cv': 'cv', 'verbose': 'verbose', 'scoring': 'scoring', 'return_train_score': '(True)'}), '(self.estimator, self.hyper_parameters.search_space, n_jobs=\n n_jobs, n_iter=n_iter, cv=cv, verbose=verbose, scoring=scoring,\n return_train_score=True)\n', (8965, 9122), False, 'from skopt import BayesSearchCV\n'), ((9406, 9480), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data_frame', 'data_frame[self.target]'], {'test_size': 'test_size'}), '(data_frame, data_frame[self.target], test_size=test_size)\n', (9422, 9480), False, 'from sklearn.model_selection import StratifiedKFold, train_test_split\n'), ((10438, 10485), 'utility.batch_predict', 'batch_predict', (['results.best_estimator_', 'x_train'], {}), '(results.best_estimator_, x_train)\n', (10451, 10485), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((10567, 10608), 'utility.EvaluationFrame', 'EvaluationFrame', (['y_train', 'y_train_predict'], {}), '(y_train, y_train_predict)\n', (10582, 10608), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((10691, 10737), 'utility.batch_predict', 'batch_predict', (['results.best_estimator_', 'x_test'], {}), '(results.best_estimator_, x_test)\n', (10704, 10737), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((10818, 10857), 'utility.EvaluationFrame', 'EvaluationFrame', (['y_test', 'y_test_predict'], {}), '(y_test, y_test_predict)\n', (10833, 10857), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((11399, 11416), 'utility.Evaluator', 'Evaluator', (['logger'], {}), '(logger)\n', (11408, 11416), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((1995, 2126), 'utility.batch_fit_classifier', 'batch_fit_classifier', (['estimator', 'x_fold_train', 'y_fold_train'], {'transformer': 'transformer', 'increment': 'fit_increment', 'verbose': 'verbose'}), '(estimator, x_fold_train, y_fold_train, transformer=\n 
transformer, increment=fit_increment, verbose=verbose)\n', (2015, 2126), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((5037, 5089), 'sklearn.utils.shuffle', 'shuffle', (['x_train', 'y_train'], {'random_state': 'random_state'}), '(x_train, y_train, random_state=random_state)\n', (5044, 5089), False, 'from sklearn.utils import shuffle\n'), ((5308, 5363), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'cv', 'random_state': 'random_state'}), '(n_splits=cv, random_state=random_state)\n', (5323, 5363), False, 'from sklearn.model_selection import StratifiedKFold, train_test_split\n'), ((7684, 7773), 'utility.batch_predict_proba', 'batch_predict_proba', (['self.estimator', 'x_test'], {'transformer': 'transformer', 'verbose': 'verbose'}), '(self.estimator, x_test, transformer=transformer,\n verbose=verbose)\n', (7703, 7773), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((7812, 7857), 'utility.EvaluationFrame', 'EvaluationFrame', (['y_test', 'y_test_predict_proba'], {}), '(y_test, y_test_predict_proba)\n', (7827, 7857), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((10093, 10145), 'sklearn.utils.shuffle', 'shuffle', (['x_train', 'y_train'], {'random_state': 'random_state'}), '(x_train, y_train, random_state=random_state)\n', (10100, 10145), False, 'from sklearn.utils import shuffle\n'), ((11107, 11159), 'utility.batch_predict_proba', 'batch_predict_proba', (['results.best_estimator_', 'x_test'], {}), '(results.best_estimator_, x_test)\n', (11126, 11159), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((11202, 11247), 'utility.EvaluationFrame', 'EvaluationFrame', (['y_test', 'y_test_predict_proba'], {}), '(y_test, y_test_predict_proba)\n', (11217, 11247), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((1763, 1825), 'sklearn.utils.shuffle', 'shuffle', (['x_fold_train', 'y_fold_train'], {'random_state': 'random_state'}), '(x_fold_train, y_fold_train, random_state=random_state)\n', (1770, 1825), False, 'from sklearn.utils import shuffle\n'), ((1842, 1973), 'utility.batch_fit_classifier', 'batch_fit_classifier', (['estimator', 'x_fold_train', 'y_fold_train'], {'transformer': 'transformer', 'increment': 'fit_increment', 'verbose': 'verbose'}), '(estimator, x_fold_train, y_fold_train, transformer=\n transformer, increment=fit_increment, verbose=verbose)\n', (1862, 1973), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((4802, 4846), 'pandas.DataFrame', 'pd.DataFrame', (['x_new'], {'columns': 'x_train.columns'}), '(x_new, columns=x_train.columns)\n', (4814, 4846), True, 'import pandas as pd\n'), ((5447, 5481), 'sklearn.externals.joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_jobs', 'verbose': '(3)'}), '(n_jobs=n_jobs, verbose=3)\n', (5455, 5481), False, 'from sklearn.externals.joblib import Parallel, delayed\n'), ((6436, 6562), 'utility.batch_fit_classifier', 'batch_fit_classifier', (['self.estimator', 'x_train', 'y_train'], {'transformer': 'transformer', 'increment': 'fit_increment', 'verbose': 
'verbose'}), '(self.estimator, x_train, y_train, transformer=\n transformer, increment=fit_increment, verbose=verbose)\n', (6456, 6562), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((9858, 9902), 'pandas.DataFrame', 'pd.DataFrame', (['x_new'], {'columns': 'x_train.columns'}), '(x_new, columns=x_train.columns)\n', (9870, 9902), True, 'import pandas as pd\n'), ((4576, 4592), 'collections.Counter', 'Counter', (['y_train'], {}), '(y_train)\n', (4583, 4592), False, 'from collections import Counter\n'), ((4718, 4732), 'collections.Counter', 'Counter', (['y_new'], {}), '(y_new)\n', (4725, 4732), False, 'from collections import Counter\n'), ((6197, 6249), 'sklearn.utils.shuffle', 'shuffle', (['x_train', 'y_train'], {'random_state': 'random_state'}), '(x_train, y_train, random_state=random_state)\n', (6204, 6249), False, 'from sklearn.utils import shuffle\n'), ((6270, 6405), 'utility.batch_fit_classifier', 'batch_fit_classifier', (['self.estimator', 'x_iter_train', 'y_iter_train'], {'transformer': 'transformer', 'increment': 'fit_increment', 'verbose': 'verbose'}), '(self.estimator, x_iter_train, y_iter_train,\n transformer=transformer, increment=fit_increment, verbose=verbose)\n', (6290, 6405), False, 'from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier\n'), ((9632, 9648), 'collections.Counter', 'Counter', (['y_train'], {}), '(y_train)\n', (9639, 9648), False, 'from collections import Counter\n'), ((9774, 9788), 'collections.Counter', 'Counter', (['y_new'], {}), '(y_new)\n', (9781, 9788), False, 'from collections import Counter\n'), ((5499, 5528), 'sklearn.externals.joblib.delayed', 'delayed', (['crossfold_classifier'], {}), '(crossfold_classifier)\n', (5506, 5528), False, 'from sklearn.externals.joblib import Parallel, delayed\n'), ((5550, 5571), 'sklearn.base.clone', 'clone', (['self.estimator'], {}), '(self.estimator)\n', (5555, 5571), False, 'from sklearn.base import clone\n')] |
from utime import ticks_ms
import network
import time
from umqtt.simple import MQTTClient
STATE_DISCONNECTED = 0
STATE_WLAN_CONNECTING = 1
STATE_WLAN_CONNECTED = 2
STATE_MQTT_CONNECTING = 3
STATE_MQTT_CONNECTED = 4
WLAN_CONNECTION_TIMEOUT_MS = 30 * 1000
MQTT_CONNECTION_TIMEOUT_MS = 30 * 1000
class ConnectionManager:
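    """Non-blocking Wi-Fi + MQTT connection state machine.

    configureWlan()/configureMqtt() set the credentials, initConnection() starts
    the Wi-Fi connection, update() must be called periodically to advance the
    state, and publish() buffers the latest value per topic until MQTT is up.
    """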
def __init__(self):
self._wlan = network.WLAN(network.STA_IF)
self._wlanSsid = None
self._wlanPassword = None
self._wlanConnectingTimestamp = None
self._mqtt = None
self._mqttConnectingTimestamp = None
self._state = STATE_DISCONNECTED
self._data = {}
def configureWlan(self, ssid, password):
self._wlanSsid = ssid
self._wlanPassword = password
def configureMqtt(self, mqttClientId, mqttServer, mqttUsername, mqttPassword):
self._mqtt = MQTTClient(mqttClientId, mqttServer, 0, mqttUsername, mqttPassword)
def initConnection(self):
if self._state == STATE_DISCONNECTED:
self.__connectWlan()
def publish(self, topic, data):
# keeping only the latest value
self._data[topic] = data
self.__flush()
def update(self):
if self._state > STATE_WLAN_CONNECTING \
                and not self._wlan.isconnected():
self._state = STATE_DISCONNECTED
if self._state == STATE_WLAN_CONNECTING:
self.__updateWlanConnectingState()
if self._state == STATE_WLAN_CONNECTED:
self.__updateWlanConnectedState()
if self._state == STATE_MQTT_CONNECTING:
self.__updateMqttConnectingState()
def __connectWlan(self):
if self._wlanSsid:
print("connecting to wlan...")
self._wlanConnectingTimestamp = ticks_ms()
self._state = STATE_WLAN_CONNECTING
try:
self._wlan.active(True)
self._wlan.disconnect()
self._wlan.connect(self._wlanSsid, self._wlanPassword)
except Exception as ex:
self.__printException(ex)
def __updateWlanConnectingState(self):
if ticks_ms() - self._wlanConnectingTimestamp > WLAN_CONNECTION_TIMEOUT_MS:
print("Could not connect to wlan. Falling back to disconnected state")
self._state = STATE_DISCONNECTED
elif self._wlan.isconnected() \
and not self._wlan.ifconfig()[0]=='0.0.0.0':
self._state = STATE_WLAN_CONNECTED
print("wlan connected")
def __updateWlanConnectedState(self):
if self._mqtt:
print("connecting to mqtt")
self._state = STATE_MQTT_CONNECTING
self._mqttConnectingTimestamp = ticks_ms()
try:
self._mqtt.connect()
except Exception as ex:
self.__printException(ex)
def __updateMqttConnectingState(self):
if ticks_ms() - self._mqttConnectingTimestamp > MQTT_CONNECTION_TIMEOUT_MS:
print("MQTT connection failed.")
self._state = STATE_WLAN_CONNECTED
else:
try:
self._mqtt.ping()
self._state = STATE_MQTT_CONNECTED
self.__flush()
print("mqtt connection established")
except Exception as ex:
self.__printException(ex)
def __flush(self):
if self._state == STATE_MQTT_CONNECTED:
try:
for key in list(self._data):
self._mqtt.publish(key, self._data[key])
del self._data[key]
except Exception as ex:
self._state = STATE_WLAN_CONNECTED
self.__printException(ex)
def __printException(self, ex):
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
| [
"umqtt.simple.MQTTClient",
"utime.ticks_ms",
"network.WLAN"
] | [((398, 426), 'network.WLAN', 'network.WLAN', (['network.STA_IF'], {}), '(network.STA_IF)\n', (410, 426), False, 'import network\n'), ((855, 922), 'umqtt.simple.MQTTClient', 'MQTTClient', (['mqttClientId', 'mqttServer', '(0)', 'mqttUsername', 'mqttPassword'], {}), '(mqttClientId, mqttServer, 0, mqttUsername, mqttPassword)\n', (865, 922), False, 'from umqtt.simple import MQTTClient\n'), ((1680, 1690), 'utime.ticks_ms', 'ticks_ms', ([], {}), '()\n', (1688, 1690), False, 'from utime import ticks_ms\n'), ((2513, 2523), 'utime.ticks_ms', 'ticks_ms', ([], {}), '()\n', (2521, 2523), False, 'from utime import ticks_ms\n'), ((1990, 2000), 'utime.ticks_ms', 'ticks_ms', ([], {}), '()\n', (1998, 2000), False, 'from utime import ticks_ms\n'), ((2681, 2691), 'utime.ticks_ms', 'ticks_ms', ([], {}), '()\n', (2689, 2691), False, 'from utime import ticks_ms\n')] |
import cv2
import math
POINTS = []
class PointFilter:
def __init__(self, points):
self._points = points
def deletePoints(self, event, xCoordinate, yCoordinate, flags, params):
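        # On a right mouse click, remove the stored point closest (Euclidean distance) to the click.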
if event == cv2.EVENT_RBUTTONDOWN:
diff = list()
for point in self._points:
xd = math.pow((point[0] - xCoordinate), 2)
yd = math.pow((point[1] - yCoordinate), 2)
d = math.sqrt(xd + yd)
diff.append(d)
pointToDelete = diff.index(min(diff))
self._points.pop(pointToDelete) | [
"math.sqrt",
"math.pow"
] | [((325, 360), 'math.pow', 'math.pow', (['(point[0] - xCoordinate)', '(2)'], {}), '(point[0] - xCoordinate, 2)\n', (333, 360), False, 'import math\n'), ((384, 419), 'math.pow', 'math.pow', (['(point[1] - yCoordinate)', '(2)'], {}), '(point[1] - yCoordinate, 2)\n', (392, 419), False, 'import math\n'), ((442, 460), 'math.sqrt', 'math.sqrt', (['(xd + yd)'], {}), '(xd + yd)\n', (451, 460), False, 'import math\n')] |
import numpy as np
from fluiddyn.clusters.legi import Calcul2 as Cluster
from critical_Ra_RB import Ra_c_RB as Ra_c_RB_tests
prandtl = 1.0
dim = 2
dt_max = 0.005
end_time = 30
nb_procs = 10
nx = 8
order = 10
stretch_factor = 0.0
Ra_vert = 1750
x_periodicity = False
z_periodicity = False
cluster = Cluster()
cluster.commands_setting_env = [
"PROJET_DIR=/fsnet/project/meige/2020/20CONVECTION",
"source /etc/profile",
"source $PROJET_DIR/miniconda3/etc/profile.d/conda.sh",
"conda activate env-snek",
"export NEK_SOURCE_ROOT=$HOME/Dev/snek5000/lib/Nek5000",
"export PATH=$PATH:$NEK_SOURCE_ROOT/bin",
"export FLUIDSIM_PATH=$PROJET_DIR/numerical/",
]
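# For each aspect ratio that yields an integer element count, scan four Rayleigh
# numbers between the critical value Ra_c and 1.04 * Ra_c and submit one run per value.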
for aspect_ratio, Ra_c_test in Ra_c_RB_tests.items():
ny = int(nx * aspect_ratio)
if nx * aspect_ratio - ny:
continue
Ra_vert_nums = np.logspace(np.log10(Ra_c_test), np.log10(1.04 * Ra_c_test), 4)
for Ra_vert_num in Ra_vert_nums:
command = (
f"run_simul_check_from_python.py -Pr {prandtl} -nx {nx} --dim {dim} "
f"--order {order} --dt-max {dt_max} --end-time {end_time} -np {nb_procs} "
f"-a_y {aspect_ratio} --stretch-factor {stretch_factor} "
f"--Ra-vert {Ra_vert_num}"
)
if x_periodicity:
command += " --x-periodicity"
elif z_periodicity:
command += " --z-periodicity"
print(command)
name_run = f"RB_asp{aspect_ratio:.3f}_Ra{Ra_vert_num:.3e}_Pr{prandtl:.2f}_msh{nx*order}x{round(nx*aspect_ratio)*order}"
cluster.submit_script(
command,
name_run=name_run,
nb_cores_per_node=nb_procs,
omp_num_threads=1,
ask=False,
)
| [
"fluiddyn.clusters.legi.Calcul2",
"numpy.log10",
"critical_Ra_RB.Ra_c_RB.items"
] | [((306, 315), 'fluiddyn.clusters.legi.Calcul2', 'Cluster', ([], {}), '()\n', (313, 315), True, 'from fluiddyn.clusters.legi import Calcul2 as Cluster\n'), ((718, 739), 'critical_Ra_RB.Ra_c_RB.items', 'Ra_c_RB_tests.items', ([], {}), '()\n', (737, 739), True, 'from critical_Ra_RB import Ra_c_RB as Ra_c_RB_tests\n'), ((854, 873), 'numpy.log10', 'np.log10', (['Ra_c_test'], {}), '(Ra_c_test)\n', (862, 873), True, 'import numpy as np\n'), ((875, 901), 'numpy.log10', 'np.log10', (['(1.04 * Ra_c_test)'], {}), '(1.04 * Ra_c_test)\n', (883, 901), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import os
import sys
import numpy as np
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
import pandas as pd
from matplotlib import pyplot as plt
import pmagpy.pmag as pmag
import pmagpy.pmagplotlib as pmagplotlib
from pmag_env import set_env
import operator
OPS = {'<' : operator.lt, '<=' : operator.le,
'>' : operator.gt, '>=': operator.ge, '=': operator.eq}
def main():
"""
NAME
foldtest_magic.py
DESCRIPTION
does a fold test (Tauxe, 2010) on data
INPUT FORMAT
pmag_specimens format file, er_samples.txt format file (for bedding)
SYNTAX
foldtest_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f sites formatted file [default for 3.0 is sites.txt, for 2.5, pmag_sites.txt]
-fsa samples formatted file
-fsi sites formatted file
-exc use criteria to set acceptance criteria (supported only for data model 3)
-n NB, set number of bootstraps, default is 1000
-b MIN, MAX, set bounds for untilting, default is -10, 150
-fmt FMT, specify format - default is svg
-sav saves plots and quits
-DM NUM MagIC data model number (2 or 3, default 3)
OUTPUT
Geographic: is an equal area projection of the input data in
original coordinates
Stratigraphic: is an equal area projection of the input data in
tilt adjusted coordinates
% Untilting: The dashed (red) curves are representative plots of
maximum eigenvalue (tau_1) as a function of untilting
The solid line is the cumulative distribution of the
% Untilting required to maximize tau for all the
bootstrapped data sets. The dashed vertical lines
are 95% confidence bounds on the % untilting that yields
the most clustered result (maximum tau_1).
Command line: prints out the bootstrapped iterations and
finally the confidence bounds on optimum untilting.
If the 95% conf bounds include 0, then a pre-tilt magnetization is indicated
If the 95% conf bounds include 100, then a post-tilt magnetization is indicated
If the 95% conf bounds exclude both 0 and 100, syn-tilt magnetization is
possible as is vertical axis rotation or other pathologies
"""
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
kappa = 0
dir_path = pmag.get_named_arg("-WD", ".")
nboot = int(float(pmag.get_named_arg("-n", 1000))) # number of bootstraps
fmt = pmag.get_named_arg("-fmt", "svg")
data_model_num = int(float(pmag.get_named_arg("-DM", 3)))
if data_model_num == 3:
infile = pmag.get_named_arg("-f", 'sites.txt')
orfile = 'samples.txt'
site_col = 'site'
dec_col = 'dir_dec'
inc_col = 'dir_inc'
tilt_col = 'dir_tilt_correction'
dipkey, azkey = 'bed_dip', 'bed_dip_direction'
crit_col = 'criterion'
critfile = 'criteria.txt'
else:
infile = pmag.get_named_arg("-f", 'pmag_sites.txt')
orfile = 'er_samples.txt'
site_col = 'er_site_name'
dec_col = 'site_dec'
inc_col = 'site_inc'
tilt_col = 'site_tilt_correction'
dipkey, azkey = 'sample_bed_dip', 'sample_bed_dip_direction'
crit_col = 'pmag_criteria_code'
critfile = 'pmag_criteria.txt'
if '-sav' in sys.argv:
plot = 1
else:
plot = 0
if '-b' in sys.argv:
ind = sys.argv.index('-b')
untilt_min = int(sys.argv[ind+1])
untilt_max = int(sys.argv[ind+2])
else:
untilt_min, untilt_max = -10, 150
if '-fsa' in sys.argv:
orfile = pmag.get_named_arg("-fsa", "")
elif '-fsi' in sys.argv:
orfile = pmag.get_named_arg("-fsi", "")
if data_model_num == 3:
dipkey, azkey = 'bed_dip', 'bed_dip_direction'
else:
dipkey, azkey = 'site_bed_dip', 'site_bed_dip_direction'
else:
if data_model_num == 3:
orfile = 'sites.txt'
else:
orfile = 'pmag_sites.txt'
orfile = pmag.resolve_file_name(orfile, dir_path)
infile = pmag.resolve_file_name(infile, dir_path)
critfile = pmag.resolve_file_name(critfile, dir_path)
df = pd.read_csv(infile, sep='\t', header=1)
# keep only records with tilt_col
data = df.copy()
data = data[data[tilt_col].notnull()]
data = data.where(data.notnull(), "")
# turn into pmag data list
data = list(data.T.apply(dict))
# get orientation data
if data_model_num == 3:
# often orientation will be in infile (sites table)
if os.path.split(orfile)[1] == os.path.split(infile)[1]:
ordata = df[df[azkey].notnull()]
ordata = ordata[ordata[dipkey].notnull()]
ordata = list(ordata.T.apply(dict))
# sometimes orientation might be in a sample file instead
else:
ordata = pd.read_csv(orfile, sep='\t', header=1)
ordata = list(ordata.T.apply(dict))
else:
ordata, file_type = pmag.magic_read(orfile)
if '-exc' in sys.argv:
crits, file_type = pmag.magic_read(critfile)
SiteCrits = []
for crit in crits:
if crit[crit_col] == "DE-SITE":
SiteCrits.append(crit)
#break
# get to work
#
PLTS = {'geo': 1, 'strat': 2, 'taus': 3} # make plot dictionary
if not set_env.IS_WIN:
pmagplotlib.plot_init(PLTS['geo'], 5, 5)
pmagplotlib.plot_init(PLTS['strat'], 5, 5)
pmagplotlib.plot_init(PLTS['taus'], 5, 5)
if data_model_num == 2:
GEOrecs = pmag.get_dictitem(data, tilt_col, '0', 'T')
else:
GEOrecs = data
if len(GEOrecs) > 0: # have some geographic data
num_dropped = 0
DIDDs = [] # set up list for dec inc dip_direction, dip
for rec in GEOrecs: # parse data
dip, dip_dir = 0, -1
Dec = float(rec[dec_col])
Inc = float(rec[inc_col])
orecs = pmag.get_dictitem(
ordata, site_col, rec[site_col], 'T')
if len(orecs) > 0:
if orecs[0][azkey] != "":
dip_dir = float(orecs[0][azkey])
if orecs[0][dipkey] != "":
dip = float(orecs[0][dipkey])
if dip != 0 and dip_dir != -1:
if '-exc' in sys.argv:
keep = 1
for site_crit in SiteCrits:
crit_name = site_crit['table_column'].split('.')[1]
if crit_name and crit_name in rec.keys() and rec[crit_name]:
# get the correct operation (<, >=, =, etc.)
op = OPS[site_crit['criterion_operation']]
# then make sure the site record passes
if op(float(rec[crit_name]), float(site_crit['criterion_value'])):
keep = 0
if keep == 1:
DIDDs.append([Dec, Inc, dip_dir, dip])
else:
num_dropped += 1
else:
DIDDs.append([Dec, Inc, dip_dir, dip])
if num_dropped:
print("-W- Dropped {} records because each failed one or more criteria".format(num_dropped))
else:
print('no geographic directional data found')
sys.exit()
pmagplotlib.plot_eq(PLTS['geo'], DIDDs, 'Geographic')
data = np.array(DIDDs)
D, I = pmag.dotilt_V(data)
TCs = np.array([D, I]).transpose()
pmagplotlib.plot_eq(PLTS['strat'], TCs, 'Stratigraphic')
if plot == 0:
pmagplotlib.draw_figs(PLTS)
Percs = list(range(untilt_min, untilt_max))
Cdf, Untilt = [], []
plt.figure(num=PLTS['taus'])
print('doing ', nboot, ' iterations...please be patient.....')
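    # Bootstrap: for each pseudo-sample, record the % untilting that maximizes tau_1.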
for n in range(nboot): # do bootstrap data sets - plot first 25 as dashed red line
if n % 50 == 0:
print(n)
Taus = [] # set up lists for taus
PDs = pmag.pseudo(DIDDs)
if kappa != 0:
for k in range(len(PDs)):
d, i = pmag.fshdev(kappa)
dipdir, dip = pmag.dodirot(d, i, PDs[k][2], PDs[k][3])
PDs[k][2] = dipdir
PDs[k][3] = dip
for perc in Percs:
tilt = np.array([1., 1., 1., 0.01*perc])
D, I = pmag.dotilt_V(PDs*tilt)
TCs = np.array([D, I]).transpose()
ppars = pmag.doprinc(TCs) # get principal directions
Taus.append(ppars['tau1'])
if n < 25:
plt.plot(Percs, Taus, 'r--')
# tilt that gives maximum tau
Untilt.append(Percs[Taus.index(np.max(Taus))])
Cdf.append(float(n) / float(nboot))
plt.plot(Percs, Taus, 'k')
plt.xlabel('% Untilting')
plt.ylabel('tau_1 (red), CDF (green)')
Untilt.sort() # now for CDF of tilt of maximum tau
plt.plot(Untilt, Cdf, 'g')
lower = int(.025*nboot)
upper = int(.975*nboot)
plt.axvline(x=Untilt[lower], ymin=0, ymax=1, linewidth=1, linestyle='--')
plt.axvline(x=Untilt[upper], ymin=0, ymax=1, linewidth=1, linestyle='--')
tit = '%i - %i %s' % (Untilt[lower], Untilt[upper], 'Percent Unfolding')
print(tit)
plt.title(tit)
if plot == 0:
pmagplotlib.draw_figs(PLTS)
ans = input('S[a]ve all figures, <Return> to quit \n ')
if ans != 'a':
print("Good bye")
sys.exit()
files = {}
for key in list(PLTS.keys()):
files[key] = ('foldtest_'+'%s' % (key.strip()[:2])+'.'+fmt)
pmagplotlib.save_plots(PLTS, files)
if __name__ == "__main__":
main()
| [
"matplotlib.pyplot.title",
"pandas.read_csv",
"pmagpy.pmag.fshdev",
"matplotlib.pyplot.figure",
"matplotlib.get_backend",
"pmagpy.pmagplotlib.plot_init",
"matplotlib.pyplot.axvline",
"pmagpy.pmag.pseudo",
"pmagpy.pmag.get_dictitem",
"pmagpy.pmagplotlib.draw_figs",
"pmagpy.pmag.dotilt_V",
"pmagpy.pmag.get_named_arg",
"numpy.max",
"sys.argv.index",
"pmagpy.pmag.resolve_file_name",
"pmagpy.pmagplotlib.plot_eq",
"pmagpy.pmag.dodirot",
"pmagpy.pmag.magic_read",
"matplotlib.use",
"matplotlib.pyplot.ylabel",
"sys.exit",
"matplotlib.pyplot.plot",
"pmagpy.pmag.doprinc",
"numpy.array",
"matplotlib.pyplot.xlabel",
"pmagpy.pmagplotlib.save_plots",
"os.path.split"
] | [((83, 107), 'matplotlib.get_backend', 'matplotlib.get_backend', ([], {}), '()\n', (105, 107), False, 'import matplotlib\n'), ((124, 147), 'matplotlib.use', 'matplotlib.use', (['"""TKAgg"""'], {}), "('TKAgg')\n", (138, 147), False, 'import matplotlib\n'), ((2632, 2662), 'pmagpy.pmag.get_named_arg', 'pmag.get_named_arg', (['"""-WD"""', '"""."""'], {}), "('-WD', '.')\n", (2650, 2662), True, 'import pmagpy.pmag as pmag\n'), ((2755, 2788), 'pmagpy.pmag.get_named_arg', 'pmag.get_named_arg', (['"""-fmt"""', '"""svg"""'], {}), "('-fmt', 'svg')\n", (2773, 2788), True, 'import pmagpy.pmag as pmag\n'), ((4327, 4367), 'pmagpy.pmag.resolve_file_name', 'pmag.resolve_file_name', (['orfile', 'dir_path'], {}), '(orfile, dir_path)\n', (4349, 4367), True, 'import pmagpy.pmag as pmag\n'), ((4381, 4421), 'pmagpy.pmag.resolve_file_name', 'pmag.resolve_file_name', (['infile', 'dir_path'], {}), '(infile, dir_path)\n', (4403, 4421), True, 'import pmagpy.pmag as pmag\n'), ((4437, 4479), 'pmagpy.pmag.resolve_file_name', 'pmag.resolve_file_name', (['critfile', 'dir_path'], {}), '(critfile, dir_path)\n', (4459, 4479), True, 'import pmagpy.pmag as pmag\n'), ((4489, 4528), 'pandas.read_csv', 'pd.read_csv', (['infile'], {'sep': '"""\t"""', 'header': '(1)'}), "(infile, sep='\\t', header=1)\n", (4500, 4528), True, 'import pandas as pd\n'), ((7679, 7732), 'pmagpy.pmagplotlib.plot_eq', 'pmagplotlib.plot_eq', (["PLTS['geo']", 'DIDDs', '"""Geographic"""'], {}), "(PLTS['geo'], DIDDs, 'Geographic')\n", (7698, 7732), True, 'import pmagpy.pmagplotlib as pmagplotlib\n'), ((7744, 7759), 'numpy.array', 'np.array', (['DIDDs'], {}), '(DIDDs)\n', (7752, 7759), True, 'import numpy as np\n'), ((7771, 7790), 'pmagpy.pmag.dotilt_V', 'pmag.dotilt_V', (['data'], {}), '(data)\n', (7784, 7790), True, 'import pmagpy.pmag as pmag\n'), ((7834, 7890), 'pmagpy.pmagplotlib.plot_eq', 'pmagplotlib.plot_eq', (["PLTS['strat']", 'TCs', '"""Stratigraphic"""'], {}), "(PLTS['strat'], TCs, 'Stratigraphic')\n", (7853, 7890), True, 'import pmagpy.pmagplotlib as pmagplotlib\n'), ((8022, 8050), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': "PLTS['taus']"}), "(num=PLTS['taus'])\n", (8032, 8050), True, 'from matplotlib import pyplot as plt\n'), ((9044, 9070), 'matplotlib.pyplot.plot', 'plt.plot', (['Percs', 'Taus', '"""k"""'], {}), "(Percs, Taus, 'k')\n", (9052, 9070), True, 'from matplotlib import pyplot as plt\n'), ((9075, 9100), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""% Untilting"""'], {}), "('% Untilting')\n", (9085, 9100), True, 'from matplotlib import pyplot as plt\n'), ((9105, 9143), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""tau_1 (red), CDF (green)"""'], {}), "('tau_1 (red), CDF (green)')\n", (9115, 9143), True, 'from matplotlib import pyplot as plt\n'), ((9204, 9230), 'matplotlib.pyplot.plot', 'plt.plot', (['Untilt', 'Cdf', '"""g"""'], {}), "(Untilt, Cdf, 'g')\n", (9212, 9230), True, 'from matplotlib import pyplot as plt\n'), ((9291, 9364), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'Untilt[lower]', 'ymin': '(0)', 'ymax': '(1)', 'linewidth': '(1)', 'linestyle': '"""--"""'}), "(x=Untilt[lower], ymin=0, ymax=1, linewidth=1, linestyle='--')\n", (9302, 9364), True, 'from matplotlib import pyplot as plt\n'), ((9369, 9442), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'Untilt[upper]', 'ymin': '(0)', 'ymax': '(1)', 'linewidth': '(1)', 'linestyle': '"""--"""'}), "(x=Untilt[upper], ymin=0, ymax=1, linewidth=1, linestyle='--')\n", (9380, 9442), True, 'from matplotlib import pyplot as plt\n'), ((9539, 9553), 
'matplotlib.pyplot.title', 'plt.title', (['tit'], {}), '(tit)\n', (9548, 9553), True, 'from matplotlib import pyplot as plt\n'), ((9870, 9905), 'pmagpy.pmagplotlib.save_plots', 'pmagplotlib.save_plots', (['PLTS', 'files'], {}), '(PLTS, files)\n', (9892, 9905), True, 'import pmagpy.pmagplotlib as pmagplotlib\n'), ((2573, 2583), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2581, 2583), False, 'import sys\n'), ((2896, 2933), 'pmagpy.pmag.get_named_arg', 'pmag.get_named_arg', (['"""-f"""', '"""sites.txt"""'], {}), "('-f', 'sites.txt')\n", (2914, 2933), True, 'import pmagpy.pmag as pmag\n'), ((3235, 3277), 'pmagpy.pmag.get_named_arg', 'pmag.get_named_arg', (['"""-f"""', '"""pmag_sites.txt"""'], {}), "('-f', 'pmag_sites.txt')\n", (3253, 3277), True, 'import pmagpy.pmag as pmag\n'), ((3704, 3724), 'sys.argv.index', 'sys.argv.index', (['"""-b"""'], {}), "('-b')\n", (3718, 3724), False, 'import sys\n'), ((3905, 3935), 'pmagpy.pmag.get_named_arg', 'pmag.get_named_arg', (['"""-fsa"""', '""""""'], {}), "('-fsa', '')\n", (3923, 3935), True, 'import pmagpy.pmag as pmag\n'), ((5293, 5316), 'pmagpy.pmag.magic_read', 'pmag.magic_read', (['orfile'], {}), '(orfile)\n', (5308, 5316), True, 'import pmagpy.pmag as pmag\n'), ((5372, 5397), 'pmagpy.pmag.magic_read', 'pmag.magic_read', (['critfile'], {}), '(critfile)\n', (5387, 5397), True, 'import pmagpy.pmag as pmag\n'), ((5675, 5715), 'pmagpy.pmagplotlib.plot_init', 'pmagplotlib.plot_init', (["PLTS['geo']", '(5)', '(5)'], {}), "(PLTS['geo'], 5, 5)\n", (5696, 5715), True, 'import pmagpy.pmagplotlib as pmagplotlib\n'), ((5724, 5766), 'pmagpy.pmagplotlib.plot_init', 'pmagplotlib.plot_init', (["PLTS['strat']", '(5)', '(5)'], {}), "(PLTS['strat'], 5, 5)\n", (5745, 5766), True, 'import pmagpy.pmagplotlib as pmagplotlib\n'), ((5775, 5816), 'pmagpy.pmagplotlib.plot_init', 'pmagplotlib.plot_init', (["PLTS['taus']", '(5)', '(5)'], {}), "(PLTS['taus'], 5, 5)\n", (5796, 5816), True, 'import pmagpy.pmagplotlib as pmagplotlib\n'), ((5863, 5906), 'pmagpy.pmag.get_dictitem', 'pmag.get_dictitem', (['data', 'tilt_col', '"""0"""', '"""T"""'], {}), "(data, tilt_col, '0', 'T')\n", (5880, 5906), True, 'import pmagpy.pmag as pmag\n'), ((7663, 7673), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7671, 7673), False, 'import sys\n'), ((7917, 7944), 'pmagpy.pmagplotlib.draw_figs', 'pmagplotlib.draw_figs', (['PLTS'], {}), '(PLTS)\n', (7938, 7944), True, 'import pmagpy.pmagplotlib as pmagplotlib\n'), ((8308, 8326), 'pmagpy.pmag.pseudo', 'pmag.pseudo', (['DIDDs'], {}), '(DIDDs)\n', (8319, 8326), True, 'import pmagpy.pmag as pmag\n'), ((9580, 9607), 'pmagpy.pmagplotlib.draw_figs', 'pmagplotlib.draw_figs', (['PLTS'], {}), '(PLTS)\n', (9601, 9607), True, 'import pmagpy.pmagplotlib as pmagplotlib\n'), ((2685, 2715), 'pmagpy.pmag.get_named_arg', 'pmag.get_named_arg', (['"""-n"""', '(1000)'], {}), "('-n', 1000)\n", (2703, 2715), True, 'import pmagpy.pmag as pmag\n'), ((2820, 2848), 'pmagpy.pmag.get_named_arg', 'pmag.get_named_arg', (['"""-DM"""', '(3)'], {}), "('-DM', 3)\n", (2838, 2848), True, 'import pmagpy.pmag as pmag\n'), ((3982, 4012), 'pmagpy.pmag.get_named_arg', 'pmag.get_named_arg', (['"""-fsi"""', '""""""'], {}), "('-fsi', '')\n", (4000, 4012), True, 'import pmagpy.pmag as pmag\n'), ((5167, 5206), 'pandas.read_csv', 'pd.read_csv', (['orfile'], {'sep': '"""\t"""', 'header': '(1)'}), "(orfile, sep='\\t', header=1)\n", (5178, 5206), True, 'import pandas as pd\n'), ((6256, 6311), 'pmagpy.pmag.get_dictitem', 'pmag.get_dictitem', (['ordata', 'site_col', 'rec[site_col]', '"""T"""'], {}), "(ordata, 
site_col, rec[site_col], 'T')\n", (6273, 6311), True, 'import pmagpy.pmag as pmag\n'), ((7801, 7817), 'numpy.array', 'np.array', (['[D, I]'], {}), '([D, I])\n', (7809, 7817), True, 'import numpy as np\n'), ((8614, 8652), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 0.01 * perc]'], {}), '([1.0, 1.0, 1.0, 0.01 * perc])\n', (8622, 8652), True, 'import numpy as np\n'), ((8667, 8692), 'pmagpy.pmag.dotilt_V', 'pmag.dotilt_V', (['(PDs * tilt)'], {}), '(PDs * tilt)\n', (8680, 8692), True, 'import pmagpy.pmag as pmag\n'), ((8758, 8775), 'pmagpy.pmag.doprinc', 'pmag.doprinc', (['TCs'], {}), '(TCs)\n', (8770, 8775), True, 'import pmagpy.pmag as pmag\n'), ((8874, 8902), 'matplotlib.pyplot.plot', 'plt.plot', (['Percs', 'Taus', '"""r--"""'], {}), "(Percs, Taus, 'r--')\n", (8882, 8902), True, 'from matplotlib import pyplot as plt\n'), ((9738, 9748), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9746, 9748), False, 'import sys\n'), ((4865, 4886), 'os.path.split', 'os.path.split', (['orfile'], {}), '(orfile)\n', (4878, 4886), False, 'import os\n'), ((4893, 4914), 'os.path.split', 'os.path.split', (['infile'], {}), '(infile)\n', (4906, 4914), False, 'import os\n'), ((8411, 8429), 'pmagpy.pmag.fshdev', 'pmag.fshdev', (['kappa'], {}), '(kappa)\n', (8422, 8429), True, 'import pmagpy.pmag as pmag\n'), ((8460, 8500), 'pmagpy.pmag.dodirot', 'pmag.dodirot', (['d', 'i', 'PDs[k][2]', 'PDs[k][3]'], {}), '(d, i, PDs[k][2], PDs[k][3])\n', (8472, 8500), True, 'import pmagpy.pmag as pmag\n'), ((8709, 8725), 'numpy.array', 'np.array', (['[D, I]'], {}), '([D, I])\n', (8717, 8725), True, 'import numpy as np\n'), ((8980, 8992), 'numpy.max', 'np.max', (['Taus'], {}), '(Taus)\n', (8986, 8992), True, 'import numpy as np\n')] |
# coding=utf-8
import numpy as np
import reikna.cluda as cluda
from reikna.fft import FFT, FFTShift
import pyopencl.array as clarray
from pyopencl import clmath
from reikna.core import Computation, Transformation, Parameter, Annotation, Type
from reikna.algorithms import PureParallel
from matplotlib import cm
import time as t
import matplotlib.pyplot as plt
import statistic_functions4 as sf
#import mylog as Log
np.set_printoptions(threshold=np.inf)
batch = 100
N = 1024
api = cluda.any_api()
thr = api.Thread.create()
data = np.load('8psk_data.npy')
data = np.reshape(data, (batch*4, N)) # batch*4 = 400 records in total
t1 = t.clock()
data0 = data[0:batch, :].astype(np.complex128)
data_g = thr.to_device(data0)
print(t.clock()-t1)
#compile
fft = FFT(data_g, (0,1))
fftc = fft.compile(thr)
data_f = thr.array(data0.shape, dtype=np.complex128)
shift = FFTShift(data_f, (0,1))
shiftc = shift.compile(thr)
data_shift = thr.array(data0.shape, dtype=np.complex128)
sum = sf.stat(thr)
logg10 = sf.logg10(thr)
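# The FFT/shift plans (fftc, shiftc) and the statistics/log kernels compiled above
# are reused for every batch processed in the loop below.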
def myfft(data):
'''
input:
data: cluda-Array (100, 1024)
-----------------------------------------------
output:
TS_gpu: cluda-Array (1000, 1024)
'''
#FFT
t_fft = t.clock()
data_f = thr.array(data.shape, dtype=np.complex128)
STAT_gpu = thr.array(data.shape, dtype=np.complex128)
fftc(data_f, data)
shiftc(STAT_gpu, data_f)
#log
t_log = t.clock()
STAT_gpu = abs(STAT_gpu)
logg10(STAT_gpu, global_size = (N, batch))
    #statistics, interpolation
t_st = t.clock()
TS_gpu = cluda.ocl.Array(thr, shape=(1000, N), dtype=np.int)
sum(TS_gpu, STAT_gpu, global_size = (N,batch))
print('fft: %f, log: %f, stat: %f'%(t_log-t_fft, t_st-t_log, t.clock()-t_st))
print('total: %f'%(t.clock()-t_fft))
return TS_gpu
i=0
j=0
fig=plt.figure()
#fig, ax = plt.subplots()
summ = 0
while i<100:
t1 = t.clock()
    data0 = data[j*batch:(j+1)*batch, :].astype(np.complex128)
data_g = thr.to_device(data0)
out = myfft(data_g)
out = out.get()
t2 = t.clock()
#nipy_spectral
plt.clf()
#plt.imshow(out, cmap = cm.hot)
plt.imshow(out, cmap = 'nipy_spectral')
plt.ylim(0,1000)
plt.pause(0.00000001)
print('No. %d, transmission+compute: %f, plot: %f'%(i, t2-t1, t.clock()-t2))
summ = summ + t2-t1
j = j + 1
i = i + 1
if j == 4:
j=0
print('avg compute: %f'%(summ/100))
| [
"numpy.load",
"numpy.set_printoptions",
"statistic_functions4.logg10",
"matplotlib.pyplot.clf",
"reikna.fft.FFTShift",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.ylim",
"time.clock",
"statistic_functions4.stat",
"matplotlib.pyplot.figure",
"numpy.reshape",
"reikna.cluda.any_api",
"reikna.cluda.ocl.Array",
"matplotlib.pyplot.pause",
"reikna.fft.FFT"
] | [((430, 467), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (449, 467), True, 'import numpy as np\n'), ((500, 515), 'reikna.cluda.any_api', 'cluda.any_api', ([], {}), '()\n', (513, 515), True, 'import reikna.cluda as cluda\n'), ((555, 579), 'numpy.load', 'np.load', (['"""8psk_data.npy"""'], {}), "('8psk_data.npy')\n", (562, 579), True, 'import numpy as np\n'), ((588, 620), 'numpy.reshape', 'np.reshape', (['data', '(batch * 4, N)'], {}), '(data, (batch * 4, N))\n', (598, 620), True, 'import numpy as np\n'), ((647, 656), 'time.clock', 't.clock', ([], {}), '()\n', (654, 656), True, 'import time as t\n'), ((775, 794), 'reikna.fft.FFT', 'FFT', (['data_g', '(0, 1)'], {}), '(data_g, (0, 1))\n', (778, 794), False, 'from reikna.fft import FFT, FFTShift\n'), ((882, 906), 'reikna.fft.FFTShift', 'FFTShift', (['data_f', '(0, 1)'], {}), '(data_f, (0, 1))\n', (890, 906), False, 'from reikna.fft import FFT, FFTShift\n'), ((1000, 1012), 'statistic_functions4.stat', 'sf.stat', (['thr'], {}), '(thr)\n', (1007, 1012), True, 'import statistic_functions4 as sf\n'), ((1023, 1037), 'statistic_functions4.logg10', 'sf.logg10', (['thr'], {}), '(thr)\n', (1032, 1037), True, 'import statistic_functions4 as sf\n'), ((1856, 1868), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1866, 1868), True, 'import matplotlib.pyplot as plt\n'), ((1249, 1258), 'time.clock', 't.clock', ([], {}), '()\n', (1256, 1258), True, 'import time as t\n'), ((1452, 1461), 'time.clock', 't.clock', ([], {}), '()\n', (1459, 1461), True, 'import time as t\n'), ((1565, 1574), 'time.clock', 't.clock', ([], {}), '()\n', (1572, 1574), True, 'import time as t\n'), ((1589, 1640), 'reikna.cluda.ocl.Array', 'cluda.ocl.Array', (['thr'], {'shape': '(1000, N)', 'dtype': 'np.int'}), '(thr, shape=(1000, N), dtype=np.int)\n', (1604, 1640), True, 'import reikna.cluda as cluda\n'), ((1930, 1939), 'time.clock', 't.clock', ([], {}), '()\n', (1937, 1939), True, 'import time as t\n'), ((2089, 2098), 'time.clock', 't.clock', ([], {}), '()\n', (2096, 2098), True, 'import time as t\n'), ((2124, 2133), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2131, 2133), True, 'import matplotlib.pyplot as plt\n'), ((2176, 2213), 'matplotlib.pyplot.imshow', 'plt.imshow', (['out'], {'cmap': '"""nipy_spectral"""'}), "(out, cmap='nipy_spectral')\n", (2186, 2213), True, 'import matplotlib.pyplot as plt\n'), ((2221, 2238), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1000)'], {}), '(0, 1000)\n', (2229, 2238), True, 'import matplotlib.pyplot as plt\n'), ((2253, 2269), 'matplotlib.pyplot.pause', 'plt.pause', (['(1e-08)'], {}), '(1e-08)\n', (2262, 2269), True, 'import matplotlib.pyplot as plt\n'), ((743, 752), 'time.clock', 't.clock', ([], {}), '()\n', (750, 752), True, 'import time as t\n'), ((1802, 1811), 'time.clock', 't.clock', ([], {}), '()\n', (1809, 1811), True, 'import time as t\n'), ((1761, 1770), 'time.clock', 't.clock', ([], {}), '()\n', (1768, 1770), True, 'import time as t\n'), ((2342, 2351), 'time.clock', 't.clock', ([], {}), '()\n', (2349, 2351), True, 'import time as t\n')] |
from bot.db.entities.Homework import Homework
class HomeworkManager:
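    """Data-access layer mapping rows of the Homework table to Homework entities."""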
def __init__(self, db):
self._db = db
def getAll(self):
cur = self._db.cursor()
cur.execute('SELECT * FROM Homework')
homeworks = []
for homework in cur.fetchall():
homeworks.append(Homework(homework[1], homework[2], homework[3], homework[4], homework[5], homework[6], homework[7], homework[8], homework[9], homework[10]))
return homeworks
def insert(self, homework):
cur = self._db.cursor()
cur.execute('INSERT INTO Homework (date, matiere, codeMatiere, aFaire, idDevoir'
', documentsAFaire, donneLe, effectue, interrogation, rendreEnLigne'
') values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
(homework.getDate(), homework.getMatiere(), homework.getCodeMatiere(),
homework.getAFaire(), homework.getIdDevoir(), homework.getDocumentAFaire(),
homework.getDonneLe(), homework.getEffectue(), homework.getInterrogation(),
homework.getRendreEnLigne(),))
self._db.commit() | [
"bot.db.entities.Homework.Homework"
] | [((316, 459), 'bot.db.entities.Homework.Homework', 'Homework', (['homework[1]', 'homework[2]', 'homework[3]', 'homework[4]', 'homework[5]', 'homework[6]', 'homework[7]', 'homework[8]', 'homework[9]', 'homework[10]'], {}), '(homework[1], homework[2], homework[3], homework[4], homework[5],\n homework[6], homework[7], homework[8], homework[9], homework[10])\n', (324, 459), False, 'from bot.db.entities.Homework import Homework\n')] |
import unittest
from atbash_cipher import AtbashCipher
test = AtbashCipher() #instantiate test Atbash cipher class
class AtbashCipherEncryptTests(unittest.TestCase):
def test_empty_string(self):
self.assertMultiLineEqual(test.encrypt(''), '')
def test_string_with_only_spaces(self):
self.assertMultiLineEqual(test.encrypt(' '), ' ')
def test_string_no_wrap_around(self):
self.assertMultiLineEqual(test.encrypt('abc'), 'zyx')
def test_string_wrap_around(self):
self.assertMultiLineEqual(
test.encrypt('wvu'),
'def')
def test_multi_word(self):
self.assertMultiLineEqual(test.encrypt('abc wvu'), 'zyx def')
#unlike a keyed cipher, Atbash takes no key, so no key-range validation tests are needed here
class AtbashCipherDecryptTests(unittest.TestCase):
def test_empty_string(self):
self.assertMultiLineEqual(test.decrypt(''), '')
def test_string_with_only_spaces(self):
self.assertMultiLineEqual(test.decrypt(' '), ' ')
def test_string_no_wrap_around(self):
self.assertMultiLineEqual(test.decrypt('zyx'), 'abc')
def test_string_wrap_around(self):
self.assertMultiLineEqual(
test.decrypt('def'),
'wvu')
def test_multi_word(self):
self.assertMultiLineEqual(test.decrypt('zyx def'), 'abc wvu')
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"atbash_cipher.AtbashCipher"
] | [((64, 78), 'atbash_cipher.AtbashCipher', 'AtbashCipher', ([], {}), '()\n', (76, 78), False, 'from atbash_cipher import AtbashCipher\n'), ((1465, 1480), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1478, 1480), False, 'import unittest\n'), ((1485, 1500), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1498, 1500), False, 'import unittest\n')] |
from tests.common import DummyPostData
from wtforms.fields import IntegerField
from wtforms.form import Form
class F(Form):
a = IntegerField()
b = IntegerField(default=48)
def test_integer_field():
form = F(DummyPostData(a=["v"], b=["-15"]))
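    # "v" is not coercible to int: field a keeps the raw input but its data stays None.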
assert form.a.data is None
assert form.a.raw_data == ["v"]
assert form.a() == """<input id="a" name="a" type="number" value="v">"""
assert form.b.data == -15
assert form.b() == """<input id="b" name="b" type="number" value="-15">"""
assert not form.a.validate(form)
assert form.b.validate(form)
form = F(DummyPostData(a=[], b=[""]))
assert form.a.data is None
assert form.a.raw_data == []
assert form.b.data is None
assert form.b.raw_data == [""]
assert not form.validate()
assert len(form.b.process_errors) == 1
assert len(form.b.errors) == 1
form = F(b=9)
assert form.b.data == 9
assert form.a._value() == ""
assert form.b._value() == "9"
form = F(DummyPostData(), data=dict(b="v"))
assert form.b.data is None
assert form.a._value() == ""
assert form.b._value() == ""
assert not form.validate()
assert len(form.b.process_errors) == 1
assert len(form.b.errors) == 1
| [
"wtforms.fields.IntegerField",
"tests.common.DummyPostData"
] | [((135, 149), 'wtforms.fields.IntegerField', 'IntegerField', ([], {}), '()\n', (147, 149), False, 'from wtforms.fields import IntegerField\n'), ((158, 182), 'wtforms.fields.IntegerField', 'IntegerField', ([], {'default': '(48)'}), '(default=48)\n', (170, 182), False, 'from wtforms.fields import IntegerField\n'), ((224, 257), 'tests.common.DummyPostData', 'DummyPostData', ([], {'a': "['v']", 'b': "['-15']"}), "(a=['v'], b=['-15'])\n", (237, 257), False, 'from tests.common import DummyPostData\n'), ((595, 622), 'tests.common.DummyPostData', 'DummyPostData', ([], {'a': '[]', 'b': "['']"}), "(a=[], b=[''])\n", (608, 622), False, 'from tests.common import DummyPostData\n'), ((989, 1004), 'tests.common.DummyPostData', 'DummyPostData', ([], {}), '()\n', (1002, 1004), False, 'from tests.common import DummyPostData\n')] |
import os
import argparse
import numpy as np
import tensorflow as tf
import tensorflow.keras as K
from sklearn.metrics import classification_report
from dataset import FLIRDataset
def grid_search(train_labels: str,
test_labels: str,
output:str,
res:tuple=(120, 160),
lazy:bool=True,
batch_size:int=16,
epochs:int=20):
"""
    Trains a single ResNet50V2 model on the FLIR dataset (the grid search currently covers only this model).
Params
------
train_labels: str
Path to training labels
test_labels: str
Path to testing labels
output: str
Path to output directory
res: tuple
Input resolution of network
lazy: bool
Whether to load data lazily in batches during training
batch_size: int
Batch size in case of lazy loading
epochs: int
Training epochs
"""
# Data
print("=> Loading data.")
train = FLIRDataset(train_labels, res=res, batch_size=batch_size)
test = FLIRDataset(test_labels, res=res, batch_size=batch_size)
# In eager loading mode, train on everything.
if not lazy:
X_train, y_train = train.get_all()
X_test, y_test = test.get_all()
X_train = np.concatenate([X_train, X_test], axis=0)
y_train = np.concatenate([y_train, y_test], axis=0)
def net(x, num_classes=1):
x = K.applications.resnet_v2.ResNet50V2(include_top=False, weights=None, input_shape=x.shape[1:])(x)
x = K.layers.Flatten()(x)
x = K.layers.Dense(num_classes, activation="softmax")(x)
return x
print("\n=> Training model.")
input_tensor = K.layers.Input((160, 120, 1))
output_tensor = net(input_tensor, num_classes=train.num_classes())
model = K.Model(input_tensor, output_tensor)
model.compile(optimizer="sgd",
loss="categorical_crossentropy",
metrics=["accuracy"])
# Train model
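    # Lazy mode streams batches from the FLIRDataset generator; eager mode fits on the
    # concatenated train + test arrays prepared above.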
if lazy:
model.fit(x=train,
epochs=epochs,
validation_data=train,
verbose=2)
else:
model.fit(x=X_train,
y=y_train,
epochs=epochs,
batch_size=batch_size,
verbose=2)
# Save weights
model.save_weights(os.path.join(output, "flir_pretrained_weights.h5"))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Train model on FLIR dataset.")
parser.add_argument("train", help="Directory containing training labels")
parser.add_argument("test", help="Directory containing testing labels")
parser.add_argument("out", help="Output directory for results")
parser.add_argument("epochs", help="Number of epochs")
parser.add_argument("-l", "--lazy", dest="lazy", help="Load data lazily", action="store_true")
args = vars(parser.parse_args())
grid_search(args["train"],
args["test"],
args["out"],
res=(120, 160),
lazy=bool(args["lazy"]),
epochs=int(args["epochs"]))
print("\n=> Finished.") | [
"argparse.ArgumentParser",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.applications.resnet_v2.ResNet50V2",
"dataset.FLIRDataset",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Input",
"os.path.join",
"numpy.concatenate",
"tensorflow.keras.layers.Flatten"
] | [((1006, 1063), 'dataset.FLIRDataset', 'FLIRDataset', (['train_labels'], {'res': 'res', 'batch_size': 'batch_size'}), '(train_labels, res=res, batch_size=batch_size)\n', (1017, 1063), False, 'from dataset import FLIRDataset\n'), ((1075, 1131), 'dataset.FLIRDataset', 'FLIRDataset', (['test_labels'], {'res': 'res', 'batch_size': 'batch_size'}), '(test_labels, res=res, batch_size=batch_size)\n', (1086, 1131), False, 'from dataset import FLIRDataset\n'), ((1715, 1744), 'tensorflow.keras.layers.Input', 'K.layers.Input', (['(160, 120, 1)'], {}), '((160, 120, 1))\n', (1729, 1744), True, 'import tensorflow.keras as K\n'), ((1828, 1864), 'tensorflow.keras.Model', 'K.Model', (['input_tensor', 'output_tensor'], {}), '(input_tensor, output_tensor)\n', (1835, 1864), True, 'import tensorflow.keras as K\n'), ((2477, 2544), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train model on FLIR dataset."""'}), "(description='Train model on FLIR dataset.')\n", (2500, 2544), False, 'import argparse\n'), ((1301, 1342), 'numpy.concatenate', 'np.concatenate', (['[X_train, X_test]'], {'axis': '(0)'}), '([X_train, X_test], axis=0)\n', (1315, 1342), True, 'import numpy as np\n'), ((1361, 1402), 'numpy.concatenate', 'np.concatenate', (['[y_train, y_test]'], {'axis': '(0)'}), '([y_train, y_test], axis=0)\n', (1375, 1402), True, 'import numpy as np\n'), ((2380, 2430), 'os.path.join', 'os.path.join', (['output', '"""flir_pretrained_weights.h5"""'], {}), "(output, 'flir_pretrained_weights.h5')\n", (2392, 2430), False, 'import os\n'), ((1448, 1545), 'tensorflow.keras.applications.resnet_v2.ResNet50V2', 'K.applications.resnet_v2.ResNet50V2', ([], {'include_top': '(False)', 'weights': 'None', 'input_shape': 'x.shape[1:]'}), '(include_top=False, weights=None,\n input_shape=x.shape[1:])\n', (1483, 1545), True, 'import tensorflow.keras as K\n'), ((1557, 1575), 'tensorflow.keras.layers.Flatten', 'K.layers.Flatten', ([], {}), '()\n', (1573, 1575), True, 'import tensorflow.keras as K\n'), ((1591, 1640), 'tensorflow.keras.layers.Dense', 'K.layers.Dense', (['num_classes'], {'activation': '"""softmax"""'}), "(num_classes, activation='softmax')\n", (1605, 1640), True, 'import tensorflow.keras as K\n')] |
"""
Module to read the query and other inputs
"""
from Bio import Entrez
from filter import filter_selector
def inputnow():
"""
Reads the inputs' values
:return: query
"""
# the email must be the user's individual/personal email (NOT an institutional email or a default email
# as this could lead to exceeding the maximum allowable frequency of requests per user, of 3 per second)
Entrez.email = "<EMAIL>"
# The maximum number of search results to be displayed could be the following default value or an input value < 100
results_number = 5
query = input("enter your search query: ")
filter_option = input("would you like to use advanced search filter? (yes/no): ")
if filter_option == "yes":
query = filter_selector(query)
return query, results_number
| [
"filter.filter_selector"
] | [((763, 785), 'filter.filter_selector', 'filter_selector', (['query'], {}), '(query)\n', (778, 785), False, 'from filter import filter_selector\n')] |
import sys, re
from datetime import date
version = sys.argv[1]
release_date = date.today().strftime('%Y-%m-%d')
major, minor, patch = version.split('.')
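# replace() rewrites a file in place, substituting every regex match of pattern with replacement.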
def replace(file_path, pattern, replacement):
updated = re.sub(pattern, replacement, open(file_path).read())
with open(file_path, 'w') as f:
f.write(updated)
# Update changelog
SEP = '---------------------'
NEXT = f'Next\n{SEP}'
changelog_header = f'{NEXT}\n\n{version} ({release_date})\n{SEP}'
replace('CHANGELOG.md', NEXT, changelog_header)
# Update Doxyfile
DOXY_VERSION = 'PROJECT_NUMBER = '
replace('Doxyfile', DOXY_VERSION + '.*', DOXY_VERSION + version)
# Update CMakeLists.txt
replace('CMakeLists.txt',
'''SET\\(CBOR_VERSION_MAJOR "0"\\)
SET\\(CBOR_VERSION_MINOR "7"\\)
SET\\(CBOR_VERSION_PATCH "0"\\)''',
f'''SET(CBOR_VERSION_MAJOR "{major}")
SET(CBOR_VERSION_MINOR "{minor}")
SET(CBOR_VERSION_PATCH "{patch}")''')
# Update Sphinx
replace('doc/source/conf.py',
"""version = '.*'
release = '.*'""",
f"""version = '{major}.{minor}'
release = '{major}.{minor}.{patch}'""")
| [
"datetime.date.today"
] | [((79, 91), 'datetime.date.today', 'date.today', ([], {}), '()\n', (89, 91), False, 'from datetime import date\n')] |
from OpenGLCffi.GL import params
@params(api='gl', prms=['equation'])
def glReferencePlaneSGIX(equation):
pass
| [
"OpenGLCffi.GL.params"
] | [((34, 69), 'OpenGLCffi.GL.params', 'params', ([], {'api': '"""gl"""', 'prms': "['equation']"}), "(api='gl', prms=['equation'])\n", (40, 69), False, 'from OpenGLCffi.GL import params\n')] |
from .modules import DropBlock, SEModule, SKConv2d, BlurPool2d, SplitAttentionModule
import torch.nn as nn
import collections
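# Every operation below subclasses nn.Sequential; OrderedDict entries whose value is
# None (e.g. DropBlock when dropblock is False) are filtered out before construction.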
class BasicOperation(nn.Sequential):
def __init__(self, in_channels, out_channels, stride, groups, bottleneck,
normalization, activation, dropblock, **kwargs):
channels = round(out_channels / bottleneck)
super().__init__(collections.OrderedDict(m for m in [
('conv1', nn.Conv2d(
in_channels, channels, kernel_size=3, padding=1,
stride=stride, groups=groups, bias=False)),
('norm1', normalization(channels)),
('drop1', None if not dropblock else DropBlock()),
('act1', activation(inplace=True)),
('conv2', nn.Conv2d(
channels, out_channels, kernel_size=3, padding=1,
stride=1, groups=1, bias=False)),
('norm2', normalization(out_channels)),
('drop2', None if not dropblock else DropBlock()),
] if m[1] is not None))
class BottleneckOperation(nn.Sequential):
def __init__(self, in_channels, out_channels, stride, groups, bottleneck,
normalization, activation, dropblock, **kwargs):
channels = round(out_channels / bottleneck * groups)
super().__init__(collections.OrderedDict(m for m in [
('conv1', nn.Conv2d(
in_channels, channels, kernel_size=1, padding=0,
stride=stride, groups=1, bias=False)),
('norm1', normalization(channels)),
('drop1', None if not dropblock else DropBlock()),
('act1', activation(inplace=True)),
('conv2', nn.Conv2d(
channels, channels, kernel_size=3, padding=1,
stride=1, groups=groups, bias=False)),
('norm2', normalization(channels)),
('drop2', None if not dropblock else DropBlock()),
('act2', activation(inplace=True)),
('conv3', nn.Conv2d(
channels, out_channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('norm3', normalization(out_channels)),
('drop3', None if not dropblock else DropBlock()),
] if m[1] is not None))
class SelectedKernelOperation(nn.Sequential):
def __init__(self, in_channels, out_channels, stride, radix, groups, bottleneck,
normalization, activation, dropblock, **kwargs):
channels = round(out_channels / bottleneck * groups)
super().__init__(collections.OrderedDict(m for m in [
('conv1', nn.Conv2d(
in_channels, channels, kernel_size=1, padding=0,
stride=stride, groups=1, bias=False)),
('norm1', normalization(channels)),
('drop1', None if not dropblock else DropBlock()),
('act1', activation(inplace=True)),
('conv2', SKConv2d(
channels, channels, kernel_size=3, padding=1,
stride=1, radix=radix, groups=groups)),
('norm2', normalization(channels)),
('drop2', None if not dropblock else DropBlock()),
('act2', activation(inplace=True)),
('conv3', nn.Conv2d(
channels, out_channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('norm3', normalization(out_channels)),
('drop3', None if not dropblock else DropBlock()),
] if m[1] is not None))
class PreActBasicOperation(nn.Sequential):
def __init__(self, in_channels, out_channels, stride, groups, bottleneck,
normalization, activation, dropblock, **kwargs):
channels = round(out_channels / bottleneck)
super().__init__(collections.OrderedDict(m for m in [
('norm1', normalization(in_channels)),
('drop1', None if not dropblock else DropBlock()),
('act1', activation(inplace=True)),
('conv1', nn.Conv2d(
in_channels, channels, kernel_size=3, padding=1,
stride=stride, groups=groups, bias=False)),
('norm2', normalization(channels)),
('drop2', None if not dropblock else DropBlock()),
('act2', activation(inplace=True)),
('conv2', nn.Conv2d(
channels, out_channels, kernel_size=3, padding=1,
stride=1, groups=1, bias=False)),
] if m[1] is not None))
class SingleActBasicOperation(nn.Sequential):
def __init__(self, in_channels, out_channels, stride, groups, bottleneck,
normalization, activation, dropblock, **kwargs):
channels = round(out_channels / bottleneck)
super().__init__(collections.OrderedDict(m for m in [
('norm1', normalization(in_channels)),
('conv1', nn.Conv2d(
in_channels, channels, kernel_size=3, padding=1,
stride=stride, groups=groups, bias=False)),
('norm2', normalization(channels)),
('drop2', None if not dropblock else DropBlock()),
('act2', activation(inplace=True)),
('conv2', nn.Conv2d(
channels, out_channels, kernel_size=3, padding=1,
stride=1, groups=1, bias=False)),
('norm3', normalization(out_channels)),
('drop3', None if not dropblock else DropBlock()),
] if m[1] is not None))
class SingleActBottleneckOperation(nn.Sequential):
def __init__(self, in_channels, out_channels, stride, groups, bottleneck,
normalization, activation, dropblock, **kwargs):
channels = round(out_channels / bottleneck * groups)
super().__init__(collections.OrderedDict(m for m in [
('norm1', normalization(in_channels)),
('conv1', nn.Conv2d(
in_channels, channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('norm2', normalization(channels)),
('drop2', None if not dropblock else DropBlock()),
('act2', activation(inplace=True)),
('conv2', nn.Conv2d(
channels, channels, kernel_size=3, padding=1,
stride=stride, groups=groups, bias=False)),
('norm3', normalization(channels)),
('drop3', None if not dropblock else DropBlock()),
('act3', activation(inplace=True)),
('conv3', nn.Conv2d(
channels, out_channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('norm4', normalization(out_channels)),
('drop4', None if not dropblock else DropBlock()),
] if m[1] is not None))
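# The "Tweaked" variants below keep stride 1 in all convolutions and downsample with a
# BlurPool2d layer inserted after the 3x3 stage instead of striding the 1x1 convolution.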
class TweakedBottleneckOperation(nn.Sequential):
def __init__(self, in_channels, out_channels, stride, groups, bottleneck,
normalization, activation, dropblock, **kwargs):
channels = round(out_channels / bottleneck)
super().__init__(collections.OrderedDict(m for m in [
('conv1', nn.Conv2d(
in_channels, channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('norm1', normalization(channels)),
('drop1', None if not dropblock else DropBlock()),
('act1', activation(inplace=True)),
('conv2', nn.Conv2d(
channels, channels, kernel_size=3, padding=1,
stride=1, groups=groups, bias=False)),
('norm2', normalization(channels)),
('drop2', None if not dropblock else DropBlock()),
('act2', activation(inplace=True)),
('pool', None if stride == 1 else BlurPool2d(channels, stride=stride)),
('conv3', nn.Conv2d(
channels, out_channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
            ('norm3', normalization(out_channels)),
('drop3', None if not dropblock else DropBlock()),
] if m[1] is not None))
class TweakedSlectedKernelOperation(nn.Sequential):
def __init__(self, in_channels, out_channels, stride, radix, groups, bottleneck,
normalization, activation, dropblock, **kwargs):
channels = round(out_channels / bottleneck)
super().__init__(collections.OrderedDict(m for m in [
('conv1', nn.Conv2d(
in_channels, channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('norm1', normalization(channels)),
('drop1', None if not dropblock else DropBlock()),
('act1', activation(inplace=True)),
('conv2', SKConv2d(
channels, channels, kernel_size=3, padding=1,
stride=1, radix=radix, groups=groups)),
('drop2', None if not dropblock else DropBlock()),
('pool', None if stride == 1 else BlurPool2d(channels, stride=stride)),
('conv3', nn.Conv2d(
channels, out_channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('norm3', normalization(out_channels)),
('drop3', None if not dropblock else DropBlock()),
] if m[1] is not None))
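# MobileNetOperation is an inverted-residual block: optional 1x1 expansion (skipped when
# expansion == 1), a depthwise convolution, an optional squeeze-and-excite module, and a
# final 1x1 projection to out_channels.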
class MobileNetOperation(nn.Sequential):
def __init__(self, in_channels, out_channels, kernel, stride, expansion,
normalization, activation, dropblock,
seoperation, sereduction, sesigmoid, **kwargs):
channels = int(in_channels * expansion)
modules = []
if in_channels != channels:
modules.extend([
('conv1', nn.Conv2d(
in_channels, channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('norm1', normalization(channels)),
('drop1', None if not dropblock else DropBlock()),
('act1', activation(inplace=True)),
])
modules.extend([
('conv2', nn.Conv2d(
channels, channels, kernel_size=kernel, padding=kernel // 2,
stride=stride, groups=channels, bias=False)),
('norm2', normalization(channels)),
('drop2', None if not dropblock else DropBlock()),
('semodule', None if not seoperation else SEModule(
channels, reduction=sereduction, activation=nn.ReLU, sigmoid=sesigmoid)),
('act2', activation(inplace=True)),
('conv3', nn.Conv2d(
channels, out_channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('norm3', normalization(out_channels)),
('drop3', None if not dropblock else DropBlock()),
])
super().__init__(collections.OrderedDict(m for m in modules if m[1] is not None))
class SplitAttentionOperation(nn.Sequential):
def __init__(self, in_channels, out_channels, stride, radix, groups, bottleneck,
normalization, activation, dropblock, **kwargs):
channels = round(out_channels / bottleneck)
super().__init__(collections.OrderedDict(m for m in [
('conv1', nn.Conv2d(
in_channels, channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('norm1', normalization(channels)),
('drop1', None if not dropblock else DropBlock()),
('act1', activation(inplace=True)),
('conv2', nn.Conv2d(
channels, channels * radix, kernel_size=3, padding=1,
stride=1, groups=groups * radix, bias=False)),
('norm2', normalization(channels * radix)),
('drop2', None if not dropblock else DropBlock()),
('act2', activation(inplace=True)),
('attention', SplitAttentionModule(
channels, radix=radix, groups=groups,
normalization=normalization, activation=activation)),
('downsample', None if stride == 1 else nn.AvgPool2d(
kernel_size=3, stride=stride, padding=1)),
('conv3', nn.Conv2d(
channels, out_channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('norm3', normalization(out_channels)),
('drop3', None if not dropblock else DropBlock()),
] if m[1] is not None))
class DenseNetOperation(nn.Sequential):
def __init__(self, in_channels, out_channels, stride, growth, expansion,
normalization, activation, dropblock, **kwargs):
if stride != 1:
super().__init__(collections.OrderedDict(m for m in [
('norm1', normalization(in_channels)),
('act1', activation(inplace=True)),
('conv1', nn.Conv2d(
in_channels, out_channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('pool1', nn.AvgPool2d(kernel_size=2, stride=stride)),
] if m[1] is not None))
else:
channels = growth * expansion
super().__init__(collections.OrderedDict(m for m in [
('norm1', normalization(in_channels)),
('drop1', None if not dropblock else DropBlock()),
('act1', activation(inplace=True)),
('conv1', nn.Conv2d(
in_channels, channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('norm2', normalization(channels)),
('drop2', None if not dropblock else DropBlock()),
('act2', activation(inplace=True)),
('conv2', nn.Conv2d(
channels, growth, kernel_size=3, padding=1,
stride=1, bias=False)),
] if m[1] is not None))
| [
"collections.OrderedDict",
"torch.nn.AvgPool2d",
"torch.nn.Conv2d"
] | [((11036, 11099), 'collections.OrderedDict', 'collections.OrderedDict', (['(m for m in modules if m[1] is not None)'], {}), '(m for m in modules if m[1] is not None)\n', (11059, 11099), False, 'import collections\n'), ((10257, 10375), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels', 'channels'], {'kernel_size': 'kernel', 'padding': '(kernel // 2)', 'stride': 'stride', 'groups': 'channels', 'bias': '(False)'}), '(channels, channels, kernel_size=kernel, padding=kernel // 2,\n stride=stride, groups=channels, bias=False)\n', (10266, 10375), True, 'import torch.nn as nn\n'), ((10750, 10845), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels', 'out_channels'], {'kernel_size': '(1)', 'padding': '(0)', 'stride': '(1)', 'groups': '(1)', 'bias': '(False)'}), '(channels, out_channels, kernel_size=1, padding=0, stride=1,\n groups=1, bias=False)\n', (10759, 10845), True, 'import torch.nn as nn\n'), ((9880, 9975), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'channels'], {'kernel_size': '(1)', 'padding': '(0)', 'stride': '(1)', 'groups': '(1)', 'bias': '(False)'}), '(in_channels, channels, kernel_size=1, padding=0, stride=1, groups\n =1, bias=False)\n', (9889, 9975), True, 'import torch.nn as nn\n'), ((461, 565), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'channels'], {'kernel_size': '(3)', 'padding': '(1)', 'stride': 'stride', 'groups': 'groups', 'bias': '(False)'}), '(in_channels, channels, kernel_size=3, padding=1, stride=stride,\n groups=groups, bias=False)\n', (470, 565), True, 'import torch.nn as nn\n'), ((784, 879), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels', 'out_channels'], {'kernel_size': '(3)', 'padding': '(1)', 'stride': '(1)', 'groups': '(1)', 'bias': '(False)'}), '(channels, out_channels, kernel_size=3, padding=1, stride=1,\n groups=1, bias=False)\n', (793, 879), True, 'import torch.nn as nn\n'), ((1408, 1507), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'channels'], {'kernel_size': '(1)', 'padding': '(0)', 'stride': 'stride', 'groups': '(1)', 'bias': '(False)'}), '(in_channels, channels, kernel_size=1, padding=0, stride=stride,\n groups=1, bias=False)\n', (1417, 1507), True, 'import torch.nn as nn\n'), ((1726, 1823), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels', 'channels'], {'kernel_size': '(3)', 'padding': '(1)', 'stride': '(1)', 'groups': 'groups', 'bias': '(False)'}), '(channels, channels, kernel_size=3, padding=1, stride=1, groups=\n groups, bias=False)\n', (1735, 1823), True, 'import torch.nn as nn\n'), ((2041, 2136), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels', 'out_channels'], {'kernel_size': '(1)', 'padding': '(0)', 'stride': '(1)', 'groups': '(1)', 'bias': '(False)'}), '(channels, out_channels, kernel_size=1, padding=0, stride=1,\n groups=1, bias=False)\n', (2050, 2136), True, 'import torch.nn as nn\n'), ((2676, 2775), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'channels'], {'kernel_size': '(1)', 'padding': '(0)', 'stride': 'stride', 'groups': '(1)', 'bias': '(False)'}), '(in_channels, channels, kernel_size=1, padding=0, stride=stride,\n groups=1, bias=False)\n', (2685, 2775), True, 'import torch.nn as nn\n'), ((3309, 3404), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels', 'out_channels'], {'kernel_size': '(1)', 'padding': '(0)', 'stride': '(1)', 'groups': '(1)', 'bias': '(False)'}), '(channels, out_channels, kernel_size=1, padding=0, stride=1,\n groups=1, bias=False)\n', (3318, 3404), True, 'import torch.nn as nn\n'), ((4090, 4194), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'channels'], {'kernel_size': '(3)', 'padding': '(1)', 'stride': 'stride', 
'groups': 'groups', 'bias': '(False)'}), '(in_channels, channels, kernel_size=3, padding=1, stride=stride,\n groups=groups, bias=False)\n', (4099, 4194), True, 'import torch.nn as nn\n'), ((4413, 4508), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels', 'out_channels'], {'kernel_size': '(3)', 'padding': '(1)', 'stride': '(1)', 'groups': '(1)', 'bias': '(False)'}), '(channels, out_channels, kernel_size=3, padding=1, stride=1,\n groups=1, bias=False)\n', (4422, 4508), True, 'import torch.nn as nn\n'), ((4967, 5071), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'channels'], {'kernel_size': '(3)', 'padding': '(1)', 'stride': 'stride', 'groups': 'groups', 'bias': '(False)'}), '(in_channels, channels, kernel_size=3, padding=1, stride=stride,\n groups=groups, bias=False)\n', (4976, 5071), True, 'import torch.nn as nn\n'), ((5290, 5385), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels', 'out_channels'], {'kernel_size': '(3)', 'padding': '(1)', 'stride': '(1)', 'groups': '(1)', 'bias': '(False)'}), '(channels, out_channels, kernel_size=3, padding=1, stride=1,\n groups=1, bias=False)\n', (5299, 5385), True, 'import torch.nn as nn\n'), ((5975, 6070), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'channels'], {'kernel_size': '(1)', 'padding': '(0)', 'stride': '(1)', 'groups': '(1)', 'bias': '(False)'}), '(in_channels, channels, kernel_size=1, padding=0, stride=1, groups\n =1, bias=False)\n', (5984, 6070), True, 'import torch.nn as nn\n'), ((6288, 6389), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels', 'channels'], {'kernel_size': '(3)', 'padding': '(1)', 'stride': 'stride', 'groups': 'groups', 'bias': '(False)'}), '(channels, channels, kernel_size=3, padding=1, stride=stride,\n groups=groups, bias=False)\n', (6297, 6389), True, 'import torch.nn as nn\n'), ((6608, 6703), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels', 'out_channels'], {'kernel_size': '(1)', 'padding': '(0)', 'stride': '(1)', 'groups': '(1)', 'bias': '(False)'}), '(channels, out_channels, kernel_size=1, padding=0, stride=1,\n groups=1, bias=False)\n', (6617, 6703), True, 'import torch.nn as nn\n'), ((7230, 7325), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'channels'], {'kernel_size': '(1)', 'padding': '(0)', 'stride': '(1)', 'groups': '(1)', 'bias': '(False)'}), '(in_channels, channels, kernel_size=1, padding=0, stride=1, groups\n =1, bias=False)\n', (7239, 7325), True, 'import torch.nn as nn\n'), ((7543, 7640), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels', 'channels'], {'kernel_size': '(3)', 'padding': '(1)', 'stride': '(1)', 'groups': 'groups', 'bias': '(False)'}), '(channels, channels, kernel_size=3, padding=1, stride=1, groups=\n groups, bias=False)\n', (7552, 7640), True, 'import torch.nn as nn\n'), ((7943, 8038), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels', 'out_channels'], {'kernel_size': '(1)', 'padding': '(0)', 'stride': '(1)', 'groups': '(1)', 'bias': '(False)'}), '(channels, out_channels, kernel_size=1, padding=0, stride=1,\n groups=1, bias=False)\n', (7952, 8038), True, 'import torch.nn as nn\n'), ((8571, 8666), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'channels'], {'kernel_size': '(1)', 'padding': '(0)', 'stride': '(1)', 'groups': '(1)', 'bias': '(False)'}), '(in_channels, channels, kernel_size=1, padding=0, stride=1, groups\n =1, bias=False)\n', (8580, 8666), True, 'import torch.nn as nn\n'), ((9186, 9281), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels', 'out_channels'], {'kernel_size': '(1)', 'padding': '(0)', 'stride': '(1)', 'groups': '(1)', 'bias': '(False)'}), '(channels, out_channels, kernel_size=1, padding=0, 
stride=1,\n groups=1, bias=False)\n', (9195, 9281), True, 'import torch.nn as nn\n'), ((11448, 11543), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'channels'], {'kernel_size': '(1)', 'padding': '(0)', 'stride': '(1)', 'groups': '(1)', 'bias': '(False)'}), '(in_channels, channels, kernel_size=1, padding=0, stride=1, groups\n =1, bias=False)\n', (11457, 11543), True, 'import torch.nn as nn\n'), ((11761, 11873), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels', '(channels * radix)'], {'kernel_size': '(3)', 'padding': '(1)', 'stride': '(1)', 'groups': '(groups * radix)', 'bias': '(False)'}), '(channels, channels * radix, kernel_size=3, padding=1, stride=1,\n groups=groups * radix, bias=False)\n', (11770, 11873), True, 'import torch.nn as nn\n'), ((12402, 12497), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels', 'out_channels'], {'kernel_size': '(1)', 'padding': '(0)', 'stride': '(1)', 'groups': '(1)', 'bias': '(False)'}), '(channels, out_channels, kernel_size=1, padding=0, stride=1,\n groups=1, bias=False)\n', (12411, 12497), True, 'import torch.nn as nn\n'), ((12305, 12358), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', ([], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)'}), '(kernel_size=3, stride=stride, padding=1)\n', (12317, 12358), True, 'import torch.nn as nn\n'), ((13101, 13199), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'kernel_size': '(1)', 'padding': '(0)', 'stride': '(1)', 'groups': '(1)', 'bias': '(False)'}), '(in_channels, out_channels, kernel_size=1, padding=0, stride=1,\n groups=1, bias=False)\n', (13110, 13199), True, 'import torch.nn as nn\n'), ((13268, 13310), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', ([], {'kernel_size': '(2)', 'stride': 'stride'}), '(kernel_size=2, stride=stride)\n', (13280, 13310), True, 'import torch.nn as nn\n'), ((13679, 13774), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'channels'], {'kernel_size': '(1)', 'padding': '(0)', 'stride': '(1)', 'groups': '(1)', 'bias': '(False)'}), '(in_channels, channels, kernel_size=1, padding=0, stride=1, groups\n =1, bias=False)\n', (13688, 13774), True, 'import torch.nn as nn\n'), ((14016, 14091), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels', 'growth'], {'kernel_size': '(3)', 'padding': '(1)', 'stride': '(1)', 'bias': '(False)'}), '(channels, growth, kernel_size=3, padding=1, stride=1, bias=False)\n', (14025, 14091), True, 'import torch.nn as nn\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon May 4 09:33:16 2020
@author: jvergere
Ideas: Something similar to the Iridium Constellation:
66 Sats
781 km (7159 semimajor axis)
86.4 inclination
6 Orbit planes 30 degrees apart
11 in each plane
"""
import datetime as dt
import numpy as np
import os
#Need to cleanup this file before running each time,
#or refactor code to avoid writing to file in append mode
if os.path.exists("MaxOutageData.txt"):
os.remove("MaxOutageData.txt")
from comtypes.client import CreateObject # Will allow you to launch STK
#from comtypes.client import GetActiveObject #Will allow you to connect a running instance of STK
#Start the application, it will return a pointer to the Application Interface
app = CreateObject("STK12.Application")
#app = GetActiveObject("STK12.Application")
#app is a pointer to IAgUiApplication
#type info is available with python builtin type method
#type(app)
#More info is available via python built in dir method, which will list
#all the available properties and methods available
#dir(app)
#Additional useful information is available via the python builtin help
#help(app)
app.Visible = True
app.UserControl = True
root = app.Personality2 #root ->IAgStkObjectRoot
#These are not available to import until this point if this is the first time
#running STK via COM with python....it won't hurt to leave them there, but after running once they can be
#included at the top with all the other import statements
from comtypes.gen import STKUtil
from comtypes.gen import STKObjects
root.NewScenario("NewTestScenario")
scenario = root.CurrentScenario #scenario -> IAgStkObject
scenario2 = scenario.QueryInterface(STKObjects.IAgScenario) #scenaro2 -> IAgScenario
scenario2.StartTime = "1 Jun 2016 16:00:00.000"
scenario2.StopTime = "2 Jun 2016 16:00:00.000"
root.Rewind()
#Insert Facilities from text file using Connect. Each line of the text file is
#formatted:
#FacName,Longitude,Latitude
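#For example, a line could look like this (illustrative values only, not taken from the real input file):
#Lafayette,-86.9,40.4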
with open("Facilities.txt", "r") as faclist:
for line in faclist:
facData = line.strip().split(",")
insertNewFacCmd = "New / */Facility {}".format(facData[0])
root.ExecuteCommand(insertNewFacCmd)
setPositionCmd = "SetPosition */Facility/{} Geodetic {} {} Terrain".format(facData[0], facData[2], facData[1])
root.ExecuteCommand(setPositionCmd)
setColorCommand = "Graphics */Facility/{} SetColor blue".format(facData[0])
root.ExecuteCommand(setColorCommand)
#Create sensor constellation, used later to hold all the sensor objects
sensorConst = scenario.Children.New(STKObjects.eConstellation, "SensorConst")
sensorConst2 = sensorConst.QueryInterface(STKObjects.IAgConstellation)
#Build satellite constellation, attach sensors, assign sensor to constellation object
i = 1
for RAAN in range(0,180,45): # 4 orbit planes
j = 1
for trueAnomaly in range(0,360,45): # 8 sats per plane
#insert satellite
newSat = scenario.Children.New(STKObjects.eSatellite, "Sat{}{}".format(i,j))
newSat2 = newSat.QueryInterface(STKObjects.IAgSatellite)
#change some basic display attributes
newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Color = 65535
newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Line.Width = STKObjects.e1
newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Inherit = False
newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesOrbit).IsGroundTrackVisible = False
#Buildup Initial State using TwoBody Propagator and Classical Orbital Elements
keplarian = newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).InitialState.Representation.ConvertTo(STKUtil.eOrbitStateClassical).QueryInterface(STKObjects.IAgOrbitStateClassical)
        keplarian.SizeShapeType = STKObjects.eSizeShapeSemimajorAxis
keplarian.SizeShape.QueryInterface(STKObjects.IAgClassicalSizeShapeSemimajorAxis).SemiMajorAxis = 7159
keplarian.SizeShape.QueryInterface(STKObjects.IAgClassicalSizeShapeSemimajorAxis).Eccentricity = 0
keplarian.Orientation.Inclination = 86.4
keplarian.Orientation.ArgOfPerigee = 0
keplarian.Orientation.AscNodeType = STKObjects.eAscNodeRAAN
keplarian.Orientation.AscNode.QueryInterface(STKObjects.IAgOrientationAscNodeRAAN).Value = RAAN
keplarian.LocationType = STKObjects.eLocationTrueAnomaly
keplarian.Location.QueryInterface(STKObjects.IAgClassicalLocationTrueAnomaly).Value = trueAnomaly + (45/2)*(i%2) #Stagger TrueAnomalies for every other orbital plane
newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).InitialState.Representation.Assign(keplarian)
newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).Propagate()
#Attach sensors to each satellite
sensor = newSat.Children.New(STKObjects.eSensor,"Sensor{}{}".format(i,j))
sensor2 = sensor.QueryInterface(STKObjects.IAgSensor)
sensor2.CommonTasks.SetPatternSimpleConic(62.5, 2)
#Add the sensor to the SensorConstellation
sensorConst2.Objects.Add("Satellite/Sat{0}{1}/Sensor/Sensor{0}{1}".format(i,j))
        #Adjust the translucency of the sensor projections
sensor2.VO.PercentTranslucency = 75
sensor2.Graphics.LineStyle = STKUtil.eDotted
j+=1
i+=1
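#The nested loops above create 4 RAAN values x 8 true anomalies = 32 satellites,
#each carrying one sensor that has been added to the SensorConst constellation.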
#Create a Chain object for each Facility to the constellation.
facCount = scenario.Children.GetElements(STKObjects.eFacility).Count
for i in range(facCount):
#Create Chain
facName = scenario.Children.GetElements(STKObjects.eFacility).Item(i).InstanceName
chain = scenario.Children.New(STKObjects.eChain, "{}ToSensorConst".format(facName))
chain2 = chain.QueryInterface(STKObjects.IAgChain)
#Modify some display properties
chain2.Graphics.Animation.Color = 65280
chain2.Graphics.Animation.LineWidth = STKObjects.e1
chain2.Graphics.Animation.IsHighlightVisible = False
#Add objects to the chain
chain2.Objects.Add("Facility/{}".format(facName))
chain2.Objects.Add("Constellation/SensorConst")
#Get complete chain access data
compAcc = chain.DataProviders.Item("Complete Access").QueryInterface(STKObjects.IAgDataPrvInterval).Exec(scenario2.StartTime,scenario2.StopTime)
el = compAcc.DataSets.ElementNames
numRows = compAcc.DataSets.RowCount
maxOutage = []
#Save out the report to a text file
with open("{}CompleteChainAccess.txt".format(facName),"w") as dataFile:
dataFile.write("{},{},{},{}\n".format(el[0],el[1],el[2],el[3]))
for row in range(numRows):
rowData = compAcc.DataSets.GetRow(row)
dataFile.write("{},{},{},{}\n".format(rowData[0],rowData[1],rowData[2],rowData[3]))
dataFile.close()
#Get max outage time for each chain, print to console and save to file
with open("MaxOutageData.txt", "a") as outageFile:
if numRows == 1:
outageFile.write("{},NA,NA,NA\n".format(facName))
print("{}: No Outage".format(facName))
else:
#Get StartTimes and StopTimes as lists
startTimes = list(compAcc.DataSets.GetDataSetByName("Start Time").GetValues())
stopTimes = list(compAcc.DataSets.GetDataSetByName("Stop Time").GetValues())
#convert to from strings to datetimes
startDatetimes = np.array([dt.datetime.strptime(startTime[:-3], "%d %b %Y %H:%M:%S.%f") for startTime in startTimes])
stopDatetimes = np.array([dt.datetime.strptime(stopTime[:-3], "%d %b %Y %H:%M:%S.%f") for stopTime in stopTimes])
outages = startDatetimes[1:] - stopDatetimes[:-1]
maxOutage = np.amax(outages).total_seconds()
start = stopTimes[np.argmax(outages)]
stop = startTimes[np.argmax(outages)+1]
outageFile.write("{},{},{},{}\n".format(facName,maxOutage,start,stop))
print("{}: {} seconds from {} until {}".format(facName, maxOutage, start, stop))
root.Rewind()
root.Save() | [
"os.remove",
"numpy.argmax",
"os.path.exists",
"numpy.amax",
"datetime.datetime.strptime",
"comtypes.client.CreateObject"
] | [((433, 468), 'os.path.exists', 'os.path.exists', (['"""MaxOutageData.txt"""'], {}), "('MaxOutageData.txt')\n", (447, 468), False, 'import os\n'), ((763, 796), 'comtypes.client.CreateObject', 'CreateObject', (['"""STK12.Application"""'], {}), "('STK12.Application')\n", (775, 796), False, 'from comtypes.client import CreateObject\n'), ((474, 504), 'os.remove', 'os.remove', (['"""MaxOutageData.txt"""'], {}), "('MaxOutageData.txt')\n", (483, 504), False, 'import os\n'), ((8044, 8062), 'numpy.argmax', 'np.argmax', (['outages'], {}), '(outages)\n', (8053, 8062), True, 'import numpy as np\n'), ((7665, 7725), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['startTime[:-3]', '"""%d %b %Y %H:%M:%S.%f"""'], {}), "(startTime[:-3], '%d %b %Y %H:%M:%S.%f')\n", (7685, 7725), True, 'import datetime as dt\n'), ((7794, 7853), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['stopTime[:-3]', '"""%d %b %Y %H:%M:%S.%f"""'], {}), "(stopTime[:-3], '%d %b %Y %H:%M:%S.%f')\n", (7814, 7853), True, 'import datetime as dt\n'), ((7981, 7997), 'numpy.amax', 'np.amax', (['outages'], {}), '(outages)\n', (7988, 7997), True, 'import numpy as np\n'), ((8094, 8112), 'numpy.argmax', 'np.argmax', (['outages'], {}), '(outages)\n', (8103, 8112), True, 'import numpy as np\n')] |
#####################################################
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright 2019 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################
####################################################
# IMPORT STATEMENTS
####################################################
# >>>>>> Native Imports <<<<<<<
import os
# >>>>>> Package Imports <<<<<<<
import tensorflow as tf
import csv
# >>>>>> Local Imports <<<<<<<
from retrograph.models import tokenization
####################################################
# CODE
####################################################
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
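# Illustrative only (not part of the original file): a single sentence-pair example
# could be built as
#   InputExample(guid="train-1", text_a="he said hi .", text_b="he waved .", label="1")
# where the guid, sentences and label are made-up values rather than real GLUE data.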
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
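# Rough usage sketch (an assumption about how these processors are driven, not code
# from the original training script):
#
#   processor = MrpcProcessor()                 # or ColaProcessor(), QQPProcessor(), ...
#   train_examples = processor.get_train_examples("/path/to/glue/MRPC")  # hypothetical path
#   label_list = processor.get_labels()
#
# The resulting InputExample objects are then converted into model input features.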
class XnliProcessor(DataProcessor):
"""Processor for the XNLI data set."""
def __init__(self):
self.language = "zh"
def get_train_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(
os.path.join(data_dir, "multinli",
"multinli.train.%s.tsv" % self.language))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "train-%d" % (i)
text_a = tokenization.convert_to_unicode(line[0])
text_b = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[2])
if label == tokenization.convert_to_unicode("contradictory"):
label = tokenization.convert_to_unicode("contradiction")
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "dev-%d" % (i)
language = tokenization.convert_to_unicode(line[0])
if language != tokenization.convert_to_unicode(self.language):
continue
text_a = tokenization.convert_to_unicode(line[6])
text_b = tokenization.convert_to_unicode(line[7])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir, matched=True):
"""See base class."""
if matched:
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
else:
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")),
"dev_mismatched")
def get_test_examples(self, data_dir, matched=True):
"""See base class."""
if matched:
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")
else:
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_mismatched.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[8])
text_b = tokenization.convert_to_unicode(line[9])
if set_type == "test":
label = "contradiction"
else:
label = tokenization.convert_to_unicode(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class DiagnosticProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "diagnostic.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[1])
text_b = tokenization.convert_to_unicode(line[2])
if set_type == "test":
label = "contradiction"
else:
label = tokenization.convert_to_unicode(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class WNLIProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")),
"dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[1])
text_b = tokenization.convert_to_unicode(line[2])
if set_type == "test":
label = "0"
else:
label = tokenization.convert_to_unicode(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[3])
text_b = tokenization.convert_to_unicode(line[4])
if set_type == "test":
label = "0"
else:
label = tokenization.convert_to_unicode(line[0])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# Only the test set has a header
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
'''Added by Anne'''
class SST2Processor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[0])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class QQPProcessor(DataProcessor):
"""Processor for the QQP data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
text_b = tokenization.convert_to_unicode(line[2])
label = "0"
else:
if len(line) != 6:
# there is a problematic line
print(line)
continue
text_a = tokenization.convert_to_unicode(line[3])
text_b = tokenization.convert_to_unicode(line[4])
label = tokenization.convert_to_unicode(line[5])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QNLIProcessor(DataProcessor):
"""Processor for the QQP data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[1])
text_b = tokenization.convert_to_unicode(line[2])
if set_type == "test":
label = "entailment"
else:
label = tokenization.convert_to_unicode(line[3])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class STSBProcessor(DataProcessor):
"""Processor for the STS-B data set."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
if set_type == 'test':
text_a = tokenization.convert_to_unicode(line[-2])
text_b = tokenization.convert_to_unicode(line[-1])
label = 0.0
else:
text_a = tokenization.convert_to_unicode(line[-3])
text_b = tokenization.convert_to_unicode(line[-2])
label = float(line[-1])
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class RTEProcessor(DataProcessor):
"""Processor for the RTE data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[1])
text_b = tokenization.convert_to_unicode(line[2])
if set_type == "test":
label = "entailment"
else:
label = tokenization.convert_to_unicode(line[3])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class SICKEntailmentProcessor(DataProcessor):
"""Processor for the SICK data set (SentEval version)."""
def loadFile(self, fpath):
skipFirstLine = True
sick_data = {'X_A': [], 'X_B': [], 'y': []}
    with open(fpath, 'r', encoding='utf-8') as f:
for line in f:
if skipFirstLine:
skipFirstLine = False
else:
text = line.strip().split('\t')
sick_data['X_A'].append(text[1].split())
sick_data['X_B'].append(text[2].split())
sick_data['y'].append(text[4])
return sick_data
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_train.txt')), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_trial.txt')), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_test_annotated.txt')), "test")
def get_labels(self):
"""See base class."""
return ['CONTRADICTION', 'NEUTRAL', 'ENTAILMENT']
  def _create_examples(self, data, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for i in range(len(data['y'])):
      guid = "%s-%s" % (set_type, str(i))
      # loadFile stores tokenized sentences, so join the tokens back into strings
      text_a = tokenization.convert_to_unicode(" ".join(data['X_A'][i]))
      text_b = tokenization.convert_to_unicode(" ".join(data['X_B'][i]))
      label = tokenization.convert_to_unicode(data['y'][i])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples
class TRECProcessor(DataProcessor):
"""Processor for the TREC data set (SentEval version)."""
def loadFile(self, fpath):
trec_data = {'X': [], 'y': []}
    with open(fpath, 'r', encoding='latin-1') as f:
for line in f:
target, sample = line.strip().split(':', 1)
sample = sample.split(' ', 1)[1].split()
trec_data['X'].append(sample)
trec_data['y'].append(target)
return trec_data
  def get_train_examples(self, data_dir):
    """See base class."""
    data = self.loadFile(os.path.join(data_dir, 'train_5500.label'))
    split_index = int(len(data['X']) * 0.7)
    train_split = {'X': data['X'][:split_index], 'y': data['y'][:split_index]}
    return self._create_examples(train_split, "train")
  def get_dev_examples(self, data_dir):
    """See base class."""
    data = self.loadFile(os.path.join(data_dir, 'train_5500.label'))
    split_index = int(len(data['X']) * 0.7)
    dev_split = {'X': data['X'][split_index:], 'y': data['y'][split_index:]}
    return self._create_examples(dev_split, "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self.loadFile(os.path.join(data_dir, 'TREC_10.label')), "test")
def get_labels(self):
"""See base class."""
return ['ABBR', 'DESC', 'ENTY', 'HUM', 'LOC', 'NUM']
  def _create_examples(self, data, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for i in range(len(data['y'])):
      guid = "%s-%s" % (set_type, str(i))
      # loadFile stores tokenized questions, so join the tokens back into a string
      text_a = tokenization.convert_to_unicode(" ".join(data['X'][i]))
      label = tokenization.convert_to_unicode(data['y'][i])
      examples.append(
          InputExample(guid=guid, text_a=text_a, label=label))
    return examples
####################################################
# MAIN
####################################################
# EOF
| [
"os.open",
"csv.reader",
"retrograph.models.tokenization.convert_to_unicode",
"tensorflow.gfile.Open",
"os.path.join"
] | [((2736, 2766), 'tensorflow.gfile.Open', 'tf.gfile.Open', (['input_file', '"""r"""'], {}), "(input_file, 'r')\n", (2749, 2766), True, 'import tensorflow as tf\n'), ((2788, 2838), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""\t"""', 'quotechar': 'quotechar'}), "(f, delimiter='\\t', quotechar=quotechar)\n", (2798, 2838), False, 'import csv\n'), ((3160, 3235), 'os.path.join', 'os.path.join', (['data_dir', '"""multinli"""', "('multinli.train.%s.tsv' % self.language)"], {}), "(data_dir, 'multinli', 'multinli.train.%s.tsv' % self.language)\n", (3172, 3235), False, 'import os\n'), ((3394, 3434), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), '(line[0])\n', (3425, 3434), False, 'from retrograph.models import tokenization\n'), ((3450, 3490), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), '(line[1])\n', (3481, 3490), False, 'from retrograph.models import tokenization\n'), ((3505, 3545), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[2]'], {}), '(line[2])\n', (3536, 3545), False, 'from retrograph.models import tokenization\n'), ((3894, 3932), 'os.path.join', 'os.path.join', (['data_dir', '"""xnli.dev.tsv"""'], {}), "(data_dir, 'xnli.dev.tsv')\n", (3906, 3932), False, 'import os\n'), ((4070, 4110), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), '(line[0])\n', (4101, 4110), False, 'from retrograph.models import tokenization\n'), ((4212, 4252), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[6]'], {}), '(line[6])\n', (4243, 4252), False, 'from retrograph.models import tokenization\n'), ((4268, 4308), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[7]'], {}), '(line[7])\n', (4299, 4308), False, 'from retrograph.models import tokenization\n'), ((4323, 4363), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), '(line[1])\n', (4354, 4363), False, 'from retrograph.models import tokenization\n'), ((5968, 6008), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[8]'], {}), '(line[8])\n', (5999, 6008), False, 'from retrograph.models import tokenization\n'), ((6024, 6064), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[9]'], {}), '(line[9])\n', (6055, 6064), False, 'from retrograph.models import tokenization\n'), ((6993, 7033), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), '(line[1])\n', (7024, 7033), False, 'from retrograph.models import tokenization\n'), ((7049, 7089), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[2]'], {}), '(line[2])\n', (7080, 7089), False, 'from retrograph.models import tokenization\n'), ((8319, 8359), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), '(line[1])\n', (8350, 8359), False, 'from retrograph.models import tokenization\n'), ((8375, 8415), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[2]'], {}), '(line[2])\n', (8406, 8415), False, 'from retrograph.models import tokenization\n'), ((9582, 9622), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', 
(['line[3]'], {}), '(line[3])\n', (9613, 9622), False, 'from retrograph.models import tokenization\n'), ((9638, 9678), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[4]'], {}), '(line[4])\n', (9669, 9678), False, 'from retrograph.models import tokenization\n'), ((15033, 15073), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), '(line[1])\n', (15064, 15073), False, 'from retrograph.models import tokenization\n'), ((15089, 15129), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[2]'], {}), '(line[2])\n', (15120, 15129), False, 'from retrograph.models import tokenization\n'), ((17658, 17698), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), '(line[1])\n', (17689, 17698), False, 'from retrograph.models import tokenization\n'), ((17714, 17754), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[2]'], {}), '(line[2])\n', (17745, 17754), False, 'from retrograph.models import tokenization\n'), ((18222, 18259), 'os.open', 'os.open', (['fpath', '"""r"""'], {'encoding': '"""utf-8"""'}), "(fpath, 'r', encoding='utf-8')\n", (18229, 18259), False, 'import os\n'), ((19397, 19441), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (["dict['X_A']"], {}), "(dict['X_A'])\n", (19428, 19441), False, 'from retrograph.models import tokenization\n'), ((19457, 19501), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (["dict['X_B']"], {}), "(dict['X_B'])\n", (19488, 19501), False, 'from retrograph.models import tokenization\n'), ((19516, 19558), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (["dict['y']"], {}), "(dict['y'])\n", (19547, 19558), False, 'from retrograph.models import tokenization\n'), ((19852, 19891), 'os.open', 'os.open', (['fpath', '"""r"""'], {'encoding': '"""latin-1"""'}), "(fpath, 'r', encoding='latin-1')\n", (19859, 19891), False, 'import os\n'), ((20211, 20253), 'os.path.join', 'os.path.join', (['data_dir', '"""train_5500.label"""'], {}), "(data_dir, 'train_5500.label')\n", (20223, 20253), False, 'import os\n'), ((20441, 20483), 'os.path.join', 'os.path.join', (['data_dir', '"""train_5500.label"""'], {}), "(data_dir, 'train_5500.label')\n", (20453, 20483), False, 'import os\n'), ((21070, 21112), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (["dict['X']"], {}), "(dict['X'])\n", (21101, 21112), False, 'from retrograph.models import tokenization\n'), ((21127, 21169), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (["dict['y']"], {}), "(dict['y'])\n", (21158, 21169), False, 'from retrograph.models import tokenization\n'), ((3564, 3612), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['"""contradictory"""'], {}), "('contradictory')\n", (3595, 3612), False, 'from retrograph.models import tokenization\n'), ((3630, 3678), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['"""contradiction"""'], {}), "('contradiction')\n", (3661, 3678), False, 'from retrograph.models import tokenization\n'), ((4132, 4178), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['self.language'], {}), '(self.language)\n', (4163, 4178), False, 'from 
retrograph.models import tokenization\n'), ((4814, 4849), 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), "(data_dir, 'train.tsv')\n", (4826, 4849), False, 'import os\n'), ((6154, 6195), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[-1]'], {}), '(line[-1])\n', (6185, 6195), False, 'from retrograph.models import tokenization\n'), ((6549, 6589), 'os.path.join', 'os.path.join', (['data_dir', '"""diagnostic.tsv"""'], {}), "(data_dir, 'diagnostic.tsv')\n", (6561, 6589), False, 'import os\n'), ((7179, 7220), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[-1]'], {}), '(line[-1])\n', (7210, 7220), False, 'from retrograph.models import tokenization\n'), ((7566, 7601), 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), "(data_dir, 'train.tsv')\n", (7578, 7601), False, 'import os\n'), ((7737, 7770), 'os.path.join', 'os.path.join', (['data_dir', '"""dev.tsv"""'], {}), "(data_dir, 'dev.tsv')\n", (7749, 7770), False, 'import os\n'), ((7913, 7947), 'os.path.join', 'os.path.join', (['data_dir', '"""test.tsv"""'], {}), "(data_dir, 'test.tsv')\n", (7925, 7947), False, 'import os\n'), ((8493, 8534), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[-1]'], {}), '(line[-1])\n', (8524, 8534), False, 'from retrograph.models import tokenization\n'), ((8876, 8911), 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), "(data_dir, 'train.tsv')\n", (8888, 8911), False, 'import os\n'), ((9047, 9080), 'os.path.join', 'os.path.join', (['data_dir', '"""dev.tsv"""'], {}), "(data_dir, 'dev.tsv')\n", (9059, 9080), False, 'import os\n'), ((9215, 9249), 'os.path.join', 'os.path.join', (['data_dir', '"""test.tsv"""'], {}), "(data_dir, 'test.tsv')\n", (9227, 9249), False, 'import os\n'), ((9756, 9796), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), '(line[0])\n', (9787, 9796), False, 'from retrograph.models import tokenization\n'), ((10138, 10173), 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), "(data_dir, 'train.tsv')\n", (10150, 10173), False, 'import os\n'), ((10309, 10342), 'os.path.join', 'os.path.join', (['data_dir', '"""dev.tsv"""'], {}), "(data_dir, 'dev.tsv')\n", (10321, 10342), False, 'import os\n'), ((10477, 10511), 'os.path.join', 'os.path.join', (['data_dir', '"""test.tsv"""'], {}), "(data_dir, 'test.tsv')\n", (10489, 10511), False, 'import os\n'), ((10937, 10977), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), '(line[1])\n', (10968, 10977), False, 'from retrograph.models import tokenization\n'), ((11027, 11067), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[3]'], {}), '(line[3])\n', (11058, 11067), False, 'from retrograph.models import tokenization\n'), ((11084, 11124), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), '(line[1])\n', (11115, 11124), False, 'from retrograph.models import tokenization\n'), ((11482, 11517), 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), "(data_dir, 'train.tsv')\n", (11494, 11517), False, 'import os\n'), ((11651, 11684), 'os.path.join', 'os.path.join', (['data_dir', '"""dev.tsv"""'], {}), "(data_dir, 'dev.tsv')\n", (11663, 11684), False, 'import os\n'), ((11817, 11851), 'os.path.join', 'os.path.join', 
(['data_dir', '"""test.tsv"""'], {}), "(data_dir, 'test.tsv')\n", (11829, 11851), False, 'import os\n'), ((12215, 12255), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), '(line[1])\n', (12246, 12255), False, 'from retrograph.models import tokenization\n'), ((12305, 12345), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), '(line[0])\n', (12336, 12345), False, 'from retrograph.models import tokenization\n'), ((12362, 12402), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), '(line[1])\n', (12393, 12402), False, 'from retrograph.models import tokenization\n'), ((12738, 12773), 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), "(data_dir, 'train.tsv')\n", (12750, 12773), False, 'import os\n'), ((12909, 12942), 'os.path.join', 'os.path.join', (['data_dir', '"""dev.tsv"""'], {}), "(data_dir, 'dev.tsv')\n", (12921, 12942), False, 'import os\n'), ((13077, 13111), 'os.path.join', 'os.path.join', (['data_dir', '"""test.tsv"""'], {}), "(data_dir, 'test.tsv')\n", (13089, 13111), False, 'import os\n'), ((13514, 13554), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), '(line[1])\n', (13545, 13554), False, 'from retrograph.models import tokenization\n'), ((13572, 13612), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[2]'], {}), '(line[2])\n', (13603, 13612), False, 'from retrograph.models import tokenization\n'), ((13770, 13810), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[3]'], {}), '(line[3])\n', (13801, 13810), False, 'from retrograph.models import tokenization\n'), ((13828, 13868), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[4]'], {}), '(line[4])\n', (13859, 13868), False, 'from retrograph.models import tokenization\n'), ((13885, 13925), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[5]'], {}), '(line[5])\n', (13916, 13925), False, 'from retrograph.models import tokenization\n'), ((14266, 14301), 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), "(data_dir, 'train.tsv')\n", (14278, 14301), False, 'import os\n'), ((14437, 14470), 'os.path.join', 'os.path.join', (['data_dir', '"""dev.tsv"""'], {}), "(data_dir, 'dev.tsv')\n", (14449, 14470), False, 'import os\n'), ((14605, 14639), 'os.path.join', 'os.path.join', (['data_dir', '"""test.tsv"""'], {}), "(data_dir, 'test.tsv')\n", (14617, 14639), False, 'import os\n'), ((15216, 15256), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[3]'], {}), '(line[3])\n', (15247, 15256), False, 'from retrograph.models import tokenization\n'), ((15581, 15616), 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), "(data_dir, 'train.tsv')\n", (15593, 15616), False, 'import os\n'), ((15750, 15783), 'os.path.join', 'os.path.join', (['data_dir', '"""dev.tsv"""'], {}), "(data_dir, 'dev.tsv')\n", (15762, 15783), False, 'import os\n'), ((15916, 15950), 'os.path.join', 'os.path.join', (['data_dir', '"""test.tsv"""'], {}), "(data_dir, 'test.tsv')\n", (15928, 15950), False, 'import os\n'), ((16280, 16321), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[-2]'], {}), '(line[-2])\n', (16311, 16321), False, 
'from retrograph.models import tokenization\n'), ((16339, 16380), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[-1]'], {}), '(line[-1])\n', (16370, 16380), False, 'from retrograph.models import tokenization\n'), ((16430, 16471), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[-3]'], {}), '(line[-3])\n', (16461, 16471), False, 'from retrograph.models import tokenization\n'), ((16489, 16530), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[-2]'], {}), '(line[-2])\n', (16520, 16530), False, 'from retrograph.models import tokenization\n'), ((16891, 16926), 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), "(data_dir, 'train.tsv')\n", (16903, 16926), False, 'import os\n'), ((17062, 17095), 'os.path.join', 'os.path.join', (['data_dir', '"""dev.tsv"""'], {}), "(data_dir, 'dev.tsv')\n", (17074, 17095), False, 'import os\n'), ((17230, 17264), 'os.path.join', 'os.path.join', (['data_dir', '"""test.tsv"""'], {}), "(data_dir, 'test.tsv')\n", (17242, 17264), False, 'import os\n'), ((17841, 17881), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[3]'], {}), '(line[3])\n', (17872, 17881), False, 'from retrograph.models import tokenization\n'), ((18681, 18721), 'os.path.join', 'os.path.join', (['data_dir', '"""SICK_train.txt"""'], {}), "(data_dir, 'SICK_train.txt')\n", (18693, 18721), False, 'import os\n'), ((18847, 18887), 'os.path.join', 'os.path.join', (['data_dir', '"""SICK_trial.txt"""'], {}), "(data_dir, 'SICK_trial.txt')\n", (18859, 18887), False, 'import os\n'), ((19012, 19061), 'os.path.join', 'os.path.join', (['data_dir', '"""SICK_test_annotated.txt"""'], {}), "(data_dir, 'SICK_test_annotated.txt')\n", (19024, 19061), False, 'import os\n'), ((20692, 20731), 'os.path.join', 'os.path.join', (['data_dir', '"""TREC_10.label"""'], {}), "(data_dir, 'TREC_10.label')\n", (20704, 20731), False, 'import os\n'), ((5019, 5060), 'os.path.join', 'os.path.join', (['data_dir', '"""dev_matched.tsv"""'], {}), "(data_dir, 'dev_matched.tsv')\n", (5031, 5060), False, 'import os\n'), ((5159, 5203), 'os.path.join', 'os.path.join', (['data_dir', '"""dev_mismatched.tsv"""'], {}), "(data_dir, 'dev_mismatched.tsv')\n", (5171, 5203), False, 'import os\n'), ((5394, 5436), 'os.path.join', 'os.path.join', (['data_dir', '"""test_matched.tsv"""'], {}), "(data_dir, 'test_matched.tsv')\n", (5406, 5436), False, 'import os\n'), ((5518, 5563), 'os.path.join', 'os.path.join', (['data_dir', '"""test_mismatched.tsv"""'], {}), "(data_dir, 'test_mismatched.tsv')\n", (5530, 5563), False, 'import os\n'), ((5911, 5951), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), '(line[0])\n', (5942, 5951), False, 'from retrograph.models import tokenization\n'), ((6936, 6976), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), '(line[0])\n', (6967, 6976), False, 'from retrograph.models import tokenization\n'), ((8262, 8302), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), '(line[0])\n', (8293, 8302), False, 'from retrograph.models import tokenization\n'), ((13426, 13466), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), '(line[0])\n', (13457, 13466), False, 'from retrograph.models import tokenization\n'), ((14976, 
15016), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), '(line[0])\n', (15007, 15016), False, 'from retrograph.models import tokenization\n'), ((16192, 16232), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), '(line[0])\n', (16223, 16232), False, 'from retrograph.models import tokenization\n'), ((17601, 17641), 'retrograph.models.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), '(line[0])\n', (17632, 17641), False, 'from retrograph.models import tokenization\n')] |
"""
Handle preprocessing and loading of data.
"""
import html
import os.path
import pandas as pd
import re
from nltk import word_tokenize, pos_tag
from nltk.corpus import stopwords, wordnet
from nltk.stem.wordnet import WordNetLemmatizer
class LoadData:
@classmethod
def preprocess_stocktwits_data(cls, file_location, columns=['datetime', 'message']):
"""
preprocess the data in file location and saves it as a csv file (appending
'_preprocessed' before '.csv). The preprocessing us in following ways:
1) extract message and datetime columns.
2) sort according to datetime in descending order (newest first)
3) remove links, @ and $ references, extra whitespaces, extra '.', digits, slashes,
hyphons
4) decode html entities
5) convert everything to lower case
"""
if 'datetime' in columns:
dataFrame = pd.read_csv(file_location, usecols=columns, parse_dates=['datetime'], infer_datetime_format=True)
dataFrame.sort_values(by='datetime', ascending=False)
else:
dataFrame = pd.read_csv(file_location, usecols=columns)
dataFrame['message'] = dataFrame['message'].apply(lambda x: html.unescape(x))
dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'(www\.|https?://).*?(\s|$)|@.*?(\s|$)|\$.*?(\s|$)|\d|\%|\\|/|-|_', ' ', x))
dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\.+', '. ', x))
dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\,+', ', ', x))
dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\?+', '? ', x))
dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\s+', ' ', x))
dataFrame['message'] = dataFrame['message'].apply(lambda x: x.lower())
dataFrame.to_csv(file_location[:-4]+'_preprocessed.csv', index=False)
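    # Illustrative call (the symbol and path are hypothetical):
    #   LoadData.preprocess_stocktwits_data('data-extractor/stocktwits_AAPL.csv')
    # would write 'data-extractor/stocktwits_AAPL_preprocessed.csv' with the cleaned messages.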
@classmethod
def labelled_data_lexicon_analysis(cls):
"""
extract keywords from labelled stocktwits data for improved accuracy in scoring
for each labelled message do
1) tokenize the message
2) perform POS tagging
3) if a sense is present in wordnet then, lemmatize the word and remove stop words else ignore the word
remove intersections from the two lists before saving
"""
dataFrame = LoadData.get_labelled_data()
bullish_keywords = set()
bearish_keywords = set()
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words('english'))
for index, row in dataFrame.iterrows():
tokens = word_tokenize(row['message'])
pos = pos_tag(tokens)
selected_tags = set()
for i in range(len(pos)):
if len(wordnet.synsets(pos[i][0])):
if pos[i][1].startswith('J'):
selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'a'))
elif pos[i][1].startswith('V'):
selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'v'))
elif pos[i][1].startswith('N'):
selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'n'))
elif pos[i][1].startswith('R'):
selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'r'))
selected_tags -= stop_words
if row['sentiment'] == 'Bullish':
bullish_keywords = bullish_keywords.union(selected_tags)
elif row['sentiment'] == 'Bearish':
bearish_keywords = bearish_keywords.union(selected_tags)
updated_bullish_keywords = bullish_keywords - bearish_keywords
updated_bearish_keywords = bearish_keywords - bullish_keywords
with open('data-extractor/lexicon_bullish_words.txt', 'a') as file:
for word in updated_bullish_keywords:
file.write(word+"\n")
with open('data-extractor/lexicon_bearish_words.txt', 'a') as file:
for word in updated_bearish_keywords:
file.write(word+"\n")
@classmethod
def get_stocktwits_data(cls, symbol):
"""
get_data loads the preprocessed data of 'symbol' from data-extractor
and returns a pandas dataframe with columns [message(object), datetime(datetime64[ns])].
"""
file_location = 'data-extractor/stocktwits_'+symbol+'_preprocessed.csv'
if os.path.isfile(file_location) is False:
LoadData.preprocess_stocktwits_data('data-extractor/stocktwits_'+symbol+'.csv')
dataFrame = pd.read_csv(file_location)
return dataFrame
@classmethod
def get_price_data(cls, symbol):
"""
loads the price data of 'symbol' from data-extractor
and returns a pandas dataframe with columns [Date(datetime64[ns]), Opening Price(float64), Closing Price(float64), Volume(float64)].
"""
file_location = 'data-extractor/stock_prices_'+symbol+'.csv'
dataFrame = pd.read_csv(file_location, usecols=['Date', 'Opening Price', 'Closing Price', 'Volume'], parse_dates=['Date'], infer_datetime_format=True)
return dataFrame
@classmethod
def get_labelled_data(cls, type='complete'):
"""
get_labelled_data loads the preprocessed labelled data of stocktwits from data-extractor
and returns a pandas dataframe with columns [sentiment(object), message(object)].
"""
if type == 'complete':
file_location = 'data-extractor/labelled_data_complete_preprocessed.csv'
if os.path.isfile(file_location) is False:
LoadData.preprocess_stocktwits_data('data-extractor/labelled_data_complete.csv', columns=['sentiment', 'message'])
elif type == 'training':
file_location = 'data-extractor/labelled_data_training_preprocessed.csv'
if os.path.isfile(file_location) is False:
LoadData.get_training_data()
elif type == 'test':
file_location = 'data-extractor/labelled_data_test_preprocessed.csv'
if os.path.isfile(file_location) is False:
LoadData.preprocess_stocktwits_data('data-extractor/labelled_data_test.csv', columns=['sentiment', 'message'])
dataFrame = pd.read_csv(file_location)
return dataFrame
@classmethod
def get_custom_lexicon(cls):
"""
get custom lexicon of bearish and bullish words respectively
"""
file_location1 = 'data-extractor/lexicon_bearish_words.txt'
file_location2 = 'data-extractor/lexicon_bullish_words.txt'
if os.path.isfile(file_location1) is False or os.path.isfile(file_location2) is False:
LoadData.labelled_data_lexicon_analysis()
dataFrameBearish = pd.read_csv(file_location1, header=None, names=['word'])
dataFrameBullish = pd.read_csv(file_location2, header=None, names=['word'])
return dataFrameBearish, dataFrameBullish
@classmethod
def get_training_data(cls):
"""
get labelled training data with equal bearish and bullish messages
"""
try:
os.remove('data-extractor/labelled_data_training.csv')
except OSError:
pass
dataFrame = LoadData.get_labelled_data(type='complete')
dataFrameBearish = dataFrame[dataFrame['sentiment']=='Bearish']
dataFrameBullish = dataFrame[dataFrame['sentiment']=='Bullish']
dataFrameBearishTraining = dataFrameBearish
dataFrameBullishTraining = dataFrameBullish[:len(dataFrameBearish)]
dataFrameTraining = dataFrameBearishTraining.append(dataFrameBullishTraining, ignore_index=True).sample(frac=1).reset_index(drop=True)
dataFrameTraining.to_csv('data-extractor/labelled_data_training_preprocessed.csv', index=False)
@classmethod
def combine_price_and_sentiment(cls, sentimentFrame, priceFrame):
from datetime import timedelta
"""
receive sentimentFrame as (date, sentiment, message) indexed by date and sentiment
and priceFrame as (Date, Opening Price, Closing Price, Volume) and return a combined
frame as (sentiment_calculated_bullish, sentiment_calculated_bearish,
sentiment_actual_previous, tweet_volume_change, cash_volume, label)
"""
dataFrame = pd.DataFrame()
for date, df in sentimentFrame.groupby(level=0, sort=False):
price_current = priceFrame[priceFrame['Date'] == date]
if price_current.empty or date-timedelta(days=1) not in sentimentFrame.index:
continue
tweet_minus1 = sentimentFrame.loc[date-timedelta(days=1)]
days = 1
price_plus1 = priceFrame[priceFrame['Date'] == date+timedelta(days=days)]
while price_plus1.empty:
days += 1
price_plus1 = priceFrame[priceFrame['Date'] == date+timedelta(days=days)]
days = 1
price_minus1 = priceFrame[priceFrame['Date'] == date-timedelta(days=days)]
while price_minus1.empty:
days += 1
price_minus1 = priceFrame[priceFrame['Date'] == date-timedelta(days=days)]
new_row = {}
new_row['date'] = date
new_row['sentiment_calculated_bullish'] = df.loc[(date, 'Bullish')]['message']
new_row['sentiment_calculated_bearish'] = df.loc[(date, 'Bearish')]['message']
new_row['sentiment_actual_previous'] = 1 if ((price_minus1.iloc[0]['Closing Price'] - price_minus1.iloc[0]['Opening Price']) >= 0) else -1
new_row['tweet_volume_change'] = df['message'].sum() - tweet_minus1['message'].sum()
new_row['cash_volume'] = price_current['Volume'].iloc[0]
new_row['label'] = 1 if ((price_plus1.iloc[0]['Closing Price'] - price_current.iloc[0]['Closing Price']) >= 0) else -1
print(new_row)
dataFrame = dataFrame.append(new_row, ignore_index=True)
return dataFrame
@classmethod
def aggregate_stock_price_data(cls):
"""
compile stocktwits data for stock prediction analysis in the following form
(date, sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous, tweet_volume_change, cash_volume, label)
we have choice to take previous n days sentiment_calculated and using label of next nth day
returns dataframes for AAPL, AMZN, GOOGL respectively
"""
if not (os.path.isfile('data-extractor/stocktwits_AAPL_sharedata.csv') and os.path.isfile('data-extractor/stocktwits_AMZN_sharedata.csv') and os.path.isfile('data-extractor/stocktwits_GOOGL_sharedata.csv')):
from sklearn.externals import joblib
file_location = 'naive_bayes_classifier.pkl'
priceAAPL = LoadData.get_price_data('AAPL')
priceAMZN = LoadData.get_price_data('AMZN')
priceGOOGL = LoadData.get_price_data('GOOGL')
sentimented_file = 'data-extractor/stocktwits_AAPL_withsentiment.csv'
if os.path.isfile(sentimented_file) is False:
tweet_classifier = joblib.load(file_location)
dataAAPL = LoadData.get_stocktwits_data('AAPL')
dataAAPL['sentiment'] = dataAAPL['message'].apply(lambda x: tweet_classifier.predict([x])[0])
dataAAPL['datetime'] = dataAAPL['datetime'].apply(lambda x: x.date())
dataAAPL.rename(columns={'datetime':'date'}, inplace=True)
dataAAPL.to_csv('data-extractor/stocktwits_AAPL_withsentiment.csv', index=False)
sentimented_file = 'data-extractor/stocktwits_AMZN_withsentiment.csv'
if os.path.isfile(sentimented_file) is False:
tweet_classifier = joblib.load(file_location)
dataAMZN = LoadData.get_stocktwits_data('AMZN')
dataAMZN['sentiment'] = dataAMZN['message'].apply(lambda x: tweet_classifier.predict([x])[0])
dataAMZN['datetime'] = dataAMZN['datetime'].apply(lambda x: x.date())
dataAMZN.rename(columns={'datetime':'date'}, inplace=True)
dataAMZN.to_csv('data-extractor/stocktwits_AMZN_withsentiment.csv', index=False)
sentimented_file = 'data-extractor/stocktwits_GOOGL_withsentiment.csv'
if os.path.isfile(sentimented_file) is False:
tweet_classifier = joblib.load(file_location)
dataGOOGL = LoadData.get_stocktwits_data('GOOGL')
dataGOOGL['sentiment'] = dataGOOGL['message'].apply(lambda x: tweet_classifier.predict([x])[0])
dataGOOGL['datetime'] = dataGOOGL['datetime'].apply(lambda x: x.date())
dataGOOGL.rename(columns={'datetime':'date'}, inplace=True)
dataGOOGL.to_csv('data-extractor/stocktwits_GOOGL_withsentiment.csv', index=False)
dataAAPL = pd.read_csv('data-extractor/stocktwits_AAPL_withsentiment.csv', parse_dates=['date'], infer_datetime_format=True)
dataAMZN = pd.read_csv('data-extractor/stocktwits_AMZN_withsentiment.csv', parse_dates=['date'], infer_datetime_format=True)
dataGOOGL = pd.read_csv('data-extractor/stocktwits_GOOGL_withsentiment.csv', parse_dates=['date'], infer_datetime_format=True)
dataAAPL = dataAAPL.groupby(['date','sentiment'], sort=False).count()
dataAMZN = dataAMZN.groupby(['date','sentiment'], sort=False).count()
dataGOOGL = dataGOOGL.groupby(['date','sentiment'], sort=False).count()
dataAAPL = LoadData.combine_price_and_sentiment(dataAAPL, priceAAPL)
dataAMZN = LoadData.combine_price_and_sentiment(dataAMZN, priceAMZN)
dataGOOGL = LoadData.combine_price_and_sentiment(dataGOOGL, priceGOOGL)
dataAAPL.to_csv('data-extractor/stocktwits_AAPL_sharedata.csv', index=False)
dataAMZN.to_csv('data-extractor/stocktwits_AMZN_sharedata.csv', index=False)
dataGOOGL.to_csv('data-extractor/stocktwits_GOOGL_sharedata.csv', index=False)
dataAAPL = pd.read_csv('data-extractor/stocktwits_AAPL_sharedata.csv', parse_dates=['date'], infer_datetime_format=True)
dataAMZN = pd.read_csv('data-extractor/stocktwits_AMZN_sharedata.csv', parse_dates=['date'], infer_datetime_format=True)
dataGOOGL = pd.read_csv('data-extractor/stocktwits_GOOGL_sharedata.csv', parse_dates=['date'], infer_datetime_format=True)
return dataAAPL, dataAMZN, dataGOOGL
@classmethod
def get_stock_prediction_data(cls, symbol='ALL', type='training'):
"""
get the training and test data for stock prediction in format
(sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous,
tweet_volume_change, cash_volume, label)
Standardize the data before using.
"""
file_location = 'data-extractor/stockdata_'+symbol+'_'+type+'.csv'
if not os.path.isfile(file_location):
import numpy as np
dataAAPL, dataAMZN, dataGOOGL = LoadData.aggregate_stock_price_data()
combined_data = dataAAPL.append([dataAMZN, dataGOOGL], ignore_index=True)
combined_data.sort_values('date')
combined_data.drop(columns='date', inplace=True)
combined_training, combined_test = np.split(combined_data.sample(frac=1), [int(.9*len(combined_data))])
combined_training.to_csv('data-extractor/stockdata_ALL_training.csv', index=False)
combined_test.to_csv('data-extractor/stockdata_ALL_test.csv', index=False)
dataAAPL.sort_values('date')
dataAAPL.drop(columns='date', inplace=True)
AAPL_training, AAPL_test = np.split(dataAAPL.sample(frac=1), [int(.9*len(dataAAPL))])
AAPL_training.to_csv('data-extractor/stockdata_AAPL_training.csv', index=False)
AAPL_test.to_csv('data-extractor/stockdata_AAPL_test.csv', index=False)
dataAMZN.sort_values('date')
dataAMZN.drop(columns='date', inplace=True)
AMZN_training, AMZN_test = np.split(dataAMZN.sample(frac=1), [int(.9*len(dataAMZN))])
AMZN_training.to_csv('data-extractor/stockdata_AMZN_training.csv', index=False)
AMZN_test.to_csv('data-extractor/stockdata_AMZN_test.csv', index=False)
dataGOOGL.sort_values('date')
dataGOOGL.drop(columns='date', inplace=True)
GOOGL_training, GOOGL_test = np.split(dataGOOGL.sample(frac=1), [int(.9*len(dataGOOGL))])
GOOGL_training.to_csv('data-extractor/stockdata_GOOGL_training.csv', index=False)
GOOGL_test.to_csv('data-extractor/stockdata_GOOGL_test.csv', index=False)
data = pd.read_csv(file_location)
return data
| [
"pandas.DataFrame",
"html.unescape",
"pandas.read_csv",
"nltk.pos_tag",
"nltk.corpus.wordnet.synsets",
"datetime.timedelta",
"nltk.corpus.stopwords.words",
"sklearn.externals.joblib.load",
"nltk.stem.wordnet.WordNetLemmatizer",
"re.sub",
"nltk.word_tokenize"
] | [((2575, 2594), 'nltk.stem.wordnet.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (2592, 2594), False, 'from nltk.stem.wordnet import WordNetLemmatizer\n'), ((4695, 4721), 'pandas.read_csv', 'pd.read_csv', (['file_location'], {}), '(file_location)\n', (4706, 4721), True, 'import pandas as pd\n'), ((5126, 5273), 'pandas.read_csv', 'pd.read_csv', (['file_location'], {'usecols': "['Date', 'Opening Price', 'Closing Price', 'Volume']", 'parse_dates': "['Date']", 'infer_datetime_format': '(True)'}), "(file_location, usecols=['Date', 'Opening Price',\n 'Closing Price', 'Volume'], parse_dates=['Date'], infer_datetime_format\n =True)\n", (5137, 5273), True, 'import pandas as pd\n'), ((6410, 6436), 'pandas.read_csv', 'pd.read_csv', (['file_location'], {}), '(file_location)\n', (6421, 6436), True, 'import pandas as pd\n'), ((6924, 6980), 'pandas.read_csv', 'pd.read_csv', (['file_location1'], {'header': 'None', 'names': "['word']"}), "(file_location1, header=None, names=['word'])\n", (6935, 6980), True, 'import pandas as pd\n'), ((7008, 7064), 'pandas.read_csv', 'pd.read_csv', (['file_location2'], {'header': 'None', 'names': "['word']"}), "(file_location2, header=None, names=['word'])\n", (7019, 7064), True, 'import pandas as pd\n'), ((8501, 8515), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (8513, 8515), True, 'import pandas as pd\n'), ((14269, 14383), 'pandas.read_csv', 'pd.read_csv', (['"""data-extractor/stocktwits_AAPL_sharedata.csv"""'], {'parse_dates': "['date']", 'infer_datetime_format': '(True)'}), "('data-extractor/stocktwits_AAPL_sharedata.csv', parse_dates=[\n 'date'], infer_datetime_format=True)\n", (14280, 14383), True, 'import pandas as pd\n'), ((14398, 14512), 'pandas.read_csv', 'pd.read_csv', (['"""data-extractor/stocktwits_AMZN_sharedata.csv"""'], {'parse_dates': "['date']", 'infer_datetime_format': '(True)'}), "('data-extractor/stocktwits_AMZN_sharedata.csv', parse_dates=[\n 'date'], infer_datetime_format=True)\n", (14409, 14512), True, 'import pandas as pd\n'), ((14528, 14643), 'pandas.read_csv', 'pd.read_csv', (['"""data-extractor/stocktwits_GOOGL_sharedata.csv"""'], {'parse_dates': "['date']", 'infer_datetime_format': '(True)'}), "('data-extractor/stocktwits_GOOGL_sharedata.csv', parse_dates=[\n 'date'], infer_datetime_format=True)\n", (14539, 14643), True, 'import pandas as pd\n'), ((16943, 16969), 'pandas.read_csv', 'pd.read_csv', (['file_location'], {}), '(file_location)\n', (16954, 16969), True, 'import pandas as pd\n'), ((950, 1051), 'pandas.read_csv', 'pd.read_csv', (['file_location'], {'usecols': 'columns', 'parse_dates': "['datetime']", 'infer_datetime_format': '(True)'}), "(file_location, usecols=columns, parse_dates=['datetime'],\n infer_datetime_format=True)\n", (961, 1051), True, 'import pandas as pd\n'), ((1152, 1195), 'pandas.read_csv', 'pd.read_csv', (['file_location'], {'usecols': 'columns'}), '(file_location, usecols=columns)\n', (1163, 1195), True, 'import pandas as pd\n'), ((2620, 2646), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2635, 2646), False, 'from nltk.corpus import stopwords, wordnet\n'), ((2717, 2746), 'nltk.word_tokenize', 'word_tokenize', (["row['message']"], {}), "(row['message'])\n", (2730, 2746), False, 'from nltk import word_tokenize, pos_tag\n'), ((2765, 2780), 'nltk.pos_tag', 'pos_tag', (['tokens'], {}), '(tokens)\n', (2772, 2780), False, 'from nltk import word_tokenize, pos_tag\n'), ((13096, 13214), 'pandas.read_csv', 'pd.read_csv', 
(['"""data-extractor/stocktwits_AAPL_withsentiment.csv"""'], {'parse_dates': "['date']", 'infer_datetime_format': '(True)'}), "('data-extractor/stocktwits_AAPL_withsentiment.csv', parse_dates\n =['date'], infer_datetime_format=True)\n", (13107, 13214), True, 'import pandas as pd\n'), ((13233, 13351), 'pandas.read_csv', 'pd.read_csv', (['"""data-extractor/stocktwits_AMZN_withsentiment.csv"""'], {'parse_dates': "['date']", 'infer_datetime_format': '(True)'}), "('data-extractor/stocktwits_AMZN_withsentiment.csv', parse_dates\n =['date'], infer_datetime_format=True)\n", (13244, 13351), True, 'import pandas as pd\n'), ((13371, 13489), 'pandas.read_csv', 'pd.read_csv', (['"""data-extractor/stocktwits_GOOGL_withsentiment.csv"""'], {'parse_dates': "['date']", 'infer_datetime_format': '(True)'}), "('data-extractor/stocktwits_GOOGL_withsentiment.csv',\n parse_dates=['date'], infer_datetime_format=True)\n", (13382, 13489), True, 'import pandas as pd\n'), ((1265, 1281), 'html.unescape', 'html.unescape', (['x'], {}), '(x)\n', (1278, 1281), False, 'import html\n'), ((1351, 1452), 're.sub', 're.sub', (['"""(www\\\\.|https?://).*?(\\\\s|$)|@.*?(\\\\s|$)|\\\\$.*?(\\\\s|$)|\\\\d|\\\\%|\\\\\\\\|/|-|_"""', '""" """', 'x'], {}), "(\n '(www\\\\.|https?://).*?(\\\\s|$)|@.*?(\\\\s|$)|\\\\$.*?(\\\\s|$)|\\\\d|\\\\%|\\\\\\\\|/|-|_'\n , ' ', x)\n", (1357, 1452), False, 'import re\n'), ((1504, 1527), 're.sub', 're.sub', (['"""\\\\.+"""', '""". """', 'x'], {}), "('\\\\.+', '. ', x)\n", (1510, 1527), False, 'import re\n'), ((1597, 1620), 're.sub', 're.sub', (['"""\\\\,+"""', '""", """', 'x'], {}), "('\\\\,+', ', ', x)\n", (1603, 1620), False, 'import re\n'), ((1690, 1713), 're.sub', 're.sub', (['"""\\\\?+"""', '"""? """', 'x'], {}), "('\\\\?+', '? ', x)\n", (1696, 1713), False, 'import re\n'), ((1783, 1805), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'x'], {}), "('\\\\s+', ' ', x)\n", (1789, 1805), False, 'import re\n'), ((11335, 11361), 'sklearn.externals.joblib.load', 'joblib.load', (['file_location'], {}), '(file_location)\n', (11346, 11361), False, 'from sklearn.externals import joblib\n'), ((11969, 11995), 'sklearn.externals.joblib.load', 'joblib.load', (['file_location'], {}), '(file_location)\n', (11980, 11995), False, 'from sklearn.externals import joblib\n'), ((12604, 12630), 'sklearn.externals.joblib.load', 'joblib.load', (['file_location'], {}), '(file_location)\n', (12615, 12630), False, 'from sklearn.externals import joblib\n'), ((2877, 2903), 'nltk.corpus.wordnet.synsets', 'wordnet.synsets', (['pos[i][0]'], {}), '(pos[i][0])\n', (2892, 2903), False, 'from nltk.corpus import stopwords, wordnet\n'), ((8819, 8836), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (8828, 8836), False, 'from datetime import timedelta\n'), ((8696, 8713), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (8705, 8713), False, 'from datetime import timedelta\n'), ((8923, 8943), 'datetime.timedelta', 'timedelta', ([], {'days': 'days'}), '(days=days)\n', (8932, 8943), False, 'from datetime import timedelta\n'), ((9184, 9204), 'datetime.timedelta', 'timedelta', ([], {'days': 'days'}), '(days=days)\n', (9193, 9204), False, 'from datetime import timedelta\n'), ((9076, 9096), 'datetime.timedelta', 'timedelta', ([], {'days': 'days'}), '(days=days)\n', (9085, 9096), False, 'from datetime import timedelta\n'), ((9339, 9359), 'datetime.timedelta', 'timedelta', ([], {'days': 'days'}), '(days=days)\n', (9348, 9359), False, 'from datetime import timedelta\n')] |
# ------------------------------------------------------------------------------
# Copyright (c) 2010-2013, EVEthing team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
from django.contrib.auth.models import User
from django.db import models
class SkillPlan(models.Model):
    PRIVATE_VISIBILITY = 1
    PUBLIC_VISIBILITY = 2
    GLOBAL_VISIBILITY = 3
    MASTERY_VISIBILITY = 99
    VISIBILITY_CHOICES = (
        (PRIVATE_VISIBILITY, 'Private'),
        (PUBLIC_VISIBILITY, 'Public'),
        (GLOBAL_VISIBILITY, 'Global'),
    )
    user = models.ForeignKey(User, null=True, blank=True)
    name = models.CharField(max_length=64)
    visibility = models.IntegerField(default=1, choices=VISIBILITY_CHOICES)
    class Meta:
        app_label = 'thing'
        ordering = ('name',)
    def __unicode__(self):
        if hasattr(self.user, 'username'):
            return '%s - %s' % (self.user.username, self.name)
        else:
            return '%s' % self.name
| [
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.db.models.CharField"
] | [((1897, 1943), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'null': '(True)', 'blank': '(True)'}), '(User, null=True, blank=True)\n', (1914, 1943), False, 'from django.db import models\n'), ((1956, 1987), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (1972, 1987), False, 'from django.db import models\n'), ((2005, 2063), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)', 'choices': 'VISIBILITY_CHOICES'}), '(default=1, choices=VISIBILITY_CHOICES)\n', (2024, 2063), False, 'from django.db import models\n')] |
import unittest
# time complexity O(n**2)
# space complexity O(1)
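# Shell sort runs an insertion sort over every gap-th element, starting with a gap
# of n//2 and halving it after each pass until the gap reaches 1 (plain insertion sort).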
def shell_sort(arr):
    n = len(arr)
    gap = n//2
    while gap >= 1:
        for start in range(gap):
            gap_insertion_sort(arr, start, gap)
        gap = gap//2
    return arr
def gap_insertion_sort(arr, start, gap):
    n = len(arr)
    for i in range(start, n, gap):
        j = i - gap
        while (j >= start) and (arr[i] < arr[j]):
            arr[i], arr[j] = arr[j], arr[i]
            i = j
            j -= gap
class Test(unittest.TestCase):
    def test_shell_sort(self):
        arr = [3,6,9,7,8,4,2,5,1,9,6]
        self.assertEqual(shell_sort(arr), [1,2,3,4,5,6,6,7,8,9,9])
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main"
] | [((706, 721), 'unittest.main', 'unittest.main', ([], {}), '()\n', (719, 721), False, 'import unittest\n')] |
# -*- coding: utf-8 -*-
"""
test_parameter
~~~~~~~~~~~~~~~
Tests for `gagepy.parameter` class
:copyright: 2015 by <NAME>, see AUTHORS
:license: United States Geological Survey (USGS), see LICENSE file
"""
import pytest
import os
import numpy as np
from datetime import datetime
from gagepy.parameter import Parameter
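# NOTE: `dates_daily` is assumed to be a pytest fixture (e.g. defined in conftest.py)
# that yields five consecutive daily timestamps, 2015-08-01 through 2015-08-05,
# which is what the max_date/min_date assertions below rely on.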
def test_parameter_init(dates_daily):
    parameter = Parameter(name = "Discharge",
                          dates = dates_daily,
                          values = np.array([100, 110, 105, 107, 112]),
                          units = "cubic feet per second (Mean)",
                          code = "06_00060_00003")
    assert list(parameter.dates) == list(dates_daily)
    assert parameter.code == "06_00060_00003"
    assert parameter.name == "Discharge"
    assert parameter.units == "cubic feet per second (Mean)"
    assert list(parameter.values) == list(np.array([100, 110, 105, 107, 112]))
def test_parameter_values_mean_max_min_without_nan(dates_daily):
    parameter = Parameter(name = "Discharge",
                          dates = dates_daily,
                          units = "cubic feet per second (Mean)",
                          code = "06_00060_00003",
                          values = np.array([1, 2, 3, 4, 5]))
    assert parameter.mean == 3.0
    assert parameter.max == 5.0
    assert parameter.min == 1.0
def test_parameter_values_mean_max_min_with_nan(dates_daily):
    parameter = Parameter(name = "Discharge",
                          dates = dates_daily,
                          units = "cubic feet per second (Mean)",
                          code = "06_00060_00003",
                          values = np.array([1, 2, 3, np.nan, 12]))
    assert parameter.mean == 4.5  # sum(values)/len(values) -> 18/4 = 4.5
    assert parameter.max == 12.0
    assert parameter.min == 1.0
def test_max_min_date(dates_daily):
    parameter = Parameter(name = "Discharge",
                          dates = dates_daily,
                          units = "cubic feet per second (Mean)",
                          code = "06_00060_00003",
                          values = np.array([1, 2, 3, 4, 5]))
    assert parameter.max_date == datetime(2015, 8, 5, 0, 0)
    assert parameter.min_date == datetime(2015, 8, 1, 0, 0)
def test_max_min_date_with_nan(dates_daily):
    parameter = Parameter(name = "Discharge",
                          dates = dates_daily,
                          units = "cubic feet per second (Mean)",
                          code = "06_00060_00003",
                          values = np.array([1, 2, np.nan, 4, 5]))
    assert parameter.max_date == datetime(2015, 8, 5, 0, 0)
    assert parameter.min_date == datetime(2015, 8, 1, 0, 0)
def test_print_parameter_by_not_capturing_stdout(dates_daily):
    parameter = Parameter(name = "Discharge",
                          dates = dates_daily,
                          units = "cubic feet per second (Mean)",
                          code = "06_00060_00003",
                          values = np.array([1, 2, 3, 4, 5]))
    print(parameter)
| [
"numpy.array",
"datetime.datetime"
] | [((2211, 2237), 'datetime.datetime', 'datetime', (['(2015)', '(8)', '(5)', '(0)', '(0)'], {}), '(2015, 8, 5, 0, 0)\n', (2219, 2237), False, 'from datetime import datetime\n'), ((2271, 2297), 'datetime.datetime', 'datetime', (['(2015)', '(8)', '(1)', '(0)', '(0)'], {}), '(2015, 8, 1, 0, 0)\n', (2279, 2297), False, 'from datetime import datetime\n'), ((2657, 2683), 'datetime.datetime', 'datetime', (['(2015)', '(8)', '(5)', '(0)', '(0)'], {}), '(2015, 8, 5, 0, 0)\n', (2665, 2683), False, 'from datetime import datetime\n'), ((2717, 2743), 'datetime.datetime', 'datetime', (['(2015)', '(8)', '(1)', '(0)', '(0)'], {}), '(2015, 8, 1, 0, 0)\n', (2725, 2743), False, 'from datetime import datetime\n'), ((509, 544), 'numpy.array', 'np.array', (['[100, 110, 105, 107, 112]'], {}), '([100, 110, 105, 107, 112])\n', (517, 544), True, 'import numpy as np\n'), ((908, 943), 'numpy.array', 'np.array', (['[100, 110, 105, 107, 112]'], {}), '([100, 110, 105, 107, 112])\n', (916, 943), True, 'import numpy as np\n'), ((1258, 1283), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (1266, 1283), True, 'import numpy as np\n'), ((1693, 1724), 'numpy.array', 'np.array', (['[1, 2, 3, np.nan, 12]'], {}), '([1, 2, 3, np.nan, 12])\n', (1701, 1724), True, 'import numpy as np\n'), ((2150, 2175), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (2158, 2175), True, 'import numpy as np\n'), ((2591, 2621), 'numpy.array', 'np.array', (['[1, 2, np.nan, 4, 5]'], {}), '([1, 2, np.nan, 4, 5])\n', (2599, 2621), True, 'import numpy as np\n'), ((3055, 3080), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (3063, 3080), True, 'import numpy as np\n')] |
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.template import loader
from haitiwater.settings import PROJECT_VERSION, PROJECT_NAME
from ..utils.get_data import *
@login_required(login_url='/login/')
def index(request):
    template = loader.get_template('consumers.html')
    context = {
        'project_version': PROJECT_VERSION,
        'project_name': PROJECT_NAME,
        'zone_name': get_zone(request),
        'current_period': get_current_month_fr(),
        'water_outlets': get_outlets(request),
        'consumer_groups': get_amount_household(request),
        'consumer_individuals': get_total_consumers(request),
        'unpaid_bills': 42,  # Todo, but for later as we can't mark a payment yet
    }
    return HttpResponse(template.render(context, request))
| [
"django.contrib.auth.decorators.login_required",
"django.template.loader.get_template"
] | [((227, 262), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""/login/"""'}), "(login_url='/login/')\n", (241, 262), False, 'from django.contrib.auth.decorators import login_required\n'), ((298, 335), 'django.template.loader.get_template', 'loader.get_template', (['"""consumers.html"""'], {}), "('consumers.html')\n", (317, 335), False, 'from django.template import loader\n')] |
from datetime import date
import pandas as pd
from pyrich.record import Record
class Asset(Record):
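    # Wraps the asset history table: record_current_asset() appends today's total
    # portfolio value once per calendar day and skips the insert when the latest
    # stored row is already from today.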
    def __init__(self, table: str) -> None:
        super().__init__(table)
    def record_current_asset(self, current_asset: float) -> None:
        table = 'current_asset'
        query = (f'SELECT date FROM {table} '
                 f'WHERE id=(SELECT MAX(id) FROM {table});')
        self.db.run_query(query)
        date_format = '%Y-%m-%d'
        today = date.today()
        timestamp = today.strftime(date_format)
        record = {
            'date': timestamp,
            'amount': current_asset,
        }
        try:
            latest_date = self.db.cur.fetchone()[0]
        except TypeError:
            self.db.insert(table, record, msg=False)
        else:
            if today > latest_date:
                self.db.insert(table, record, msg=False)
    def __repr__(self) -> str:
        return f"Asset(table='{self.table}')"
| [
"datetime.date.today"
] | [((467, 479), 'datetime.date.today', 'date.today', ([], {}), '()\n', (477, 479), False, 'from datetime import date\n')] |
import os, sys, shutil
import zipfile, subprocess
from serverdaemon.logsetup import setup_logging, logger, log_event
import boto3
from boto3.s3.transfer import S3Transfer, TransferConfig
REGION = "eu-west-1"
BUCKET_NAME = "directive-tiers.dg-api.com"
UE4_BUILDS_FOLDER = "ue4-builds"
INSTALL_FOLDER = r"c:\drift-serverdaemon"
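# Update flow: look for a newer drift-serverdaemon-<version>.zip under ue4-builds/ in S3,
# download it, unpack it over INSTALL_FOLDER, and kill any running python.exe processes
# (presumably so a service manager restarts the daemon on the new code).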
def get_my_version():
    t = [0, 0, 0]
    try:
        with open("VERSION") as f:
            version = f.read().strip()
        # increment version each time the script is called
        t = [int(p) for p in version.split(".")]
    except:
        logger.warning("Old version invalid")
    return t
def kill_python_processes():
    command = ["tasklist"]
    popen = subprocess.Popen(command, stdout=subprocess.PIPE)
    stdout, stderr = popen.communicate()
    lst = stdout.split("\n")
    for l in lst:
        ll = l.split()
        if not len(ll):
            continue
        name = ll[0]
        try:
            pid = int(ll[1])
        except:
            continue
        if pid == os.getpid():
            continue
        if "python.exe" in l:
            try:
                logger.info("Killing task '%s' with pid %s..." % (name, pid))
                command = ["taskkill", "/PID", str(pid), "/f"]
                subprocess.check_call(command, shell=True)
            except Exception as e:
                logger.error('Could not kill task. Error = %s' % e)
def check_download():
    client = boto3.client('s3', REGION)
    files = client.list_objects(Bucket=BUCKET_NAME, Prefix=UE4_BUILDS_FOLDER)['Contents']
    max_version = get_my_version()
    my_version = max_version
    logger.info("My version is %s", ".".join(str(p) for p in max_version))
    max_key = None
    for s3_key in files:
        filename = s3_key['Key']
        if "drift-serverdaemon-" in filename:
            lst = filename.split("-")[-1].split(".")
            try:
                file_version = [int(p) for p in lst[0:-1]]
            except ValueError:
                continue
            is_more = False
            if file_version[0] > max_version[0]:
                is_more = True
            elif file_version[1] > max_version[1]:
                is_more = True
            elif file_version[2] > max_version[2]:
                is_more = True
            if is_more:
                max_version = file_version
                max_key = filename
    if not max_key:
        logger.info("No new version found. Bailing out.")
        return None
    log_event("upgrade_daemon", "Upgrading Serverdaemon from version %s to %s" % (my_version, max_version), severity="WARNING")
    logger.info("found version %s, %s", max_version, max_key)
    transfer = S3Transfer(client)
    out_filename = "c:\\temp\\drift-serverdaemon.zip"
    transfer.download_file(BUCKET_NAME, max_key, out_filename)
    return out_filename
if __name__ == "__main__":
    setup_logging("updatedaemon")
    filename = check_download()
    if not filename:
        sys.exit(0)
    zip_file = zipfile.ZipFile(filename, 'r')
    for member in zip_file.namelist():
        # copy file (taken from zipfile's extract)
        filename = "/".join(member.split("/")[1:])
        source = zip_file.open(member)
        out_filename = os.path.join(INSTALL_FOLDER, filename)
        try:
            out_dirname = os.path.dirname(out_filename)
            os.makedirs(out_dirname)
        except:
            pass
        target = open(out_filename, "wb")  # open() instead of the Python 2-only file() builtin
        with source, target:
            shutil.copyfileobj(source, target)
    zip_file.close()
    kill_python_processes()
    log_event("upgrade_daemon_complete", "Done Upgrading Serverdaemon. All python processes have been killed", severity="WARNING")
"subprocess.Popen",
"subprocess.check_call",
"zipfile.ZipFile",
"os.getpid",
"boto3.client",
"os.makedirs",
"serverdaemon.logsetup.setup_logging",
"serverdaemon.logsetup.log_event",
"os.path.dirname",
"serverdaemon.logsetup.logger.info",
"boto3.s3.transfer.S3Transfer",
"serverdaemon.logsetup.logger.error",
"serverdaemon.logsetup.logger.warning",
"shutil.copyfileobj",
"os.path.join",
"sys.exit"
] | [((699, 748), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdout': 'subprocess.PIPE'}), '(command, stdout=subprocess.PIPE)\n', (715, 748), False, 'import zipfile, subprocess\n'), ((1445, 1471), 'boto3.client', 'boto3.client', (['"""s3"""', 'REGION'], {}), "('s3', REGION)\n", (1457, 1471), False, 'import boto3\n'), ((2486, 2613), 'serverdaemon.logsetup.log_event', 'log_event', (['"""upgrade_daemon"""', "('Upgrading Serverdaemon from version %s to %s' % (my_version, max_version))"], {'severity': '"""WARNING"""'}), "('upgrade_daemon', 'Upgrading Serverdaemon from version %s to %s' %\n (my_version, max_version), severity='WARNING')\n", (2495, 2613), False, 'from serverdaemon.logsetup import setup_logging, logger, log_event\n'), ((2614, 2671), 'serverdaemon.logsetup.logger.info', 'logger.info', (['"""found version %s, %s"""', 'max_version', 'max_key'], {}), "('found version %s, %s', max_version, max_key)\n", (2625, 2671), False, 'from serverdaemon.logsetup import setup_logging, logger, log_event\n'), ((2687, 2705), 'boto3.s3.transfer.S3Transfer', 'S3Transfer', (['client'], {}), '(client)\n', (2697, 2705), False, 'from boto3.s3.transfer import S3Transfer, TransferConfig\n'), ((2879, 2908), 'serverdaemon.logsetup.setup_logging', 'setup_logging', (['"""updatedaemon"""'], {}), "('updatedaemon')\n", (2892, 2908), False, 'from serverdaemon.logsetup import setup_logging, logger, log_event\n'), ((2998, 3028), 'zipfile.ZipFile', 'zipfile.ZipFile', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (3013, 3028), False, 'import zipfile, subprocess\n'), ((3581, 3715), 'serverdaemon.logsetup.log_event', 'log_event', (['"""upgrade_daemon_complete"""', '"""Done Upgrading Serverdaemon. All python processes have been killed"""'], {'severity': '"""WARNING"""'}), "('upgrade_daemon_complete',\n 'Done Upgrading Serverdaemon. All python processes have been killed',\n severity='WARNING')\n", (3590, 3715), False, 'from serverdaemon.logsetup import setup_logging, logger, log_event\n'), ((2411, 2460), 'serverdaemon.logsetup.logger.info', 'logger.info', (['"""No new version found. Bailing out."""'], {}), "('No new version found. Bailing out.')\n", (2422, 2460), False, 'from serverdaemon.logsetup import setup_logging, logger, log_event\n'), ((2970, 2981), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2978, 2981), False, 'import os, sys, shutil\n'), ((3232, 3270), 'os.path.join', 'os.path.join', (['INSTALL_FOLDER', 'filename'], {}), '(INSTALL_FOLDER, filename)\n', (3244, 3270), False, 'import os, sys, shutil\n'), ((579, 616), 'serverdaemon.logsetup.logger.warning', 'logger.warning', (['"""Old version invalid"""'], {}), "('Old version invalid')\n", (593, 616), False, 'from serverdaemon.logsetup import setup_logging, logger, log_event\n'), ((1024, 1035), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1033, 1035), False, 'import os, sys, shutil\n'), ((3310, 3339), 'os.path.dirname', 'os.path.dirname', (['out_filename'], {}), '(out_filename)\n', (3325, 3339), False, 'import os, sys, shutil\n'), ((3352, 3376), 'os.makedirs', 'os.makedirs', (['out_dirname'], {}), '(out_dirname)\n', (3363, 3376), False, 'import os, sys, shutil\n'), ((3493, 3527), 'shutil.copyfileobj', 'shutil.copyfileobj', (['source', 'target'], {}), '(source, target)\n', (3511, 3527), False, 'import os, sys, shutil\n'), ((1122, 1183), 'serverdaemon.logsetup.logger.info', 'logger.info', (['("Killing task \'%s\' with pid %s..." % (name, pid))'], {}), '("Killing task \'%s\' with pid %s..." 
% (name, pid))\n', (1133, 1183), False, 'from serverdaemon.logsetup import setup_logging, logger, log_event\n'), ((1263, 1305), 'subprocess.check_call', 'subprocess.check_call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (1284, 1305), False, 'import zipfile, subprocess\n'), ((1357, 1408), 'serverdaemon.logsetup.logger.error', 'logger.error', (["('Could not kill task. Error = %s' % e)"], {}), "('Could not kill task. Error = %s' % e)\n", (1369, 1408), False, 'from serverdaemon.logsetup import setup_logging, logger, log_event\n')] |
from baselines.common.cmd_util import make_mujoco_env
from baselines.common import tf_util as U
from baselines import logger
from baselines.ppo1 import pposgd_simple
from cartpole.cartpole_sim import cartpole_policy
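# Trains PPO1 on the given MuJoCo env with a small MLP policy (2 hidden layers of 6 units)
# and writes TensorBoard logs to ./tensorboard_test when run as a script.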
def train(env_id, num_timesteps, seed=0):
    U.make_session(num_cpu=1).__enter__()
    def policy_fn(name, ob_space, ac_space):
        return cartpole_policy.CartPolePolicy(name=name, ob_space=ob_space, ac_space=ac_space, hid_size=6,
                                              num_hid_layers=2)
    env = make_mujoco_env(env_id, seed)
    pi = pposgd_simple.learn(env, policy_fn,
                             max_timesteps=num_timesteps,
                             timesteps_per_actorbatch=2048,
                             clip_param=0.2, entcoeff=0.0,
                             optim_epochs=10, optim_stepsize=3e-4, optim_batchsize=64,
                             gamma=0.99, lam=0.95, schedule='linear',
                             )
    env.close()
    return pi
if __name__ == '__main__':
    logger.configure(dir="./tensorboard_test", format_strs=["tensorboard"])
    pi = train('InvertedPendulum-v2', num_timesteps=5000, seed=0)
| [
"baselines.common.tf_util.make_session",
"baselines.ppo1.pposgd_simple.learn",
"baselines.common.cmd_util.make_mujoco_env",
"baselines.logger.configure",
"cartpole.cartpole_sim.cartpole_policy.CartPolePolicy"
] | [((531, 560), 'baselines.common.cmd_util.make_mujoco_env', 'make_mujoco_env', (['env_id', 'seed'], {}), '(env_id, seed)\n', (546, 560), False, 'from baselines.common.cmd_util import make_mujoco_env\n'), ((570, 808), 'baselines.ppo1.pposgd_simple.learn', 'pposgd_simple.learn', (['env', 'policy_fn'], {'max_timesteps': 'num_timesteps', 'timesteps_per_actorbatch': '(2048)', 'clip_param': '(0.2)', 'entcoeff': '(0.0)', 'optim_epochs': '(10)', 'optim_stepsize': '(0.0003)', 'optim_batchsize': '(64)', 'gamma': '(0.99)', 'lam': '(0.95)', 'schedule': '"""linear"""'}), "(env, policy_fn, max_timesteps=num_timesteps,\n timesteps_per_actorbatch=2048, clip_param=0.2, entcoeff=0.0,\n optim_epochs=10, optim_stepsize=0.0003, optim_batchsize=64, gamma=0.99,\n lam=0.95, schedule='linear')\n", (589, 808), False, 'from baselines.ppo1 import pposgd_simple\n'), ((1035, 1106), 'baselines.logger.configure', 'logger.configure', ([], {'dir': '"""./tensorboard_test"""', 'format_strs': "['tensorboard']"}), "(dir='./tensorboard_test', format_strs=['tensorboard'])\n", (1051, 1106), False, 'from baselines import logger\n'), ((364, 478), 'cartpole.cartpole_sim.cartpole_policy.CartPolePolicy', 'cartpole_policy.CartPolePolicy', ([], {'name': 'name', 'ob_space': 'ob_space', 'ac_space': 'ac_space', 'hid_size': '(6)', 'num_hid_layers': '(2)'}), '(name=name, ob_space=ob_space, ac_space=\n ac_space, hid_size=6, num_hid_layers=2)\n', (394, 478), False, 'from cartpole.cartpole_sim import cartpole_policy\n'), ((265, 290), 'baselines.common.tf_util.make_session', 'U.make_session', ([], {'num_cpu': '(1)'}), '(num_cpu=1)\n', (279, 290), True, 'from baselines.common import tf_util as U\n')] |
from collections import OrderedDict
class AutoFormSettings(object):
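    # Base class for spec-driven form settings: each subclass defines a `spec`
    # OrderedDict mapping attribute names to form metadata, and __init__ creates
    # every listed attribute initialised to None.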
    def __init__(self):
        if not hasattr(self, "spec"):
            raise RuntimeError("%s instance has no 'spec' attribute"
                               % self.__class__.__name__)
        for attrname in self.spec.keys():
            setattr(self, attrname, None)
class WallSettings(AutoFormSettings):
    spec = OrderedDict([
        ("north", {"type": "bool", "tooltip": "Enable/disable wall to the north"}),
        ("south", {"type": "bool", "tooltip": "Enable/disable wall to the south"}),
        ("east", {"type": "bool", "tooltip": "Enable/disable wall to the east"}),
        ("west", {"type": "bool", "tooltip": "Enable/disable wall to the west"})
    ])
class DoorSettings(AutoFormSettings):
    spec = OrderedDict([
        ("direction", {"type": "choice", "choices": ["north", "south", "east", "west"],
                       "tooltip": "Set the direction to this door from currently"
                       " selected tile"}),
        ("prefix", {"type": "str", "tooltip": "Set the word that should precede "
                    "the name of this door, usually 'a' or 'an' (e.g. 'a' "
                    "wooden door, 'an' oak door)"}),
        ("name", {"type": "str", "tooltip": "name of this door, e.g. "
                  "'wooden door' or 'oak door'"}),
        ("tile_id", {"type": "str", "label": "tile ID", "tooltip": "unique "
                     "identifier for programmatic access to this door"})
    ])
class KeypadDoorSettings(AutoFormSettings):
    spec = OrderedDict([
        ("direction", {"type": "choice", "choices": ["north", "south", "east", "west"],
                       "tooltip": "Set the direction to this door from currently"
                       " selected tile"}),
        ("prefix", {"type": "str", "tooltip": "Set the word that should precede "
                    "the name of this door, usually 'a' or 'an' (e.g. 'a' "
                    "wooden door, 'an' oak door)"}),
        ("name", {"type": "str", "tooltip": "name of this door, e.g. "
                  "'wooden door' or 'oak door'"}),
        ("tile_id", {"type": "str", "label": "tile ID", "tooltip": "unique "
                     "identifier for programmatic access to this door"}),
        ("code", {"type": "int", "label": "keypad code", "tooltip": "Integer "
                  "code required to unlock this door"}),
        ("prompt", {"type": "str", "label": "keypad prompt", "tooltip": "String "
                    "used to prompt player for keypad code entry"})
    ])
class TileSettings(AutoFormSettings):
    spec = OrderedDict([
        ('tile_id', {'type': 'str', 'label': 'tile ID', "tooltip": "Unique "
                     "identifier for programmatic access to this tile"}),
        ('name', {'type': 'str', 'tooltip': "Short string used to describe this "
                  "tile to the player from afar, e.g. 'a scary room'"}),
        ('description', {'type':'long_str', 'tooltip': "String used to describe "
                         "the tile to player when they enter it. Note that this "
                         "string will always be prefixed with 'You are' during "
                         "gameplay"}),
        ('dark', {'type': 'bool', 'tooltip': "If enabled, player will need a "
                  "light source to see anything on this tile"}),
        ('first_visit_message', {'type': 'long_str', 'label': 'first visit message',
                                 'tooltip': "String displayed only when player "
                                 "enters this tile for the first time"}),
        ('first_visit_message_in_dark', {'type': 'bool', 'label': 'show first visit message if dark',
                                         'tooltip': "Enable/disable showing the "
                                         "first visit message if the current tile "
                                         "is dark"}),
        ('smell_description', {'type': 'str', 'label': 'smell description',
                               'tooltip': "String displayed when player smells "
                               "the air on the current tile"}),
        ('ground_smell_description', {'type': 'str', 'label': 'ground smell description',
                                      'tooltip': "String displayed when player "
                                      "smells the ground on the current tile"}),
        ('ground_taste_description', {'type': 'str', 'label': 'ground taste description',
                                      'tooltip': "String displayed when player "
                                      "tastes the ground on the current tile"}),
        ('name_from_north', {'type': 'str', 'label': 'name from north',
                             'tooltip': 'String used to describe this tile when'
                             ' player is on the adjacent tile to the north'}),
        ('name_from_south', {'type': 'str', 'label': 'name from south',
                             'tooltip': 'String used to describe this tile when'
                             ' player is on the adjacent tile to the south'}),
        ('name_from_east', {'type': 'str', 'label': 'name from east',
                            'tooltip': 'String used to describe this tile when'
                            ' player is on the adjacent tile to the east'}),
        ('name_from_west', {'type': 'str', 'label': 'name from west',
                            'tooltip': 'String used to describe this tile when'
                            ' player is on the adjacent tile to the west'})
    ])
| [
"collections.OrderedDict"
] | [((394, 724), 'collections.OrderedDict', 'OrderedDict', (["[('north', {'type': 'bool', 'tooltip': 'Enable/disable wall to the north'}),\n ('south', {'type': 'bool', 'tooltip':\n 'Enable/disable wall to the south'}), ('east', {'type': 'bool',\n 'tooltip': 'Enable/disable wall to the east'}), ('west', {'type':\n 'bool', 'tooltip': 'Enable/disable wall to the west'})]"], {}), "([('north', {'type': 'bool', 'tooltip':\n 'Enable/disable wall to the north'}), ('south', {'type': 'bool',\n 'tooltip': 'Enable/disable wall to the south'}), ('east', {'type':\n 'bool', 'tooltip': 'Enable/disable wall to the east'}), ('west', {\n 'type': 'bool', 'tooltip': 'Enable/disable wall to the west'})])\n", (405, 724), False, 'from collections import OrderedDict\n'), ((796, 1369), 'collections.OrderedDict', 'OrderedDict', (['[(\'direction\', {\'type\': \'choice\', \'choices\': [\'north\', \'south\', \'east\',\n \'west\'], \'tooltip\':\n \'Set the direction to this door from currently selected tile\'}), (\n \'prefix\', {\'type\': \'str\', \'tooltip\':\n "Set the word that should precede the name of this door, usually \'a\' or \'an\' (e.g. \'a\' wooden door, \'an\' oak door)"\n }), (\'name\', {\'type\': \'str\', \'tooltip\':\n "name of this door, e.g. \'wooden door\' or \'oak door\'"}), (\'tile_id\', {\n \'type\': \'str\', \'label\': \'tile ID\', \'tooltip\':\n \'unique identifier for programmatic access to this door\'})]'], {}), '([(\'direction\', {\'type\': \'choice\', \'choices\': [\'north\', \'south\',\n \'east\', \'west\'], \'tooltip\':\n \'Set the direction to this door from currently selected tile\'}), (\n \'prefix\', {\'type\': \'str\', \'tooltip\':\n "Set the word that should precede the name of this door, usually \'a\' or \'an\' (e.g. \'a\' wooden door, \'an\' oak door)"\n }), (\'name\', {\'type\': \'str\', \'tooltip\':\n "name of this door, e.g. \'wooden door\' or \'oak door\'"}), (\'tile_id\', {\n \'type\': \'str\', \'label\': \'tile ID\', \'tooltip\':\n \'unique identifier for programmatic access to this door\'})])\n', (807, 1369), False, 'from collections import OrderedDict\n'), ((1563, 2380), 'collections.OrderedDict', 'OrderedDict', (['[(\'direction\', {\'type\': \'choice\', \'choices\': [\'north\', \'south\', \'east\',\n \'west\'], \'tooltip\':\n \'Set the direction to this door from currently selected tile\'}), (\n \'prefix\', {\'type\': \'str\', \'tooltip\':\n "Set the word that should precede the name of this door, usually \'a\' or \'an\' (e.g. \'a\' wooden door, \'an\' oak door)"\n }), (\'name\', {\'type\': \'str\', \'tooltip\':\n "name of this door, e.g. \'wooden door\' or \'oak door\'"}), (\'tile_id\', {\n \'type\': \'str\', \'label\': \'tile ID\', \'tooltip\':\n \'unique identifier for programmatic access to this door\'}), (\'code\', {\n \'type\': \'int\', \'label\': \'keypad code\', \'tooltip\':\n \'Integer code required to unlock this door\'}), (\'prompt\', {\'type\':\n \'str\', \'label\': \'keypad prompt\', \'tooltip\':\n \'String used to prompt player for keypad code entry\'})]'], {}), '([(\'direction\', {\'type\': \'choice\', \'choices\': [\'north\', \'south\',\n \'east\', \'west\'], \'tooltip\':\n \'Set the direction to this door from currently selected tile\'}), (\n \'prefix\', {\'type\': \'str\', \'tooltip\':\n "Set the word that should precede the name of this door, usually \'a\' or \'an\' (e.g. \'a\' wooden door, \'an\' oak door)"\n }), (\'name\', {\'type\': \'str\', \'tooltip\':\n "name of this door, e.g. 
\'wooden door\' or \'oak door\'"}), (\'tile_id\', {\n \'type\': \'str\', \'label\': \'tile ID\', \'tooltip\':\n \'unique identifier for programmatic access to this door\'}), (\'code\', {\n \'type\': \'int\', \'label\': \'keypad code\', \'tooltip\':\n \'Integer code required to unlock this door\'}), (\'prompt\', {\'type\':\n \'str\', \'label\': \'keypad prompt\', \'tooltip\':\n \'String used to prompt player for keypad code entry\'})])\n', (1574, 2380), False, 'from collections import OrderedDict\n'), ((2609, 4798), 'collections.OrderedDict', 'OrderedDict', (['[(\'tile_id\', {\'type\': \'str\', \'label\': \'tile ID\', \'tooltip\':\n \'Unique identifier for programmatic access to this tile\'}), (\'name\', {\n \'type\': \'str\', \'tooltip\':\n "Short string used to describe this tile to the player from afar, e.g. \'a scary room\'"\n }), (\'description\', {\'type\': \'long_str\', \'tooltip\':\n "String used to describe the tile to player when they enter it. Note that this string will always be prefixed with \'You are\' during gameplay"\n }), (\'dark\', {\'type\': \'bool\', \'tooltip\':\n \'If enabled, player will need a light source to see anything on this tile\'\n }), (\'first_visit_message\', {\'type\': \'long_str\', \'label\':\n \'first visit message\', \'tooltip\':\n \'String displayed only when player enters this tile for the first time\'\n }), (\'first_visit_message_in_dark\', {\'type\': \'bool\', \'label\':\n \'show first visit message if dark\', \'tooltip\':\n \'Enable/disable showing the first visit message if the current tile is dark\'\n }), (\'smell_description\', {\'type\': \'str\', \'label\': \'smell description\',\n \'tooltip\':\n \'String displayed when player smells the air on the current tile\'}), (\n \'ground_smell_description\', {\'type\': \'str\', \'label\':\n \'ground smell description\', \'tooltip\':\n \'String displayed when player smells the ground on the current tile\'}),\n (\'ground_taste_description\', {\'type\': \'str\', \'label\':\n \'ground taste description\', \'tooltip\':\n \'String displayed when player tastes the ground on the current tile\'}),\n (\'name_from_north\', {\'type\': \'str\', \'label\': \'name from south\',\n \'tooltip\':\n \'String used to describe this tile when player is on the adjacent tile to the south\'\n }), (\'name_from_south\', {\'type\': \'str\', \'label\': \'name from south\',\n \'tooltip\':\n \'String used to describe this tile when player is on the adjacent tile to the south\'\n }), (\'name_from_east\', {\'type\': \'str\', \'label\': \'name from east\',\n \'tooltip\':\n \'String used to describe this tile when player is on the adjacent tile to the east\'\n }), (\'name_from_west\', {\'type\': \'str\', \'label\': \'name from west\',\n \'tooltip\':\n \'String used to describe this tile when player is on the adjacent tile to the west\'\n })]'], {}), '([(\'tile_id\', {\'type\': \'str\', \'label\': \'tile ID\', \'tooltip\':\n \'Unique identifier for programmatic access to this tile\'}), (\'name\', {\n \'type\': \'str\', \'tooltip\':\n "Short string used to describe this tile to the player from afar, e.g. \'a scary room\'"\n }), (\'description\', {\'type\': \'long_str\', \'tooltip\':\n "String used to describe the tile to player when they enter it. 
Note that this string will always be prefixed with \'You are\' during gameplay"\n }), (\'dark\', {\'type\': \'bool\', \'tooltip\':\n \'If enabled, player will need a light source to see anything on this tile\'\n }), (\'first_visit_message\', {\'type\': \'long_str\', \'label\':\n \'first visit message\', \'tooltip\':\n \'String displayed only when player enters this tile for the first time\'\n }), (\'first_visit_message_in_dark\', {\'type\': \'bool\', \'label\':\n \'show first visit message if dark\', \'tooltip\':\n \'Enable/disable showing the first visit message if the current tile is dark\'\n }), (\'smell_description\', {\'type\': \'str\', \'label\': \'smell description\',\n \'tooltip\':\n \'String displayed when player smells the air on the current tile\'}), (\n \'ground_smell_description\', {\'type\': \'str\', \'label\':\n \'ground smell description\', \'tooltip\':\n \'String displayed when player smells the ground on the current tile\'}),\n (\'ground_taste_description\', {\'type\': \'str\', \'label\':\n \'ground taste description\', \'tooltip\':\n \'String displayed when player tastes the ground on the current tile\'}),\n (\'name_from_north\', {\'type\': \'str\', \'label\': \'name from south\',\n \'tooltip\':\n \'String used to describe this tile when player is on the adjacent tile to the south\'\n }), (\'name_from_south\', {\'type\': \'str\', \'label\': \'name from south\',\n \'tooltip\':\n \'String used to describe this tile when player is on the adjacent tile to the south\'\n }), (\'name_from_east\', {\'type\': \'str\', \'label\': \'name from east\',\n \'tooltip\':\n \'String used to describe this tile when player is on the adjacent tile to the east\'\n }), (\'name_from_west\', {\'type\': \'str\', \'label\': \'name from west\',\n \'tooltip\':\n \'String used to describe this tile when player is on the adjacent tile to the west\'\n })])\n', (2620, 4798), False, 'from collections import OrderedDict\n')] |
# encoding: utf-8
import time
import unittest
from timingsutil import Stopwatch
import logging_helper
logging = logging_helper.setup_logging()
class TestConfiguration(unittest.TestCase):
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_stopwatch(self):
        stopwatch = Stopwatch()
        for _ in range(3):
            time.sleep(1)
            self.assertEqual(round(stopwatch.lap()), 1)
        self.assertEqual(round(stopwatch.stop()), 3)
if __name__ == u'__main__':
    unittest.main()
| [
"unittest.main",
"time.sleep",
"logging_helper.setup_logging",
"timingsutil.Stopwatch"
] | [((113, 143), 'logging_helper.setup_logging', 'logging_helper.setup_logging', ([], {}), '()\n', (141, 143), False, 'import logging_helper\n'), ((525, 540), 'unittest.main', 'unittest.main', ([], {}), '()\n', (538, 540), False, 'import unittest\n'), ((315, 326), 'timingsutil.Stopwatch', 'Stopwatch', ([], {}), '()\n', (324, 326), False, 'from timingsutil import Stopwatch\n'), ((367, 380), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (377, 380), False, 'import time\n')] |
# -*- coding: utf-8 -*-
"""
apx_data
maintains the information about the approach procedure dictionary
revision 0.2 2015/nov mlabru
pep8 style conventions
revision 0.1 2014/nov mlabru
initial release (Linux/Python)
"""
# < imports >--------------------------------------------------------------------------------------
# python library
import logging
import sys
# PyQt library
from PyQt5 import QtCore
# FIXME QtXml is no longer supported.
from PyQt5 import QtXml
# model
import model.items.apx_new as model
import model.items.parser_utils as parser
# control
import control.events.events_basic as events
# < class CApxData >-------------------------------------------------------------------------------
class CApxData(dict):
"""
mantém as informações sobre o dicionário de procedimento de aproximação
<aproximacao nApx="1">
<descricao>FINAL H3</descricao>
<aerodromo>SBSP</aerodromo>
<pista>17R</pista>
<ils>N</ils>
<aproxperd>N</aproxperd>
<espera>2</espera>
<breakpoint nBrk="1"> ... </breakpoint>
</aproximacao>
"""
# ---------------------------------------------------------------------------------------------
def __init__(self, f_model, f_data=None):
"""
@param f_model: model manager
@param f_data: dados dos procedimentos de aproximação
"""
# check input
assert f_model
# inicia a super class
super(CApxData, self).__init__()
# salva o model manager
self._model = f_model
# salva o event manager
self._event = f_model.event
# recebeu dados ?
if f_data is not None:
# recebeu uma lista ?
if isinstance(f_data, list):
# cria um procedimento de aproximação com os dados da lista
pass # self.make_apx(f_data)
# recebeu um procedimento de aproximação ?
elif isinstance(f_data, CApxData):
# copia o procedimento de aproximação
pass # self.copy_apx(f_data)
# senão, recebeu o pathname de um arquivo de procedimento de aproximação
else:
# carrega o dicionário de procedimento de aproximação de um arquivo em disco
self.load_file(f_data)
# ---------------------------------------------------------------------------------------------
def load_file(self, fs_apx_pn):
"""
carrega os dados do procedimento de aproximação de um arquivo em disco
@param fs_apx_pn: pathname do arquivo em disco
"""
# check input
assert fs_apx_pn
# carrega o arquivo de procedimento de aproximação
self.parse_apx_xml(fs_apx_pn + ".xml")
# ---------------------------------------------------------------------------------------------
def make_apx(self, fdct_root, fdct_data):
"""
carrega os dados de procedimento de aproximação a partir de um dicionário
@param fdct_data: lista de dados de procedimento de aproximação
@return flag e mensagem
"""
# check input
assert fdct_root is not None
assert fdct_data is not None
# é uma procedimento de aproximação do newton ?
if "aproximacoes" != fdct_root["tagName"]:
# logger
l_log = logging.getLogger("CApxData::make_apx")
l_log.setLevel(logging.CRITICAL)
l_log.critical("<E01: não é um arquivo de procedimentos de aproximação.")
# cria um evento de quit
l_evt = events.CQuit()
assert l_evt
# dissemina o evento
self._event.post(l_evt)
# se não for, cai fora...
sys.exit(1)
# é um arquivo do newton ?
if "NEWTON" != fdct_root["FORMAT"]:
# logger
l_log = logging.getLogger("CApxData::make_apx")
l_log.setLevel(logging.CRITICAL)
l_log.critical("<E02: não está em um formato aceito.")
# cria um evento de quit
l_evt = events.CQuit()
assert l_evt
# dissemina o evento
self._event.post(l_evt)
# se não for, cai fora...
sys.exit(1)
# é a assinatura do newton ?
if "1961" != fdct_root["CODE"]:
# logger
l_log = logging.getLogger("CApxData::make_apx")
l_log.setLevel(logging.CRITICAL)
l_log.critical("<E03: não tem a assinatura correta.")
# cria um evento de quit
l_evt = events.CQuit()
assert l_evt
# dissemina o evento
self._event.post(l_evt)
# se não for, cai fora...
sys.exit(1)
# verifica se existe identificação
if "nApx" in fdct_data:
# cria procedimento de aproximação
l_apx = model.CApxNEW(self._model, fdct_data, fdct_root["VERSION"])
assert l_apx
# coloca a procedimento de aproximação no dicionário
self[fdct_data["nApx"]] = l_apx
# senão, não existe identificação
else:
# monta uma mensagem
ls_msg = "não tem identificação. Aproximação não incluída."
# logger
l_log = logging.getLogger("CApxData::make_apx")
l_log.setLevel(logging.WARNING)
l_log.warning("<E04: {}".format(ls_msg))
# se não for, cai fora...
return False, ls_msg
# retorna Ok
return True, None
# ---------------------------------------------------------------------------------------------
    def parse_apx_xml(self, fs_apx_pn):
        """
        load the approach procedures file
        @param fs_apx_pn: pathname of the file on disk
        """
        # check input
        assert fs_apx_pn
        # create the QFile for the approach procedures XML file
        l_data_file = QtCore.QFile(fs_apx_pn)
        assert l_data_file is not None
        # open the approach procedures XML file
        l_data_file.open(QtCore.QIODevice.ReadOnly)
        # error opening the file?
        if not l_data_file.isOpen():
            # logger
            l_log = logging.getLogger("CApxData::make_apx")
            l_log.setLevel(logging.CRITICAL)
            l_log.critical("<E01: erro na abertura de {}.".format(fs_apx_pn))
            # create a quit event
            l_evt = events.CQuit()
            assert l_evt
            # dispatch the event
            self._event.post(l_evt)
            # terminate the application
            sys.exit(1)
        # create the XML document for the approach procedure
        # FIXME QtXml is no longer supported.
        l_xdoc_apx = QtXml.QDomDocument("aproximacoes")
        assert l_xdoc_apx is not None
        # error loading the document?
        if not l_xdoc_apx.setContent(l_data_file):
            # close the file
            l_data_file.close()
            # logger
            l_log = logging.getLogger("CApxData::make_apx")
            l_log.setLevel(logging.CRITICAL)
            l_log.critical("<E02: falha no parse de {}.".format(fs_apx_pn))
            # create a quit event
            l_evt = events.CQuit()
            assert l_evt
            # dispatch the event
            self._event.post(l_evt)
            # terminate the application
            sys.exit(1)
        # close the file
        l_data_file.close()
        # get the document's root element
        l_elem_root = l_xdoc_apx.documentElement()
        assert l_elem_root is not None
        # parse the root element attributes
        ldct_root = parser.parse_root_element(l_elem_root)
        # build a list with the approach procedure elements
        l_node_list = l_elem_root.elementsByTagName("aproximacao")
        # for every node in the list...
        for li_ndx in range(l_node_list.length()):
            # init the data dictionary
            ldct_data = {}
            # init the breakpoints list
            ldct_data["breakpoints"] = []
            # get a node from the list
            l_element = l_node_list.at(li_ndx).toElement()
            assert l_element is not None
            # read identification if available
            if l_element.hasAttribute("nApx"):
                ldct_data["nApx"] = int(l_element.attribute("nApx"))
            # get the first node of the subtree
            l_node = l_element.firstChild()
            assert l_node is not None
            # walk the subtree
            while not l_node.isNull():
                # try to convert the node into an element
                l_element = l_node.toElement()
                assert l_element is not None
                # is the node an element?
                if not l_element.isNull():
                    # parse the element
                    ldct_tmp = parser.parse_aproximacao(l_element)
                    # update the dictionary with the breakpoint
                    if "breakpoint" in ldct_tmp:
                        # update the dictionary with the breakpoint
                        ldct_data["breakpoints"].append(ldct_tmp["breakpoint"])
                        # delete this element
                        del ldct_tmp["breakpoint"]
                    # update the data dictionary
                    ldct_data.update(ldct_tmp)
                # next node
                l_node = l_node.nextSibling()
                assert l_node is not None
            # load the approach procedure data from a dictionary
            self.make_apx(ldct_root, ldct_data)
# ---------------------------------------------------------------------------------------------
    def save2disk(self, fs_apx_pn=None):
        """
        save the approach procedure data to a file on disk
        @param fs_apx_pn: path name of the file to save to
        @return flag and message
        """
        # return code
        lv_ok = True
        # message
        ls_msg = "save Ok"
        # return flag and message
        return lv_ok, ls_msg
# < the end >--------------------------------------------------------------------------------------
| [
"PyQt5.QtCore.QFile",
"model.items.parser_utils.parse_root_element",
"control.events.events_basic.CQuit",
"model.items.parser_utils.parse_aproximacao",
"sys.exit",
"PyQt5.QtXml.QDomDocument",
"logging.getLogger",
"model.items.apx_new.CApxNEW"
] | [((6016, 6039), 'PyQt5.QtCore.QFile', 'QtCore.QFile', (['fs_apx_pn'], {}), '(fs_apx_pn)\n', (6028, 6039), False, 'from PyQt5 import QtCore\n'), ((6832, 6866), 'PyQt5.QtXml.QDomDocument', 'QtXml.QDomDocument', (['"""aproximacoes"""'], {}), "('aproximacoes')\n", (6850, 6866), False, 'from PyQt5 import QtXml\n'), ((7753, 7791), 'model.items.parser_utils.parse_root_element', 'parser.parse_root_element', (['l_elem_root'], {}), '(l_elem_root)\n', (7778, 7791), True, 'import model.items.parser_utils as parser\n'), ((3387, 3426), 'logging.getLogger', 'logging.getLogger', (['"""CApxData::make_apx"""'], {}), "('CApxData::make_apx')\n", (3404, 3426), False, 'import logging\n'), ((3616, 3630), 'control.events.events_basic.CQuit', 'events.CQuit', ([], {}), '()\n', (3628, 3630), True, 'import control.events.events_basic as events\n'), ((3777, 3788), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3785, 3788), False, 'import sys\n'), ((3910, 3949), 'logging.getLogger', 'logging.getLogger', (['"""CApxData::make_apx"""'], {}), "('CApxData::make_apx')\n", (3927, 3949), False, 'import logging\n'), ((4120, 4134), 'control.events.events_basic.CQuit', 'events.CQuit', ([], {}), '()\n', (4132, 4134), True, 'import control.events.events_basic as events\n'), ((4281, 4292), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4289, 4292), False, 'import sys\n'), ((4412, 4451), 'logging.getLogger', 'logging.getLogger', (['"""CApxData::make_apx"""'], {}), "('CApxData::make_apx')\n", (4429, 4451), False, 'import logging\n'), ((4621, 4635), 'control.events.events_basic.CQuit', 'events.CQuit', ([], {}), '()\n', (4633, 4635), True, 'import control.events.events_basic as events\n'), ((4782, 4793), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4790, 4793), False, 'import sys\n'), ((4937, 4996), 'model.items.apx_new.CApxNEW', 'model.CApxNEW', (['self._model', 'fdct_data', "fdct_root['VERSION']"], {}), "(self._model, fdct_data, fdct_root['VERSION'])\n", (4950, 4996), True, 'import model.items.apx_new as model\n'), ((5336, 5375), 'logging.getLogger', 'logging.getLogger', (['"""CApxData::make_apx"""'], {}), "('CApxData::make_apx')\n", (5353, 5375), False, 'import logging\n'), ((6312, 6351), 'logging.getLogger', 'logging.getLogger', (['"""CApxData::make_apx"""'], {}), "('CApxData::make_apx')\n", (6329, 6351), False, 'import logging\n'), ((6533, 6547), 'control.events.events_basic.CQuit', 'events.CQuit', ([], {}), '()\n', (6545, 6547), True, 'import control.events.events_basic as events\n'), ((6690, 6701), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6698, 6701), False, 'import sys\n'), ((7100, 7139), 'logging.getLogger', 'logging.getLogger', (['"""CApxData::make_apx"""'], {}), "('CApxData::make_apx')\n", (7117, 7139), False, 'import logging\n'), ((7319, 7333), 'control.events.events_basic.CQuit', 'events.CQuit', ([], {}), '()\n', (7331, 7333), True, 'import control.events.events_basic as events\n'), ((7476, 7487), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7484, 7487), False, 'import sys\n'), ((8995, 9030), 'model.items.parser_utils.parse_aproximacao', 'parser.parse_aproximacao', (['l_element'], {}), '(l_element)\n', (9019, 9030), True, 'import model.items.parser_utils as parser\n')] |
# Generated by Django 3.1.5 on 2021-01-18 03:51
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('classes', '0003_auto_20210117_2058'),
]
operations = [
migrations.AlterField(
model_name='class',
name='cost',
field=models.DecimalField(decimal_places=2, max_digits=8),
),
migrations.AlterField(
model_name='review',
name='rating',
field=models.DecimalField(decimal_places=1, max_digits=2, validators=[django.core.validators.MaxValueValidator(5), django.core.validators.MinValueValidator(0)]),
),
]
| [
"django.db.models.DecimalField"
] | [((363, 414), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'max_digits': '(8)'}), '(decimal_places=2, max_digits=8)\n', (382, 414), False, 'from django.db import migrations, models\n')] |
#coding:utf-8
#
# id: bugs.core_6108
# title: Regression: FB3 throws "Datatypes are not comparable in expression" in procedure parameters
#  description:
# Confirmed bug on 4.0.0.1567; 3.0.5.33160.
# Works fine on 4.0.0.1573; 3.0.x is still affected
#
# tracker_id: CORE-6108
# min_versions: ['2.5']
# versions: 2.5
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
# proc_ddl='''
# create or alter procedure test_proc ( a_dts timestamp) returns ( o_dts timestamp) as
# begin
# o_dts = a_dts;
# suspend;
# end
# '''
#
# db_conn.execute_immediate( proc_ddl )
# db_conn.commit()
#
# cur=db_conn.cursor()
#
# sttm="select o_dts from test_proc('2019-'|| COALESCE( ?, 1) ||'-01' )"
# cur.execute( sttm, ( 3, ) )
# for r in cur:
# print(r[0])
# cur.close()
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
2019-03-01 00:00:00
"""
@pytest.mark.version('>=2.5')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
| [
"pytest.mark.version",
"pytest.fail",
"firebird.qa.db_factory"
] | [((569, 614), 'firebird.qa.db_factory', 'db_factory', ([], {'sql_dialect': '(3)', 'init': 'init_script_1'}), '(sql_dialect=3, init=init_script_1)\n', (579, 614), False, 'from firebird.qa import db_factory, isql_act, Action\n'), ((1224, 1252), 'pytest.mark.version', 'pytest.mark.version', (['""">=2.5"""'], {}), "('>=2.5')\n", (1243, 1252), False, 'import pytest\n'), ((1294, 1329), 'pytest.fail', 'pytest.fail', (['"""Test not IMPLEMENTED"""'], {}), "('Test not IMPLEMENTED')\n", (1305, 1329), False, 'import pytest\n')] |
# AUTOGENERATED! DO NOT EDIT! File to edit: 01_batchbald.ipynb (unless otherwise specified).
__all__ = ['compute_conditional_entropy', 'compute_entropy', 'CandidateBatch', 'get_batchbald_batch', 'get_bald_batch']
# Cell
from dataclasses import dataclass
from typing import List
import torch
import math
from tqdm.auto import tqdm
from toma import toma
from batchbald_redux import joint_entropy
# Cell
def compute_conditional_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor:
N, K, C = probs_N_K_C.shape
entropies_N = torch.empty(N, dtype=torch.double)
pbar = tqdm(total=N, desc="Conditional Entropy", leave=False)
@toma.execute.chunked(probs_N_K_C, 1024)
def compute(probs_n_K_C, start: int, end: int):
nats_n_K_C = probs_n_K_C * torch.log(probs_n_K_C)
nats_n_K_C[probs_n_K_C ==0] = 0.
entropies_N[start:end].copy_(-torch.sum(nats_n_K_C, dim=(1, 2)) / K)
pbar.update(end - start)
pbar.close()
return entropies_N
def compute_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor:
N, K, C = probs_N_K_C.shape
entropies_N = torch.empty(N, dtype=torch.double)
pbar = tqdm(total=N, desc="Entropy", leave=False)
@toma.execute.chunked(probs_N_K_C, 1024)
def compute(probs_n_K_C, start: int, end: int):
mean_probs_n_C = probs_n_K_C.mean(dim=1)
nats_n_C = mean_probs_n_C * torch.log(mean_probs_n_C)
nats_n_C[mean_probs_n_C ==0] = 0.
entropies_N[start:end].copy_(-torch.sum(nats_n_C, dim=1))
pbar.update(end - start)
pbar.close()
return entropies_N
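# Note (added for clarity; not one of the generated notebook cells): together
# the two helpers above give the usual BALD mutual-information estimate,
#     I[y; w | x]  =  H[ E_w p(y | x, w) ]  -  E_w H[ p(y | x, w) ]
#                 ~=  compute_entropy(probs_N_K_C) - compute_conditional_entropy(probs_N_K_C),
# i.e. a point scores highly when the K sampled models are individually
# confident but disagree with one another; this is exactly how the scores in
# `get_bald_batch` below are assembled.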
# Internal Cell
# Not publishing these at the moment.
def compute_conditional_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor:
N, K, C = logits_N_K_C.shape
entropies_N = torch.empty(N, dtype=torch.double)
pbar = tqdm(total=N, desc="Conditional Entropy", leave=False)
@toma.execute.chunked(logits_N_K_C, 1024)
def compute(logits_n_K_C, start: int, end: int):
nats_n_K_C = logits_n_K_C * torch.exp(logits_n_K_C)
entropies_N[start:end].copy_(
-torch.sum(nats_n_K_C, dim=(1, 2)) / K)
pbar.update(end - start)
pbar.close()
return entropies_N
def compute_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor:
N, K, C = logits_N_K_C.shape
entropies_N = torch.empty(N, dtype=torch.double)
pbar = tqdm(total=N, desc="Entropy", leave=False)
@toma.execute.chunked(logits_N_K_C, 1024)
def compute(logits_n_K_C, start: int, end: int):
mean_logits_n_C = torch.logsumexp(logits_n_K_C, dim=1) - math.log(K)
nats_n_C = mean_logits_n_C * torch.exp(mean_logits_n_C)
entropies_N[start:end].copy_(
-torch.sum(nats_n_C, dim=1))
pbar.update(end - start)
pbar.close()
return entropies_N
# Cell
@dataclass
class CandidateBatch:
scores: List[float]
indices: List[int]
def get_batchbald_batch(probs_N_K_C: torch.Tensor,
batch_size: int,
num_samples: int,
dtype=None,
device=None) -> CandidateBatch:
N, K, C = probs_N_K_C.shape
batch_size = min(batch_size, N)
candidate_indices = []
candidate_scores = []
if batch_size == 0:
return CandidateBatch(candidate_scores, candidate_indices)
conditional_entropies_N = compute_conditional_entropy(probs_N_K_C)
batch_joint_entropy = joint_entropy.DynamicJointEntropy(num_samples,
batch_size - 1,
K,
C,
dtype=dtype,
device=device)
# We always keep these on the CPU.
scores_N = torch.empty(N, dtype=torch.double, pin_memory=torch.cuda.is_available())
for i in tqdm(range(batch_size), desc="BatchBALD", leave=False):
if i > 0:
latest_index = candidate_indices[-1]
batch_joint_entropy.add_variables(
probs_N_K_C[latest_index:latest_index + 1])
shared_conditinal_entropies = conditional_entropies_N[
candidate_indices].sum()
batch_joint_entropy.compute_batch(probs_N_K_C,
output_entropies_B=scores_N)
scores_N -= conditional_entropies_N + shared_conditinal_entropies
scores_N[candidate_indices] = -float('inf')
candidate_score, candidate_index = scores_N.max(dim=0)
candidate_indices.append(candidate_index.item())
candidate_scores.append(candidate_score.item())
return CandidateBatch(candidate_scores, candidate_indices)
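# Illustrative helper (added; not one of the generated notebook cells): one way
# to go from raw MC-dropout logits to an acquisition batch. The softmax/double
# conversion and the default sample count are assumptions, not requirements.
def _example_acquire(logits_N_K_C: torch.Tensor,
                     acquisition_size: int = 10,
                     num_samples: int = 10000) -> CandidateBatch:
    """Pick `acquisition_size` pool points with BatchBALD from MC-dropout logits."""
    probs_N_K_C = torch.softmax(logits_N_K_C, dim=-1).double()
    return get_batchbald_batch(probs_N_K_C, acquisition_size, num_samples)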
# Cell
def get_bald_batch(probs_N_K_C: torch.Tensor,
batch_size: int,
dtype=None,
device=None) -> CandidateBatch:
N, K, C = probs_N_K_C.shape
batch_size = min(batch_size, N)
candidate_indices = []
candidate_scores = []
scores_N = -compute_conditional_entropy(probs_N_K_C)
scores_N += compute_entropy(probs_N_K_C)
    candidate_scores, candidate_indices = torch.topk(scores_N, batch_size)
    return CandidateBatch(candidate_scores.tolist(), candidate_indices.tolist()) | [
"torch.logsumexp",
"torch.topk",
"torch.empty",
"toma.toma.execute.chunked",
"tqdm.auto.tqdm",
"torch.exp",
"batchbald_redux.joint_entropy.DynamicJointEntropy",
"torch.cuda.is_available",
"math.log",
"torch.sum",
"torch.log"
] | [((534, 568), 'torch.empty', 'torch.empty', (['N'], {'dtype': 'torch.double'}), '(N, dtype=torch.double)\n', (545, 568), False, 'import torch\n'), ((581, 635), 'tqdm.auto.tqdm', 'tqdm', ([], {'total': 'N', 'desc': '"""Conditional Entropy"""', 'leave': '(False)'}), "(total=N, desc='Conditional Entropy', leave=False)\n", (585, 635), False, 'from tqdm.auto import tqdm\n'), ((642, 681), 'toma.toma.execute.chunked', 'toma.execute.chunked', (['probs_N_K_C', '(1024)'], {}), '(probs_N_K_C, 1024)\n', (662, 681), False, 'from toma import toma\n'), ((1103, 1137), 'torch.empty', 'torch.empty', (['N'], {'dtype': 'torch.double'}), '(N, dtype=torch.double)\n', (1114, 1137), False, 'import torch\n'), ((1150, 1192), 'tqdm.auto.tqdm', 'tqdm', ([], {'total': 'N', 'desc': '"""Entropy"""', 'leave': '(False)'}), "(total=N, desc='Entropy', leave=False)\n", (1154, 1192), False, 'from tqdm.auto import tqdm\n'), ((1199, 1238), 'toma.toma.execute.chunked', 'toma.execute.chunked', (['probs_N_K_C', '(1024)'], {}), '(probs_N_K_C, 1024)\n', (1219, 1238), False, 'from toma import toma\n'), ((1783, 1817), 'torch.empty', 'torch.empty', (['N'], {'dtype': 'torch.double'}), '(N, dtype=torch.double)\n', (1794, 1817), False, 'import torch\n'), ((1830, 1884), 'tqdm.auto.tqdm', 'tqdm', ([], {'total': 'N', 'desc': '"""Conditional Entropy"""', 'leave': '(False)'}), "(total=N, desc='Conditional Entropy', leave=False)\n", (1834, 1884), False, 'from tqdm.auto import tqdm\n'), ((1891, 1931), 'toma.toma.execute.chunked', 'toma.execute.chunked', (['logits_N_K_C', '(1024)'], {}), '(logits_N_K_C, 1024)\n', (1911, 1931), False, 'from toma import toma\n'), ((2342, 2376), 'torch.empty', 'torch.empty', (['N'], {'dtype': 'torch.double'}), '(N, dtype=torch.double)\n', (2353, 2376), False, 'import torch\n'), ((2389, 2431), 'tqdm.auto.tqdm', 'tqdm', ([], {'total': 'N', 'desc': '"""Entropy"""', 'leave': '(False)'}), "(total=N, desc='Entropy', leave=False)\n", (2393, 2431), False, 'from tqdm.auto import tqdm\n'), ((2438, 2478), 'toma.toma.execute.chunked', 'toma.execute.chunked', (['logits_N_K_C', '(1024)'], {}), '(logits_N_K_C, 1024)\n', (2458, 2478), False, 'from toma import toma\n'), ((3460, 3561), 'batchbald_redux.joint_entropy.DynamicJointEntropy', 'joint_entropy.DynamicJointEntropy', (['num_samples', '(batch_size - 1)', 'K', 'C'], {'dtype': 'dtype', 'device': 'device'}), '(num_samples, batch_size - 1, K, C, dtype=\n dtype, device=device)\n', (3493, 3561), False, 'from batchbald_redux import joint_entropy\n'), ((5269, 5301), 'torch.topk', 'torch.topk', (['scores_N', 'batch_size'], {}), '(scores_N, batch_size)\n', (5279, 5301), False, 'import torch\n'), ((769, 791), 'torch.log', 'torch.log', (['probs_n_K_C'], {}), '(probs_n_K_C)\n', (778, 791), False, 'import torch\n'), ((1376, 1401), 'torch.log', 'torch.log', (['mean_probs_n_C'], {}), '(mean_probs_n_C)\n', (1385, 1401), False, 'import torch\n'), ((2021, 2044), 'torch.exp', 'torch.exp', (['logits_n_K_C'], {}), '(logits_n_K_C)\n', (2030, 2044), False, 'import torch\n'), ((2558, 2594), 'torch.logsumexp', 'torch.logsumexp', (['logits_n_K_C'], {'dim': '(1)'}), '(logits_n_K_C, dim=1)\n', (2573, 2594), False, 'import torch\n'), ((2597, 2608), 'math.log', 'math.log', (['K'], {}), '(K)\n', (2605, 2608), False, 'import math\n'), ((2646, 2672), 'torch.exp', 'torch.exp', (['mean_logits_n_C'], {}), '(mean_logits_n_C)\n', (2655, 2672), False, 'import torch\n'), ((3959, 3984), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3982, 3984), False, 'import torch\n'), ((1483, 1509), 
'torch.sum', 'torch.sum', (['nats_n_C'], {'dim': '(1)'}), '(nats_n_C, dim=1)\n', (1492, 1509), False, 'import torch\n'), ((2725, 2751), 'torch.sum', 'torch.sum', (['nats_n_C'], {'dim': '(1)'}), '(nats_n_C, dim=1)\n', (2734, 2751), False, 'import torch\n'), ((872, 905), 'torch.sum', 'torch.sum', (['nats_n_K_C'], {'dim': '(1, 2)'}), '(nats_n_K_C, dim=(1, 2))\n', (881, 905), False, 'import torch\n'), ((2097, 2130), 'torch.sum', 'torch.sum', (['nats_n_K_C'], {'dim': '(1, 2)'}), '(nats_n_K_C, dim=(1, 2))\n', (2106, 2130), False, 'import torch\n')] |
#/usr/bin/python
import re;
import subprocess;
cmd="\nfind . -name \"*nti21_cut.grd\"\n";
pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;
ntis=pipe.read().split();
pipe.close();
for nti in ntis:
jday=nti[nti.find(".A")+6:nti.find(".A")+9];
vdir=nti[nti.find("/")+1:nti.rfind("/")];
image="data_more/"+nti[nti.rfind("/")+1:nti.find("_cut")]+".grd";
cmd="\ngrdinfo "+nti+"\n";
pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;
info=pipe.read().strip();
pipe.close();
zmax=info[re.search("z_max:\s*",info).end(0):re.search("z_max:\s*\S*\s*",info).end(0)].strip();
if zmax != "0":
print(jday+" "+zmax+" "+vdir+" "+image);
#if zmax != "0" and float(zmax) > -0.861:
#print(nti+" "+zmax);
exit();
"""
exit();
"""
| [
"subprocess.Popen",
"re.search"
] | [((97, 154), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'shell': '(True)', 'stdout': 'subprocess.PIPE'}), '(cmd, shell=True, stdout=subprocess.PIPE)\n', (113, 154), False, 'import subprocess\n'), ((409, 466), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'shell': '(True)', 'stdout': 'subprocess.PIPE'}), '(cmd, shell=True, stdout=subprocess.PIPE)\n', (425, 466), False, 'import subprocess\n'), ((526, 555), 're.search', 're.search', (['"""z_max:\\\\s*"""', 'info'], {}), "('z_max:\\\\s*', info)\n", (535, 555), False, 'import re\n'), ((561, 598), 're.search', 're.search', (['"""z_max:\\\\s*\\\\S*\\\\s*"""', 'info'], {}), "('z_max:\\\\s*\\\\S*\\\\s*', info)\n", (570, 598), False, 'import re\n')] |
import re
import csv
import pandas as pd
f= open("data-errors.txt",'r',encoding="utf8")
fc = f.read()
fcbRegex = re.compile(r"line(\s\d+)")
clear = re.findall(fcbRegex,fc)
for i in clear:
print("lines",i)
arr = [int(i) for i in clear]  # line numbers as ints so the "count in arr" check below can match
print("array is",arr)
count = 1
reader = csv.reader(open('amazon_reviews_us_Watches_v1_00.tsv', 'r',encoding="utf8"), delimiter="\t")
writer = csv.writer(open('amazon_reviews_us_Watches_v1_00_clean.tsv', 'w',encoding="utf8"), delimiter="\t")
for row in reader:
if count in arr:
print("skipping ", count)
count += 1
continue
else:
print("writting ", count)
writer.writerow(row)
count += 1
| [
"re.findall",
"re.compile"
] | [((116, 143), 're.compile', 're.compile', (['"""line(\\\\s\\\\d+)"""'], {}), "('line(\\\\s\\\\d+)')\n", (126, 143), False, 'import re\n'), ((152, 176), 're.findall', 're.findall', (['fcbRegex', 'fc'], {}), '(fcbRegex, fc)\n', (162, 176), False, 'import re\n')] |
# coding: utf-8
"""
Running a slave instance.
"""
import logging
import sys
import time
import traceback
import dill as pickle
import zmq
logger = logging.getLogger(__name__)
class ExceptionPicklingError(Exception):
"""Represent an error attempting to pickle the result of a task"""
class TaskSystemExit(Exception):
"""For when the task raised a SystemExit exception, trying to quit"""
def do_task(task_id, task_function):
"""Do a task, as specified in a pickle bundle.
:arg byte data: The pickle-data to load
:returns: Pickle data of the result, or an exception
"""
try:
logger.debug("Running task with ID {}".format(task_id))
# Run whatever task we've been given
result = task_function()
logger.debug("Completed task")
# An error pickling here counts as a job failure
return b"YAY " + pickle.dumps((task_id, result))
except KeyboardInterrupt:
# This is interactive so we want to let it float up - we'll handle the
# special case in the parent context
raise
except BaseException:
logger.debug("Exception processing task")
# Everything else: We want to pass back across the network
(_, exc_value, exc_trace) = sys.exc_info()
exc_trace = traceback.format_tb(exc_trace)
# We don't want to propagate a SystemExit to the other side
if isinstance(exc_value, SystemExit):
logger.debug("Intercepted task calling sys.exit")
exc_value = TaskSystemExit()
# Be careful - we might not be able to pickle the exception?? Go to lengths
# to make sure that we pass something sensible back
try:
pickle.dumps(exc_value)
except pickle.PicklingError:
exc_value = ExceptionPicklingError("{}: {}".format(
str(type(exc_value)), str(exc_value)))
return b"ONO " + pickle.dumps((task_id, exc_trace, exc_value))
def _do_handshake(socket, worker_id):
logger.debug("Sending hello")
socket.send(b"HELO IAM " + worker_id.encode("utf-8"))
logger.debug("Awaiting confirmation of hello recieved")
assert socket.recv() == b"HAY"
logger.debug("Got hello. Going into task loop")
def _handle_task(socket, data):
"""Handle a reply asking us to do a task"""
try:
(task_id, task_function) = pickle.loads(data)
logger.debug("Got task %s (%d bytes)", task_id, len(data))
return do_task(task_id, task_function)
except KeyboardInterrupt as exc:
# This is a special case; try to tell the master that we failed
# to quit, then continue to raise the error.
logger.info("Got interrupt while processing task")
socket.send(b"ONO " + pickle.dumps((task_id, "", exc)))
socket.recv()
raise
def run_slave(server_url, worker_id, timeout=30):
"""Run a slave instance and connect it to a specific master URL.
:param str server_url: The server string to use to connect
    :param str worker_id: The worker ID to use when communicating
:param timeout: The time (in seconds) to wait with no jobs before terminating
"""
logger.debug("Running slave {} connect to {}".format(worker_id, server_url))
context = zmq.Context()
socket = context.socket(zmq.REQ)
logger.debug("Connecting")
socket.connect(server_url)
socket.RCVTIMEO = int(1000 * timeout)
try:
_do_handshake(socket, worker_id)
except zmq.error.Again:
logger.debug("Timed out waiting for handshake.")
sys.exit(1)
else:
# If waiting for the whole timeout, then stop waiting
last_job = time.time()
while time.time() - last_job < timeout:
logger.debug("Asking for a task")
socket.send("IZ BORED {}".format(worker_id).encode("UTF-8"))
reply = socket.recv()
# We get a command returned
assert reply.startswith(b"PLZ")
if reply == b"PLZ WAIT":
logger.debug("No tasks available. Trying again in a few seconds.")
time.sleep(min(timeout / 2.0, 5))
elif reply == b"PLZ GOWAY":
logger.debug("Got quit signal. ending main loop.")
break
elif reply.startswith(b"PLZ DO"):
try:
result = _handle_task(socket, reply[7:])
except KeyboardInterrupt:
# Now, we know we want to quit - so send the message letting
# the master know. This is a little unclean, but it's only
# because we are here that we can guarantee that we weren't in
# the middle of a send/recv when the signal was sent
logger.debug("Sending quit message after keyboardinterrupt")
socket.send(b"IGIVEUP " + worker_id.encode("utf-8"))
socket.recv()
raise
logger.debug("Sending result of %d bytes", len(result))
socket.send(result)
# Await the ok
assert socket.recv() == b"THX"
last_job = time.time()
if time.time() - last_job >= timeout:
logger.debug("Waited too long for new tasks. Quitting.")
socket.send(b"IGIVEUP " + worker_id.encode("utf-8"))
socket.recv()
finally:
logger.debug("Closing socket")
socket.LINGER = 300
socket.close()
logger.debug("Closing context")
context.term()
logger.debug("Slave completed.")
# Messaging protocol:
# Sent Recieved Action
# ----------------------- ------------- ----------------------------------
# HELO IAM {id} HAY Negotiation success
# IZ BORED {id} PLZ GOWAY Exit
# PLZ WAIT Nothing to do; try again soon
# PLZ DO {task} Hand off task to runner
# YAY {result} THX Task succeeded with result data
# ONO {result} THX Task failed - with exception data
# IGIVEUP {id} BYE Quitting; given up with processing
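# ---------------------------------------------------------------------------
# Illustrative sketch (added; not part of the original module): a minimal
# master-side loop answering the protocol above. The REP socket, bind URL and
# the `tasks` mapping of {task_id: zero-argument callable} are assumptions for
# demonstration only; a production master would likely use a ROUTER socket and
# handle slave time-outs.
def _example_master_loop(tasks, url="tcp://*:5555"):
    """Serve pickled (task_id, callable) pairs to slaves; illustrative only."""
    import dill as pickle
    import zmq
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.bind(url)
    pending = list(tasks.items())
    results = {}
    try:
        while True:
            msg = socket.recv()
            if msg.startswith(b"HELO IAM"):
                socket.send(b"HAY")
            elif msg.startswith(b"IZ BORED"):
                if pending:
                    task_id, func = pending.pop()
                    socket.send(b"PLZ DO " + pickle.dumps((task_id, func)))
                elif len(results) == len(tasks):
                    socket.send(b"PLZ GOWAY")
                    break
                else:
                    socket.send(b"PLZ WAIT")
            elif msg.startswith(b"YAY ") or msg.startswith(b"ONO "):
                reply = pickle.loads(msg[4:])
                results[reply[0]] = reply[1:]  # result, or (traceback, exception)
                socket.send(b"THX")
            elif msg.startswith(b"IGIVEUP"):
                socket.send(b"BYE")
    finally:
        socket.LINGER = 300
        socket.close()
        context.term()
    return results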
| [
"traceback.format_tb",
"dill.dumps",
"time.time",
"dill.loads",
"sys.exc_info",
"sys.exit",
"logging.getLogger",
"zmq.Context"
] | [((150, 177), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (167, 177), False, 'import logging\n'), ((3056, 3069), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (3067, 3069), False, 'import zmq\n'), ((2214, 2232), 'dill.loads', 'pickle.loads', (['data'], {}), '(data)\n', (2226, 2232), True, 'import dill as pickle\n'), ((3423, 3434), 'time.time', 'time.time', ([], {}), '()\n', (3432, 3434), False, 'import time\n'), ((835, 866), 'dill.dumps', 'pickle.dumps', (['(task_id, result)'], {}), '((task_id, result))\n', (847, 866), True, 'import dill as pickle\n'), ((1186, 1200), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1198, 1200), False, 'import sys\n'), ((1217, 1247), 'traceback.format_tb', 'traceback.format_tb', (['exc_trace'], {}), '(exc_trace)\n', (1236, 1247), False, 'import traceback\n'), ((3330, 3341), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3338, 3341), False, 'import sys\n'), ((1596, 1619), 'dill.dumps', 'pickle.dumps', (['exc_value'], {}), '(exc_value)\n', (1608, 1619), True, 'import dill as pickle\n'), ((1781, 1826), 'dill.dumps', 'pickle.dumps', (['(task_id, exc_trace, exc_value)'], {}), '((task_id, exc_trace, exc_value))\n', (1793, 1826), True, 'import dill as pickle\n'), ((3445, 3456), 'time.time', 'time.time', ([], {}), '()\n', (3454, 3456), False, 'import time\n'), ((4721, 4732), 'time.time', 'time.time', ([], {}), '()\n', (4730, 4732), False, 'import time\n'), ((2572, 2604), 'dill.dumps', 'pickle.dumps', (["(task_id, '', exc)"], {}), "((task_id, '', exc))\n", (2584, 2604), True, 'import dill as pickle\n'), ((4702, 4713), 'time.time', 'time.time', ([], {}), '()\n', (4711, 4713), False, 'import time\n')] |
# -*- coding: utf-8 -*-
# @Time : 2020/2/12 15:47
# @Author : Chen
# @File : datasets.py
# @Software: PyCharm
import os, warnings
from mxnet.gluon.data import dataset, sampler
from mxnet import image
import numpy as np
class IdxSampler(sampler.Sampler):
"""Samples elements from [0, length) randomly without replacement.
Parameters
----------
length : int
Length of the sequence.
"""
def __init__(self, indices_selected):
if isinstance(indices_selected, list):
indices_selected = np.array(indices_selected)
self._indices_selected = indices_selected
self._length = indices_selected.shape[0]
def __iter__(self):
indices = self._indices_selected
np.random.shuffle(indices)
return iter(indices)
def __len__(self):
return self._length
class ImageFolderDataset(dataset.Dataset):
"""A dataset for loading image files stored in a folder structure.
like::
root/car/0001.jpg
root/car/xxxa.jpg
root/car/yyyb.jpg
root/bus/123.jpg
root/bus/023.jpg
root/bus/wwww.jpg
Parameters
----------
root : str
Path to root directory.
flag : {0, 1}, default 1
If 0, always convert loaded images to greyscale (1 channel).
If 1, always convert loaded images to colored (3 channels).
transform : callable, default None
A function that takes data and label and transforms them::
transform = lambda data, label: (data.astype(np.float32)/255, label)
Attributes
----------
synsets : list
List of class names. `synsets[i]` is the name for the integer label `i`
items : list of tuples
List of all images in (filename, label) pairs.
"""
def __init__(self, root, flag=1, transform=None, pseudo_labels=None):
self._root = os.path.expanduser(root)
self._flag = flag
self._transform = transform
self._exts = ['.jpg', '.jpeg', '.png']
self._list_images(self._root)
self._pseudo_labels = pseudo_labels
def _list_images(self, root):
self.synsets = []
self.items = []
for folder in sorted(os.listdir(root)):
path = os.path.join(root, folder)
if not os.path.isdir(path):
warnings.warn('Ignoring %s, which is not a directory.'%path, stacklevel=3)
continue
label = len(self.synsets)
self.synsets.append(folder)
for filename in sorted(os.listdir(path)):
filename = os.path.join(path, filename)
ext = os.path.splitext(filename)[1]
if ext.lower() not in self._exts:
warnings.warn('Ignoring %s of type %s. Only support %s'%(
filename, ext, ', '.join(self._exts)))
continue
self.items.append((filename, label))
def __getitem__(self, idx):
img = image.imread(self.items[idx][0], self._flag)
label = self.items[idx][1]
if self._transform is not None:
return self._transform(img, label)
if self._pseudo_labels is not None:
pseudo_label = self._pseudo_labels[idx]
return img, label, idx, pseudo_label
return img, label, idx
def __len__(self):
return len(self.items)
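# ---------------------------------------------------------------------------
# Illustrative usage sketch (added; not part of the original module): the root
# path, batch size and index selection below are placeholders. It shows how
# IdxSampler restricts a DataLoader to an arbitrary subset of the dataset.
def _example_loader(root="./data/train", batch_size=32):
    """Build a DataLoader over every second image of an image folder."""
    from mxnet.gluon.data import DataLoader
    dataset = ImageFolderDataset(root)
    indices = np.arange(0, len(dataset), 2)
    return DataLoader(dataset,
                      batch_size=batch_size,
                      sampler=IdxSampler(indices),
                      last_batch='keep')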
| [
"os.path.join",
"os.path.isdir",
"numpy.array",
"mxnet.image.imread",
"os.path.splitext",
"warnings.warn",
"os.path.expanduser",
"os.listdir",
"numpy.random.shuffle"
] | [((741, 767), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (758, 767), True, 'import numpy as np\n'), ((1879, 1903), 'os.path.expanduser', 'os.path.expanduser', (['root'], {}), '(root)\n', (1897, 1903), False, 'import os, warnings\n'), ((2991, 3035), 'mxnet.image.imread', 'image.imread', (['self.items[idx][0]', 'self._flag'], {}), '(self.items[idx][0], self._flag)\n', (3003, 3035), False, 'from mxnet import image\n'), ((541, 567), 'numpy.array', 'np.array', (['indices_selected'], {}), '(indices_selected)\n', (549, 567), True, 'import numpy as np\n'), ((2210, 2226), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (2220, 2226), False, 'import os, warnings\n'), ((2248, 2274), 'os.path.join', 'os.path.join', (['root', 'folder'], {}), '(root, folder)\n', (2260, 2274), False, 'import os, warnings\n'), ((2294, 2313), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (2307, 2313), False, 'import os, warnings\n'), ((2331, 2407), 'warnings.warn', 'warnings.warn', (["('Ignoring %s, which is not a directory.' % path)"], {'stacklevel': '(3)'}), "('Ignoring %s, which is not a directory.' % path, stacklevel=3)\n", (2344, 2407), False, 'import os, warnings\n'), ((2544, 2560), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2554, 2560), False, 'import os, warnings\n'), ((2590, 2618), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (2602, 2618), False, 'import os, warnings\n'), ((2641, 2667), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (2657, 2667), False, 'import os, warnings\n')] |
'''
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
'''
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from joblib import dump
def train_model():
data = pd.read_csv('data/diabetes.csv')
X_train = data.drop(columns='Outcome')
y_train = data['Outcome']
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
dump(rfc, 'model/diabetes_predictor.joblib')
def main():
train_model()
if __name__ == '__main__':
main()
| [
"pandas.read_csv",
"sklearn.ensemble.RandomForestClassifier",
"joblib.dump"
] | [((1347, 1379), 'pandas.read_csv', 'pd.read_csv', (['"""data/diabetes.csv"""'], {}), "('data/diabetes.csv')\n", (1358, 1379), True, 'import pandas as pd\n'), ((1463, 1487), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (1485, 1487), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1522, 1566), 'joblib.dump', 'dump', (['rfc', '"""model/diabetes_predictor.joblib"""'], {}), "(rfc, 'model/diabetes_predictor.joblib')\n", (1526, 1566), False, 'from joblib import dump\n')] |
#!/usr/bin/env python
from setuptools import setup
setup(
name='pyr',
version='0.4.1',
description='A nicer REPL for Python.',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/zain/pyr',
packages=['pyr'],
install_requires=['pygments'],
scripts=['bin/pyr'],
)
| [
"setuptools.setup"
] | [((53, 287), 'setuptools.setup', 'setup', ([], {'name': '"""pyr"""', 'version': '"""0.4.1"""', 'description': '"""A nicer REPL for Python."""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/zain/pyr"""', 'packages': "['pyr']", 'install_requires': "['pygments']", 'scripts': "['bin/pyr']"}), "(name='pyr', version='0.4.1', description='A nicer REPL for Python.',\n author='<NAME>', author_email='<EMAIL>', url=\n 'https://github.com/zain/pyr', packages=['pyr'], install_requires=[\n 'pygments'], scripts=['bin/pyr'])\n", (58, 287), False, 'from setuptools import setup\n')] |
#!/usr/bin/env python
import copy
import glob
import logging
import os
import re
import numpy as np
from astropy.io import fits
from scipy import interpolate, ndimage, optimize, signal
try:
from charis.image import Image
except:
from image import Image
log = logging.getLogger('main')
class PSFLets:
"""
Helper class to deal with the PSFLets on the detector. Does most of the heavy lifting
during the wavelength calibration step.
"""
def __init__(self, load=False, infile=None, infiledir='.'):
'''
Initialize the class
Parameters
----------
load: Boolean
Whether to load an already-existing wavelength calibration file
infile: String
If load is True, this is the name of the file
infiledir: String
If load is True, this is the directory in which the file resides
'''
self.xindx = None
self.yindx = None
self.lam_indx = None
self.nlam = None
self.nlam_max = None
self.interp_arr = None
self.order = None
if load:
self.loadpixsol(infile, infiledir)
def loadpixsol(self, infile=None, infiledir='./calibrations'):
'''
Loads existing wavelength calibration file
Parameters
----------
infile: String
Name of the file
infiledir: String
Directory in which the file resides
'''
if infile is None:
infile = re.sub('//', '/', infiledir + '/PSFloc.fits')
hdulist = fits.open(infile)
try:
self.xindx = hdulist[0].data
self.yindx = hdulist[1].data
self.lam_indx = hdulist[2].data
self.nlam = hdulist[3].data.astype(int)
except:
raise RuntimeError("File " + infile +
" does not appear to contain a CHARIS wavelength solution in the appropriate format.")
self.nlam_max = np.amax(self.nlam)
def savepixsol(self, outdir="calibrations/"):
'''
Saves wavelength calibration file
Parameters
----------
outdir: String
            Directory in which to put the file. The file is named PSFloc.fits and is a
            multi-extension FITS file, its extensions corresponding to:
            0. a 3D ndarray with the X detector positions of every lenslet at each pixel of its microspectrum
            1. a 3D ndarray with the matching Y detector positions
            2. a 3D ndarray with the wavelength at each of those positions
            3. a 2D ndarray with the number of valid wavelengths for a given lenslet (some wavelengths fall outside of the detector area)
'''
if not os.path.isdir(outdir):
raise IOError("Attempting to save pixel solution to directory " + outdir + ". Directory does not exist.")
outfile = re.sub('//', '/', outdir + '/PSFloc.fits')
out = fits.HDUList(fits.PrimaryHDU(self.xindx))
out.append(fits.PrimaryHDU(self.yindx))
out.append(fits.PrimaryHDU(self.lam_indx))
out.append(fits.PrimaryHDU(self.nlam.astype(int)))
try:
out.writeto(outfile, overwrite=True)
except:
raise
def geninterparray(self, lam, allcoef, order=3):
'''
Set up array to solve for best-fit polynomial fits to the
coefficients of the wavelength solution. These will be used
to smooth/interpolate the wavelength solution, and
ultimately to compute its inverse.
Parameters
----------
lam: float
Wavelength in nm
allcoef: list of lists floats
Polynomial coefficients of wavelength solution
order: int
Order of polynomial wavelength solution
Notes
-----
Populates the attribute interp_arr in PSFLet class
'''
self.interp_arr = np.zeros((order + 1, allcoef.shape[1]))
self.order = order
xarr = np.ones((lam.shape[0], order + 1))
for i in range(1, order + 1):
xarr[:, i] = np.log(lam)**i
for i in range(self.interp_arr.shape[1]):
coef = np.linalg.lstsq(xarr, allcoef[:, i])[0]
self.interp_arr[:, i] = coef
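    # Note (added for clarity): geninterparray fits, for each coefficient i of
    # the 2-D polynomial distortion model, a smooth wavelength dependence of
    # the form
    #     coef_i(lam) = sum_k interp_arr[k, i] * (ln lam)**k,   k = 0 .. order,
    # by linear least squares; the methods below simply evaluate (or
    # differentiate) this expansion at the requested wavelength.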
def return_locations_short(self, coef, xindx, yindx):
'''
Returns the x,y detector location of a given lenslet for a given polynomial fit
Parameters
----------
coef: lists floats
Polynomial coefficients of fit for a single wavelength
xindx: int
X index of lenslet in lenslet array
yindx: int
Y index of lenslet in lenslet array
Returns
-------
interp_x: float
X coordinate on the detector
interp_y: float
Y coordinate on the detector
'''
coeforder = int(np.sqrt(coef.shape[0])) - 1
interp_x, interp_y = _transform(xindx, yindx, coeforder, coef)
return interp_x, interp_y
def return_res(self, lam, allcoef, xindx, yindx,
order=3, lam1=None, lam2=None):
'''
Returns the spectral resolution and interpolated wavelength array
Parameters
----------
lam: float
Wavelength in nm
allcoef: list of lists floats
Polynomial coefficients of wavelength solution
xindx: int
X index of lenslet in lenslet array
yindx: int
Y index of lenslet in lenslet array
order: int
Order of polynomial wavelength solution
lam1: float
Shortest wavelength in nm
lam2: float
Longest wavelength in nm
Returns
-------
interp_lam: array
Array of wavelengths
R: float
Effective spectral resolution
'''
if lam1 is None:
lam1 = np.amin(lam) / 1.04
if lam2 is None:
lam2 = np.amax(lam) * 1.03
interporder = order
if self.interp_arr is None:
self.geninterparray(lam, allcoef, order=order)
coeforder = int(np.sqrt(allcoef.shape[1])) - 1
n_spline = 100
interp_lam = np.linspace(lam1, lam2, n_spline)
dy = []
dx = []
for i in range(n_spline):
coef = np.zeros((coeforder + 1) * (coeforder + 2))
for k in range(1, interporder + 1):
coef += k * self.interp_arr[k] * np.log(interp_lam[i])**(k - 1)
_dx, _dy = _transform(xindx, yindx, coeforder, coef)
dx += [_dx]
dy += [_dy]
R = np.sqrt(np.asarray(dy)**2 + np.asarray(dx)**2)
return interp_lam, R
def monochrome_coef(self, lam, alllam=None, allcoef=None, order=3):
if self.interp_arr is None:
if alllam is None or allcoef is None:
raise ValueError("Interpolation array has not been computed. Must call monochrome_coef with arrays.")
self.geninterparray(alllam, allcoef, order=order)
coef = np.zeros(self.interp_arr[0].shape)
for k in range(self.order + 1):
coef += self.interp_arr[k] * np.log(lam)**k
return coef
def return_locations(self, lam, allcoef, xindx, yindx, order=3):
'''
Calculates the detector coordinates of lenslet located at `xindx`, `yindx`
for desired wavelength `lam`
Parameters
----------
lam: float
Wavelength in nm
allcoef: list of floats
Polynomial coefficients of wavelength solution
xindx: int
X index of lenslet in lenslet array
yindx: int
Y index of lenslet in lenslet array
order: int
Order of polynomial wavelength solution
Returns
-------
interp_x: float
X coordinate on the detector
interp_y: float
Y coordinate on the detector
'''
if len(allcoef.shape) == 1:
coeforder = int(np.sqrt(allcoef.shape[0])) - 1
interp_x, interp_y = _transform(xindx, yindx, coeforder, allcoef)
return interp_x, interp_y
if self.interp_arr is None:
self.geninterparray(lam, allcoef, order=order)
coeforder = int(np.sqrt(allcoef.shape[1])) - 1
if not (coeforder + 1) * (coeforder + 2) == allcoef.shape[1]:
raise ValueError("Number of coefficients incorrect for polynomial order.")
coef = np.zeros((coeforder + 1) * (coeforder + 2))
for k in range(self.order + 1):
coef += self.interp_arr[k] * np.log(lam)**k
interp_x, interp_y = _transform(xindx, yindx, coeforder, coef)
return interp_x, interp_y
def genpixsol(self, lam, allcoef, order=3, lam1=None, lam2=None):
"""
Calculates the wavelength at the center of each pixel within a microspectrum
Parameters
----------
lam: float
Wavelength in nm
allcoef: list of floats
List describing the polynomial coefficients that best fit the lenslets,
for all wavelengths
order: int
Order of the polynomical fit
lam1: float
Lowest wavelength in nm
lam2: float
Highest wavelength in nm
Notes
-----
This functions fills in most of the fields of the PSFLet class: the array
of xindx, yindx, nlam, lam_indx and nlam_max
"""
###################################################################
# Read in wavelengths of spots, coefficients of wavelength
# solution. Obtain extrapolated limits of wavlength solution
# to 4% below and 3% above limits of the coefficient file by
# default.
###################################################################
if lam1 is None:
lam1 = np.amin(lam) / 1.04
if lam2 is None:
lam2 = np.amax(lam) * 1.03
interporder = order
if self.interp_arr is None:
self.geninterparray(lam, allcoef, order=order)
coeforder = int(np.sqrt(allcoef.shape[1])) - 1
if not (coeforder + 1) * (coeforder + 2) == allcoef.shape[1]:
raise ValueError("Number of coefficients incorrect for polynomial order.")
xindx = np.arange(-100, 101)
xindx, yindx = np.meshgrid(xindx, xindx)
n_spline = 100
interp_x = np.zeros(tuple([n_spline] + list(xindx.shape)))
interp_y = np.zeros(interp_x.shape)
interp_lam = np.linspace(lam1, lam2, n_spline)
for i in range(n_spline):
coef = np.zeros((coeforder + 1) * (coeforder + 2))
for k in range(interporder + 1):
coef += self.interp_arr[k] * np.log(interp_lam[i])**k
interp_x[i], interp_y[i] = _transform(xindx, yindx, coeforder, coef)
x = np.zeros(tuple(list(xindx.shape) + [1000]))
y = np.zeros(x.shape)
        nlam = np.zeros(xindx.shape, int)
lam_out = np.zeros(y.shape)
good = np.zeros(xindx.shape)
for ix in range(xindx.shape[0]):
for iy in range(xindx.shape[1]):
pix_x = interp_x[:, ix, iy]
pix_y = interp_y[:, ix, iy]
if np.all(pix_x < 0) or np.all(pix_x > 2048) or np.all(pix_y < 0) or np.all(pix_y > 2048):
continue
if pix_y[-1] < pix_y[0]:
try:
tck_y = interpolate.splrep(pix_y[::-1], interp_lam[::-1], k=1, s=0)
except:
print(pix_x, pix_y)
raise
else:
tck_y = interpolate.splrep(pix_y, interp_lam, k=1, s=0)
y1, y2 = [int(np.amin(pix_y)) + 1, int(np.amax(pix_y))]
tck_x = interpolate.splrep(interp_lam, pix_x, k=1, s=0)
nlam[ix, iy] = y2 - y1 + 1
y[ix, iy, :nlam[ix, iy]] = np.arange(y1, y2 + 1)
lam_out[ix, iy, :nlam[ix, iy]] = interpolate.splev(y[ix, iy, :nlam[ix, iy]], tck_y)
x[ix, iy, :nlam[ix, iy]] = interpolate.splev(lam_out[ix, iy, :nlam[ix, iy]], tck_x)
for nlam_max in range(x.shape[-1]):
if np.all(y[:, :, nlam_max] == 0):
break
self.xindx = x[:, :, :nlam_max]
self.yindx = y[:, :, :nlam_max]
self.nlam = nlam
self.lam_indx = lam_out[:, :, :nlam_max]
self.nlam_max = np.amax(nlam)
def _initcoef(order, scale=15.02, phi=np.arctan2(1.926, -1), x0=0, y0=0):
"""
Private function _initcoef in locate_psflets
Create a set of coefficients including a rotation matrix plus zeros.
Parameters
----------
order: int
The polynomial order of the grid distortion
scale: float
The linear separation in pixels of the PSFlets. Default 15.02.
phi: float
The pitch angle of the lenslets. Default atan(1.926)
x0: float
x offset to apply to the central pixel. Default 0
y0: float
y offset to apply to the central pixel. Default 0
Returns
-------
coef: list of floats
A list of length (order+1)*(order+2) to be optimized.
Notes
-----
The list of coefficients has space for a polynomial fit of the
input order (i.e., for order 3, up to terms like x**3 and x**2*y,
but not x**3*y). It is all zeros in the output apart from the
rotation matrix given by scale and phi.
"""
try:
if not order == int(order):
raise ValueError("Polynomial order must be integer")
else:
if order < 1 or order > 5:
raise ValueError("Polynomial order must be >0, <=5")
except:
raise ValueError("Polynomial order must be integer")
n = (order + 1) * (order + 2)
coef = np.zeros((n))
coef[0] = x0
coef[1] = scale * np.cos(phi)
coef[order + 1] = -scale * np.sin(phi)
    coef[n // 2] = y0
    coef[n // 2 + 1] = scale * np.sin(phi)
    coef[n // 2 + order + 1] = scale * np.cos(phi)
return list(coef)
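# Example (added for illustration; the offsets are placeholders): for order 3
# the list has (3 + 1) * (3 + 2) = 20 entries, and only the two offsets and the
# four linear (rotation/scale) terms start out non-zero.
# >>> coef = _initcoef(3, x0=1024.0, y0=1024.0)
# >>> len(coef)
# 20
# >>> float(coef[0]), float(coef[len(coef) // 2])
# (1024.0, 1024.0)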
def _pullorder(coef, order=1):
coeforder = int(np.sqrt(len(coef) + 0.25) - 1.5 + 1e-12)
coef_short = []
i = 0
for ix in range(coeforder + 1):
for iy in range(coeforder - ix + 1):
if ix + iy <= order:
coef_short += [coef[i]]
i += 1
for ix in range(coeforder + 1):
for iy in range(coeforder - ix + 1):
if ix + iy <= order:
coef_short += [coef[i]]
i += 1
return coef_short
def _insertorder(coefshort, coef):
coeforder = int(np.sqrt(len(coef) + 0.25) - 1.5 + 1e-12)
shortorder = int(np.sqrt(len(coefshort) + 0.25) - 1.5 + 1e-12)
i = 0
j = 0
for ix in range(coeforder + 1):
for iy in range(coeforder - ix + 1):
if ix + iy <= shortorder:
coef[i] = coefshort[j]
j += 1
i += 1
for ix in range(coeforder + 1):
for iy in range(coeforder - ix + 1):
if ix + iy <= shortorder:
coef[i] = coefshort[j]
j += 1
i += 1
return coef
def _transform(x, y, order, coef, highordercoef=None):
"""
Private function _transform in locate_psflets
Apply the coefficients given to transform the coordinates using
a polynomial.
Parameters
----------
x: ndarray
Rectilinear grid
y: ndarray of floats
Rectilinear grid
order: int
Order of the polynomial fit
coef: list of floats
List of the coefficients. Must match the length required by
order = (order+1)*(order+2)
    highordercoef: list of floats or None
        Optional extra coefficients for polynomial terms of degree above `order`
Returns
-------
_x: ndarray
Transformed coordinates
_y: ndarray
Transformed coordinates
"""
try:
if not len(coef) == (order + 1) * (order + 2):
pass # raise ValueError("Number of coefficients incorrect for polynomial order.")
except:
raise AttributeError("order must be integer, coef should be a list.")
try:
if not order == int(order):
raise ValueError("Polynomial order must be integer")
else:
if order < 1 or order > 5:
raise ValueError("Polynomial order must be >0, <=5")
except:
raise ValueError("Polynomial order must be integer")
# n**2 + 3*n + 2 = (n + 1.5)**2 - 0.25
# = (1/4)*((2*n + 3)**2 - 1) = len(coef)
order1 = int(np.sqrt(len(coef) + 0.25) - 1.5 + 1e-12)
_x = np.zeros(np.asarray(x).shape)
_y = np.zeros(np.asarray(y).shape)
i = 0
for ix in range(order1 + 1):
for iy in range(order1 - ix + 1):
_x += coef[i] * x**ix * y**iy
i += 1
for ix in range(order1 + 1):
for iy in range(order1 - ix + 1):
_y += coef[i] * x**ix * y**iy
i += 1
if highordercoef is None:
return [_x, _y]
else:
order2 = int(np.sqrt(len(highordercoef) + 0.25) - 1.5 + 1e-12)
i = 0
for ix in range(order2 + 1):
for iy in range(order1 - ix + 1):
if ix + iy <= order1:
continue
_x += coef[i] * x**ix * y**iy
i += 1
for ix in range(order2 + 1):
for iy in range(order1 - ix + 1):
if ix + iy <= order1:
continue
_y += coef[i] * x**ix * y**iy
i += 1
return [_x, _y]
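# Example (added for illustration): with only the two constant terms set, the
# transform reduces to a pure offset of the lenslet grid.
# >>> n = (2 + 1) * (2 + 2)                    # 12 coefficients for order 2
# >>> coef = [0.0] * n
# >>> coef[0], coef[n // 2] = 100.0, 200.0     # x and y detector offsets
# >>> _x, _y = _transform(np.zeros((3, 3)), np.zeros((3, 3)), 2, coef)
# >>> float(_x[0, 0]), float(_y[0, 0])
# (100.0, 200.0)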
def _corrval(coef, x, y, filtered, order, trimfrac=0.1, highordercoef=None):
"""
Private function _corrval in locate_psflets
Return the negative of the sum of the middle XX% of the PSFlet
spot fluxes (disregarding those with the most and the least flux
to limit the impact of outliers). Analogous to the trimmed mean.
Parameters
----------
coef: list of floats
coefficients for polynomial transformation
x: ndarray
coordinates of lenslets
y: ndarray
coordinates of lenslets
filtered: ndarray
image convolved with gaussian PSFlet
order: int
order of the polynomial fit
trimfrac: float
fraction of outliers (high & low combined) to trim
Default 0.1 (5% trimmed on the high end, 5% on the low end)
    highordercoef: list of floats or None
        Optional extra coefficients passed through to _transform
Returns
-------
score: float
Negative sum of PSFlet fluxes, to be minimized
"""
#################################################################
# Use np.nan for lenslet coordinates outside the CHARIS FOV,
# discard these from the calculation before trimming.
#################################################################
_x, _y = _transform(x, y, order, coef, highordercoef)
vals = ndimage.map_coordinates(filtered, [_y, _x], mode='constant',
cval=np.nan, prefilter=False)
vals_ok = vals[np.where(np.isfinite(vals))]
iclip = int(vals_ok.shape[0] * trimfrac / 2)
vals_sorted = np.sort(vals_ok)
score = -1 * np.sum(vals_sorted[iclip:-iclip])
return score
def locatePSFlets(inImage, polyorder=2, sig=0.7, coef=None, trimfrac=0.1,
phi=np.arctan2(1.926, -1), scale=15.02, fitorder=None):
"""
function locatePSFlets takes an Image class, assumed to be a
monochromatic grid of spots with read noise and shot noise, and
returns the esimated positions of the spot centroids. This is
designed to constrain the domain of the PSF-let fitting later in
the pipeline.
Parameters
----------
imImage: Image class
Assumed to be a monochromatic grid of spots
polyorder: float
order of the polynomial coordinate transformation. Default 2.
sig: float
standard deviation of convolving Gaussian used
for estimating the grid of centroids. Should be close
to the true value for the PSF-let spots. Default 0.7.
coef: list
initial guess of the coefficients of polynomial
coordinate transformation
trimfrac: float
fraction of lenslet outliers (high & low
combined) to trim in the minimization. Default 0.1
(5% trimmed on the high end, 5% on the low end)
Returns
-------
x: 2D ndarray
Estimated spot centroids in x.
y: 2D ndarray
Estimated spot centroids in y.
good:2D boolean ndarray
True for lenslets with spots inside the detector footprint
coef: list of floats
List of best-fit polynomial coefficients
Notes
-----
the coefficients, if not supplied, are initially set to the
known pitch angle and scale. A loop then does a quick check to find
reasonable offsets in x and y. With all of the first-order polynomial
coefficients set, the optimizer refines these and the higher-order
coefficients. This routine seems to be relatively robust down to
per-lenslet signal-to-noise ratios of order unity (or even a little
less).
Important note: as of now (09/2015), the number of lenslets to grid
is hard-coded as 1/10 the dimensionality of the final array. This is
sufficient to cover the detector for the fiducial lenslet spacing.
"""
#############################################################
# Convolve with a Gaussian, centroid the filtered image.
#############################################################
x = np.arange(-1 * int(3 * sig + 1), int(3 * sig + 1) + 1)
x, y = np.meshgrid(x, x)
gaussian = np.exp(-(x**2 + y**2) / (2 * sig**2))
if inImage.ivar is None:
unfiltered = signal.convolve2d(inImage.data, gaussian, mode='same')
else:
unfiltered = signal.convolve2d(inImage.data * inImage.ivar, gaussian, mode='same')
unfiltered /= signal.convolve2d(inImage.ivar, gaussian, mode='same') + 1e-10
filtered = ndimage.interpolation.spline_filter(unfiltered)
#############################################################
# x, y: Grid of lenslet IDs, Lenslet (0, 0) is the center.
#############################################################
gridfrac = 20
ydim, xdim = inImage.data.shape
x = np.arange(-(ydim // gridfrac), ydim // gridfrac + 1)
x, y = np.meshgrid(x, x)
#############################################################
# Set up polynomial coefficients, convert from lenslet
# coordinates to coordinates on the detector array.
# Then optimize the coefficients.
# We want to start with a decent guess, so we use a grid of
# offsets. Seems to be robust down to SNR/PSFlet ~ 1
# Create slice indices for subimages to perform the intial
# fits on. The new dimensionality in both x and y is 2*subsize
#############################################################
if coef is None:
ix_arr = np.arange(0, 14, 0.5)
iy_arr = np.arange(0, 25, 0.5)
log.info("Initializing PSFlet location transformation coefficients")
init = True
else:
ix_arr = np.arange(-3.0, 3.05, 0.2)
iy_arr = np.arange(-3.0, 3.05, 0.2)
coef_save = list(coef[:])
log.info("Initializing transformation coefficients with previous values")
init = False
bestval = 0
subshape = xdim * 3 // 8
_s = x.shape[0] * 3 // 8
subfiltered = ndimage.interpolation.spline_filter(unfiltered[subshape:-subshape, subshape:-subshape])
for ix in ix_arr:
for iy in iy_arr:
if init:
coef = _initcoef(polyorder, x0=ix + xdim / 2. - subshape,
y0=iy + ydim / 2. - subshape, scale=scale, phi=phi)
else:
coef = copy.deepcopy(coef_save)
coef[0] += ix - subshape
                coef[(polyorder + 1) * (polyorder + 2) // 2] += iy - subshape
newval = _corrval(coef, x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s],
subfiltered, polyorder, trimfrac)
if newval < bestval:
bestval = newval
coef_opt = copy.deepcopy(coef)
if init:
log.info("Performing initial optimization of PSFlet location transformation coefficients for frame " + inImage.filename)
res = optimize.minimize(_corrval, coef_opt, args=(
x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s], subfiltered, polyorder, trimfrac), method='Powell')
coef_opt = res.x
else:
log.info("Performing initial optimization of PSFlet location transformation coefficients for frame " + inImage.filename)
coef_lin = _pullorder(coef_opt, 1)
res = optimize.minimize(_corrval, coef_lin, args=(
x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s], subfiltered, polyorder, trimfrac, coef_opt), method='Powell', options={'xtol': 1e-6, 'ftol': 1e-6})
coef_lin = res.x
coef_opt = _insertorder(coef_lin, coef_opt)
coef_opt[0] += subshape
    coef_opt[(polyorder + 1) * (polyorder + 2) // 2] += subshape
#############################################################
# If we have coefficients from last time, we assume that we
# are now at a slightly higher wavelength, so try out offsets
# that are slightly to the right to get a good initial guess.
#############################################################
log.info("Performing final optimization of PSFlet location transformation coefficients for frame " + inImage.filename)
if not init and fitorder is not None:
coef_lin = _pullorder(coef_opt, fitorder)
res = optimize.minimize(_corrval, coef_lin, args=(x, y, filtered, polyorder, trimfrac,
coef_opt), method='Powell', options={'xtol': 1e-5, 'ftol': 1e-5})
coef_lin = res.x
coef_opt = _insertorder(coef_lin, coef_opt)
else:
res = optimize.minimize(_corrval, coef_opt, args=(x, y, filtered, polyorder, trimfrac),
method='Powell', options={'xtol': 1e-5, 'ftol': 1e-5})
coef_opt = res.x
if not res.success:
log.info("Optimizing PSFlet location transformation coefficients may have failed for frame " + inImage.filename)
_x, _y = _transform(x, y, polyorder, coef_opt)
#############################################################
# Boolean: do the lenslet PSFlets lie within the detector?
#############################################################
good = (_x > 5) * (_x < xdim - 5) * (_y > 5) * (_y < ydim - 5)
return [_x, _y, good, coef_opt]
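# ---------------------------------------------------------------------------
# Illustrative end-to-end sketch (added; not part of the original pipeline):
# file names and wavelengths are placeholders, and it assumes the Image class
# accepts a `filename` keyword and exposes .data/.ivar/.filename. `outdir`
# must already exist, and `lams` should contain at least order + 1 distinct
# wavelengths so the least-squares fit in geninterparray is well constrained.
def _example_wavecal(monochrome_files, lams, outdir="./calibrations"):
    """Fit the PSFlet grid for each monochromatic flat, then build and save
    the per-pixel wavelength solution."""
    allcoef = []
    coef = None
    for fn in monochrome_files:
        inImage = Image(filename=fn)
        _, _, _, coef = locatePSFlets(inImage, polyorder=3, coef=coef)
        allcoef += [list(coef)]
    psflets = PSFLets()
    psflets.genpixsol(np.asarray(lams), np.asarray(allcoef))
    psflets.savepixsol(outdir=outdir)
    return psflets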
| [
"numpy.arctan2",
"numpy.sum",
"numpy.amin",
"astropy.io.fits.PrimaryHDU",
"numpy.ones",
"numpy.sin",
"numpy.arange",
"numpy.exp",
"scipy.optimize.minimize",
"numpy.meshgrid",
"scipy.signal.convolve2d",
"numpy.isfinite",
"numpy.linspace",
"scipy.ndimage.interpolation.spline_filter",
"re.sub",
"scipy.interpolate.splrep",
"copy.deepcopy",
"numpy.asarray",
"numpy.sort",
"astropy.io.fits.open",
"numpy.cos",
"numpy.all",
"numpy.log",
"numpy.linalg.lstsq",
"os.path.isdir",
"numpy.zeros",
"numpy.amax",
"scipy.interpolate.splev",
"scipy.ndimage.map_coordinates",
"logging.getLogger",
"numpy.sqrt"
] | [((271, 296), 'logging.getLogger', 'logging.getLogger', (['"""main"""'], {}), "('main')\n", (288, 296), False, 'import logging\n'), ((12613, 12634), 'numpy.arctan2', 'np.arctan2', (['(1.926)', '(-1)'], {}), '(1.926, -1)\n', (12623, 12634), True, 'import numpy as np\n'), ((13936, 13947), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (13944, 13947), True, 'import numpy as np\n'), ((18929, 19023), 'scipy.ndimage.map_coordinates', 'ndimage.map_coordinates', (['filtered', '[_y, _x]'], {'mode': '"""constant"""', 'cval': 'np.nan', 'prefilter': '(False)'}), "(filtered, [_y, _x], mode='constant', cval=np.nan,\n prefilter=False)\n", (18952, 19023), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((19171, 19187), 'numpy.sort', 'np.sort', (['vals_ok'], {}), '(vals_ok)\n', (19178, 19187), True, 'import numpy as np\n'), ((19354, 19375), 'numpy.arctan2', 'np.arctan2', (['(1.926)', '(-1)'], {}), '(1.926, -1)\n', (19364, 19375), True, 'import numpy as np\n'), ((21646, 21663), 'numpy.meshgrid', 'np.meshgrid', (['x', 'x'], {}), '(x, x)\n', (21657, 21663), True, 'import numpy as np\n'), ((21679, 21722), 'numpy.exp', 'np.exp', (['(-(x ** 2 + y ** 2) / (2 * sig ** 2))'], {}), '(-(x ** 2 + y ** 2) / (2 * sig ** 2))\n', (21685, 21722), True, 'import numpy as np\n'), ((22025, 22072), 'scipy.ndimage.interpolation.spline_filter', 'ndimage.interpolation.spline_filter', (['unfiltered'], {}), '(unfiltered)\n', (22060, 22072), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((22332, 22384), 'numpy.arange', 'np.arange', (['(-(ydim // gridfrac))', '(ydim // gridfrac + 1)'], {}), '(-(ydim // gridfrac), ydim // gridfrac + 1)\n', (22341, 22384), True, 'import numpy as np\n'), ((22396, 22413), 'numpy.meshgrid', 'np.meshgrid', (['x', 'x'], {}), '(x, x)\n', (22407, 22413), True, 'import numpy as np\n'), ((23477, 23569), 'scipy.ndimage.interpolation.spline_filter', 'ndimage.interpolation.spline_filter', (['unfiltered[subshape:-subshape, subshape:-subshape]'], {}), '(unfiltered[subshape:-subshape, subshape\n :-subshape])\n', (23512, 23569), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((1581, 1598), 'astropy.io.fits.open', 'fits.open', (['infile'], {}), '(infile)\n', (1590, 1598), False, 'from astropy.io import fits\n'), ((1999, 2017), 'numpy.amax', 'np.amax', (['self.nlam'], {}), '(self.nlam)\n', (2006, 2017), True, 'import numpy as np\n'), ((2869, 2911), 're.sub', 're.sub', (['"""//"""', '"""/"""', "(outdir + '/PSFloc.fits')"], {}), "('//', '/', outdir + '/PSFloc.fits')\n", (2875, 2911), False, 'import re\n'), ((3907, 3946), 'numpy.zeros', 'np.zeros', (['(order + 1, allcoef.shape[1])'], {}), '((order + 1, allcoef.shape[1]))\n', (3915, 3946), True, 'import numpy as np\n'), ((3989, 4023), 'numpy.ones', 'np.ones', (['(lam.shape[0], order + 1)'], {}), '((lam.shape[0], order + 1))\n', (3996, 4023), True, 'import numpy as np\n'), ((6224, 6257), 'numpy.linspace', 'np.linspace', (['lam1', 'lam2', 'n_spline'], {}), '(lam1, lam2, n_spline)\n', (6235, 6257), True, 'import numpy as np\n'), ((7076, 7110), 'numpy.zeros', 'np.zeros', (['self.interp_arr[0].shape'], {}), '(self.interp_arr[0].shape)\n', (7084, 7110), True, 'import numpy as np\n'), ((8523, 8566), 'numpy.zeros', 'np.zeros', (['((coeforder + 1) * (coeforder + 2))'], {}), '((coeforder + 1) * (coeforder + 2))\n', (8531, 8566), True, 'import numpy as np\n'), ((10382, 10402), 'numpy.arange', 'np.arange', (['(-100)', '(101)'], {}), '(-100, 101)\n', (10391, 10402), True, 'import numpy as np\n'), ((10426, 10451), 
'numpy.meshgrid', 'np.meshgrid', (['xindx', 'xindx'], {}), '(xindx, xindx)\n', (10437, 10451), True, 'import numpy as np\n'), ((10563, 10587), 'numpy.zeros', 'np.zeros', (['interp_x.shape'], {}), '(interp_x.shape)\n', (10571, 10587), True, 'import numpy as np\n'), ((10609, 10642), 'numpy.linspace', 'np.linspace', (['lam1', 'lam2', 'n_spline'], {}), '(lam1, lam2, n_spline)\n', (10620, 10642), True, 'import numpy as np\n'), ((11006, 11023), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (11014, 11023), True, 'import numpy as np\n'), ((11039, 11068), 'numpy.zeros', 'np.zeros', (['xindx.shape', 'np.int'], {}), '(xindx.shape, np.int)\n', (11047, 11068), True, 'import numpy as np\n'), ((11087, 11104), 'numpy.zeros', 'np.zeros', (['y.shape'], {}), '(y.shape)\n', (11095, 11104), True, 'import numpy as np\n'), ((11120, 11141), 'numpy.zeros', 'np.zeros', (['xindx.shape'], {}), '(xindx.shape)\n', (11128, 11141), True, 'import numpy as np\n'), ((12559, 12572), 'numpy.amax', 'np.amax', (['nlam'], {}), '(nlam)\n', (12566, 12572), True, 'import numpy as np\n'), ((13990, 14001), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (13996, 14001), True, 'import numpy as np\n'), ((14033, 14044), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (14039, 14044), True, 'import numpy as np\n'), ((14096, 14107), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (14102, 14107), True, 'import numpy as np\n'), ((14146, 14157), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (14152, 14157), True, 'import numpy as np\n'), ((19205, 19238), 'numpy.sum', 'np.sum', (['vals_sorted[iclip:-iclip]'], {}), '(vals_sorted[iclip:-iclip])\n', (19211, 19238), True, 'import numpy as np\n'), ((21768, 21822), 'scipy.signal.convolve2d', 'signal.convolve2d', (['inImage.data', 'gaussian'], {'mode': '"""same"""'}), "(inImage.data, gaussian, mode='same')\n", (21785, 21822), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((21854, 21923), 'scipy.signal.convolve2d', 'signal.convolve2d', (['(inImage.data * inImage.ivar)', 'gaussian'], {'mode': '"""same"""'}), "(inImage.data * inImage.ivar, gaussian, mode='same')\n", (21871, 21923), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((22991, 23012), 'numpy.arange', 'np.arange', (['(0)', '(14)', '(0.5)'], {}), '(0, 14, 0.5)\n', (23000, 23012), True, 'import numpy as np\n'), ((23030, 23051), 'numpy.arange', 'np.arange', (['(0)', '(25)', '(0.5)'], {}), '(0, 25, 0.5)\n', (23039, 23051), True, 'import numpy as np\n'), ((23176, 23202), 'numpy.arange', 'np.arange', (['(-3.0)', '(3.05)', '(0.2)'], {}), '(-3.0, 3.05, 0.2)\n', (23185, 23202), True, 'import numpy as np\n'), ((23220, 23246), 'numpy.arange', 'np.arange', (['(-3.0)', '(3.05)', '(0.2)'], {}), '(-3.0, 3.05, 0.2)\n', (23229, 23246), True, 'import numpy as np\n'), ((24386, 24524), 'scipy.optimize.minimize', 'optimize.minimize', (['_corrval', 'coef_opt'], {'args': '(x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s], subfiltered, polyorder, trimfrac)', 'method': '"""Powell"""'}), "(_corrval, coef_opt, args=(x[_s:-_s, _s:-_s], y[_s:-_s, _s\n :-_s], subfiltered, polyorder, trimfrac), method='Powell')\n", (24403, 24524), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((24755, 24947), 'scipy.optimize.minimize', 'optimize.minimize', (['_corrval', 'coef_lin'], {'args': '(x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s], subfiltered, polyorder, trimfrac,\n coef_opt)', 'method': '"""Powell"""', 'options': "{'xtol': 1e-06, 'ftol': 1e-06}"}), "(_corrval, coef_lin, args=(x[_s:-_s, _s:-_s], y[_s:-_s, _s\n 
:-_s], subfiltered, polyorder, trimfrac, coef_opt), method='Powell',\n options={'xtol': 1e-06, 'ftol': 1e-06})\n", (24772, 24947), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((25681, 25838), 'scipy.optimize.minimize', 'optimize.minimize', (['_corrval', 'coef_lin'], {'args': '(x, y, filtered, polyorder, trimfrac, coef_opt)', 'method': '"""Powell"""', 'options': "{'xtol': 1e-05, 'ftol': 1e-05}"}), "(_corrval, coef_lin, args=(x, y, filtered, polyorder,\n trimfrac, coef_opt), method='Powell', options={'xtol': 1e-05, 'ftol': \n 1e-05})\n", (25698, 25838), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((25988, 26130), 'scipy.optimize.minimize', 'optimize.minimize', (['_corrval', 'coef_opt'], {'args': '(x, y, filtered, polyorder, trimfrac)', 'method': '"""Powell"""', 'options': "{'xtol': 1e-05, 'ftol': 1e-05}"}), "(_corrval, coef_opt, args=(x, y, filtered, polyorder,\n trimfrac), method='Powell', options={'xtol': 1e-05, 'ftol': 1e-05})\n", (26005, 26130), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((1517, 1562), 're.sub', 're.sub', (['"""//"""', '"""/"""', "(infiledir + '/PSFloc.fits')"], {}), "('//', '/', infiledir + '/PSFloc.fits')\n", (1523, 1562), False, 'import re\n'), ((2709, 2730), 'os.path.isdir', 'os.path.isdir', (['outdir'], {}), '(outdir)\n', (2722, 2730), False, 'import os\n'), ((2939, 2966), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['self.xindx'], {}), '(self.xindx)\n', (2954, 2966), False, 'from astropy.io import fits\n'), ((2987, 3014), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['self.yindx'], {}), '(self.yindx)\n', (3002, 3014), False, 'from astropy.io import fits\n'), ((3035, 3065), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['self.lam_indx'], {}), '(self.lam_indx)\n', (3050, 3065), False, 'from astropy.io import fits\n'), ((6344, 6387), 'numpy.zeros', 'np.zeros', (['((coeforder + 1) * (coeforder + 2))'], {}), '((coeforder + 1) * (coeforder + 2))\n', (6352, 6387), True, 'import numpy as np\n'), ((10697, 10740), 'numpy.zeros', 'np.zeros', (['((coeforder + 1) * (coeforder + 2))'], {}), '((coeforder + 1) * (coeforder + 2))\n', (10705, 10740), True, 'import numpy as np\n'), ((12326, 12356), 'numpy.all', 'np.all', (['(y[:, :, nlam_max] == 0)'], {}), '(y[:, :, nlam_max] == 0)\n', (12332, 12356), True, 'import numpy as np\n'), ((16694, 16707), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (16704, 16707), True, 'import numpy as np\n'), ((16733, 16746), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (16743, 16746), True, 'import numpy as np\n'), ((19083, 19100), 'numpy.isfinite', 'np.isfinite', (['vals'], {}), '(vals)\n', (19094, 19100), True, 'import numpy as np\n'), ((21946, 22000), 'scipy.signal.convolve2d', 'signal.convolve2d', (['inImage.ivar', 'gaussian'], {'mode': '"""same"""'}), "(inImage.ivar, gaussian, mode='same')\n", (21963, 22000), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((4087, 4098), 'numpy.log', 'np.log', (['lam'], {}), '(lam)\n', (4093, 4098), True, 'import numpy as np\n'), ((4172, 4208), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['xarr', 'allcoef[:, i]'], {}), '(xarr, allcoef[:, i])\n', (4187, 4208), True, 'import numpy as np\n'), ((4878, 4900), 'numpy.sqrt', 'np.sqrt', (['coef.shape[0]'], {}), '(coef.shape[0])\n', (4885, 4900), True, 'import numpy as np\n'), ((5914, 5926), 'numpy.amin', 'np.amin', (['lam'], {}), '(lam)\n', (5921, 5926), True, 'import numpy as np\n'), ((5978, 5990), 'numpy.amax', 'np.amax', (['lam'], {}), 
'(lam)\n', (5985, 5990), True, 'import numpy as np\n'), ((6148, 6173), 'numpy.sqrt', 'np.sqrt', (['allcoef.shape[1]'], {}), '(allcoef.shape[1])\n', (6155, 6173), True, 'import numpy as np\n'), ((8319, 8344), 'numpy.sqrt', 'np.sqrt', (['allcoef.shape[1]'], {}), '(allcoef.shape[1])\n', (8326, 8344), True, 'import numpy as np\n'), ((9944, 9956), 'numpy.amin', 'np.amin', (['lam'], {}), '(lam)\n', (9951, 9956), True, 'import numpy as np\n'), ((10008, 10020), 'numpy.amax', 'np.amax', (['lam'], {}), '(lam)\n', (10015, 10020), True, 'import numpy as np\n'), ((10177, 10202), 'numpy.sqrt', 'np.sqrt', (['allcoef.shape[1]'], {}), '(allcoef.shape[1])\n', (10184, 10202), True, 'import numpy as np\n'), ((11909, 11956), 'scipy.interpolate.splrep', 'interpolate.splrep', (['interp_lam', 'pix_x'], {'k': '(1)', 's': '(0)'}), '(interp_lam, pix_x, k=1, s=0)\n', (11927, 11956), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((12044, 12065), 'numpy.arange', 'np.arange', (['y1', '(y2 + 1)'], {}), '(y1, y2 + 1)\n', (12053, 12065), True, 'import numpy as np\n'), ((12115, 12165), 'scipy.interpolate.splev', 'interpolate.splev', (['y[ix, iy, :nlam[ix, iy]]', 'tck_y'], {}), '(y[ix, iy, :nlam[ix, iy]], tck_y)\n', (12132, 12165), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((12209, 12265), 'scipy.interpolate.splev', 'interpolate.splev', (['lam_out[ix, iy, :nlam[ix, iy]]', 'tck_x'], {}), '(lam_out[ix, iy, :nlam[ix, iy]], tck_x)\n', (12226, 12265), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((23834, 23858), 'copy.deepcopy', 'copy.deepcopy', (['coef_save'], {}), '(coef_save)\n', (23847, 23858), False, 'import copy\n'), ((24209, 24228), 'copy.deepcopy', 'copy.deepcopy', (['coef'], {}), '(coef)\n', (24222, 24228), False, 'import copy\n'), ((6651, 6665), 'numpy.asarray', 'np.asarray', (['dy'], {}), '(dy)\n', (6661, 6665), True, 'import numpy as np\n'), ((6671, 6685), 'numpy.asarray', 'np.asarray', (['dx'], {}), '(dx)\n', (6681, 6685), True, 'import numpy as np\n'), ((7192, 7203), 'numpy.log', 'np.log', (['lam'], {}), '(lam)\n', (7198, 7203), True, 'import numpy as np\n'), ((8051, 8076), 'numpy.sqrt', 'np.sqrt', (['allcoef.shape[0]'], {}), '(allcoef.shape[0])\n', (8058, 8076), True, 'import numpy as np\n'), ((8648, 8659), 'numpy.log', 'np.log', (['lam'], {}), '(lam)\n', (8654, 8659), True, 'import numpy as np\n'), ((11336, 11353), 'numpy.all', 'np.all', (['(pix_x < 0)'], {}), '(pix_x < 0)\n', (11342, 11353), True, 'import numpy as np\n'), ((11357, 11377), 'numpy.all', 'np.all', (['(pix_x > 2048)'], {}), '(pix_x > 2048)\n', (11363, 11377), True, 'import numpy as np\n'), ((11381, 11398), 'numpy.all', 'np.all', (['(pix_y < 0)'], {}), '(pix_y < 0)\n', (11387, 11398), True, 'import numpy as np\n'), ((11402, 11422), 'numpy.all', 'np.all', (['(pix_y > 2048)'], {}), '(pix_y > 2048)\n', (11408, 11422), True, 'import numpy as np\n'), ((11764, 11811), 'scipy.interpolate.splrep', 'interpolate.splrep', (['pix_y', 'interp_lam'], {'k': '(1)', 's': '(0)'}), '(pix_y, interp_lam, k=1, s=0)\n', (11782, 11811), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((6485, 6506), 'numpy.log', 'np.log', (['interp_lam[i]'], {}), '(interp_lam[i])\n', (6491, 6506), True, 'import numpy as np\n'), ((10831, 10852), 'numpy.log', 'np.log', (['interp_lam[i]'], {}), '(interp_lam[i])\n', (10837, 10852), True, 'import numpy as np\n'), ((11552, 11611), 'scipy.interpolate.splrep', 'interpolate.splrep', (['pix_y[::-1]', 'interp_lam[::-1]'], {'k': '(1)', 's': '(0)'}), 
'(pix_y[::-1], interp_lam[::-1], k=1, s=0)\n', (11570, 11611), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((11868, 11882), 'numpy.amax', 'np.amax', (['pix_y'], {}), '(pix_y)\n', (11875, 11882), True, 'import numpy as np\n'), ((11843, 11857), 'numpy.amin', 'np.amin', (['pix_y'], {}), '(pix_y)\n', (11850, 11857), True, 'import numpy as np\n')] |
import unittest
import cap
class TestCap(unittest.TestCase):
def test_single_word(self):
text = 'python'
result = cap.cap_text(text)
        self.assertEqual(result, 'Python')
def test_multiple_word(self):
text = 'python django'
result = cap.cap_text(text)
        self.assertEqual(result, 'Python Django')
if __name__ == '__main__':
unittest.main() | [
"unittest.main",
"cap.cap_text"
] | [((404, 419), 'unittest.main', 'unittest.main', ([], {}), '()\n', (417, 419), False, 'import unittest\n'), ((143, 161), 'cap.cap_text', 'cap.cap_text', (['text'], {}), '(text)\n', (155, 161), False, 'import cap\n'), ((298, 316), 'cap.cap_text', 'cap.cap_text', (['text'], {}), '(text)\n', (310, 316), False, 'import cap\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Demo showing how km_dict and insegtannotator may be used together for
interactive segmentation.
@author: vand and abda
"""
import sys
import insegtannotator
import skimage.io
import skimage.data
import km_dict
import numpy as np
#%% EXAMPLE 1: glass fibres
## loading image
print('Loading image')
filename = '../data/glass.png'
image = skimage.io.imread(filename)
#%% EXAMPLE 2: nerve fibres
## loading image
print('Loading image')
filename = '../data/nerve_im_scale.png'
image = skimage.io.imread(filename)
#%% COMMON PART
patch_size = 11
branching_factor = 5
number_layers = 5
number_training_patches = 35000
normalization = False
image_float = image.astype(float)/255  # np.float is a deprecated alias for the builtin float
# Build tree
T = km_dict.build_km_tree(image_float, patch_size, branching_factor, number_training_patches, number_layers, normalization)
# Search km-tree and get assignment
A, number_nodes = km_dict.search_km_tree(image_float, T, branching_factor, normalization)
# number of repetitions for updating the segmentation
number_repetitions = 2
def processing_function(labels):
r,c = labels.shape
l = np.max(labels)+1
label_image = np.zeros((r,c,l))
for k in range(number_repetitions):
for i in range(1,l):
label_image[:,:,i] = (labels == i).astype(float)
D = km_dict.improb_to_dictprob(A, label_image, number_nodes, patch_size) # Dictionary
P = km_dict.dictprob_to_improb(A, D, patch_size) # Probability map
labels = np.argmax(P,axis=2) # Segmentation
return labels
print('Showtime')
# showtime
app = insegtannotator.PyQt5.QtWidgets.QApplication([])
ex = insegtannotator.InSegtAnnotator(image, processing_function)
app.exec()
sys.exit()
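# Hedged note (illustrative sketch, not part of the original demo):
# InSegtAnnotator expects `processing_function` to map a 2-D integer label
# image (0 meaning unlabelled) to a processed label image of the same shape.
# A minimal stand-in processor, useful for testing the GUI wiring before a
# dictionary model is available, could look like:
#
#   def identity_processing(labels):
#       return labels
#
#   # insegtannotator.InSegtAnnotator(image, identity_processing)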
| [
"km_dict.build_km_tree",
"numpy.argmax",
"numpy.zeros",
"km_dict.dictprob_to_improb",
"km_dict.search_km_tree",
"numpy.max",
"insegtannotator.PyQt5.QtWidgets.QApplication",
"km_dict.improb_to_dictprob",
"sys.exit",
"insegtannotator.InSegtAnnotator"
] | [((753, 876), 'km_dict.build_km_tree', 'km_dict.build_km_tree', (['image_float', 'patch_size', 'branching_factor', 'number_training_patches', 'number_layers', 'normalization'], {}), '(image_float, patch_size, branching_factor,\n number_training_patches, number_layers, normalization)\n', (774, 876), False, 'import km_dict\n'), ((927, 998), 'km_dict.search_km_tree', 'km_dict.search_km_tree', (['image_float', 'T', 'branching_factor', 'normalization'], {}), '(image_float, T, branching_factor, normalization)\n', (949, 998), False, 'import km_dict\n'), ((1604, 1652), 'insegtannotator.PyQt5.QtWidgets.QApplication', 'insegtannotator.PyQt5.QtWidgets.QApplication', (['[]'], {}), '([])\n', (1648, 1652), False, 'import insegtannotator\n'), ((1659, 1718), 'insegtannotator.InSegtAnnotator', 'insegtannotator.InSegtAnnotator', (['image', 'processing_function'], {}), '(image, processing_function)\n', (1690, 1718), False, 'import insegtannotator\n'), ((1730, 1740), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1738, 1740), False, 'import sys\n'), ((1176, 1195), 'numpy.zeros', 'np.zeros', (['(r, c, l)'], {}), '((r, c, l))\n', (1184, 1195), True, 'import numpy as np\n'), ((1141, 1155), 'numpy.max', 'np.max', (['labels'], {}), '(labels)\n', (1147, 1155), True, 'import numpy as np\n'), ((1336, 1404), 'km_dict.improb_to_dictprob', 'km_dict.improb_to_dictprob', (['A', 'label_image', 'number_nodes', 'patch_size'], {}), '(A, label_image, number_nodes, patch_size)\n', (1362, 1404), False, 'import km_dict\n'), ((1430, 1474), 'km_dict.dictprob_to_improb', 'km_dict.dictprob_to_improb', (['A', 'D', 'patch_size'], {}), '(A, D, patch_size)\n', (1456, 1474), False, 'import km_dict\n'), ((1510, 1530), 'numpy.argmax', 'np.argmax', (['P'], {'axis': '(2)'}), '(P, axis=2)\n', (1519, 1530), True, 'import numpy as np\n')] |
#! /usr/bin/python3
"""
Description
-----------
Class to communicate with Atlas Scientific OEM sensors in I2C mode.
Atlas Scientific i2c by GreenPonik
Source code is based on the Atlas Scientific documentation:
https://www.atlas-scientific.com/files/EC_oem_datasheet.pdf
https://atlas-scientific.com/files/oem_pH_datasheet.pdf
"""
import time
from GreenPonik_Atlas_Scientific_OEM_i2c.AtlasOEMI2c import _AtlasOEMI2c
class _CommonsI2c(_AtlasOEMI2c):
"""
commons methods for EC and PH OEM circuits
"""
def _convert_raw_hex_to_float(self, byte_array):
"""
convert bytearray response to float result
return float converted value
"""
hexstr = byte_array.hex()
float_from_hexa = float.fromhex(byte_array.hex())
converted = float_from_hexa
if self.debug:
print("Byte Array to decode: ", byte_array)
print("Byte Array decoded to hexa string: %s" % hexstr)
print("float from hexa: %.3f" % float_from_hexa)
return converted
def _check_calibration_confirm(self, confirm):
"""
check the response of calibration confirm register
"""
if self.debug:
if hex(0x00) == hex(confirm):
print("Calibration applied")
else:
raise Exception("Cannot confirm the operation was correctly executed")
# ----- Getters ----- ########
def get_device_info(self):
"""
Get device information
@return string module type, firmware version
"""
if "EC" == self.moduletype or "PH" == self.moduletype:
info = self.read(
self.OEM_EC_REGISTERS["device_type"],
self.TWO_BYTE_READ,
)
return "SUCCESS: %s, module type: %s and firmware is: %s" % (
self.moduletype,
info[0],
info[1],
)
def get_type(self):
"""
Read sensor type
@return int the sensor type (1=EC, 4=PH)
"""
if "EC" == self.moduletype or "PH" == self.moduletype:
device_type = self.read(
self.OEM_EC_REGISTERS["device_type"],
self.ONE_BYTE_READ,
)
if self.debug:
print("Device type is: %s" % device_type)
return device_type
def get_firmware(self):
"""
Read sensor firmware
@return int the firmware revision
"""
if "EC" == self.moduletype or "PH" == self.moduletype:
firmware = self.read(
self.OEM_EC_REGISTERS["device_firmware"],
self.ONE_BYTE_READ,
)
if self.debug:
print("Firmware type is: %s" % firmware)
return firmware
def get_new_read_available(self):
"""
New Read is available
@return int 1 if new read available, 0 if not
"""
is_new_read = self.read(
self.OEM_EC_REGISTERS["device_new_reading"],
self.ONE_BYTE_READ,
)
return is_new_read
def get_read(self):
"""
Read sensor value
@return float the sensor value
"""
# self.set_wakeup_sleep_mode(0x01) # wake device before read
time.sleep(self._long_timeout)
if "EC" == self.moduletype:
rawhex = self.read(
self.OEM_EC_REGISTERS["device_ec_msb"],
self.FOUR_BYTE_READ,
)
value = self._convert_raw_hex_to_float(rawhex) / 100
elif "PH" == self.moduletype:
rawhex = self.read(
self.OEM_PH_REGISTERS["device_ph_msb"],
self.FOUR_BYTE_READ,
)
value = self._convert_raw_hex_to_float(rawhex) / 1000
if self.debug:
print(
"%s: %s%s"
% (
self.moduletype,
value,
"µs" if "EC" == self.moduletype else "",
)
)
# self.set_wakeup_sleep_mode(0x00) # sleep device after read
return value
def get_temperature(self):
"""
Get current compensation temperature
@return float temperature value
"""
if "EC" == self.moduletype:
rawhex = self.read(
self.OEM_EC_REGISTERS["device_temperature_comp_msb"],
self.FOUR_BYTE_READ,
)
elif "PH" == self.moduletype:
rawhex = self.read(
self.OEM_PH_REGISTERS["device_temperature_comp_msb"],
self.FOUR_BYTE_READ,
)
value = self._convert_raw_hex_to_float(rawhex) / 100
if self.debug:
print("%s compensation Temperature: %s°c" % (self.moduletype, value))
return value
def get_calibration(self):
"""
Get current calibrations data
:return: string with current points calibrated
:rtype:
"""
if "EC" == self.moduletype:
register = self.OEM_EC_REGISTERS["device_calibration_confirm"]
""" bits
- "dry": 0,
- "single": 1,
- "low": 2,
- "high": 3,
"""
binary_calib_status = self.EC_BINARY_CALIB_STATUS
elif "PH" == self.moduletype:
register = self.OEM_PH_REGISTERS["device_calibration_confirm"]
""" bits
- "low": 1,
- "mid": 2,
- "high": 3,
"""
binary_calib_status = self.PH_BINARY_CALIB_STATUS
r = self.read(register)
if self.debug:
print("Binary result from OEM", r)
print("Who is calibrated? >", binary_calib_status[r])
return binary_calib_status[r]
def get_led(self) -> int:
"""
Get led state
register is the same for EC and PH OEM circuit
:return: int 0x00 = OFF or 0x01 = ON
:rtype: int
"""
register = self.OEM_EC_REGISTERS["device_led"]
led_status = self.read(register)
if self.debug:
print("Led status is currently: %s" % hex(led_status))
return led_status
def get_wakeup_sleep_mode(self) -> int:
"""
get Active or Hibernate device mode
register is the same for EC and PH OEM circuit
:return: int 0x01 = WakeUp or 0x00 = Hibernate
:rtype: int
"""
register = self.OEM_EC_REGISTERS["device_sleep"]
mode = self.read(register)
if self.debug:
print(
"Device is currently in mode: %s"
% ("wakeup" if hex(0x01) == hex(mode) else "sleep")
)
return mode
# ----- Setters ----- ########
def set_temperature(self, t=25.0):
"""Set the compensation temperature
:param t: float temperature value
"""
self.set_wakeup_sleep_mode(0x01) # wake device before set temperature
time.sleep(self._long_timeout)
if "EC" == self.moduletype:
register = self.OEM_EC_REGISTERS["device_temperature_comp_msb"]
elif "PH" == self.moduletype:
register = self.OEM_PH_REGISTERS["device_temperature_comp_msb"]
byte_array = int(round(t * 100)).to_bytes(4, "big")
if self.debug:
print("Temperature to set: %.2f" % t)
print(
"%s sent converted temp to bytes: " % (self.moduletype),
byte_array,
)
time.sleep(self.short_timeout)
self.write(register, byte_array)
self.set_wakeup_sleep_mode(0x00) # sleep device after set temperature
def _set_calibration_registers(self, value):
"""calibration registers
do not use alone because calibration is apply by using set_calibration_apply
/!in float micro siemens µS for EC/!
/! in float for pH/!
"""
if "EC" == self.moduletype:
register = self.OEM_EC_REGISTERS["device_calibration_msb"]
# ec calibration wait for µSiemens
byte_array = int(round(value * 100)).to_bytes(4, "big")
elif "PH" == self.moduletype:
register = self.OEM_PH_REGISTERS["device_calibration_msb"]
byte_array = int(round(value * 1000)).to_bytes(4, "big")
self.write(register, byte_array)
if self.debug:
print("Value to send: %.2f" % value)
print(
"%s sent converted value to bytes: " % (self.moduletype),
byte_array,
)
def set_calibration_apply(self, value, point=""):
"""apply the calibration
        :param value: float calibration solution value; EC expects µS, e.g. 1.413 => 1413.0
:param point: string "dry", "single", "low", "mid", "high" only
"""
if point not in ("dry", "single", "low", "mid", "high"):
raise Exception(
'missing string point argument, \
can only be "dry", "single", "low", "mid", "high"'
)
if "EC" == self.moduletype:
points = {"dry": 0x02, "single": 0x03, "low": 0x04, "high": 0x05}
register = self.OEM_EC_REGISTERS["device_calibration_request"]
elif "PH" == self.moduletype:
points = {"low": 0x02, "mid": 0x03, "high": 0x04}
register = self.OEM_PH_REGISTERS["device_calibration_request"]
self._set_calibration_registers(value)
time.sleep(self.long_timeout)
self.write(register, points[point]) # apply point calibration data
time.sleep(self.short_timeout) # wait before read register to get confirmation
conf = self.read(register)
self._check_calibration_confirm(conf)
return conf
def set_calibration_clear(self):
"""clear calibration data
"""
if "EC" == self.moduletype:
register = self.OEM_EC_REGISTERS["device_calibration_request"]
elif "PH" == self.moduletype:
register = self.OEM_PH_REGISTERS["device_calibration_request"]
self.write(register, 0x01) # send 0x01 to clear calibration data
time.sleep(self.short_timeout) # wait before read register to get confirmation
conf = self.read(register)
self._check_calibration_confirm(conf)
return conf
def set_i2c_addr(self, addr):
"""Change the device i2c address
        :param addr: int = new i2c address
"""
if addr not in self.ADDR_OEM_HEXA and addr not in self.ADDR_OEM_DECIMAL:
raise Exception(
"only decimal address expected, convert hexa by using \
AtlasI2c.ADDR_OEM_DECIMAL or AtlasI2c.ADDR_EZO_DECIMAL"
)
else:
"""
write workflow to change physical i2c address
"""
self.address(addr)
raise NotImplementedError("write workflow to change physical i2c address")
def set_led(self, state=0x01):
"""Change Led state
:param state: byte state => 0x01 = ON or 0x00 = OFF
"""
register = self.OEM_EC_REGISTERS["device_led"]
self.write(register, state)
if self.debug:
print(
"Led status change to: %s"
% ("On" if hex(0x01) == hex(state) else "OFF")
)
def set_wakeup_sleep_mode(self, action=0x01):
"""change device mode to Active or Hibernate
register is the same for EC and PH OEM circuit
:param byte: action => 0x01 = WakeUp or 0x00 = Hibernate
"""
register = self.OEM_EC_REGISTERS["device_sleep"]
self.write(register, action)
if self.debug:
print(
"Device is now: %s"
% ("wakeup" if hex(0x01) == hex(action) else "sleep")
)
def set_ack_new_read_available(self):
"""Ack new Read available
"""
register = self.OEM_EC_REGISTERS["device_new_reading"]
ack = 0x00
self.write(register, ack)
if self.debug:
print("ack new reading available register %s to %s" % (register, ack))
| [
"time.sleep"
] | [((3274, 3304), 'time.sleep', 'time.sleep', (['self._long_timeout'], {}), '(self._long_timeout)\n', (3284, 3304), False, 'import time\n'), ((7030, 7060), 'time.sleep', 'time.sleep', (['self._long_timeout'], {}), '(self._long_timeout)\n', (7040, 7060), False, 'import time\n'), ((7562, 7592), 'time.sleep', 'time.sleep', (['self.short_timeout'], {}), '(self.short_timeout)\n', (7572, 7592), False, 'import time\n'), ((9547, 9576), 'time.sleep', 'time.sleep', (['self.long_timeout'], {}), '(self.long_timeout)\n', (9557, 9576), False, 'import time\n'), ((9661, 9691), 'time.sleep', 'time.sleep', (['self.short_timeout'], {}), '(self.short_timeout)\n', (9671, 9691), False, 'import time\n'), ((10232, 10262), 'time.sleep', 'time.sleep', (['self.short_timeout'], {}), '(self.short_timeout)\n', (10242, 10262), False, 'import time\n')] |
from datetime import datetime
import os
class core:
def __init__(self, environ = None, location = None):
self.response = None
if environ == None and location == None:
            # If both environ and location are None, do nothing.
            self.response = """<h1>Request doesn't have <u>Environ</u> or <u>Location</u></h1>"""
elif environ != None:
self.environ = environ
from tools.Utilities import buildRq
buildReq = buildRq()
from tools.main import main
request = buildReq.extrctEnv(environ, environ['DOCUMENT_ROOT'])
# try:
# self.response = main(request, environ).getResult()
# except Exception as ex:
# error = {"error": str(ex.args[0])}
# self.response = {"status": 200, "value": error, "type": "application/json"}
self.response = main(request, environ).getResult()
def result(self):
if self.response != None:
return self.response
else:
return ["plain", "Problem with the communication with the core..."]
def logs(self, logData):
logdir = os.listdir(self.environ['DOCUMENT_ROOT']+'logs/')
a = datetime.now()
logtime = ''
for piece in a.timetuple()[:3]:
logtime += str(piece)+'-'
logtime = logtime[:-1]+" "
for piece in a.timetuple()[3:]:
logtime += str(piece)+':'
logtime = logtime[:-3]
if len(logdir) < 1:
log = open(self.environ['DOCUMENT_ROOT']+'logs/error.log', 'w')
for piece in str(logData).split('\n'):
log.write('['+logtime+']'+str(piece)+'\n')
log.close()
else:
log = open(self.environ['DOCUMENT_ROOT']+'logs/error.log', 'r')
if len(log.readlines()) > 500:
self.cleanLogs(self.environ['DOCUMENT_ROOT']+'logs')
log = open(self.environ['DOCUMENT_ROOT']+'logs/error.log', 'w')
else:
log = open(self.environ['DOCUMENT_ROOT']+'logs/error.log', 'a')
for piece in str(logData).split('\n'):
log.write('['+logtime+']'+str(piece)+'\n')
log.close()
return str(logData)
def cleanLogs(self, location):
logfiles = os.listdir(location)
if len(logfiles) == 9:
os.remove(logfiles[-1])
logfiles = logfiles[:-1]
            cont = 1
            for log in logfiles:
                os.rename(self.environ['DOCUMENT_ROOT']+'logs/'+log, self.environ['DOCUMENT_ROOT']+'logs/error_'+str(cont)+'.log')
                cont += 1  # give each rotated log a unique numeric suffix
        else:
            cont = 1
            for log in logfiles:
                os.rename(self.environ['DOCUMENT_ROOT']+'logs/'+log, self.environ['DOCUMENT_ROOT']+'logs/error_'+str(cont)+'.log')
                cont += 1  # give each rotated log a unique numeric suffix
| [
"os.remove",
"tools.main.main",
"tools.Utilities.buildRq",
"datetime.datetime.now",
"os.listdir"
] | [((1358, 1409), 'os.listdir', 'os.listdir', (["(self.environ['DOCUMENT_ROOT'] + 'logs/')"], {}), "(self.environ['DOCUMENT_ROOT'] + 'logs/')\n", (1368, 1409), False, 'import os\n'), ((1420, 1434), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1432, 1434), False, 'from datetime import datetime\n'), ((2572, 2592), 'os.listdir', 'os.listdir', (['location'], {}), '(location)\n', (2582, 2592), False, 'import os\n'), ((2636, 2659), 'os.remove', 'os.remove', (['logfiles[-1]'], {}), '(logfiles[-1])\n', (2645, 2659), False, 'import os\n'), ((574, 583), 'tools.Utilities.buildRq', 'buildRq', ([], {}), '()\n', (581, 583), False, 'from tools.Utilities import buildRq\n'), ((1002, 1024), 'tools.main.main', 'main', (['request', 'environ'], {}), '(request, environ)\n', (1006, 1024), False, 'from tools.main import main\n')] |
from flask import Blueprint, request, Response, render_template
from model.roads import Roads
from pyldapi import ContainerRenderer
import conf
import ast
import folium
print(__name__)
routes = Blueprint('controller', __name__)
DEFAULT_ITEMS_PER_PAGE=50
@routes.route('/', strict_slashes=True)
def home():
return render_template('home.html')
@routes.route('/rds/')
def roads():
# Search specific items using keywords
search_string = request.values.get('search')
try:
# get the register length from the online DB
sql = 'SELECT COUNT(*) FROM "transportroads"'
if search_string:
sql += '''WHERE UPPER(cast("id" as text)) LIKE '%{search_string}%' OR UPPER("name") LIKE '%{search_string}%';
'''.format(search_string=search_string.strip().upper())
no_of_items = conf.db_select(sql)[0][0]
page = int(request.values.get('page')) if request.values.get('page') is not None else 1
per_page = int(request.values.get('per_page')) \
if request.values.get('per_page') is not None else DEFAULT_ITEMS_PER_PAGE
offset = (page - 1) * per_page
# get the id and name for each record in the database
sql = '''SELECT "id", "name" FROM "transportroads"'''
if search_string:
sql += '''WHERE UPPER(cast("id" as text)) LIKE '%{search_string}%' OR UPPER("name") LIKE '%{search_string}%'
'''.format(search_string=search_string.strip().upper())
sql += '''ORDER BY "name"
OFFSET {} LIMIT {}'''.format(offset, per_page)
items = []
for item in conf.db_select(sql):
items.append(
(item[0], item[1])
)
except Exception as e:
print(e)
return Response('The Roads database is offline', mimetype='text/plain', status=500)
return ContainerRenderer(request=request,
instance_uri=request.url,
label='Roads Register',
comment='A register of Roads',
parent_container_uri='http://linked.data.gov.au/def/placenames/PlaceName',
parent_container_label='QLD_Roads',
members=items,
members_total_count=no_of_items,
profiles=None,
default_profile_token=None,
super_register=None,
page_size_max=1000,
register_template=None,
per_page=per_page,
search_query=search_string,
search_enabled=True
).render()
@routes.route('/map')
def show_map():
'''
Function to render a map around the specified line
'''
name = request.values.get('name')
coords_list = ast.literal_eval(request.values.get('coords'))[0]
# swap x & y for mapping
points = []
for coords in coords_list:
points.append(tuple([coords[1], coords[0]]))
ave_lat = sum(p[0] for p in points) / len(points)
ave_lon = sum(p[1] for p in points) / len(points)
# create a new map object
folium_map = folium.Map(location=[ave_lat, ave_lon], zoom_start=15)
tooltip = 'Click for more information'
folium.PolyLine(points, color="red", weight=2.5, opacity=1, popup = name, tooltip=tooltip).add_to(folium_map)
return folium_map.get_root().render()
@routes.route('/rds/<string:roads_id>')
def road(roads_id):
roads = Roads(request, request.base_url)
return roads.render()
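# Hedged usage sketch: one way this blueprint might be wired into an app.
# The module path `controller` and the host/port are assumptions, not taken
# from this project.
#
#   from flask import Flask
#   from controller import routes   # hypothetical import path
#
#   app = Flask(__name__)
#   app.register_blueprint(routes)
#   app.run(host="0.0.0.0", port=5000)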
| [
"flask.Blueprint",
"conf.db_select",
"flask.request.values.get",
"pyldapi.ContainerRenderer",
"folium.Map",
"flask.render_template",
"folium.PolyLine",
"flask.Response",
"model.roads.Roads"
] | [((194, 227), 'flask.Blueprint', 'Blueprint', (['"""controller"""', '__name__'], {}), "('controller', __name__)\n", (203, 227), False, 'from flask import Blueprint, request, Response, render_template\n'), ((319, 347), 'flask.render_template', 'render_template', (['"""home.html"""'], {}), "('home.html')\n", (334, 347), False, 'from flask import Blueprint, request, Response, render_template\n'), ((449, 477), 'flask.request.values.get', 'request.values.get', (['"""search"""'], {}), "('search')\n", (467, 477), False, 'from flask import Blueprint, request, Response, render_template\n'), ((2914, 2940), 'flask.request.values.get', 'request.values.get', (['"""name"""'], {}), "('name')\n", (2932, 2940), False, 'from flask import Blueprint, request, Response, render_template\n'), ((3296, 3350), 'folium.Map', 'folium.Map', ([], {'location': '[ave_lat, ave_lon]', 'zoom_start': '(15)'}), '(location=[ave_lat, ave_lon], zoom_start=15)\n', (3306, 3350), False, 'import folium\n'), ((3626, 3658), 'model.roads.Roads', 'Roads', (['request', 'request.base_url'], {}), '(request, request.base_url)\n', (3631, 3658), False, 'from model.roads import Roads\n'), ((1638, 1657), 'conf.db_select', 'conf.db_select', (['sql'], {}), '(sql)\n', (1652, 1657), False, 'import conf\n'), ((1793, 1869), 'flask.Response', 'Response', (['"""The Roads database is offline"""'], {'mimetype': '"""text/plain"""', 'status': '(500)'}), "('The Roads database is offline', mimetype='text/plain', status=500)\n", (1801, 1869), False, 'from flask import Blueprint, request, Response, render_template\n'), ((1882, 2360), 'pyldapi.ContainerRenderer', 'ContainerRenderer', ([], {'request': 'request', 'instance_uri': 'request.url', 'label': '"""Roads Register"""', 'comment': '"""A register of Roads"""', 'parent_container_uri': '"""http://linked.data.gov.au/def/placenames/PlaceName"""', 'parent_container_label': '"""QLD_Roads"""', 'members': 'items', 'members_total_count': 'no_of_items', 'profiles': 'None', 'default_profile_token': 'None', 'super_register': 'None', 'page_size_max': '(1000)', 'register_template': 'None', 'per_page': 'per_page', 'search_query': 'search_string', 'search_enabled': '(True)'}), "(request=request, instance_uri=request.url, label=\n 'Roads Register', comment='A register of Roads', parent_container_uri=\n 'http://linked.data.gov.au/def/placenames/PlaceName',\n parent_container_label='QLD_Roads', members=items, members_total_count=\n no_of_items, profiles=None, default_profile_token=None, super_register=\n None, page_size_max=1000, register_template=None, per_page=per_page,\n search_query=search_string, search_enabled=True)\n", (1899, 2360), False, 'from pyldapi import ContainerRenderer\n'), ((2976, 3004), 'flask.request.values.get', 'request.values.get', (['"""coords"""'], {}), "('coords')\n", (2994, 3004), False, 'from flask import Blueprint, request, Response, render_template\n'), ((3399, 3491), 'folium.PolyLine', 'folium.PolyLine', (['points'], {'color': '"""red"""', 'weight': '(2.5)', 'opacity': '(1)', 'popup': 'name', 'tooltip': 'tooltip'}), "(points, color='red', weight=2.5, opacity=1, popup=name,\n tooltip=tooltip)\n", (3414, 3491), False, 'import folium\n'), ((842, 861), 'conf.db_select', 'conf.db_select', (['sql'], {}), '(sql)\n', (856, 861), False, 'import conf\n'), ((919, 945), 'flask.request.values.get', 'request.values.get', (['"""page"""'], {}), "('page')\n", (937, 945), False, 'from flask import Blueprint, request, Response, render_template\n'), ((888, 914), 'flask.request.values.get', 'request.values.get', 
(['"""page"""'], {}), "('page')\n", (906, 914), False, 'from flask import Blueprint, request, Response, render_template\n'), ((1044, 1074), 'flask.request.values.get', 'request.values.get', (['"""per_page"""'], {}), "('per_page')\n", (1062, 1074), False, 'from flask import Blueprint, request, Response, render_template\n'), ((988, 1018), 'flask.request.values.get', 'request.values.get', (['"""per_page"""'], {}), "('per_page')\n", (1006, 1018), False, 'from flask import Blueprint, request, Response, render_template\n')] |
"""
Prepared by Backend/Server Team - Sheldon, Martin, Brian, Sarah, Veronica.
"""
from django.urls import re_path
from .consumers import ChatConsumer
# Assign pattern to activate selected websocket
websocket_urlpatterns = [
re_path(r'^ws/chat/(?P<room_name>[^/]+)/$', ChatConsumer),
]
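# Hedged usage sketch: these patterns are normally consumed by an ASGI
# ProtocolTypeRouter. Whether ChatConsumer also needs `.as_asgi()` depends on
# the Channels version; the layout below is an assumption, not project code.
#
#   from channels.auth import AuthMiddlewareStack
#   from channels.routing import ProtocolTypeRouter, URLRouter
#
#   application = ProtocolTypeRouter({
#       'websocket': AuthMiddlewareStack(
#           URLRouter(websocket_urlpatterns)
#       ),
#   })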
| [
"django.urls.re_path"
] | [((231, 287), 'django.urls.re_path', 're_path', (['"""^ws/chat/(?P<room_name>[^/]+)/$"""', 'ChatConsumer'], {}), "('^ws/chat/(?P<room_name>[^/]+)/$', ChatConsumer)\n", (238, 287), False, 'from django.urls import re_path\n')] |
import pytest
import common
import time
from common import client, volume_name # NOQA
from common import SIZE, DEV_PATH
from common import check_volume_data, get_self_host_id, get_volume_endpoint
from common import write_volume_random_data
from common import RETRY_COUNTS, RETRY_ITERVAL
@pytest.mark.coretest # NOQA
def test_ha_simple_recovery(client, volume_name): # NOQA
ha_simple_recovery_test(client, volume_name, SIZE)
def ha_simple_recovery_test(client, volume_name, size, base_image=""): # NOQA
volume = client.create_volume(name=volume_name, size=size,
numberOfReplicas=2, baseImage=base_image)
volume = common.wait_for_volume_detached(client, volume_name)
assert volume["name"] == volume_name
assert volume["size"] == size
assert volume["numberOfReplicas"] == 2
assert volume["state"] == "detached"
assert volume["created"] != ""
assert volume["baseImage"] == base_image
host_id = get_self_host_id()
volume = volume.attach(hostId=host_id)
volume = common.wait_for_volume_healthy(client, volume_name)
volume = client.by_id_volume(volume_name)
assert get_volume_endpoint(volume) == DEV_PATH + volume_name
assert len(volume["replicas"]) == 2
replica0 = volume["replicas"][0]
assert replica0["name"] != ""
replica1 = volume["replicas"][1]
assert replica1["name"] != ""
data = write_volume_random_data(volume)
volume = volume.replicaRemove(name=replica0["name"])
# wait until we saw a replica starts rebuilding
new_replica_found = False
for i in range(RETRY_COUNTS):
v = client.by_id_volume(volume_name)
for r in v["replicas"]:
if r["name"] != replica0["name"] and \
r["name"] != replica1["name"]:
new_replica_found = True
break
if new_replica_found:
break
time.sleep(RETRY_ITERVAL)
assert new_replica_found
volume = common.wait_for_volume_healthy(client, volume_name)
volume = client.by_id_volume(volume_name)
assert volume["state"] == common.VOLUME_STATE_ATTACHED
assert volume["robustness"] == common.VOLUME_ROBUSTNESS_HEALTHY
assert len(volume["replicas"]) >= 2
found = False
for replica in volume["replicas"]:
if replica["name"] == replica1["name"]:
found = True
break
assert found
check_volume_data(volume, data)
volume = volume.detach()
volume = common.wait_for_volume_detached(client, volume_name)
client.delete(volume)
common.wait_for_volume_delete(client, volume_name)
volumes = client.list_volume()
assert len(volumes) == 0
@pytest.mark.coretest # NOQA
def test_ha_salvage(client, volume_name): # NOQA
ha_salvage_test(client, volume_name)
def ha_salvage_test(client, volume_name, base_image=""): # NOQA
volume = client.create_volume(name=volume_name, size=SIZE,
numberOfReplicas=2, baseImage=base_image)
volume = common.wait_for_volume_detached(client, volume_name)
assert volume["name"] == volume_name
assert volume["size"] == SIZE
assert volume["numberOfReplicas"] == 2
assert volume["state"] == "detached"
assert volume["created"] != ""
assert volume["baseImage"] == base_image
host_id = get_self_host_id()
volume = volume.attach(hostId=host_id)
volume = common.wait_for_volume_healthy(client, volume_name)
assert len(volume["replicas"]) == 2
replica0_name = volume["replicas"][0]["name"]
replica1_name = volume["replicas"][1]["name"]
data = write_volume_random_data(volume)
common.k8s_delete_replica_pods_for_volume(volume_name)
volume = common.wait_for_volume_faulted(client, volume_name)
assert len(volume["replicas"]) == 2
assert volume["replicas"][0]["failedAt"] != ""
assert volume["replicas"][1]["failedAt"] != ""
volume.salvage(names=[replica0_name, replica1_name])
volume = common.wait_for_volume_detached(client, volume_name)
assert len(volume["replicas"]) == 2
assert volume["replicas"][0]["failedAt"] == ""
assert volume["replicas"][1]["failedAt"] == ""
volume = volume.attach(hostId=host_id)
volume = common.wait_for_volume_healthy(client, volume_name)
check_volume_data(volume, data)
volume = volume.detach()
volume = common.wait_for_volume_detached(client, volume_name)
client.delete(volume)
common.wait_for_volume_delete(client, volume_name)
volumes = client.list_volume()
assert len(volumes) == 0
| [
"common.get_self_host_id",
"common.check_volume_data",
"common.k8s_delete_replica_pods_for_volume",
"common.client.create_volume",
"common.wait_for_volume_healthy",
"common.write_volume_random_data",
"common.wait_for_volume_detached",
"common.client.delete",
"common.wait_for_volume_delete",
"common.wait_for_volume_faulted",
"common.client.list_volume",
"time.sleep",
"common.get_volume_endpoint",
"common.client.by_id_volume"
] | [((529, 624), 'common.client.create_volume', 'client.create_volume', ([], {'name': 'volume_name', 'size': 'size', 'numberOfReplicas': '(2)', 'baseImage': 'base_image'}), '(name=volume_name, size=size, numberOfReplicas=2,\n baseImage=base_image)\n', (549, 624), False, 'from common import client, volume_name\n'), ((668, 720), 'common.wait_for_volume_detached', 'common.wait_for_volume_detached', (['client', 'volume_name'], {}), '(client, volume_name)\n', (699, 720), False, 'import common\n'), ((975, 993), 'common.get_self_host_id', 'get_self_host_id', ([], {}), '()\n', (991, 993), False, 'from common import check_volume_data, get_self_host_id, get_volume_endpoint\n'), ((1050, 1101), 'common.wait_for_volume_healthy', 'common.wait_for_volume_healthy', (['client', 'volume_name'], {}), '(client, volume_name)\n', (1080, 1101), False, 'import common\n'), ((1116, 1148), 'common.client.by_id_volume', 'client.by_id_volume', (['volume_name'], {}), '(volume_name)\n', (1135, 1148), False, 'from common import client, volume_name\n'), ((1410, 1442), 'common.write_volume_random_data', 'write_volume_random_data', (['volume'], {}), '(volume)\n', (1434, 1442), False, 'from common import write_volume_random_data\n'), ((1985, 2036), 'common.wait_for_volume_healthy', 'common.wait_for_volume_healthy', (['client', 'volume_name'], {}), '(client, volume_name)\n', (2015, 2036), False, 'import common\n'), ((2051, 2083), 'common.client.by_id_volume', 'client.by_id_volume', (['volume_name'], {}), '(volume_name)\n', (2070, 2083), False, 'from common import client, volume_name\n'), ((2422, 2453), 'common.check_volume_data', 'check_volume_data', (['volume', 'data'], {}), '(volume, data)\n', (2439, 2453), False, 'from common import check_volume_data, get_self_host_id, get_volume_endpoint\n'), ((2497, 2549), 'common.wait_for_volume_detached', 'common.wait_for_volume_detached', (['client', 'volume_name'], {}), '(client, volume_name)\n', (2528, 2549), False, 'import common\n'), ((2555, 2576), 'common.client.delete', 'client.delete', (['volume'], {}), '(volume)\n', (2568, 2576), False, 'from common import client, volume_name\n'), ((2581, 2631), 'common.wait_for_volume_delete', 'common.wait_for_volume_delete', (['client', 'volume_name'], {}), '(client, volume_name)\n', (2610, 2631), False, 'import common\n'), ((2647, 2667), 'common.client.list_volume', 'client.list_volume', ([], {}), '()\n', (2665, 2667), False, 'from common import client, volume_name\n'), ((2901, 2996), 'common.client.create_volume', 'client.create_volume', ([], {'name': 'volume_name', 'size': 'SIZE', 'numberOfReplicas': '(2)', 'baseImage': 'base_image'}), '(name=volume_name, size=SIZE, numberOfReplicas=2,\n baseImage=base_image)\n', (2921, 2996), False, 'from common import client, volume_name\n'), ((3040, 3092), 'common.wait_for_volume_detached', 'common.wait_for_volume_detached', (['client', 'volume_name'], {}), '(client, volume_name)\n', (3071, 3092), False, 'import common\n'), ((3347, 3365), 'common.get_self_host_id', 'get_self_host_id', ([], {}), '()\n', (3363, 3365), False, 'from common import check_volume_data, get_self_host_id, get_volume_endpoint\n'), ((3422, 3473), 'common.wait_for_volume_healthy', 'common.wait_for_volume_healthy', (['client', 'volume_name'], {}), '(client, volume_name)\n', (3452, 3473), False, 'import common\n'), ((3627, 3659), 'common.write_volume_random_data', 'write_volume_random_data', (['volume'], {}), '(volume)\n', (3651, 3659), False, 'from common import write_volume_random_data\n'), ((3665, 3719), 
'common.k8s_delete_replica_pods_for_volume', 'common.k8s_delete_replica_pods_for_volume', (['volume_name'], {}), '(volume_name)\n', (3706, 3719), False, 'import common\n'), ((3734, 3785), 'common.wait_for_volume_faulted', 'common.wait_for_volume_faulted', (['client', 'volume_name'], {}), '(client, volume_name)\n', (3764, 3785), False, 'import common\n'), ((4000, 4052), 'common.wait_for_volume_detached', 'common.wait_for_volume_detached', (['client', 'volume_name'], {}), '(client, volume_name)\n', (4031, 4052), False, 'import common\n'), ((4252, 4303), 'common.wait_for_volume_healthy', 'common.wait_for_volume_healthy', (['client', 'volume_name'], {}), '(client, volume_name)\n', (4282, 4303), False, 'import common\n'), ((4309, 4340), 'common.check_volume_data', 'check_volume_data', (['volume', 'data'], {}), '(volume, data)\n', (4326, 4340), False, 'from common import check_volume_data, get_self_host_id, get_volume_endpoint\n'), ((4384, 4436), 'common.wait_for_volume_detached', 'common.wait_for_volume_detached', (['client', 'volume_name'], {}), '(client, volume_name)\n', (4415, 4436), False, 'import common\n'), ((4442, 4463), 'common.client.delete', 'client.delete', (['volume'], {}), '(volume)\n', (4455, 4463), False, 'from common import client, volume_name\n'), ((4468, 4518), 'common.wait_for_volume_delete', 'common.wait_for_volume_delete', (['client', 'volume_name'], {}), '(client, volume_name)\n', (4497, 4518), False, 'import common\n'), ((4534, 4554), 'common.client.list_volume', 'client.list_volume', ([], {}), '()\n', (4552, 4554), False, 'from common import client, volume_name\n'), ((1160, 1187), 'common.get_volume_endpoint', 'get_volume_endpoint', (['volume'], {}), '(volume)\n', (1179, 1187), False, 'from common import check_volume_data, get_self_host_id, get_volume_endpoint\n'), ((1630, 1662), 'common.client.by_id_volume', 'client.by_id_volume', (['volume_name'], {}), '(volume_name)\n', (1649, 1662), False, 'from common import client, volume_name\n'), ((1916, 1941), 'time.sleep', 'time.sleep', (['RETRY_ITERVAL'], {}), '(RETRY_ITERVAL)\n', (1926, 1941), False, 'import time\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-02-20 06:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('landing', '0008_remove_featurescover_active'),
]
operations = [
migrations.CreateModel(
name='PresentationCover',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.URLField(verbose_name='image')),
('label', models.CharField(blank=True, max_length=50, verbose_name='label')),
('label_it', models.CharField(blank=True, max_length=50, null=True, verbose_name='label')),
('label_en', models.CharField(blank=True, max_length=50, null=True, verbose_name='label')),
('section', models.CharField(choices=[('DES', 'description'), ('FEA', 'features')], default='DES', max_length=3)),
('default', models.BooleanField(default=False)),
],
options={
'verbose_name': 'Presentation Cover',
'verbose_name_plural': 'Presentation Covers',
},
),
migrations.RemoveField(
model_name='presentation',
name='features_cover',
),
migrations.DeleteModel(
name='FeaturesCover',
),
migrations.AddField(
model_name='presentation',
name='presentation_covers',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='landing.PresentationCover', verbose_name='presentation cover'),
),
]
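# Hedged usage note: a migration like this is normally applied with
# `python manage.py migrate landing`; the app label "landing" comes from the
# dependencies above, while the exact migration name is assigned by Django.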
| [
"django.db.models.URLField",
"django.db.migrations.RemoveField",
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.migrations.DeleteModel",
"django.db.models.AutoField",
"django.db.models.BooleanField"
] | [((1299, 1371), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""presentation"""', 'name': '"""features_cover"""'}), "(model_name='presentation', name='features_cover')\n", (1321, 1371), False, 'from django.db import migrations, models\n'), ((1416, 1460), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""FeaturesCover"""'}), "(name='FeaturesCover')\n", (1438, 1460), False, 'from django.db import migrations, models\n'), ((1611, 1773), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""landing.PresentationCover"""', 'verbose_name': '"""presentation cover"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='landing.PresentationCover', verbose_name=\n 'presentation cover')\n", (1628, 1773), False, 'from django.db import migrations, models\n'), ((448, 541), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (464, 541), False, 'from django.db import migrations, models\n'), ((566, 603), 'django.db.models.URLField', 'models.URLField', ([], {'verbose_name': '"""image"""'}), "(verbose_name='image')\n", (581, 603), False, 'from django.db import migrations, models\n'), ((632, 697), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(50)', 'verbose_name': '"""label"""'}), "(blank=True, max_length=50, verbose_name='label')\n", (648, 697), False, 'from django.db import migrations, models\n'), ((729, 805), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(50)', 'null': '(True)', 'verbose_name': '"""label"""'}), "(blank=True, max_length=50, null=True, verbose_name='label')\n", (745, 805), False, 'from django.db import migrations, models\n'), ((837, 913), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(50)', 'null': '(True)', 'verbose_name': '"""label"""'}), "(blank=True, max_length=50, null=True, verbose_name='label')\n", (853, 913), False, 'from django.db import migrations, models\n'), ((944, 1048), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('DES', 'description'), ('FEA', 'features')]", 'default': '"""DES"""', 'max_length': '(3)'}), "(choices=[('DES', 'description'), ('FEA', 'features')],\n default='DES', max_length=3)\n", (960, 1048), False, 'from django.db import migrations, models\n'), ((1075, 1109), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1094, 1109), False, 'from django.db import migrations, models\n')] |
import argparse
import numpy as np
import matplotlib.pyplot as plt
from FAUSTPy import *
#######################################################
# set up command line arguments
#######################################################
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--faustfloat',
dest="faustfloat",
default="float",
help="The value of FAUSTFLOAT.")
parser.add_argument('-p', '--path',
dest="faust_path",
default="",
help="The path to the FAUST compiler.")
parser.add_argument('-c', '--cflags',
dest="cflags",
default=[],
type=str.split,
help="Extra compiler flags")
parser.add_argument('-s', '--fs',
dest="fs",
default=48000,
type=int,
help="The sampling frequency")
args = parser.parse_args()
#######################################################
# initialise the FAUST object and get the default parameters
#######################################################
wrapper.FAUST_PATH = args.faust_path
dattorro = FAUST("dattorro_notch_cut_regalia.dsp", args.fs, args.faustfloat,
extra_compile_args=args.cflags)
def_Q = dattorro.dsp.ui.p_Q
def_Gain = dattorro.dsp.ui.p_Gain
def_Freq = dattorro.dsp.ui.p_Center_Freq
#######################################################
# plot the frequency response with the default settings
#######################################################
audio = np.zeros((dattorro.dsp.num_in, args.fs), dtype=dattorro.dsp.dtype)
audio[:, 0] = 1
out = dattorro.compute(audio)
print(audio)
print(out)
spec = np.fft.fft(out)[:, :args.fs//2]  # floor division: slice indices must be integers
fig = plt.figure()
p = fig.add_subplot(
1, 1, 1,
title="Frequency response with the default settings\n"
"(Q={}, F={:.2f} Hz, G={:.0f} dB FS)".format(
def_Q.zone, def_Freq.zone, 20*np.log10(def_Gain.zone+1e-8)
),
xlabel="Frequency in Hz (log)",
ylabel="Magnitude in dB FS",
xscale="log"
)
p.plot(20*np.log10(np.absolute(spec.T)+1e-8))
p.legend(("Left channel", "Right channel"), loc="best")
#######################################################
# plot the frequency response with varying Q
#######################################################
Q = np.linspace(def_Q.min, def_Q.max, 10)
dattorro.dsp.ui.p_Center_Freq = 1e2
dattorro.dsp.ui.p_Gain = 10**(-0.5) # -10 dB
cur_G = dattorro.dsp.ui.p_Gain.zone
cur_F = dattorro.dsp.ui.p_Center_Freq.zone
fig = plt.figure()
p = fig.add_subplot(
1, 1, 1,
title="Frequency response "
"(G={:.0f} dB FS, F={} Hz)".format(20*np.log10(cur_G+1e-8), cur_F),
xlabel="Frequency in Hz (log)",
ylabel="Magnitude in dB FS",
xscale="log"
)
for q in Q:
dattorro.dsp.ui.p_Q = q
out = dattorro.compute(audio)
    spec = np.fft.fft(out)[0, :args.fs//2]
p.plot(20*np.log10(np.absolute(spec.T)+1e-8),
label="Q={}".format(q))
p.legend(loc="best")
#######################################################
# plot the frequency response with varying gain
#######################################################
# start at -60 dB because the minimum is at an extremely low -160 dB
G = np.logspace(-3, np.log10(def_Gain.max), 10)
dattorro.dsp.ui.p_Q = 2
cur_Q = dattorro.dsp.ui.p_Q.zone
cur_F = dattorro.dsp.ui.p_Center_Freq.zone
fig = plt.figure()
p = fig.add_subplot(
1, 1, 1,
title="Frequency response (Q={}, F={} Hz)".format(cur_Q, cur_F),
xlabel="Frequency in Hz (log)",
ylabel="Magnitude in dB FS",
xscale="log"
)
for g in G:
dattorro.dsp.ui.p_Gain = g
out = dattorro.compute(audio)
    spec = np.fft.fft(out)[0, :args.fs//2]
p.plot(20*np.log10(np.absolute(spec.T)+1e-8),
label="G={:.3g} dB FS".format(20*np.log10(g+1e-8)))
p.legend(loc="best")
###########################################################
# plot the frequency response with varying center frequency
###########################################################
F = np.logspace(np.log10(def_Freq.min), np.log10(def_Freq.max), 10)
dattorro.dsp.ui.p_Q = def_Q.default
dattorro.dsp.ui.p_Gain = 10**(-0.5) # -10 dB
cur_Q = dattorro.dsp.ui.p_Q.zone
cur_G = dattorro.dsp.ui.p_Gain.zone
fig = plt.figure()
p = fig.add_subplot(
1, 1, 1,
title="Frequency response "
"(Q={}, G={:.0f} dB FS)".format(cur_Q, 20*np.log10(cur_G+1e-8)),
xlabel="Frequency in Hz (log)",
ylabel="Magnitude in dB FS",
xscale="log"
)
for f in F:
dattorro.dsp.ui.p_Center_Freq = f
out = dattorro.compute(audio)
    spec = np.fft.fft(out)[0, :args.fs//2]
p.plot(20*np.log10(np.absolute(spec.T)+1e-8),
label="F={:.2f} Hz".format(f))
p.legend(loc="best")
################
# show the plots
################
plt.show()
print("everything passes!")
| [
"numpy.absolute",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"numpy.fft.fft",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.log10"
] | [((244, 269), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (267, 269), False, 'import argparse\n'), ((1627, 1693), 'numpy.zeros', 'np.zeros', (['(dattorro.dsp.num_in, args.fs)'], {'dtype': 'dattorro.dsp.dtype'}), '((dattorro.dsp.num_in, args.fs), dtype=dattorro.dsp.dtype)\n', (1635, 1693), True, 'import numpy as np\n'), ((1812, 1824), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1822, 1824), True, 'import matplotlib.pyplot as plt\n'), ((2413, 2450), 'numpy.linspace', 'np.linspace', (['def_Q.min', 'def_Q.max', '(10)'], {}), '(def_Q.min, def_Q.max, 10)\n', (2424, 2450), True, 'import numpy as np\n'), ((2621, 2633), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2631, 2633), True, 'import matplotlib.pyplot as plt\n'), ((3479, 3491), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3489, 3491), True, 'import matplotlib.pyplot as plt\n'), ((4349, 4361), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4359, 4361), True, 'import matplotlib.pyplot as plt\n'), ((4886, 4896), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4894, 4896), True, 'import matplotlib.pyplot as plt\n'), ((1774, 1789), 'numpy.fft.fft', 'np.fft.fft', (['out'], {}), '(out)\n', (1784, 1789), True, 'import numpy as np\n'), ((3342, 3364), 'numpy.log10', 'np.log10', (['def_Gain.max'], {}), '(def_Gain.max)\n', (3350, 3364), True, 'import numpy as np\n'), ((4137, 4159), 'numpy.log10', 'np.log10', (['def_Freq.min'], {}), '(def_Freq.min)\n', (4145, 4159), True, 'import numpy as np\n'), ((4161, 4183), 'numpy.log10', 'np.log10', (['def_Freq.max'], {}), '(def_Freq.max)\n', (4169, 4183), True, 'import numpy as np\n'), ((2952, 2967), 'numpy.fft.fft', 'np.fft.fft', (['out'], {}), '(out)\n', (2962, 2967), True, 'import numpy as np\n'), ((3772, 3787), 'numpy.fft.fft', 'np.fft.fft', (['out'], {}), '(out)\n', (3782, 3787), True, 'import numpy as np\n'), ((4687, 4702), 'numpy.fft.fft', 'np.fft.fft', (['out'], {}), '(out)\n', (4697, 4702), True, 'import numpy as np\n'), ((2018, 2049), 'numpy.log10', 'np.log10', (['(def_Gain.zone + 1e-08)'], {}), '(def_Gain.zone + 1e-08)\n', (2026, 2049), True, 'import numpy as np\n'), ((2167, 2186), 'numpy.absolute', 'np.absolute', (['spec.T'], {}), '(spec.T)\n', (2178, 2186), True, 'import numpy as np\n'), ((2748, 2771), 'numpy.log10', 'np.log10', (['(cur_G + 1e-08)'], {}), '(cur_G + 1e-08)\n', (2756, 2771), True, 'import numpy as np\n'), ((4480, 4503), 'numpy.log10', 'np.log10', (['(cur_G + 1e-08)'], {}), '(cur_G + 1e-08)\n', (4488, 4503), True, 'import numpy as np\n'), ((3007, 3026), 'numpy.absolute', 'np.absolute', (['spec.T'], {}), '(spec.T)\n', (3018, 3026), True, 'import numpy as np\n'), ((3827, 3846), 'numpy.absolute', 'np.absolute', (['spec.T'], {}), '(spec.T)\n', (3838, 3846), True, 'import numpy as np\n'), ((3898, 3917), 'numpy.log10', 'np.log10', (['(g + 1e-08)'], {}), '(g + 1e-08)\n', (3906, 3917), True, 'import numpy as np\n'), ((4742, 4761), 'numpy.absolute', 'np.absolute', (['spec.T'], {}), '(spec.T)\n', (4753, 4761), True, 'import numpy as np\n')] |
#!/usr/bin/python3
import os
import sys
import http.server
import socketserver
import socket
import shutil
from base64 import b64encode
from urllib.parse import quote
from os.path import basename, splitext, join, isfile
from collections import defaultdict
from subprocess import run
from distutils.dir_util import copy_tree
from distutils.file_util import copy_file
build_dir = 'build'
source_dir = 'source'
dest_dir = 'built_static'
css_dir = join(build_dir, 'css')
images_dir = join(build_dir, 'images')
class TemporaryTCPServer(socketserver.TCPServer):
def server_bind(self):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
def serve(port):
os.chdir(dest_dir)
handler = http.server.SimpleHTTPRequestHandler
httpd = TemporaryTCPServer(("", port), handler)
print("[serve] serving on port " + str(port))
httpd.serve_forever()
def clean():
    # ignore_errors so a repeated clean does not fail when the directories are already gone
    shutil.rmtree(build_dir, ignore_errors=True)
    shutil.rmtree(dest_dir, ignore_errors=True)
def build():
copy_tree(source_dir, build_dir, update=1)
make_fallback_images(images_dir)
print('[create] _images.scss ', end='')
save_images_css(images_dir, join(css_dir, '_images.scss'))
print('[ok]')
run_sass(css_dir, join(dest_dir, 'css'))
print('[update] asis ', end='')
copy_tree(join(source_dir, 'asis'), join(dest_dir, 'asis'), update=1)
print('[ok]')
def run_sass(css_source_dir, css_dest_dir):
os.makedirs(css_dest_dir, exist_ok=True)
for (dirpath, dirnames, filenames) in os.walk(css_source_dir):
for f in filenames:
name, ext = splitext(f)
if ext == '.scss' and name[0] != '_':
print("[sass] " + f + ' ', end='')
run([
'sass',
join(css_source_dir, f),
join(css_dest_dir, name + '.css')
], check = True)
print("[ok]")
elif ext == '.css':
print("[copy] " + f + ' ', end='')
copy_file(join(css_source_dir, f), join(css_dest_dir, f), update=1)
print("[ok]")
break
def make_fallback_images(images_dir):
images = find_built_images(images_dir)
for image, files in images.items():
f = files[0]
pngimage = image + '.png'
if pngimage not in files:
print("[create] " + pngimage + ' ', end='')
run([
'convert',
'-background', 'none',
join(images_dir, f),
join(images_dir, pngimage)
], check = True)
print("[ok]")
def images_in_dir(dir):
vectors = []
rasters = []
dumb_rasters = []
lossy = []
for (dirpath, dirnames, filenames) in os.walk(dir):
for f in filenames:
name, ext = splitext(basename(f))
if ext in ['.svg']:
vectors += [f]
if ext in ['.png']:
rasters += [f]
if ext in ['.gif']:
dumb_rasters += [f]
if ext in ['.jpg', '.jpeg']:
lossy += [f]
break
return vectors + rasters + dumb_rasters + lossy
def find_built_images(images_dir):
images = defaultdict(list)
for image in images_in_dir(images_dir):
name, _ = splitext(basename(image))
images[name] += [image]
return dict(images)
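# images_to_css emits one ".image-<name>" class per image found in images_dir:
# raster files are inlined as base64 data URIs, SVGs as URL-encoded XML data
# URIs with a transparent linear-gradient appended as a second background layer.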
def images_to_css(images_dir):
images = find_built_images(images_dir)
csseses = []
for name, files in images.items():
css = '.image-' + name + " {\n"
files_and_extensions = [(f, splitext(f)[1][1:]) for f in files]
for image, ext in [(f, ext) for f, ext in files_and_extensions if ext != 'svg']:
data = raster_data(join(images_dir, image), ext)
css += 'background-image: url(' + data + ");\n"
for svg, ext in [(f, ext) for f, ext in files_and_extensions if ext == 'svg']:
data = xml_data(join(images_dir, svg), ext)
css += 'background-image: url(' + data + "), linear-gradient(transparent, transparent);\n"
css += "}\n"
csseses += [css]
return "\n".join(csseses)
def save_images_css(images_dir, css_file):
with open(css_file, 'w') as f:
f.write(images_to_css(images_dir))
def raster_data(image_filename, ext):
with open(image_filename, 'rb') as f:
data = b64encode(f.read()).decode('utf-8')
return 'data:image/' + ext + ';base64,' + data
def xml_data(image_filename, ext):
with open(image_filename, 'r') as f:
data = quote(f.read())
return 'data:image/' + ext + '+xml;charset=US-ASCII,' + data
def image_data(image_filename):
_, ext = splitext(image_filename)
if ext == '.svg':
return xml_data(image_filename, ext)
else:
return raster_data(image_filename, ext)
if __name__ == '__main__':
try:
arg = sys.argv[1]
except IndexError:
arg = None
if arg == 'build':
build()
elif arg == 'clean':
clean()
elif arg == 'serve':
try:
port = int(sys.argv[2])
except IndexError:
port = 8000
build()
serve(port)
else:
print('please use "build", "clean" or "serve" as a first argument.')
| [
"os.makedirs",
"os.path.basename",
"os.walk",
"collections.defaultdict",
"os.path.splitext",
"shutil.rmtree",
"os.path.join",
"os.chdir",
"distutils.dir_util.copy_tree"
] | [((447, 469), 'os.path.join', 'join', (['build_dir', '"""css"""'], {}), "(build_dir, 'css')\n", (451, 469), False, 'from os.path import basename, splitext, join, isfile\n'), ((483, 508), 'os.path.join', 'join', (['build_dir', '"""images"""'], {}), "(build_dir, 'images')\n", (487, 508), False, 'from os.path import basename, splitext, join, isfile\n'), ((729, 747), 'os.chdir', 'os.chdir', (['dest_dir'], {}), '(dest_dir)\n', (737, 747), False, 'import os\n'), ((947, 971), 'shutil.rmtree', 'shutil.rmtree', (['build_dir'], {}), '(build_dir)\n', (960, 971), False, 'import shutil\n'), ((976, 999), 'shutil.rmtree', 'shutil.rmtree', (['dest_dir'], {}), '(dest_dir)\n', (989, 999), False, 'import shutil\n'), ((1018, 1060), 'distutils.dir_util.copy_tree', 'copy_tree', (['source_dir', 'build_dir'], {'update': '(1)'}), '(source_dir, build_dir, update=1)\n', (1027, 1060), False, 'from distutils.dir_util import copy_tree\n'), ((1449, 1489), 'os.makedirs', 'os.makedirs', (['css_dest_dir'], {'exist_ok': '(True)'}), '(css_dest_dir, exist_ok=True)\n', (1460, 1489), False, 'import os\n'), ((1533, 1556), 'os.walk', 'os.walk', (['css_source_dir'], {}), '(css_source_dir)\n', (1540, 1556), False, 'import os\n'), ((2778, 2790), 'os.walk', 'os.walk', (['dir'], {}), '(dir)\n', (2785, 2790), False, 'import os\n'), ((3247, 3264), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3258, 3264), False, 'from collections import defaultdict\n'), ((4723, 4747), 'os.path.splitext', 'splitext', (['image_filename'], {}), '(image_filename)\n', (4731, 4747), False, 'from os.path import basename, splitext, join, isfile\n'), ((1176, 1205), 'os.path.join', 'join', (['css_dir', '"""_images.scss"""'], {}), "(css_dir, '_images.scss')\n", (1180, 1205), False, 'from os.path import basename, splitext, join, isfile\n'), ((1248, 1269), 'os.path.join', 'join', (['dest_dir', '"""css"""'], {}), "(dest_dir, 'css')\n", (1252, 1269), False, 'from os.path import basename, splitext, join, isfile\n'), ((1322, 1346), 'os.path.join', 'join', (['source_dir', '"""asis"""'], {}), "(source_dir, 'asis')\n", (1326, 1346), False, 'from os.path import basename, splitext, join, isfile\n'), ((1348, 1370), 'os.path.join', 'join', (['dest_dir', '"""asis"""'], {}), "(dest_dir, 'asis')\n", (1352, 1370), False, 'from os.path import basename, splitext, join, isfile\n'), ((1610, 1621), 'os.path.splitext', 'splitext', (['f'], {}), '(f)\n', (1618, 1621), False, 'from os.path import basename, splitext, join, isfile\n'), ((3337, 3352), 'os.path.basename', 'basename', (['image'], {}), '(image)\n', (3345, 3352), False, 'from os.path import basename, splitext, join, isfile\n'), ((2853, 2864), 'os.path.basename', 'basename', (['f'], {}), '(f)\n', (2861, 2864), False, 'from os.path import basename, splitext, join, isfile\n'), ((3776, 3799), 'os.path.join', 'join', (['images_dir', 'image'], {}), '(images_dir, image)\n', (3780, 3799), False, 'from os.path import basename, splitext, join, isfile\n'), ((3982, 4003), 'os.path.join', 'join', (['images_dir', 'svg'], {}), '(images_dir, svg)\n', (3986, 4003), False, 'from os.path import basename, splitext, join, isfile\n'), ((2519, 2538), 'os.path.join', 'join', (['images_dir', 'f'], {}), '(images_dir, f)\n', (2523, 2538), False, 'from os.path import basename, splitext, join, isfile\n'), ((2557, 2583), 'os.path.join', 'join', (['images_dir', 'pngimage'], {}), '(images_dir, pngimage)\n', (2561, 2583), False, 'from os.path import basename, splitext, join, isfile\n'), ((1793, 1816), 'os.path.join', 'join', 
(['css_source_dir', 'f'], {}), '(css_source_dir, f)\n', (1797, 1816), False, 'from os.path import basename, splitext, join, isfile\n'), ((1838, 1871), 'os.path.join', 'join', (['css_dest_dir', "(name + '.css')"], {}), "(css_dest_dir, name + '.css')\n", (1842, 1871), False, 'from os.path import basename, splitext, join, isfile\n'), ((2048, 2071), 'os.path.join', 'join', (['css_source_dir', 'f'], {}), '(css_source_dir, f)\n', (2052, 2071), False, 'from os.path import basename, splitext, join, isfile\n'), ((2073, 2094), 'os.path.join', 'join', (['css_dest_dir', 'f'], {}), '(css_dest_dir, f)\n', (2077, 2094), False, 'from os.path import basename, splitext, join, isfile\n'), ((3619, 3630), 'os.path.splitext', 'splitext', (['f'], {}), '(f)\n', (3627, 3630), False, 'from os.path import basename, splitext, join, isfile\n')] |
import os
import re
import uuid
import globals
from PIL import Image, ImageDraw, ImageFont
from utils.Asset import ImageAsset
SPACING = 5
back_regex = re.compile(r'back_([0-9]*)\.jpg')
BACK_PIC_UNIT_WIDTH, BACK_PIC_UNIT_HEIGHT = 140, 130
BACK_PIC_NUM_EACH_LINE = 5
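# bg_image_gen draws the caption string `s` onto bg/back_<back_number>.jpg.
# A few background numbers get special text placement/colour; the default
# branch extends the canvas and writes the caption in a strip below the image.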
def bg_image_gen(back_number, s):
def half_en_len(s):
return (len(s) + (len(s.encode(encoding='utf-8')) - len(s)) // 2) // 2
back_number = f'back_{back_number}'
img_path = os.path.join(globals.staticpath, f'bg/{back_number}.jpg')
im_src = Image.open(img_path)
if back_number in [f'back_{n}' for n in [38, 46, 47, 51, 52, 53]]:
real_width = max(3, im_src.width // max(6, half_en_len(s)) * 4 // 5)
font = ImageFont.truetype(os.path.join(globals.staticpath, 'simhei.ttf'), real_width)
real_height = real_width + SPACING
im = Image.new('RGB', (im_src.width, im_src.height), (255, 255, 255))
im.paste(im_src)
text_width = im_src.width
draw = ImageDraw.Draw(im)
sz = draw.textsize(s, font=font)
x = (text_width - sz[0]) / 2
y = im_src.height - real_height
draw.text((x, y), s, fill=(245, 255, 250), font=font)
elif back_number in [f'back_{n}' for n in [33]]:
real_width = max(3, im_src.width // max(6, half_en_len(s)) * 4 // 5)
font = ImageFont.truetype(os.path.join(globals.staticpath, 'simhei.ttf'), real_width)
real_height = real_width + SPACING
im = Image.new('RGB', (im_src.width, im_src.height), (255, 255, 255))
im.paste(im_src)
text_width = im_src.width
draw = ImageDraw.Draw(im)
sz = draw.textsize(s, font=font)
x = (text_width - sz[0]) / 2
y = im_src.height - 2 * real_height
draw.text((x, y), s, fill=(245, 255, 250), font=font)
elif back_number in [f'back_{n}' for n in [50]]:
real_width = max(3, im_src.width // max(6, half_en_len(s)) * 4 // 5)
font = ImageFont.truetype(os.path.join(globals.staticpath, 'simhei.ttf'), real_width)
real_height = real_width + SPACING
im = Image.new('RGB', (im_src.width, im_src.height), (255, 255, 255))
im.paste(im_src)
text_width = im_src.width
draw = ImageDraw.Draw(im)
sz = draw.textsize(s, font=font)
x = (text_width - sz[0]) / 2
y = 5
draw.text((x, y), s, fill=(23, 0, 0), font=font)
else:
real_width = max(3, im_src.width // max(6, half_en_len(s)))
font = ImageFont.truetype(os.path.join(globals.staticpath, 'simhei.ttf'), real_width)
real_height = real_width + SPACING
im = Image.new('RGB', (im_src.width, im_src.height + real_height), (255, 255, 255))
im.paste(im_src)
text_width = im_src.width
draw = ImageDraw.Draw(im)
sz = draw.textsize(s, font=font)
x = (text_width - sz[0]) / 2
y = im_src.height
draw.text((x, y), s, fill=(23, 0, 0), font=font)
return im
def get_back_pics():
raw = ImageAsset.get('back_catalogue')
if raw:
return raw
back_pic_set = set()
for _, _, files in os.walk(os.path.join(globals.staticpath, 'bg')):
for f in files:
if f.startswith('back_') and f.endswith('.jpg'):
num = int(back_regex.findall(f)[0])
back_pic_set.add(num)
cur_back_pic_nums = len(back_pic_set)
if cur_back_pic_nums == 0:
return
im = Image.new('RGB', (BACK_PIC_NUM_EACH_LINE * BACK_PIC_UNIT_WIDTH, BACK_PIC_UNIT_HEIGHT * (((cur_back_pic_nums - 1) // BACK_PIC_NUM_EACH_LINE) + 1)), (255, 255, 255))
for i, num in enumerate(back_pic_set):
im_o = bg_image_gen(num, f'底图 {num}')
im_o = im_o.resize((BACK_PIC_UNIT_WIDTH, BACK_PIC_UNIT_HEIGHT))
box = (i % BACK_PIC_NUM_EACH_LINE * BACK_PIC_UNIT_WIDTH, i // BACK_PIC_NUM_EACH_LINE * BACK_PIC_UNIT_HEIGHT)
im.paste(im_o, box)
return ImageAsset.image_raw(im, 'back_catalogue')
def merge_image(rsn, rarity, attribute, band_id, thumbnail=True, trained=False, return_fn=False):
if thumbnail:
try:
if return_fn:
fn = os.path.join(globals.datapath, 'image', f'auto_reply/cards/thumb/m_{rsn}_{"normal" if not trained else "after_training"}.png')
if os.access(fn, os.R_OK):
return fn
attribute_icon = Image.open(os.path.join(globals.asset_resource_path, f'{attribute}.png'))
band_icon = Image.open(os.path.join(globals.asset_resource_path, f'band_{band_id}.png'))
if not trained:
back_image = Image.open(f'{os.path.join(globals.asset_card_thumb_path, f"{rsn}_normal.png")}')
star = Image.open(os.path.join(globals.asset_resource_path, 'star.png')).resize((32, 32), Image.ANTIALIAS)
else:
back_image = Image.open(f'{os.path.join(globals.asset_card_thumb_path, f"{rsn}_after_training.png")}')
star = Image.open(os.path.join(globals.asset_resource_path, 'star_trained.png')).resize((32, 32), Image.ANTIALIAS)
if rarity == 1:
frame = Image.open(os.path.join(globals.asset_resource_path, f'card-1-{attribute}.png'))
else:
frame = Image.open(os.path.join(globals.asset_resource_path, f'card-{rarity}.png'))
back_image.paste(frame, (0, 0), mask=frame)
back_image.paste(band_icon, (0, 0), mask=band_icon)
back_image.paste(attribute_icon, (180 - 50, 0), mask=attribute_icon)
for i in range(rarity):
back_image.paste(star, (2, 170 - 27 * (i + 1)), mask=star)
if return_fn:
fn = os.path.join(globals.datapath, 'image', f'auto_reply/cards/thumb/m_{rsn}_{"normal" if not trained else "after_training"}.png')
back_image.save(fn)
return fn
return back_image
except:
import sys
sys.excepthook(*sys.exc_info())
return None
else:
fn = os.path.join(globals.datapath, 'image', f'auto_reply/cards/m_{rsn}_{"normal" if not trained else "after_training"}.png')
if os.access(fn, os.R_OK):
return fn
try:
OUT_WIDTH, OUT_HEIGHT = 1364, 1020
INNER_WIDTH, INNER_HEIGHT = 1334, 1002
STAR_SIZE, ICON_SIZE = 100, 150
TOP_OFFSET, RIGHT_OFFSET, BOTTOM_OFFSET, LEFT_OFFSET = 22, 165, 20, 10
STAT_STEP = 95
back_image = Image.new('RGB', (OUT_WIDTH, OUT_HEIGHT))
attribute_icon = Image.open(os.path.join(globals.asset_resource_path, f'{attribute}.png')).resize((ICON_SIZE, ICON_SIZE), Image.ANTIALIAS)
band_icon = Image.open(os.path.join(globals.asset_resource_path, f'band_{band_id}.png')).resize((ICON_SIZE, ICON_SIZE), Image.ANTIALIAS)
if not trained:
card = Image.open(f'{os.path.join(globals.asset_card_path, f"{rsn}_card_normal.png")}')
star = Image.open(os.path.join(globals.asset_resource_path, 'star.png')).resize((STAR_SIZE, STAR_SIZE), Image.ANTIALIAS)
else:
card = Image.open(f'{os.path.join(globals.asset_card_path, f"{rsn}_card_after_training.png")}')
star = Image.open(os.path.join(globals.asset_resource_path, 'star_trained.png')).resize((STAR_SIZE, STAR_SIZE), Image.ANTIALIAS)
if rarity == 1:
frame = Image.open(os.path.join(globals.asset_resource_path, f'frame-1-{attribute}.png')).resize((OUT_WIDTH, OUT_HEIGHT), Image.ANTIALIAS)
else:
frame = Image.open(os.path.join(globals.asset_resource_path, f'frame-{rarity}.png')).resize((OUT_WIDTH, OUT_HEIGHT), Image.ANTIALIAS)
back_image.paste(card, ((OUT_WIDTH - INNER_WIDTH) // 2, (OUT_HEIGHT - INNER_HEIGHT) // 2), mask=card)
back_image.paste(frame, (0, 0), mask=frame)
back_image.paste(band_icon, (LEFT_OFFSET, TOP_OFFSET), mask=band_icon)
back_image.paste(attribute_icon, (OUT_WIDTH - RIGHT_OFFSET, TOP_OFFSET), mask=attribute_icon)
for i in range(rarity):
back_image.paste(star, (LEFT_OFFSET, OUT_HEIGHT - BOTTOM_OFFSET - STAT_STEP * (i + 1)), mask=star)
back_image.save(fn)
return fn
except:
return ''
def white_padding(width, height):
return Image.new('RGB', (width, height), (255, 255, 255))
def thumbnail(**options):
# images: a list of Image objects, or a list of lists(tuples) of Image objects
# labels: a list of strings shown at the bottom
# image_style: if not assigned, take the params of the first image; if both assigned, will be forced to resize
# width: width of each image, if not assigned, will be min(scaled value by height, 180)
# height: height of each image, if not assigned, will be min(scaled value by width, 180)
# label_style:
# font_size: font_size of each label
# col_num (images are arranged row by row)
# col_space: (space between two columns)
# row_space (space between two rows, if labels exist, it means the space between the label of row1 and the image of row2)
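    # (see _thumbnail_usage_sketch after this function for a worked example of these options)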
images = options['images']
first_image = images[0]
if not isinstance(first_image, Image.Image):
if isinstance(first_image, (list, tuple)):
first_image = first_image[0]
if not isinstance(first_image, Image.Image):
raise Exception('images must be a list of Image objects, or a list of lists(tuples) of Image objects')
else:
raise Exception('images must be a list of Image objects, or a list of lists(tuples) of Image objects')
else:
images = [[im] for im in images]
if not options.get('image_style'):
box_width, box_height = first_image.size
else:
if options['image_style'].get('width') and options['image_style'].get('height'):
box_width, box_height = options['image_style']['width'], options['image_style']['height']
images = [[im.resize((box_width, box_height)) for im in im_list] for im_list in images]
elif options['image_style'].get('width') and not options['image_style'].get('height'):
images = [[im.resize((options['image_style']['width'], options['image_style']['width'] * im.size[1] // im.size[0])) for im in im_list] for im_list in images]
box_width, box_height = options['image_style']['width'], max([im.size[1] for im_list in images for im in im_list])
elif not options['image_style'].get('width') and options['image_style'].get('height'):
images = [[im.resize((options['image_style']['height'] * im.size[0] // im.size[1], options['image_style']['height'])) for im in im_list] for im_list in images]
box_width, box_height = max([im.size[0] for im_list in images for im in im_list]), options['image_style']['height']
col_num = options.get('col_num', 4)
row_num = (len(images) - 1) // col_num + 1
col_space = options.get('col_space', 0)
row_space = options.get('row_space', 0)
if options.get('labels'):
font = ImageFont.truetype(os.path.join(globals.staticpath, 'simhei.ttf'), options.get('label_style', {}).get('font_size', 20))
all_chars = set()
max_label_width = 0
for label in options['labels']:
max_label_width = max(max_label_width, ImageDraw.Draw(Image.new('RGB', (0, 0))).textsize(label, font=font)[0])
all_chars |= set(label)
label_height = ImageDraw.Draw(Image.new('RGB', (0, 0))).textsize(''.join(all_chars), font=font)[1]
box_width = max(box_width * len(images[0]), max_label_width) // len(images[0])
back_image = Image.new('RGB', (
col_num * len(images[0]) * box_width + (col_num - 1) * col_space,
(box_height + label_height) * row_num + row_num * row_space,
), (255, 255, 255))
draw = ImageDraw.Draw(back_image)
labels = options['labels']
for r in range(row_num):
for c in range(col_num):
if r * col_num + c >= len(images):
break
image_group = images[r * col_num + c]
for i, im in enumerate(image_group):
back_image.paste(im, (
(len(image_group) * c + i) * box_width + (box_width - im.size[0]) // 2 + col_space * c,
r * (box_height + label_height + row_space)
))
sz = draw.textsize(labels[r * col_num + c], font=font)
draw.text((
len(image_group) * c * box_width + (len(image_group) * box_width - sz[0]) // 2 + c * col_space, r * (box_height + label_height + row_space) + box_height
), labels[r * col_num + c], fill=(0, 0, 0), font=font)
else:
back_image = Image.new('RGB', (
col_num * len(images[0]) * box_width + (col_num - 1) * col_space,
box_height * row_num + (row_num - 1) * row_space
), (255, 255, 255))
draw = ImageDraw.Draw(back_image)
for r in range(row_num):
for c in range(col_num):
if r * col_num + c >= len(images):
break
image_group = images[r * col_num + c]
for i, im in enumerate(image_group):
back_image.paste(im, (
(len(image_group) * c + i) * box_width + (box_width - im.size[0]) // 2 + c * col_space * int(i == len(image_group) - 1),
r * (box_height + row_space)
))
return ImageAsset.image_raw(back_image)
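# Hedged usage sketch for thumbnail(): the placeholder images and labels below
# are invented purely to illustrate the options documented at the top of the
# function; wrapped in a function so nothing runs on import.
def _thumbnail_usage_sketch():
    placeholder_images = [Image.new('RGB', (180, 180), (200, 200, 200)) for _ in range(4)]
    # returns whatever ImageAsset.image_raw produces for the composed grid
    return thumbnail(
        images=placeholder_images,
        labels=['card 1', 'card 2', 'card 3', 'card 4'],
        image_style={'width': 120},
        col_num=2,
        col_space=10,
        row_space=10,
    )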
def open_nontransparent(filename):
try:
image = Image.open(filename).convert('RGBA')
new_image = Image.new('RGBA', image.size, (255, 255, 255, 255))
new_image.paste(image, (0, 0), image)
return new_image
except:
pass
def manual():
raw = ImageAsset.get('manual')
if raw:
return raw
row_space = 20
col_space = 50
font = ImageFont.truetype(os.path.join(globals.staticpath, 'simhei.ttf'), 20)
lines = [
'ycm/有车吗: 查询车牌(来源: https://bandoristation.com/)',
'底图目录: 查询底图目录(是的,不仅功能一样,连图都盗过来了,虽然还没更新。底图31,Tsugu!.jpg)',
'底图+数字: 切换底图',
'xx.jpg: 图片合成',
'',
'以下查询功能数据来源Bestdori',
'查卡 [稀有度] [颜色] [人物] [乐团] [技能类型]: 按条件筛选符合要求的卡片,同类条件取并集,不同类条件取交集。例如: 查卡 4x pure ksm 分',
'查卡+数字: 按id查询单卡信息',
'无框+数字: 按id查询单卡无框卡面',
'活动列表 [活动类型]: 按条件筛选符合要求的活动,活动类型包括“一般活动”,“竞演LIVE”或“对邦”,“挑战LIVE”或“CP”,“LIVE试炼”,“任务LIVE”',
'活动+数字 [服务器]: 按id查询单活动信息,默认国服,可选“日服”,“国际服”,“台服”,“国服”,“韩服”',
'卡池列表 [卡池类型]: 按条件筛选符合要求的卡池,卡池类型包括“常驻”或“无期限”,“限时”或“限定”或“期间限定”,“特殊”(该条件慎加,因为没啥特别的卡池),“必4”',
'卡池+数字 [服务器]: 按id查询单卡池信息,默认国服,可选“日服”,“国际服”,“台服”,“国服”,“韩服”',
'',
'以下查询功能数据来源bilibili开放的豹跳接口,慎用',
'查抽卡名字 名字: 查用户名称包含该名字的玩家出的4星',
]
line_height = ImageDraw.Draw(Image.new('RGB', (0, 0))).textsize('底图目录', font=font)[1]
image = Image.new('RGB', (ImageDraw.Draw(Image.new('RGB', (0, 0))).textsize(max(lines, key=lambda line: len(line)),
font=font)[0] + 2 * col_space, (line_height + row_space) * len(lines)), (255, 255, 255))
draw = ImageDraw.Draw(image)
line_pos = row_space
for i, line in enumerate(lines):
sz = draw.textsize(line, font=font)
draw.text((col_space, line_pos), line, fill=(0, 0, 0), font=font)
line_pos += sz[1] + row_space
return ImageAsset.image_raw(image, 'manual')
def compress(infile, mb=None, step=10, quality=80, isabs=False):
if not isabs:
absinfile = os.path.join(globals.datapath, 'image', infile)
else:
absinfile = infile
outfile = infile[infile.rfind('/') + 1:infile.rfind('.')] + '-c.jpg'
absoutfile = os.path.join(globals.datapath, 'image', outfile)
if os.path.exists(absoutfile):
return outfile
if mb is None:
im = Image.open(absinfile)
im = im.convert('RGB')
im.save(absoutfile, quality=quality)
return absoutfile
o_size = os.path.getsize(absinfile) / 1024
if o_size <= mb:
return infile
while o_size > mb:
im = Image.open(absinfile)
im = im.convert('RGB')
im.save(absoutfile, quality=quality)
if quality - step < 0:
break
quality -= step
o_size = os.path.getsize(absoutfile) / 1024
return absoutfile
| [
"PIL.Image.new",
"utils.Asset.ImageAsset.get",
"os.path.getsize",
"os.path.exists",
"PIL.Image.open",
"sys.exc_info",
"utils.Asset.ImageAsset.image_raw",
"PIL.ImageDraw.Draw",
"os.path.join",
"os.access",
"re.compile"
] | [((153, 186), 're.compile', 're.compile', (['"""back_([0-9]*)\\\\.jpg"""'], {}), "('back_([0-9]*)\\\\.jpg')\n", (163, 186), False, 'import re\n'), ((461, 518), 'os.path.join', 'os.path.join', (['globals.staticpath', 'f"""bg/{back_number}.jpg"""'], {}), "(globals.staticpath, f'bg/{back_number}.jpg')\n", (473, 518), False, 'import os\n'), ((532, 552), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (542, 552), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3009, 3041), 'utils.Asset.ImageAsset.get', 'ImageAsset.get', (['"""back_catalogue"""'], {}), "('back_catalogue')\n", (3023, 3041), False, 'from utils.Asset import ImageAsset\n'), ((3445, 3615), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(BACK_PIC_NUM_EACH_LINE * BACK_PIC_UNIT_WIDTH, BACK_PIC_UNIT_HEIGHT * ((\n cur_back_pic_nums - 1) // BACK_PIC_NUM_EACH_LINE + 1))', '(255, 255, 255)'], {}), "('RGB', (BACK_PIC_NUM_EACH_LINE * BACK_PIC_UNIT_WIDTH, \n BACK_PIC_UNIT_HEIGHT * ((cur_back_pic_nums - 1) //\n BACK_PIC_NUM_EACH_LINE + 1)), (255, 255, 255))\n", (3454, 3615), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3927, 3969), 'utils.Asset.ImageAsset.image_raw', 'ImageAsset.image_raw', (['im', '"""back_catalogue"""'], {}), "(im, 'back_catalogue')\n", (3947, 3969), False, 'from utils.Asset import ImageAsset\n'), ((8400, 8450), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(width, height)', '(255, 255, 255)'], {}), "('RGB', (width, height), (255, 255, 255))\n", (8409, 8450), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((13645, 13677), 'utils.Asset.ImageAsset.image_raw', 'ImageAsset.image_raw', (['back_image'], {}), '(back_image)\n', (13665, 13677), False, 'from utils.Asset import ImageAsset\n'), ((13971, 13995), 'utils.Asset.ImageAsset.get', 'ImageAsset.get', (['"""manual"""'], {}), "('manual')\n", (13985, 13995), False, 'from utils.Asset import ImageAsset\n'), ((15288, 15309), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (15302, 15309), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((15541, 15578), 'utils.Asset.ImageAsset.image_raw', 'ImageAsset.image_raw', (['image', '"""manual"""'], {}), "(image, 'manual')\n", (15561, 15578), False, 'from utils.Asset import ImageAsset\n'), ((15859, 15907), 'os.path.join', 'os.path.join', (['globals.datapath', '"""image"""', 'outfile'], {}), "(globals.datapath, 'image', outfile)\n", (15871, 15907), False, 'import os\n'), ((15915, 15941), 'os.path.exists', 'os.path.exists', (['absoutfile'], {}), '(absoutfile)\n', (15929, 15941), False, 'import os\n'), ((851, 915), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(im_src.width, im_src.height)', '(255, 255, 255)'], {}), "('RGB', (im_src.width, im_src.height), (255, 255, 255))\n", (860, 915), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((991, 1009), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['im'], {}), '(im)\n', (1005, 1009), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3130, 3168), 'os.path.join', 'os.path.join', (['globals.staticpath', '"""bg"""'], {}), "(globals.staticpath, 'bg')\n", (3142, 3168), False, 'import os\n'), ((6043, 6172), 'os.path.join', 'os.path.join', (['globals.datapath', '"""image"""', 'f"""auto_reply/cards/m_{rsn}_{\'normal\' if not trained else \'after_training\'}.png"""'], {}), '(globals.datapath, \'image\',\n f"auto_reply/cards/m_{rsn}_{\'normal\' if not trained else \'after_training\'}.png"\n )\n', (6055, 6172), False, 'import os\n'), ((6175, 6197), 'os.access', 'os.access', (['fn', 'os.R_OK'], {}), '(fn, os.R_OK)\n', 
(6184, 6197), False, 'import os\n'), ((11950, 11976), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['back_image'], {}), '(back_image)\n', (11964, 11976), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((13088, 13114), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['back_image'], {}), '(back_image)\n', (13102, 13114), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((13797, 13848), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', 'image.size', '(255, 255, 255, 255)'], {}), "('RGBA', image.size, (255, 255, 255, 255))\n", (13806, 13848), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((14096, 14142), 'os.path.join', 'os.path.join', (['globals.staticpath', '"""simhei.ttf"""'], {}), "(globals.staticpath, 'simhei.ttf')\n", (14108, 14142), False, 'import os\n'), ((15684, 15731), 'os.path.join', 'os.path.join', (['globals.datapath', '"""image"""', 'infile'], {}), "(globals.datapath, 'image', infile)\n", (15696, 15731), False, 'import os\n'), ((15998, 16019), 'PIL.Image.open', 'Image.open', (['absinfile'], {}), '(absinfile)\n', (16008, 16019), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((16136, 16162), 'os.path.getsize', 'os.path.getsize', (['absinfile'], {}), '(absinfile)\n', (16151, 16162), False, 'import os\n'), ((16250, 16271), 'PIL.Image.open', 'Image.open', (['absinfile'], {}), '(absinfile)\n', (16260, 16271), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((735, 781), 'os.path.join', 'os.path.join', (['globals.staticpath', '"""simhei.ttf"""'], {}), "(globals.staticpath, 'simhei.ttf')\n", (747, 781), False, 'import os\n'), ((1470, 1534), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(im_src.width, im_src.height)', '(255, 255, 255)'], {}), "('RGB', (im_src.width, im_src.height), (255, 255, 255))\n", (1479, 1534), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1609, 1627), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['im'], {}), '(im)\n', (1623, 1627), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((6513, 6554), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(OUT_WIDTH, OUT_HEIGHT)'], {}), "('RGB', (OUT_WIDTH, OUT_HEIGHT))\n", (6522, 6554), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((11166, 11212), 'os.path.join', 'os.path.join', (['globals.staticpath', '"""simhei.ttf"""'], {}), "(globals.staticpath, 'simhei.ttf')\n", (11178, 11212), False, 'import os\n'), ((16438, 16465), 'os.path.getsize', 'os.path.getsize', (['absoutfile'], {}), '(absoutfile)\n', (16453, 16465), False, 'import os\n'), ((1354, 1400), 'os.path.join', 'os.path.join', (['globals.staticpath', '"""simhei.ttf"""'], {}), "(globals.staticpath, 'simhei.ttf')\n", (1366, 1400), False, 'import os\n'), ((2092, 2156), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(im_src.width, im_src.height)', '(255, 255, 255)'], {}), "('RGB', (im_src.width, im_src.height), (255, 255, 255))\n", (2101, 2156), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2232, 2250), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['im'], {}), '(im)\n', (2246, 2250), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2628, 2706), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(im_src.width, im_src.height + real_height)', '(255, 255, 255)'], {}), "('RGB', (im_src.width, im_src.height + real_height), (255, 255, 255))\n", (2637, 2706), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2782, 2800), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['im'], {}), '(im)\n', (2796, 2800), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((4148, 4283), 'os.path.join', 'os.path.join', 
(['globals.datapath', '"""image"""', 'f"""auto_reply/cards/thumb/m_{rsn}_{\'normal\' if not trained else \'after_training\'}.png"""'], {}), '(globals.datapath, \'image\',\n f"auto_reply/cards/thumb/m_{rsn}_{\'normal\' if not trained else \'after_training\'}.png"\n )\n', (4160, 4283), False, 'import os\n'), ((4294, 4316), 'os.access', 'os.access', (['fn', 'os.R_OK'], {}), '(fn, os.R_OK)\n', (4303, 4316), False, 'import os\n'), ((4388, 4449), 'os.path.join', 'os.path.join', (['globals.asset_resource_path', 'f"""{attribute}.png"""'], {}), "(globals.asset_resource_path, f'{attribute}.png')\n", (4400, 4449), False, 'import os\n'), ((4486, 4550), 'os.path.join', 'os.path.join', (['globals.asset_resource_path', 'f"""band_{band_id}.png"""'], {}), "(globals.asset_resource_path, f'band_{band_id}.png')\n", (4498, 4550), False, 'import os\n'), ((5693, 5828), 'os.path.join', 'os.path.join', (['globals.datapath', '"""image"""', 'f"""auto_reply/cards/thumb/m_{rsn}_{\'normal\' if not trained else \'after_training\'}.png"""'], {}), '(globals.datapath, \'image\',\n f"auto_reply/cards/thumb/m_{rsn}_{\'normal\' if not trained else \'after_training\'}.png"\n )\n', (5705, 5828), False, 'import os\n'), ((13740, 13760), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (13750, 13760), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1976, 2022), 'os.path.join', 'os.path.join', (['globals.staticpath', '"""simhei.ttf"""'], {}), "(globals.staticpath, 'simhei.ttf')\n", (1988, 2022), False, 'import os\n'), ((2512, 2558), 'os.path.join', 'os.path.join', (['globals.staticpath', '"""simhei.ttf"""'], {}), "(globals.staticpath, 'simhei.ttf')\n", (2524, 2558), False, 'import os\n'), ((5145, 5213), 'os.path.join', 'os.path.join', (['globals.asset_resource_path', 'f"""card-1-{attribute}.png"""'], {}), "(globals.asset_resource_path, f'card-1-{attribute}.png')\n", (5157, 5213), False, 'import os\n'), ((5268, 5331), 'os.path.join', 'os.path.join', (['globals.asset_resource_path', 'f"""card-{rarity}.png"""'], {}), "(globals.asset_resource_path, f'card-{rarity}.png')\n", (5280, 5331), False, 'import os\n'), ((14988, 15012), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(0, 0)'], {}), "('RGB', (0, 0))\n", (14997, 15012), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((5979, 5993), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (5991, 5993), False, 'import sys\n'), ((6595, 6656), 'os.path.join', 'os.path.join', (['globals.asset_resource_path', 'f"""{attribute}.png"""'], {}), "(globals.asset_resource_path, f'{attribute}.png')\n", (6607, 6656), False, 'import os\n'), ((6741, 6805), 'os.path.join', 'os.path.join', (['globals.asset_resource_path', 'f"""band_{band_id}.png"""'], {}), "(globals.asset_resource_path, f'band_{band_id}.png')\n", (6753, 6805), False, 'import os\n'), ((11558, 11582), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(0, 0)'], {}), "('RGB', (0, 0))\n", (11567, 11582), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((4623, 4687), 'os.path.join', 'os.path.join', (['globals.asset_card_thumb_path', 'f"""{rsn}_normal.png"""'], {}), "(globals.asset_card_thumb_path, f'{rsn}_normal.png')\n", (4635, 4687), False, 'import os\n'), ((4725, 4778), 'os.path.join', 'os.path.join', (['globals.asset_resource_path', '"""star.png"""'], {}), "(globals.asset_resource_path, 'star.png')\n", (4737, 4778), False, 'import os\n'), ((4875, 4947), 'os.path.join', 'os.path.join', (['globals.asset_card_thumb_path', 'f"""{rsn}_after_training.png"""'], {}), "(globals.asset_card_thumb_path, 
f'{rsn}_after_training.png')\n", (4887, 4947), False, 'import os\n'), ((4985, 5046), 'os.path.join', 'os.path.join', (['globals.asset_resource_path', '"""star_trained.png"""'], {}), "(globals.asset_resource_path, 'star_trained.png')\n", (4997, 5046), False, 'import os\n'), ((6920, 6983), 'os.path.join', 'os.path.join', (['globals.asset_card_path', 'f"""{rsn}_card_normal.png"""'], {}), "(globals.asset_card_path, f'{rsn}_card_normal.png')\n", (6932, 6983), False, 'import os\n'), ((7021, 7074), 'os.path.join', 'os.path.join', (['globals.asset_resource_path', '"""star.png"""'], {}), "(globals.asset_resource_path, 'star.png')\n", (7033, 7074), False, 'import os\n'), ((7179, 7250), 'os.path.join', 'os.path.join', (['globals.asset_card_path', 'f"""{rsn}_card_after_training.png"""'], {}), "(globals.asset_card_path, f'{rsn}_card_after_training.png')\n", (7191, 7250), False, 'import os\n'), ((7288, 7349), 'os.path.join', 'os.path.join', (['globals.asset_resource_path', '"""star_trained.png"""'], {}), "(globals.asset_resource_path, 'star_trained.png')\n", (7300, 7349), False, 'import os\n'), ((7462, 7531), 'os.path.join', 'os.path.join', (['globals.asset_resource_path', 'f"""frame-1-{attribute}.png"""'], {}), "(globals.asset_resource_path, f'frame-1-{attribute}.png')\n", (7474, 7531), False, 'import os\n'), ((7635, 7699), 'os.path.join', 'os.path.join', (['globals.asset_resource_path', 'f"""frame-{rarity}.png"""'], {}), "(globals.asset_resource_path, f'frame-{rarity}.png')\n", (7647, 7699), False, 'import os\n'), ((11427, 11451), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(0, 0)'], {}), "('RGB', (0, 0))\n", (11436, 11451), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((15091, 15115), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(0, 0)'], {}), "('RGB', (0, 0))\n", (15100, 15115), False, 'from PIL import Image, ImageDraw, ImageFont\n')] |
import numpy as np
from numpy import linalg as LA
class LDA():
def __init__(self, dim = 2):
self.dim = dim
self.matrixTransf = None
def fit_transform(self, X, labels):
positive = []
negative = []
for i in range(len(labels)):
if labels[i] == 1:
positive.append(X[i])
else:
negative.append(X[i])
positive = np.array(positive)
negative = np.array(negative)
media_pos = np.mean(positive, axis = 0)
media_neg = np.mean(negative, axis = 0)
cov_pos = np.cov(positive.T)
cov_neg = np.cov(negative.T)
SW = cov_pos + cov_neg
sub = (media_pos - media_neg)
print(SW.shape)
print(sub.shape)
wLDA = np.matmul(LA.pinv(SW), sub)
self.matrixTransf = np.array(wLDA)
print("Matriz de transformação")
print(self.matrixTransf)
res = np.matmul(X, self.matrixTransf.T)
return res
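# Hedged usage sketch: the random two-class data below is invented purely to
# show the expected shapes (X: samples x features, label: array of 0/1).
def _lda_usage_sketch():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.normal(0.0, 1.0, (20, 3)), rng.normal(3.0, 1.0, (20, 3))])
    label = np.array([0] * 20 + [1] * 20)
    lda = LDA()
    projected = lda.fit_transform(X, label)  # 1-D Fisher projection, shape (40,)
    return projected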
| [
"numpy.mean",
"numpy.array",
"numpy.matmul",
"numpy.cov",
"numpy.linalg.pinv"
] | [((441, 459), 'numpy.array', 'np.array', (['positive'], {}), '(positive)\n', (449, 459), True, 'import numpy as np\n'), ((479, 497), 'numpy.array', 'np.array', (['negative'], {}), '(negative)\n', (487, 497), True, 'import numpy as np\n'), ((527, 552), 'numpy.mean', 'np.mean', (['positive'], {'axis': '(0)'}), '(positive, axis=0)\n', (534, 552), True, 'import numpy as np\n'), ((575, 600), 'numpy.mean', 'np.mean', (['negative'], {'axis': '(0)'}), '(negative, axis=0)\n', (582, 600), True, 'import numpy as np\n'), ((621, 639), 'numpy.cov', 'np.cov', (['positive.T'], {}), '(positive.T)\n', (627, 639), True, 'import numpy as np\n'), ((658, 676), 'numpy.cov', 'np.cov', (['negative.T'], {}), '(negative.T)\n', (664, 676), True, 'import numpy as np\n'), ((902, 916), 'numpy.array', 'np.array', (['wLDA'], {}), '(wLDA)\n', (910, 916), True, 'import numpy as np\n'), ((1014, 1047), 'numpy.matmul', 'np.matmul', (['X', 'self.matrixTransf.T'], {}), '(X, self.matrixTransf.T)\n', (1023, 1047), True, 'import numpy as np\n'), ((847, 858), 'numpy.linalg.pinv', 'LA.pinv', (['SW'], {}), '(SW)\n', (854, 858), True, 'from numpy import linalg as LA\n')] |
# Tweepy
# Copyright 2009-2010 <NAME>
# See LICENSE for details.
"""
Tweepy Twitter API library
"""
__version__ = '3.5.0'
__author__ = '<NAME>'
__license__ = 'MIT'
from tweepy.models import Status, User, DirectMessage, Friendship, SavedSearch, SearchResults, ModelFactory, Category
from tweepy.error import TweepError, RateLimitError
from tweepy.api import API
from tweepy.cache import Cache, MemoryCache, FileCache
from tweepy.auth import OAuthHandler, AppAuthHandler
from tweepy.limit import RateLimitHandler
from tweepy.streaming import Stream, StreamListener
from tweepy.cursor import Cursor
# Global, unauthenticated instance of API
api = API()
def debug(enable=True, level=1):
from six.moves.http_client import HTTPConnection
HTTPConnection.debuglevel = level
def chunks(l, n):
    for i in range(0, len(l), n):
yield l[i:i+n]
class Twitter(object):
"""
Twitter API wrapper based on Tweepy using the RateLimitHandler
with multiple access tokens (see https://github.com/svven/tweepy).
It also handles API method cursors and splits input param lists in
    chunks if necessary.
"""
def __init__(self,
consumer_key, consumer_secret, access_tokens=None):
"""
Initialize params for RateLimitHandler to pass to Tweepy API.
Param `access_tokens` must be a dictionary but it can be loaded
later just before the first API method call, and has to be like
{user_id: (access_token_key, access_token_secret)}.
"""
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.access_tokens = access_tokens
_api = None
def _get_api(self):
"Initialize Tweepy API object with RateLimitHandler auth."
auth = RateLimitHandler(self.consumer_key, self.consumer_secret)
for key, secret in self.access_tokens.values():
auth.add_access_token(key, secret)
# print 'Token pool size: %d' % len(auth.tokens)
return API(auth)
# retry_count=2, retry_delay=3,
# wait_on_rate_limit=True, wait_on_rate_limit_notify=True
@property
def api(self):
"Lazy loaded Tweepy API object."
if not self._api:
self._api = self._get_api()
return self._api
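# Hedged usage sketch for the Twitter wrapper: the consumer credentials and the
# access_tokens entries below are placeholders; only the required dict shape
# {user_id: (access_token_key, access_token_secret)} comes from the docstring.
def _twitter_usage_sketch():
    tokens = {
        1001: ('access-token-key-1', 'access-token-secret-1'),
        1002: ('access-token-key-2', 'access-token-secret-2'),
    }
    twitter = Twitter('consumer-key', 'consumer-secret', access_tokens=tokens)
    # The Tweepy API object is created lazily on first access and spreads
    # requests over the token pool via RateLimitHandler.
    return twitter.api.me()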
| [
"tweepy.api.API",
"tweepy.limit.RateLimitHandler"
] | [((647, 652), 'tweepy.api.API', 'API', ([], {}), '()\n', (650, 652), False, 'from tweepy.api import API\n'), ((1769, 1826), 'tweepy.limit.RateLimitHandler', 'RateLimitHandler', (['self.consumer_key', 'self.consumer_secret'], {}), '(self.consumer_key, self.consumer_secret)\n', (1785, 1826), False, 'from tweepy.limit import RateLimitHandler\n'), ((2002, 2011), 'tweepy.api.API', 'API', (['auth'], {}), '(auth)\n', (2005, 2011), False, 'from tweepy.api import API\n')] |
from datetime import timedelta
import numpy as np
import pandas as pd
import argparse
import torch
import json
import os
from add_csv import csv_to_sqlite, csv_to_json
from sqlnet.dbengine import DBEngine
from sqlova.utils.utils_wikisql import *
from train import construct_hyper_param, get_models
#### prediction ####################
def get_args():
parser = argparse.ArgumentParser()
# parser.add_argument("--model_file", required=True, help='model file to use (e.g. model_best.pt)')
# parser.add_argument("--bert_model_file", required=True, help='bert model file to use (e.g. model_bert_best.pt)')
# parser.add_argument("--bert_path", required=True, help='path to bert files (bert_config*.json etc)')
# parser.add_argument("--data_path", required=True, help='path to *.jsonl and *.db files')
# parser.add_argument("--split", required=True, help='prefix of jsonl and db files (e.g. dev)')
# parser.add_argument("--result_path", required=True, help='directory in which to place results')
args = construct_hyper_param(parser)
return args
def load_models(args):
BERT_PT_PATH = './data_and_model'
path_model_bert = './model_bert_best.pt'
path_model = './model_best.pt'
model, model_bert, tokenizer, bert_config = get_models(args, BERT_PT_PATH, True, path_model_bert, path_model)
return model, model_bert, tokenizer, bert_config
def my_get_fields(t, data_tables):
### t: list of dict
### data_tables: dict
nlu, nlu_t, tb, hds = [], [], [], []
for t1 in t:
nlu.append( t1['question'])
nlu_t.append( t1['question_tok'])
tbid = t1['table_id']
tb.append(data_tables[tbid])
hds.append(data_tables[tbid]['header'])
return nlu, nlu_t, tb, hds
def my_predict( data_loader, data_table,
model, model_bert, bert_config, tokenizer,
max_seq_length,
num_target_layers, path_db, dset_name,
EG=False, beam_size=4):
model.eval()
model_bert.eval()
# engine = DBEngine(os.path.join(path_db, f"{dset_name}.db"))
engine = DBEngine(path_db)
results = []
for _, t in enumerate(data_loader):
nlu, nlu_t, tb, hds = my_get_fields(t, data_table)
wemb_n, wemb_h, l_n, l_hpu, l_hs, \
nlu_tt, t_to_tt_idx, tt_to_t_idx \
= get_wemb_bert(bert_config, model_bert, tokenizer, nlu_t, hds, max_seq_length,
num_out_layers_n=num_target_layers, num_out_layers_h=num_target_layers)
if not EG:
# No Execution guided decoding
s_sc, s_sa, s_wn, s_wc, s_wo, s_wv = model(wemb_n, l_n, wemb_h, l_hpu, l_hs)
pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi = pred_sw_se(s_sc, s_sa, s_wn, s_wc, s_wo, s_wv, )
pr_wv_str, pr_wv_str_wp = convert_pr_wvi_to_string(pr_wvi, nlu_t, nlu_tt, tt_to_t_idx, nlu)
pr_sql_i = generate_sql_i(pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wv_str, nlu)
else:
# Execution guided decoding
prob_sca, prob_w, prob_wn_w, \
pr_sc, pr_sa, pr_wn, pr_sql_i \
= model.beam_forward(wemb_n, l_n, wemb_h, l_hpu,
l_hs, engine, tb,
nlu_t, nlu_tt,
tt_to_t_idx, nlu,
beam_size=beam_size)
# sort and generate
pr_wc, pr_wo, pr_wv, pr_sql_i = sort_and_generate_pr_w(pr_sql_i)
# Following variables are just for consistency with no-EG case.
pr_wvi = None # not used
pr_wv_str=None
pr_wv_str_wp=None
pr_sql_q = generate_sql_q(pr_sql_i, tb)
for b, (pr_sql_i1, pr_sql_q1) in enumerate(zip(pr_sql_i, pr_sql_q)):
results1 = {}
results1["query"] = pr_sql_i1
results1["table_id"] = tb[b]["id"]
results1["nlu"] = nlu[b]
results1["sql"] = pr_sql_q1
results.append(results1)
return results
#### deal with data ###################
## no longer needed
def read_csv_to_table(csv_path):
# file_name as table_id
table_id = csv_path.split('/')[-1][:-4]
df = pd.read_csv(csv_path)
headers = df.columns.tolist()
rows = []
for _, row in df.iterrows():
rows.append(row.tolist())
print(rows)
## TODO: add_csv
def create_table_and_db():
pass
def read_scripts(txt_path):
nlu = []
with open(txt_path, 'r') as f:
line = f.readline()
while line:
if line.endswith('\n'):
nlu.append(line[:-1])
else:
nlu.append(line)
line = f.readline()
return nlu
## TODO: with tools in annotate_ws.py
def split_scripts(nlu):
nlu_t = []
for nlu1 in nlu:
nlu_t.append(nlu1.split(' '))
return nlu_t
def get_tables(tb_path):
table = {}
with open(tb_path) as f:
for _, line in enumerate(f):
t1 = json.loads(line.strip())
table[t1['id']] = t1
return table
def prepare_data():
sc_paths = [ './Qian_data/company_script.txt',
'./Qian_data/product_script.txt',]
sc_tableids = [ 'company_table',
'product_table',]
nlu = []
nlu_t = []
tbid = []
for i in range(len(sc_paths)):
nlu_i = read_scripts(sc_paths[i])
nlu_t_i = split_scripts(nlu_i)
nlu.extend(nlu_i)
nlu_t.extend(nlu_t_i)
tbid.extend([sc_tableids[i]] * len(nlu_i))
data = []
for i in range(len(nlu)):
data.append({
'question': nlu[i],
'question_tok': nlu_t[i],
'table_id': tbid[i],
})
return data
if __name__ == '__main__':
dset_name = 'qian'
save_path = './Qian_data/'
### model
args = get_args()
model, model_bert, tokenizer, bert_config = load_models(args)
### data
db_path = './Qian_data/qian.db'
tb_path = './Qian_data/qian.tables.jsonl'
data_table = get_tables(tb_path)
data = prepare_data()
data_loader = torch.utils.data.DataLoader(
batch_size=args.bS,
dataset=data,
shuffle=False,
num_workers=1,
collate_fn=lambda x: x # now dictionary values are not merged!
)
### predict
with torch.no_grad():
results = my_predict(data_loader,
data_table,
model,
model_bert,
bert_config,
tokenizer,
max_seq_length=args.max_seq_length,
num_target_layers=args.num_target_layers,
path_db=db_path,
dset_name=dset_name,
EG=False, #args.EG,
)
# save results
save_for_evaluation(save_path, results, dset_name)
| [
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"pandas.read_csv",
"train.get_models",
"sqlnet.dbengine.DBEngine",
"train.construct_hyper_param",
"torch.no_grad"
] | [((364, 389), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (387, 389), False, 'import argparse\n'), ((1007, 1036), 'train.construct_hyper_param', 'construct_hyper_param', (['parser'], {}), '(parser)\n', (1028, 1036), False, 'from train import construct_hyper_param, get_models\n'), ((1230, 1295), 'train.get_models', 'get_models', (['args', 'BERT_PT_PATH', '(True)', 'path_model_bert', 'path_model'], {}), '(args, BERT_PT_PATH, True, path_model_bert, path_model)\n', (1240, 1295), False, 'from train import construct_hyper_param, get_models\n'), ((1964, 1981), 'sqlnet.dbengine.DBEngine', 'DBEngine', (['path_db'], {}), '(path_db)\n', (1972, 1981), False, 'from sqlnet.dbengine import DBEngine\n'), ((3682, 3703), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {}), '(csv_path)\n', (3693, 3703), True, 'import pandas as pd\n'), ((5284, 5403), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'batch_size': 'args.bS', 'dataset': 'data', 'shuffle': '(False)', 'num_workers': '(1)', 'collate_fn': '(lambda x: x)'}), '(batch_size=args.bS, dataset=data, shuffle=False,\n num_workers=1, collate_fn=lambda x: x)\n', (5311, 5403), False, 'import torch\n'), ((5474, 5489), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5487, 5489), False, 'import torch\n')] |
import tensorflow as tf
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from PIL import Image
import Network
import dataset
from Network import BATCH_SIZE
from dataset import DataSet
def output_predict(depths, images, depths_discretized, depths_reconstructed, output_dir):
print("output predict into %s" % output_dir)
if not tf.gfile.Exists(output_dir):
tf.gfile.MakeDirs(output_dir)
for i, _ in enumerate(images):
image, depth, depth_discretized, depth_reconstructed = images[i], depths[i], depths_discretized[i], \
depths_reconstructed[i]
pilimg = Image.fromarray(np.uint8(image))
image_name = "%s/%03d_org.png" % (output_dir, i)
pilimg.save(image_name)
depth = depth.transpose(2, 0, 1)
if np.max(depth) != 0:
ra_depth = (depth / np.max(depth)) * 255.0
else:
ra_depth = depth * 255.0
depth_pil = Image.fromarray(np.uint8(ra_depth[0]), mode="L")
depth_name = "%s/%03d.png" % (output_dir, i)
depth_pil.save(depth_name)
for j in range(dataset.DEPTH_DIM):
ra_depth = depth_discretized[:, :, j] * 255.0
depth_discr_pil = Image.fromarray(np.uint8(ra_depth), mode="L")
depth_discr_name = "%s/%03d_%03d_discr.png" % (output_dir, i, j)
depth_discr_pil.save(depth_discr_name)
# for j in range(DEPTH_DIM):
# ra_depth = mask[:, :, j]
# depth_discr_pil = Image.fromarray(np.uint8(ra_depth), mode="L")
# depth_discr_name = "%s/%03d_%03d_discr_m.png" % (output_dir, i, j)
# depth_discr_pil.save(depth_discr_name)
#
# for j in range(DEPTH_DIM):
# ra_depth = mask_lower[:, :, j]
# depth_discr_pil = Image.fromarray(np.uint8(ra_depth), mode="L")
# depth_discr_name = "%s/%03d_%03d_discr_ml.png" % (output_dir, i, j)
# depth_discr_pil.save(depth_discr_name)
depth = depth_reconstructed[:, :, 0]
if np.max(depth) != 0:
ra_depth = (depth / np.max(depth)) * 255.0
else:
ra_depth = depth * 255.0
depth_pil = Image.fromarray(np.uint8(ra_depth), mode="L")
depth_name = "%s/%03d_reconstructed.png" % (output_dir, i)
depth_pil.save(depth_name)
def playground_loss_function(labels, logits):
# in rank 2, [elements, classes]
# tf.nn.weighted_cross_entropy_with_logits(labels, logits, weights)
losses = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
return losses
def prob_to_logit(probs):
return np.log(probs / (1 - probs))
def softmax(x):
"""Same behaviour as tf.nn.softmax in tensorflow"""
e_x = np.exp(x)
sum_per_row = np.tile(e_x.sum(axis=1), (x.shape[1], 1)).T
print('e_x', '\n', e_x)
print('sum_per_row', '\n', sum_per_row)
return e_x / sum_per_row
def softmax_cross_entropy_loss(labels, logits):
"""Same behaviour as tf.nn.softmax_cross_entropy_with_logits in tensorflow"""
loss_per_row = - np.sum(labels * np.log(softmax(logits)), axis=1)
return loss_per_row
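# labels_to_info_gain / tf_labels_to_info_gain build a soft target matrix
# H[i, j] = exp(-alpha * (c_i - j)**2), where c_i = argmax(labels[i]) is the true
# bin of sample i and j runs over the output bins, so neighbouring depth bins
# still receive credit in the losses below.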
def labels_to_info_gain(labels, logits, alpha=0.2):
last_axis = len(logits.shape) - 1
label_idx = np.tile(np.argmax(labels, axis=last_axis), (labels.shape[last_axis], 1)).T
prob_bin_idx = np.tile(range(logits.shape[last_axis]), (labels.shape[0], 1))
# print('label_idx', '\n', label_idx)
# print('probs_idx', '\n', prob_bin_idx)
info_gain = np.exp(-alpha * (label_idx - prob_bin_idx)**2)
print('info gain', '\n', info_gain)
return info_gain
def tf_labels_to_info_gain(labels, logits, alpha=0.2):
last_axis = len(logits.shape) - 1
label_idx = tf.expand_dims(tf.argmax(labels, axis=last_axis), 0)
label_idx = tf.cast(label_idx, dtype=tf.int32)
label_idx = tf.tile(label_idx, [labels.shape[last_axis], 1])
label_idx = tf.transpose(label_idx)
prob_bin_idx = tf.expand_dims(tf.range(logits.shape[last_axis], dtype=tf.int32), last_axis)
prob_bin_idx = tf.transpose(prob_bin_idx)
prob_bin_idx = tf.tile(prob_bin_idx, [labels.shape[0], 1])
difference = (label_idx - prob_bin_idx)**2
difference = tf.cast(difference, dtype=tf.float32)
info_gain = tf.exp(-alpha * difference)
return info_gain
def informed_cross_entropy_loss(labels, logits):
"""Same behaviour as tf.nn.weighted_cross_entropy_with_logits in tensorflow"""
probs = softmax(logits)
print('probs', '\n', probs)
logged_probs = np.log(probs)
print('logged probs', '\n', logged_probs)
loss_per_row = - np.sum(labels_to_info_gain(labels, logits) * logged_probs, axis=1)
return loss_per_row
def playing_with_losses():
labels = np.array([
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
# [0, 1, 0, 0, 0],
# [0, 0, 1, 0, 0],
# [0, 0, 0, 1, 0],
# [0, 0, 0, 0, 1],
# [1, 0, 0, 0, 0],
])
logits = np.array([
[0, 20, 0, 0, 0],
[0, 10, 0, 0, 0],
[0, 2, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 0, 0, 1],
[0, 1, 0, 0, 0],
# [3, 1, 1, 1, 1],
# [0, 10, 0, 0, 0],
# [1, 5, 1, 1, 1],
# [0, 0, 1, 0, 0],
# [1, 1, 4, 1, 1],
# [1, 1, 1, 4, 1],
# [1, 1, 1, 1, 4],
# [4, 1, 1, 1, 1],
])
probs = softmax(logits)
loss = softmax_cross_entropy_loss(labels=labels, logits=logits)
new_loss = informed_cross_entropy_loss(labels=labels, logits=logits)
with tf.Graph().as_default():
with tf.Session() as sess:
logits_tf = tf.constant(logits, dtype=tf.float32)
labels_tf = tf.constant(labels, dtype=tf.float32)
probs_tf = sess.run(tf.nn.softmax(logits_tf))
loss_tf = sess.run(tf.nn.softmax_cross_entropy_with_logits(labels=labels_tf, logits=logits_tf))
new_loss_tf = sess.run(tf.nn.softmax_cross_entropy_with_logits(labels=tf_labels_to_info_gain(labels, logits_tf), logits=logits_tf))
# print('labels', '\n', labels)
# print('logits', '\n', logits)
# print('probs', '\n', probs)
# print('probs diff', '\n', probs - probs_tf)
print('loss', '\n', loss)
print('loss_tf', '\n', loss_tf)
print('loss diff', '\n', loss - loss_tf)
print('new_loss', '\n', new_loss)
print('new_loss_tf', '\n', new_loss_tf)
print('new loss diff', '\n', new_loss - new_loss_tf)
# f, axarr = plt.subplots(2, 3)
# axarr[0, 0].set_title('sample 1')
# axarr[0, 0].plot(probs[0, :])
# axarr[0, 1].set_title('sample 2')
# axarr[0, 1].plot(probs[1, :])
# axarr[1, 0].set_title('sample 3')
# axarr[1, 0].plot(probs[2, :])
# axarr[1, 1].set_title('sample 4')
# axarr[1, 1].plot(probs[3, :])
plt.plot(probs[0, :], color='r')
plt.plot(probs[1, :], color='g')
plt.plot(probs[2, :], color='b')
plt.plot(probs[3, :], color='y')
plt.show()
def input_parser(filename):
assert tf.get_default_session() is sess
tf.logging.warning(('filename', filename))
channel_data = tf.data.TextLineDataset(filename).map(lambda line: tf.decode_csv(line, [["path"], ["annotation"]]))
return channel_data
def filenames_to_data(rgb_filename, voxelmap_filename):
tf.logging.warning(('rgb_filename', rgb_filename))
rgb_image = dataset.DataSet.filename_to_input_image(rgb_filename)
voxelmap = tf.py_func(dataset.DataSet.filename_to_target_voxelmap, [voxelmap_filename], tf.int32)
voxelmap.set_shape([dataset.TARGET_WIDTH, dataset.TARGET_HEIGHT, dataset.DEPTH_DIM])
# voxelmap = dataset.DataSet.filename_to_target_voxelmap(voxelmap_filename)
depth_reconstructed = dataset.DataSet.tf_voxelmap_to_depth(voxelmap)
return rgb_image, voxelmap, depth_reconstructed
def tf_new_data_api_experiments():
# global sess
batch_size = 4
with sess.as_default():
tf.logging.set_verbosity(tf.logging.INFO)
# dataset = tf.data.TFRecordDataset(['train-voxel-gta.csv', 'test-voxel-gta.csv'])
train_imgs = tf.constant(['train-voxel-gta.csv'])
filename_list = tf.data.Dataset.from_tensor_slices(train_imgs)
filename_pairs = filename_list.flat_map(input_parser)
data_pairs = filename_pairs.map(filenames_to_data)
data_pairs = data_pairs.batch(batch_size)
#
# # input
# image = dataset.DataSet.filename_to_input_image(filename)
# # target
# voxelmap = dataset.DataSet.filename_to_target_voxelmap(voxelmap_filename)
# depth_reconstructed = dataset.DataSet.tf_voxelmap_to_depth(voxelmap)
iterator = data_pairs.make_one_shot_iterator()
batch_images, batch_voxels, batch_depths = iterator.get_next()
for i in range(1):
images_values, voxels_values, depths_values = sess.run([batch_images, batch_voxels, batch_depths])
for j in range(batch_size):
plt.figure(figsize=(10, 6))
plt.axis('off')
plt.imshow(images_values[j, :, :, :].astype(dtype=np.uint8))
plt.savefig('inspections/out-{}-rgb.png'.format(j), bbox_inches='tight')
plt.figure(figsize=(10, 6))
plt.axis('off')
plt.imshow(depths_values[j, :, :].T, cmap='gray')
plt.savefig('inspections/out-{}-depth.png'.format(j), bbox_inches='tight')
# pure numpy calculation of depth image from voxelmap
occupied_ndc_grid = voxels_values[j, :, :, :]
occupied_ndc_grid = np.flip(occupied_ndc_grid, axis=2)
depth_size = occupied_ndc_grid.shape[2]
new_depth = np.argmax(occupied_ndc_grid, axis=2)
new_depth = new_depth.T
new_depth *= int(255/depth_size)
plt.figure(figsize=(10, 7))
plt.axis('off')
plt.imshow(new_depth, cmap='gray')
plt.savefig('inspections/out-{}-depth-np.png'.format(j), bbox_inches='tight')
def load_numpy_bin():
# name = 'inspections/2018-03-07--17-57-32--527.bin'
name = 'inspections/2018-03-07--17-57-32--527.npy'
# numpy_voxelmap = np.fromfile(name, sep=';')
numpy_voxelmap = np.load(name)
print(numpy_voxelmap.shape)
# numpy_voxelmap = numpy_voxelmap.reshape([240, 160, 100])
numpy_voxelmap = np.flip(numpy_voxelmap, axis=2)
# now I have just boolean for each value
# so I create mask to assign higher value to booleans in higher index
depth_size = numpy_voxelmap.shape[2]
new_depth = np.argmax(numpy_voxelmap, axis=2)
new_depth = new_depth.T
new_depth *= int(255 / depth_size)
plt.figure(figsize=(10, 6))
plt.axis('off')
plt.imshow(new_depth, cmap='gray')
plt.savefig('inspections/2018-03-07--17-57-32--527.png', bbox_inches='tight')
sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))
if __name__ == '__main__':
# playing_with_losses()
# tf_dataset_experiments()
# load_numpy_bin()
tf_new_data_api_experiments()
# arr = np.array([
# [1, 1, 1, 2],
# [2, 2, 2, 4],
# [4, 4, 4, 8],
# ])
# with tf.Graph().as_default():
# with tf.Session() as sess:
# logits_tf = tf.constant(arr, dtype=tf.float32)
# tf_mean = sess.run(tf.reduce_mean(logits_tf))
# print('tf_mean\n', tf_mean)
#
# print('mean\n', np.mean(arr))
# print('sum_per_row\n', np.sum(arr, axis=1))
# print('mean_of_sum\n', np.mean(np.sum(arr, axis=1), axis=0))
# ds = DataSet(8)
# ds.load_params('train.csv')
#
# d = list(range(1, 100))
# d_min = np.min(d)
# d_max = 20
# num_bins = 10
# q_calc = (np.log(np.max(d)) - np.log(d_min)) / (num_bins - 1)
# # q = 0.5 # width of quantization bin
# l = np.round((np.log(d) - np.log(d_min)) / q_calc)
#
# print(d)
# print(l)
#
# print('q_calc', q_calc)
#
# f, axarr = plt.subplots(2, 2)
# axarr[0, 0].plot(d)
# axarr[0, 1].plot(np.log(d))
# axarr[1, 0].plot(np.log(d) - np.log(d_min))
# axarr[1, 1].plot((np.log(d) - np.log(d_min)) / q_calc)
# plt.show()
# with tf.Graph().as_default():
# with tf.Session() as sess:
# x = tf.constant(d)
#
# # for i in range(500):
# # if i % 500 == 0:
# # print('hi', i)
#
# IMAGE_HEIGHT = 240
# IMAGE_WIDTH = 320
# TARGET_HEIGHT = 120
# TARGET_WIDTH = 160
# DEPTH_DIM = 10
#
# filename_queue = tf.train.string_input_producer(['train.csv'], shuffle=True)
# reader = tf.TextLineReader()
# _, serialized_example = reader.read(filename_queue)
# filename, depth_filename = tf.decode_csv(serialized_example, [["path"], ["annotation"]])
# # input
# jpg = tf.read_file(filename)
# image = tf.image.decode_jpeg(jpg, channels=3)
# image = tf.cast(image, tf.float32)
# # target
# depth_png = tf.read_file(depth_filename)
# depth = tf.image.decode_png(depth_png, channels=1)
# depth = tf.cast(depth, tf.float32)
# depth = depth / 255.0
# # depth = tf.cast(depth, tf.int64)
# # resize
# image = tf.image.resize_images(image, (IMAGE_HEIGHT, IMAGE_WIDTH))
# depth = tf.image.resize_images(depth, (TARGET_HEIGHT, TARGET_WIDTH))
#
# depth_discretized = dataset.DataSet.discretize_depth(depth)
#
# invalid_depth = tf.sign(depth)
#
# batch_size = 8
# # generate batch
# images, depths, depths_discretized, invalid_depths = tf.train.batch(
# [image, depth, depth_discretized, invalid_depth],
# batch_size=batch_size,
# num_threads=4,
# capacity=40)
#
# depth_reconstructed, weights, mask, mask_multiplied, mask_multiplied_sum = Network.Network.bins_to_depth(depths_discretized)
#
# print('weights: ', weights)
#
# coord = tf.train.Coordinator()
# threads = tf.train.start_queue_runners(sess=sess, coord=coord)
#
# images_val, depths_val, depths_discretized_val, invalid_depths_val, depth_reconstructed_val, mask_val, mask_multiplied_val, mask_multiplied_sum_val = sess.run(
# [images, depths, depths_discretized, invalid_depths, depth_reconstructed, mask, mask_multiplied, mask_multiplied_sum])
# sess.run(images)
#
# output_predict(depths_val, images_val, depths_discretized_val,
# depth_reconstructed_val, 'kunda')
#
# depth_reconstructed_val = depth_reconstructed_val[:, :, :, 0]
# coord.request_stop()
# coord.join(threads)
#
# layer = 2
# f, axarr = plt.subplots(2, 3)
# axarr[0, 0].set_title('masks_val')
# axarr[0, 0].imshow(mask_val[0, :, :, layer])
# axarr[0, 1].set_title('mask_multiplied_val')
# axarr[0, 1].imshow(mask_multiplied_val[0, :, :, layer])
# axarr[1, 0].set_title('depths_val')
# axarr[1, 0].imshow(depths_val[0, :, :, 0])
# axarr[1, 1].set_title('depths_discretized_val')
# axarr[1, 1].imshow(depths_discretized_val[0, :, :, layer])
# axarr[0, 2].set_title('mask_multiplied_sum_val')
# axarr[0, 2].imshow(mask_multiplied_sum_val[0, :, :])
# axarr[1, 2].set_title('depth_reconstructed_val')
# axarr[1, 2].imshow(depth_reconstructed_val[0, :, :])
# plt.show()
# network = Network.Network()
# network.prepare()
# total_vars = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
# print('trainable vars: ', total_vars)
# for output bins = 200: 73 696 786
# for output bins = 100: 65 312 586 | [
"numpy.load",
"tensorflow.gfile.Exists",
"numpy.argmax",
"tensorflow.logging.warning",
"dataset.DataSet.filename_to_input_image",
"tensorflow.logging.set_verbosity",
"tensorflow.ConfigProto",
"matplotlib.pyplot.figure",
"numpy.exp",
"tensorflow.nn.softmax",
"tensorflow.data.TextLineDataset",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"matplotlib.pyplot.imshow",
"dataset.DataSet.tf_voxelmap_to_depth",
"tensorflow.cast",
"numpy.max",
"tensorflow.exp",
"tensorflow.range",
"numpy.uint8",
"matplotlib.pyplot.show",
"tensorflow.Session",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.tile",
"matplotlib.use",
"tensorflow.Graph",
"tensorflow.decode_csv",
"numpy.flip",
"numpy.log",
"matplotlib.pyplot.plot",
"tensorflow.py_func",
"tensorflow.gfile.MakeDirs",
"tensorflow.argmax",
"matplotlib.pyplot.axis",
"tensorflow.data.Dataset.from_tensor_slices",
"numpy.array",
"matplotlib.pyplot.savefig",
"tensorflow.get_default_session"
] | [((61, 82), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (75, 82), False, 'import matplotlib\n'), ((2575, 2644), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'labels', 'logits': 'logits'}), '(labels=labels, logits=logits)\n', (2614, 2644), True, 'import tensorflow as tf\n'), ((2702, 2729), 'numpy.log', 'np.log', (['(probs / (1 - probs))'], {}), '(probs / (1 - probs))\n', (2708, 2729), True, 'import numpy as np\n'), ((2814, 2823), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (2820, 2823), True, 'import numpy as np\n'), ((3580, 3628), 'numpy.exp', 'np.exp', (['(-alpha * (label_idx - prob_bin_idx) ** 2)'], {}), '(-alpha * (label_idx - prob_bin_idx) ** 2)\n', (3586, 3628), True, 'import numpy as np\n'), ((3868, 3902), 'tensorflow.cast', 'tf.cast', (['label_idx'], {'dtype': 'tf.int32'}), '(label_idx, dtype=tf.int32)\n', (3875, 3902), True, 'import tensorflow as tf\n'), ((3919, 3967), 'tensorflow.tile', 'tf.tile', (['label_idx', '[labels.shape[last_axis], 1]'], {}), '(label_idx, [labels.shape[last_axis], 1])\n', (3926, 3967), True, 'import tensorflow as tf\n'), ((3984, 4007), 'tensorflow.transpose', 'tf.transpose', (['label_idx'], {}), '(label_idx)\n', (3996, 4007), True, 'import tensorflow as tf\n'), ((4123, 4149), 'tensorflow.transpose', 'tf.transpose', (['prob_bin_idx'], {}), '(prob_bin_idx)\n', (4135, 4149), True, 'import tensorflow as tf\n'), ((4169, 4212), 'tensorflow.tile', 'tf.tile', (['prob_bin_idx', '[labels.shape[0], 1]'], {}), '(prob_bin_idx, [labels.shape[0], 1])\n', (4176, 4212), True, 'import tensorflow as tf\n'), ((4277, 4314), 'tensorflow.cast', 'tf.cast', (['difference'], {'dtype': 'tf.float32'}), '(difference, dtype=tf.float32)\n', (4284, 4314), True, 'import tensorflow as tf\n'), ((4331, 4358), 'tensorflow.exp', 'tf.exp', (['(-alpha * difference)'], {}), '(-alpha * difference)\n', (4337, 4358), True, 'import tensorflow as tf\n'), ((4593, 4606), 'numpy.log', 'np.log', (['probs'], {}), '(probs)\n', (4599, 4606), True, 'import numpy as np\n'), ((4807, 4924), 'numpy.array', 'np.array', (['[[0, 1, 0, 0, 0], [0, 1, 0, 0, 0], [0, 1, 0, 0, 0], [0, 1, 0, 0, 0], [0, 1,\n 0, 0, 0], [0, 1, 0, 0, 0]]'], {}), '([[0, 1, 0, 0, 0], [0, 1, 0, 0, 0], [0, 1, 0, 0, 0], [0, 1, 0, 0, 0\n ], [0, 1, 0, 0, 0], [0, 1, 0, 0, 0]])\n', (4815, 4924), True, 'import numpy as np\n'), ((5123, 5241), 'numpy.array', 'np.array', (['[[0, 20, 0, 0, 0], [0, 10, 0, 0, 0], [0, 2, 0, 0, 0], [1, 1, 1, 0, 0], [0, \n 1, 0, 0, 1], [0, 1, 0, 0, 0]]'], {}), '([[0, 20, 0, 0, 0], [0, 10, 0, 0, 0], [0, 2, 0, 0, 0], [1, 1, 1, 0,\n 0], [0, 1, 0, 0, 1], [0, 1, 0, 0, 0]])\n', (5131, 5241), True, 'import numpy as np\n'), ((6936, 6968), 'matplotlib.pyplot.plot', 'plt.plot', (['probs[0, :]'], {'color': '"""r"""'}), "(probs[0, :], color='r')\n", (6944, 6968), True, 'import matplotlib.pyplot as plt\n'), ((6973, 7005), 'matplotlib.pyplot.plot', 'plt.plot', (['probs[1, :]'], {'color': '"""g"""'}), "(probs[1, :], color='g')\n", (6981, 7005), True, 'import matplotlib.pyplot as plt\n'), ((7010, 7042), 'matplotlib.pyplot.plot', 'plt.plot', (['probs[2, :]'], {'color': '"""b"""'}), "(probs[2, :], color='b')\n", (7018, 7042), True, 'import matplotlib.pyplot as plt\n'), ((7047, 7079), 'matplotlib.pyplot.plot', 'plt.plot', (['probs[3, :]'], {'color': '"""y"""'}), "(probs[3, :], color='y')\n", (7055, 7079), True, 'import matplotlib.pyplot as plt\n'), ((7085, 7095), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7093, 7095), True, 'import 
matplotlib.pyplot as plt\n'), ((7174, 7216), 'tensorflow.logging.warning', 'tf.logging.warning', (["('filename', filename)"], {}), "(('filename', filename))\n", (7192, 7216), True, 'import tensorflow as tf\n'), ((7422, 7472), 'tensorflow.logging.warning', 'tf.logging.warning', (["('rgb_filename', rgb_filename)"], {}), "(('rgb_filename', rgb_filename))\n", (7440, 7472), True, 'import tensorflow as tf\n'), ((7489, 7542), 'dataset.DataSet.filename_to_input_image', 'dataset.DataSet.filename_to_input_image', (['rgb_filename'], {}), '(rgb_filename)\n', (7528, 7542), False, 'import dataset\n'), ((7558, 7648), 'tensorflow.py_func', 'tf.py_func', (['dataset.DataSet.filename_to_target_voxelmap', '[voxelmap_filename]', 'tf.int32'], {}), '(dataset.DataSet.filename_to_target_voxelmap, [voxelmap_filename],\n tf.int32)\n', (7568, 7648), True, 'import tensorflow as tf\n'), ((7840, 7886), 'dataset.DataSet.tf_voxelmap_to_depth', 'dataset.DataSet.tf_voxelmap_to_depth', (['voxelmap'], {}), '(voxelmap)\n', (7876, 7886), False, 'import dataset\n'), ((10385, 10398), 'numpy.load', 'np.load', (['name'], {}), '(name)\n', (10392, 10398), True, 'import numpy as np\n'), ((10515, 10546), 'numpy.flip', 'np.flip', (['numpy_voxelmap'], {'axis': '(2)'}), '(numpy_voxelmap, axis=2)\n', (10522, 10546), True, 'import numpy as np\n'), ((10725, 10758), 'numpy.argmax', 'np.argmax', (['numpy_voxelmap'], {'axis': '(2)'}), '(numpy_voxelmap, axis=2)\n', (10734, 10758), True, 'import numpy as np\n'), ((10831, 10858), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (10841, 10858), True, 'import matplotlib.pyplot as plt\n'), ((10863, 10878), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (10871, 10878), True, 'import matplotlib.pyplot as plt\n'), ((10883, 10917), 'matplotlib.pyplot.imshow', 'plt.imshow', (['new_depth'], {'cmap': '"""gray"""'}), "(new_depth, cmap='gray')\n", (10893, 10917), True, 'import matplotlib.pyplot as plt\n'), ((10922, 10999), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""inspections/2018-03-07--17-57-32--527.png"""'], {'bbox_inches': '"""tight"""'}), "('inspections/2018-03-07--17-57-32--527.png', bbox_inches='tight')\n", (10933, 10999), True, 'import matplotlib.pyplot as plt\n'), ((379, 406), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['output_dir'], {}), '(output_dir)\n', (394, 406), True, 'import tensorflow as tf\n'), ((416, 445), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['output_dir'], {}), '(output_dir)\n', (433, 445), True, 'import tensorflow as tf\n'), ((3814, 3847), 'tensorflow.argmax', 'tf.argmax', (['labels'], {'axis': 'last_axis'}), '(labels, axis=last_axis)\n', (3823, 3847), True, 'import tensorflow as tf\n'), ((4042, 4091), 'tensorflow.range', 'tf.range', (['logits.shape[last_axis]'], {'dtype': 'tf.int32'}), '(logits.shape[last_axis], dtype=tf.int32)\n', (4050, 4091), True, 'import tensorflow as tf\n'), ((7137, 7161), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (7159, 7161), True, 'import tensorflow as tf\n'), ((8049, 8090), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (8073, 8090), True, 'import tensorflow as tf\n'), ((8203, 8239), 'tensorflow.constant', 'tf.constant', (["['train-voxel-gta.csv']"], {}), "(['train-voxel-gta.csv'])\n", (8214, 8239), True, 'import tensorflow as tf\n'), ((8264, 8310), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['train_imgs'], {}), 
'(train_imgs)\n', (8298, 8310), True, 'import tensorflow as tf\n'), ((11027, 11066), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'GPU': 0}"}), "(device_count={'GPU': 0})\n", (11041, 11066), True, 'import tensorflow as tf\n'), ((712, 727), 'numpy.uint8', 'np.uint8', (['image'], {}), '(image)\n', (720, 727), True, 'import numpy as np\n'), ((870, 883), 'numpy.max', 'np.max', (['depth'], {}), '(depth)\n', (876, 883), True, 'import numpy as np\n'), ((1032, 1053), 'numpy.uint8', 'np.uint8', (['ra_depth[0]'], {}), '(ra_depth[0])\n', (1040, 1053), True, 'import numpy as np\n'), ((2110, 2123), 'numpy.max', 'np.max', (['depth'], {}), '(depth)\n', (2116, 2123), True, 'import numpy as np\n'), ((2272, 2290), 'numpy.uint8', 'np.uint8', (['ra_depth'], {}), '(ra_depth)\n', (2280, 2290), True, 'import numpy as np\n'), ((3329, 3362), 'numpy.argmax', 'np.argmax', (['labels'], {'axis': 'last_axis'}), '(labels, axis=last_axis)\n', (3338, 3362), True, 'import numpy as np\n'), ((5727, 5739), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5737, 5739), True, 'import tensorflow as tf\n'), ((5773, 5810), 'tensorflow.constant', 'tf.constant', (['logits'], {'dtype': 'tf.float32'}), '(logits, dtype=tf.float32)\n', (5784, 5810), True, 'import tensorflow as tf\n'), ((5835, 5872), 'tensorflow.constant', 'tf.constant', (['labels'], {'dtype': 'tf.float32'}), '(labels, dtype=tf.float32)\n', (5846, 5872), True, 'import tensorflow as tf\n'), ((7236, 7269), 'tensorflow.data.TextLineDataset', 'tf.data.TextLineDataset', (['filename'], {}), '(filename)\n', (7259, 7269), True, 'import tensorflow as tf\n'), ((7287, 7334), 'tensorflow.decode_csv', 'tf.decode_csv', (['line', "[['path'], ['annotation']]"], {}), "(line, [['path'], ['annotation']])\n", (7300, 7334), True, 'import tensorflow as tf\n'), ((1301, 1319), 'numpy.uint8', 'np.uint8', (['ra_depth'], {}), '(ra_depth)\n', (1309, 1319), True, 'import numpy as np\n'), ((5689, 5699), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (5697, 5699), True, 'import tensorflow as tf\n'), ((5905, 5929), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits_tf'], {}), '(logits_tf)\n', (5918, 5929), True, 'import tensorflow as tf\n'), ((5962, 6037), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'labels_tf', 'logits': 'logits_tf'}), '(labels=labels_tf, logits=logits_tf)\n', (6001, 6037), True, 'import tensorflow as tf\n'), ((9083, 9110), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (9093, 9110), True, 'import matplotlib.pyplot as plt\n'), ((9127, 9142), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (9135, 9142), True, 'import matplotlib.pyplot as plt\n'), ((9326, 9353), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (9336, 9353), True, 'import matplotlib.pyplot as plt\n'), ((9370, 9385), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (9378, 9385), True, 'import matplotlib.pyplot as plt\n'), ((9402, 9451), 'matplotlib.pyplot.imshow', 'plt.imshow', (['depths_values[j, :, :].T'], {'cmap': '"""gray"""'}), "(depths_values[j, :, :].T, cmap='gray')\n", (9412, 9451), True, 'import matplotlib.pyplot as plt\n'), ((9712, 9746), 'numpy.flip', 'np.flip', (['occupied_ndc_grid'], {'axis': '(2)'}), '(occupied_ndc_grid, axis=2)\n', (9719, 9746), True, 'import numpy as np\n'), ((9831, 9867), 'numpy.argmax', 'np.argmax', (['occupied_ndc_grid'], {'axis': '(2)'}), 
'(occupied_ndc_grid, axis=2)\n', (9840, 9867), True, 'import numpy as np\n'), ((9973, 10000), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (9983, 10000), True, 'import matplotlib.pyplot as plt\n'), ((10017, 10032), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (10025, 10032), True, 'import matplotlib.pyplot as plt\n'), ((10049, 10083), 'matplotlib.pyplot.imshow', 'plt.imshow', (['new_depth'], {'cmap': '"""gray"""'}), "(new_depth, cmap='gray')\n", (10059, 10083), True, 'import matplotlib.pyplot as plt\n'), ((922, 935), 'numpy.max', 'np.max', (['depth'], {}), '(depth)\n', (928, 935), True, 'import numpy as np\n'), ((2162, 2175), 'numpy.max', 'np.max', (['depth'], {}), '(depth)\n', (2168, 2175), True, 'import numpy as np\n')] |
# ==================================================================================================
# Copyright 2012 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
try:
import cPickle as pickle
except ImportError:
import pickle
from twitter.pants import has_sources
from twitter.pants.base.build_invalidator import (
BuildInvalidator,
CacheKeyGenerator,
NO_SOURCES,
TARGET_SOURCES)
from twitter.pants.base.target import Target
from twitter.pants.targets import TargetWithSources
from twitter.pants.targets.external_dependency import ExternalDependency
from twitter.pants.targets.internal import InternalTarget
class VersionedTargetSet(object):
"""Represents a list of targets, a corresponding CacheKey, and a flag determining whether the
list of targets is currently valid.
When invalidating a single target, this can be used to represent that target as a singleton.
When checking the artifact cache, this can also be used to represent a list of targets that are
built together into a single artifact.
"""
@classmethod
def from_versioned_targets(cls, versioned_targets):
first_target = versioned_targets[0]
cache_manager = first_target._cache_manager
# Quick sanity check; all the versioned targets should have the same cache manager.
# TODO(ryan): the way VersionedTargets store their own links to a single CacheManager instance
# feels hacky; see if there's a cleaner way for callers to handle awareness of the CacheManager.
for versioned_target in versioned_targets:
if versioned_target._cache_manager != cache_manager:
raise ValueError("Attempting to combine versioned targets %s and %s with different"
" CacheManager instances: %s and %s" % (first_target, versioned_target,
cache_manager,
versioned_target._cache_manager))
return cls(cache_manager, versioned_targets)
def __init__(self, cache_manager, versioned_targets):
self._cache_manager = cache_manager
self.versioned_targets = versioned_targets
self.targets = [vt.target for vt in versioned_targets]
# The following line is a no-op if cache_key was set in the VersionedTarget __init__ method.
self.cache_key = CacheKeyGenerator.combine_cache_keys([vt.cache_key
for vt in versioned_targets])
self.num_sources = self.cache_key.num_sources
self.valid = not cache_manager.needs_update(self.cache_key)
def update(self):
self._cache_manager.update(self)
def force_invalidate(self):
self._cache_manager.force_invalidate(self)
def __repr__(self):
return "VTS(%s. %d)" % (','.join(target.id for target in self.targets), 1 if self.valid else 0)
class VersionedTarget(VersionedTargetSet):
"""This class represents a singleton VersionedTargetSet, and has links to VersionedTargets that
the wrapped target depends on (after having resolved through any "alias" targets).
"""
def __init__(self, cache_manager, target, cache_key):
if not isinstance(target, TargetWithSources):
raise ValueError("The target %s must support sources and does not." % target.id)
self.target = target
self.cache_key = cache_key
# Must come after the assignments above, as they are used in the parent's __init__.
VersionedTargetSet.__init__(self, cache_manager, [self])
self.id = target.id
self.dependencies = set()
# The result of calling check() on a CacheManager.
# Each member is a list of VersionedTargetSet objects in topological order.
# Tasks may need to perform no, some or all operations on either of these, depending on how they
# are implemented.
class InvalidationCheck(object):
@classmethod
def _partition_versioned_targets(cls, versioned_targets, partition_size_hint):
"""Groups versioned targets so that each group has roughly the same number of sources.
versioned_targets is a list of VersionedTarget objects [vt1, vt2, vt3, vt4, vt5, vt6, ...].
Returns a list of VersionedTargetSet objects, e.g., [VT1, VT2, VT3, ...] representing the
same underlying targets. E.g., VT1 is the combination of [vt1, vt2, vt3], VT2 is the combination
of [vt4, vt5] and VT3 is [vt6].
The new versioned targets are chosen to have roughly partition_size_hint sources.
This is useful as a compromise between flat mode, where we build all targets in a
single compiler invocation, and non-flat mode, where we invoke a compiler for each target,
which may lead to lots of compiler startup overhead. A task can choose instead to build one
group at a time.
"""
res = []
# Hack around the python outer scope problem.
class VtGroup(object):
def __init__(self):
self.vts = []
self.total_sources = 0
current_group = VtGroup()
def add_to_current_group(vt):
current_group.vts.append(vt)
current_group.total_sources += vt.num_sources
def close_current_group():
if len(current_group.vts) > 0:
new_vt = VersionedTargetSet.from_versioned_targets(current_group.vts)
res.append(new_vt)
current_group.vts = []
current_group.total_sources = 0
for vt in versioned_targets:
add_to_current_group(vt)
if current_group.total_sources > 1.5 * partition_size_hint and len(current_group.vts) > 1:
# Too big. Close the current group without this vt and add it to the next one.
current_group.vts.pop()
close_current_group()
add_to_current_group(vt)
elif current_group.total_sources > partition_size_hint:
close_current_group()
close_current_group() # Close the last group, if any.
return res
def __init__(self, all_vts, invalid_vts, partition_size_hint=None):
# All the targets, valid and invalid.
self.all_vts = all_vts
# All the targets, partitioned if so requested.
self.all_vts_partitioned = self._partition_versioned_targets(
all_vts, partition_size_hint) if partition_size_hint else all_vts
# Just the invalid targets.
self.invalid_vts = invalid_vts
# Just the invalid targets, partitioned if so requested.
self.invalid_vts_partitioned = self._partition_versioned_targets(
invalid_vts, partition_size_hint) if partition_size_hint else invalid_vts
class CacheManager(object):
"""Manages cache checks, updates and invalidation keeping track of basic change
and invalidation statistics.
Note that this is distinct from the ArtifactCache concept, and should probably be renamed.
"""
def __init__(self, cache_key_generator, build_invalidator_dir,
invalidate_dependents, extra_data, only_externaldeps):
self._cache_key_generator = cache_key_generator
self._invalidate_dependents = invalidate_dependents
self._extra_data = pickle.dumps(extra_data) # extra_data may be None.
self._sources = NO_SOURCES if only_externaldeps else TARGET_SOURCES
self._invalidator = BuildInvalidator(build_invalidator_dir)
def update(self, vts):
"""Mark a changed or invalidated VersionedTargetSet as successfully processed."""
for vt in vts.versioned_targets:
self._invalidator.update(vt.cache_key)
vt.valid = True
self._invalidator.update(vts.cache_key)
vts.valid = True
def force_invalidate(self, vts):
"""Force invalidation of a VersionedTargetSet."""
for vt in vts.versioned_targets:
self._invalidator.force_invalidate(vt.cache_key)
vt.valid = False
self._invalidator.force_invalidate(vts.cache_key)
vts.valid = False
def check(self, targets, partition_size_hint=None):
"""Checks whether each of the targets has changed and invalidates it if so.
Returns a list of VersionedTargetSet objects (either valid or invalid). The returned sets
'cover' the input targets, possibly partitioning them, and are in topological order.
The caller can inspect these in order and, e.g., rebuild the invalid ones.
"""
all_vts = self._sort_and_validate_targets(targets)
invalid_vts = filter(lambda vt: not vt.valid, all_vts)
return InvalidationCheck(all_vts, invalid_vts, partition_size_hint)
def _sort_and_validate_targets(self, targets):
"""Validate each target.
Returns a topologically ordered set of VersionedTargets, each representing one input target.
"""
# We must check the targets in this order, to ensure correctness if invalidate_dependents=True,
# since we use earlier cache keys to compute later cache keys in this case.
ordered_targets = self._order_target_list(targets)
# This will be a list of VersionedTargets that correspond to @targets.
versioned_targets = []
# This will be a mapping from each target to its corresponding VersionedTarget.
versioned_targets_by_target = {}
# Map from id to current fingerprint of the target with that id. We update this as we iterate,
# in topological order, so when handling a target, this will already contain all its deps (in
# this round).
id_to_hash = {}
for target in ordered_targets:
dependency_keys = set()
if self._invalidate_dependents and hasattr(target, 'dependencies'):
# Note that we only need to do this for the immediate deps, because those will already
# reflect changes in their own deps.
for dep in target.dependencies:
# We rely on the fact that any deps have already been processed, either in an earlier
# round or because they came first in ordered_targets.
if isinstance(dep, ExternalDependency):
dependency_keys.add(dep.cache_key())
elif isinstance(dep, Target):
fprint = id_to_hash.get(dep.id, None)
if fprint is None:
# It may have been processed in a prior round, and therefore the fprint should
# have been written out by the invalidator.
fprint = self._invalidator.existing_hash(dep.id)
# Note that fprint may be None here, indicating that the dependency will not be
# processed until a later phase. For example, if a codegen target depends on a
# library target (because the generated code needs that library).
if fprint is not None:
dependency_keys.add(fprint)
else:
raise ValueError('Cannot calculate a cache_key for a dependency: %s' % dep)
cache_key = self._key_for(target, dependency_keys)
id_to_hash[target.id] = cache_key.hash
# Create a VersionedTarget corresponding to @target.
versioned_target = VersionedTarget(self, target, cache_key)
# Add the new VersionedTarget to the list of computed VersionedTargets.
versioned_targets.append(versioned_target)
# Add to the mapping from Targets to VersionedTargets, for use in hooking up VersionedTarget
# dependencies below.
versioned_targets_by_target[target] = versioned_target
# Having created all applicable VersionedTargets, now we build the VersionedTarget dependency
# graph, looking through targets that don't correspond to VersionedTargets themselves.
versioned_target_deps_by_target = {}
def get_versioned_target_deps_for_target(target):
# For every dependency of @target, we will store its corresponding VersionedTarget here. For
# dependencies that don't correspond to a VersionedTarget (e.g. pass-through dependency
# wrappers), we will resolve their actual dependencies and find VersionedTargets for them.
versioned_target_deps = set([])
if hasattr(target, 'dependencies'):
for dep in target.dependencies:
for dependency in dep.resolve():
if dependency in versioned_targets_by_target:
# If there exists a VersionedTarget corresponding to this Target, store it and
# continue.
versioned_target_deps.add(versioned_targets_by_target[dependency])
elif dependency in versioned_target_deps_by_target:
# Otherwise, see if we've already resolved this dependency to the VersionedTargets it
# depends on, and use those.
versioned_target_deps.update(versioned_target_deps_by_target[dependency])
else:
# Otherwise, compute the VersionedTargets that correspond to this dependency's
# dependencies, cache and use the computed result.
versioned_target_deps_by_target[dependency] = get_versioned_target_deps_for_target(
dependency)
versioned_target_deps.update(versioned_target_deps_by_target[dependency])
# Return the VersionedTarget dependencies that this target's VersionedTarget should depend on.
return versioned_target_deps
# Initialize all VersionedTargets to point to the VersionedTargets they depend on.
for versioned_target in versioned_targets:
versioned_target.dependencies = get_versioned_target_deps_for_target(versioned_target.target)
return versioned_targets
def needs_update(self, cache_key):
return self._invalidator.needs_update(cache_key)
def _order_target_list(self, targets):
"""Orders the targets topologically, from least to most dependent."""
targets = set(filter(has_sources, targets))
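# reversed() turns the topological sort into least-to-most-dependent order; the filter
# keeps only the requested source-bearing targets from that ordering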
return filter(targets.__contains__, reversed(InternalTarget.sort_targets(targets)))
def _key_for(self, target, dependency_keys):
def fingerprint_extra(sha):
sha.update(self._extra_data)
for key in sorted(dependency_keys): # Sort to ensure hashing in a consistent order.
sha.update(key)
return self._cache_key_generator.key_for_target(
target,
sources=self._sources,
fingerprint_extra=fingerprint_extra
)
| [
"twitter.pants.targets.internal.InternalTarget.sort_targets",
"twitter.pants.base.build_invalidator.BuildInvalidator",
"twitter.pants.base.build_invalidator.CacheKeyGenerator.combine_cache_keys",
"pickle.dumps"
] | [((3070, 3155), 'twitter.pants.base.build_invalidator.CacheKeyGenerator.combine_cache_keys', 'CacheKeyGenerator.combine_cache_keys', (['[vt.cache_key for vt in versioned_targets]'], {}), '([vt.cache_key for vt in versioned_targets]\n )\n', (3106, 3155), False, 'from twitter.pants.base.build_invalidator import BuildInvalidator, CacheKeyGenerator, NO_SOURCES, TARGET_SOURCES\n'), ((7658, 7682), 'pickle.dumps', 'pickle.dumps', (['extra_data'], {}), '(extra_data)\n', (7670, 7682), False, 'import pickle\n'), ((7807, 7846), 'twitter.pants.base.build_invalidator.BuildInvalidator', 'BuildInvalidator', (['build_invalidator_dir'], {}), '(build_invalidator_dir)\n', (7823, 7846), False, 'from twitter.pants.base.build_invalidator import BuildInvalidator, CacheKeyGenerator, NO_SOURCES, TARGET_SOURCES\n'), ((14181, 14217), 'twitter.pants.targets.internal.InternalTarget.sort_targets', 'InternalTarget.sort_targets', (['targets'], {}), '(targets)\n', (14208, 14217), False, 'from twitter.pants.targets.internal import InternalTarget\n')] |
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is configman
#
# The Initial Developer of the Original Code is
# Mozilla Foundation
# Portions created by the Initial Developer are Copyright (C) 2011
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# <NAME>, <EMAIL>
# <NAME>, <EMAIL>
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import unittest
import os
import json
import tempfile
from cStringIO import StringIO
import configman.config_manager as config_manager
import configman.datetime_util as dtu
from configman.value_sources.for_json import ValueSource
#from ..value_sources.for_json import ValueSource
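# Aggregation helper used below via add_aggregation: configman calls it with the resolved
# config, so the derived value always tracks whatever 'bbb' resolves to.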
def bbb_minus_one(config, local_config, args):
return config.bbb - 1
class TestCase(unittest.TestCase):
def test_for_json_basics(self):
tmp_filename = os.path.join(tempfile.gettempdir(), 'test.json')
j = {'fred': 'wilma',
'number': 23,
}
with open(tmp_filename, 'w') as f:
json.dump(j, f)
try:
jvs = ValueSource(tmp_filename)
vals = jvs.get_values(None, True)
self.assertEqual(vals['fred'], 'wilma')
self.assertEqual(vals['number'], 23)
finally:
if os.path.isfile(tmp_filename):
os.remove(tmp_filename)
def test_write_json(self):
n = config_manager.Namespace(doc='top')
n.add_option('aaa', '2011-05-04T15:10:00', 'the a',
short_form='a',
from_string_converter=dtu.datetime_from_ISO_string
)
def value_iter():
yield 'aaa', 'aaa', n.aaa
s = StringIO()
ValueSource.write(value_iter, output_stream=s)
received = s.getvalue()
s.close()
jrec = json.loads(received)
expect_to_find = {
"short_form": "a",
"default": "2011-05-04T15:10:00",
"doc": "the a",
"value": "2011-05-04T15:10:00",
"from_string_converter":
"configman.datetime_util.datetime_from_ISO_string",
"name": "aaa"
}
for key, value in expect_to_find.items():
self.assertEqual(jrec['aaa'][key], value)
def test_json_round_trip(self):
n = config_manager.Namespace(doc='top')
n.add_option('aaa', '2011-05-04T15:10:00', 'the a',
short_form='a',
from_string_converter=dtu.datetime_from_ISO_string
)
expected_date = dtu.datetime_from_ISO_string('2011-05-04T15:10:00')
n.add_option('bbb', '37', 'the a',
short_form='a',
from_string_converter=int
)
n.add_option('write', 'json')
n.add_aggregation('bbb_minus_one', bbb_minus_one)
#t = tempfile.NamedTemporaryFile('w', suffix='.json', delete=False)
name = '/tmp/test.json'
import functools
opener = functools.partial(open, name, 'w')
c1 = config_manager.ConfigurationManager([n], [],
use_admin_controls=True,
use_auto_help=False,
app_name='/tmp/test',
app_version='0',
app_description='',
argv_source=[])
c1.write_conf('json', opener)
d1 = {'bbb': 88}
d2 = {'bbb': '-99'}
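# d1 and d2 are overlay value sources applied on top of the defaults from the json file;
# the last one wins, so bbb resolves to -99 (the string passes through the int converter).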
try:
with open(name) as jfp:
j = json.load(jfp)
c2 = config_manager.ConfigurationManager((j,), (d1, d2),
use_admin_controls=True,
use_auto_help=False,
argv_source=[])
config = c2.get_config()
self.assertEqual(config.aaa, expected_date)
self.assertEqual(config.bbb, -99)
self.assertEqual(config.bbb_minus_one, -100)
finally:
os.unlink(name)
| [
"functools.partial",
"json.dump",
"os.remove",
"json.load",
"json.loads",
"configman.value_sources.for_json.ValueSource.write",
"os.unlink",
"tempfile.gettempdir",
"configman.config_manager.Namespace",
"configman.datetime_util.datetime_from_ISO_string",
"os.path.isfile",
"configman.value_sources.for_json.ValueSource",
"cStringIO.StringIO",
"configman.config_manager.ConfigurationManager"
] | [((2673, 2708), 'configman.config_manager.Namespace', 'config_manager.Namespace', ([], {'doc': '"""top"""'}), "(doc='top')\n", (2697, 2708), True, 'import configman.config_manager as config_manager\n'), ((2944, 2954), 'cStringIO.StringIO', 'StringIO', ([], {}), '()\n', (2952, 2954), False, 'from cStringIO import StringIO\n'), ((2963, 3009), 'configman.value_sources.for_json.ValueSource.write', 'ValueSource.write', (['value_iter'], {'output_stream': 's'}), '(value_iter, output_stream=s)\n', (2980, 3009), False, 'from configman.value_sources.for_json import ValueSource\n'), ((3075, 3095), 'json.loads', 'json.loads', (['received'], {}), '(received)\n', (3085, 3095), False, 'import json\n'), ((3553, 3588), 'configman.config_manager.Namespace', 'config_manager.Namespace', ([], {'doc': '"""top"""'}), "(doc='top')\n", (3577, 3588), True, 'import configman.config_manager as config_manager\n'), ((3770, 3821), 'configman.datetime_util.datetime_from_ISO_string', 'dtu.datetime_from_ISO_string', (['"""2011-05-04T15:10:00"""'], {}), "('2011-05-04T15:10:00')\n", (3798, 3821), True, 'import configman.datetime_util as dtu\n'), ((4183, 4217), 'functools.partial', 'functools.partial', (['open', 'name', '"""w"""'], {}), "(open, name, 'w')\n", (4200, 4217), False, 'import functools\n'), ((4231, 4404), 'configman.config_manager.ConfigurationManager', 'config_manager.ConfigurationManager', (['[n]', '[]'], {'use_admin_controls': '(True)', 'use_auto_help': '(False)', 'app_name': '"""/tmp/test"""', 'app_version': '"""0"""', 'app_description': '""""""', 'argv_source': '[]'}), "([n], [], use_admin_controls=True,\n use_auto_help=False, app_name='/tmp/test', app_version='0',\n app_description='', argv_source=[])\n", (4266, 4404), True, 'import configman.config_manager as config_manager\n'), ((2145, 2166), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (2164, 2166), False, 'import tempfile\n'), ((2307, 2322), 'json.dump', 'json.dump', (['j', 'f'], {}), '(j, f)\n', (2316, 2322), False, 'import json\n'), ((2354, 2379), 'configman.value_sources.for_json.ValueSource', 'ValueSource', (['tmp_filename'], {}), '(tmp_filename)\n', (2365, 2379), False, 'from configman.value_sources.for_json import ValueSource\n'), ((2559, 2587), 'os.path.isfile', 'os.path.isfile', (['tmp_filename'], {}), '(tmp_filename)\n', (2573, 2587), False, 'import os\n'), ((4757, 4874), 'configman.config_manager.ConfigurationManager', 'config_manager.ConfigurationManager', (['(j,)', '(d1, d2)'], {'use_admin_controls': '(True)', 'use_auto_help': '(False)', 'argv_source': '[]'}), '((j,), (d1, d2), use_admin_controls=True,\n use_auto_help=False, argv_source=[])\n', (4792, 4874), True, 'import configman.config_manager as config_manager\n'), ((5217, 5232), 'os.unlink', 'os.unlink', (['name'], {}), '(name)\n', (5226, 5232), False, 'import os\n'), ((2605, 2628), 'os.remove', 'os.remove', (['tmp_filename'], {}), '(tmp_filename)\n', (2614, 2628), False, 'import os\n'), ((4725, 4739), 'json.load', 'json.load', (['jfp'], {}), '(jfp)\n', (4734, 4739), False, 'import json\n')] |