# === src/openclimategis/util/ncconv/experimental/ocg_converter/csv_.py (arthur-e/OpenClimateGIS, BSD-3-Clause) ===
import io
import zipfile
import csv
from util.ncconv.experimental.ocg_converter.subocg_converter import SubOcgConverter
class CsvConverter(SubOcgConverter):
    # __headers__ = ['OCGID','GID','TIME','LEVEL','VALUE','AREA_M2','WKT','WKB']
    def __init__(self,*args,**kwds):
        self.as_wkt = kwds.pop('as_wkt',False)
        self.as_wkb = kwds.pop('as_wkb',False)
        self.add_area = kwds.pop('add_area',True)
        ## call the superclass
        super(CsvConverter,self).__init__(*args,**kwds)
        # self.headers = self.get_headers(self.value_table)
        # ## need to extract the time as well
        # if 'TID' in self.headers:
        #     self.headers.insert(self.headers.index('TID')+1,'TIME')
        #
        # codes = [['add_area','AREA_M2'],['as_wkt','WKT'],['as_wkb','WKB']]
        # for code in codes:
        #     if getattr(self,code[0]):
        #         self.headers.append(code[1])
    def get_writer(self,buffer,headers=None):
        writer = csv.writer(buffer)
        if headers is None: headers = self.get_headers()
        writer.writerow(headers)
        writer = csv.DictWriter(buffer,headers)
        return(writer)
    def _convert_(self):
        buffer = io.BytesIO()
        writer = self.get_writer(buffer)
        for attrs in self.get_iter(wkt=self.as_wkt,wkb=self.as_wkb):
            writer.writerow(attrs)
        buffer.flush()
        return(buffer.getvalue())
class LinkedCsvConverter(CsvConverter):
    # def __init__(self,*args,**kwds):
    #     self.tables = kwds.pop('tables',None)
    #
    #     super(LinkedCsvConverter,self).__init__(*args,**kwds)
    #
    #     if self.tables is None and self.use_stat:
    #         tables = kwds.pop('tables',['Geometry','Stat'])
    #     elif self.tables is None and not self.use_stat:
    #         tables = kwds.pop('tables',['Geometry','Time','Value'])
    #     self.tables = [getattr(self.db,tbl) for tbl in tables]
    # def _clean_headers_(self,table):
    #     headers = self.get_headers(table)
    #     if self.get_tablename(table) == 'geometry':
    #         codes = [['add_area','AREA_M2'],['as_wkt','WKT'],['as_wkb','WKB']]
    #         for code in codes:
    #             if not getattr(self,code[0]):
    #                 headers.remove(code[1])
    #     return(headers)
    def _convert_(self):
        if self.use_stat:
            itrs = [[self.sub.sub.iter_geom_with_area,'geometry',{'keep_geom':False}],
                    [self.sub.sub.iter_time,'time',{}],
                    [self.sub.sub.iter_value_keyed,'value',{}],
                    [self.sub.iter_stats,'stat',{'keep_geom':False}]]
        else:
            itrs = [[self.sub.iter_geom_with_area,'geometry',{'keep_geom':False}],
                    [self.sub.iter_time,'time',{}],
                    [self.sub.iter_value_keyed,'value',{}]]
        ## generate the info for writing
        info = []
        for itr in itrs:
            iter = itr[0]
            headers = iter(**itr[2]).next().keys()
            # headers = self._clean_headers_(table)
            # headers = self._clean_headers_([h.upper() for h in table.__mapper__.columns.keys()])
            arcname = '{0}_{1}.csv'.format(self.base_name,itr[1])
            buffer = io.BytesIO()
            writer = self.get_writer(buffer,headers=headers)
            info.append(dict(headers=headers,
                             writer=writer,
                             arcname=arcname,
                             iter=iter(**itr[2]),
                             buffer=buffer))
        ## write the tables
        for i in info:
            ## loop through each database record
            for attrs in i['iter']:
                i['writer'].writerow(attrs)
            i['buffer'].flush()
        return(info)
    def _response_(self,payload):
        buffer = io.BytesIO()
        zip = zipfile.ZipFile(buffer,'w',zipfile.ZIP_DEFLATED)
        for info in payload:
            zip.writestr(info['arcname'],info['buffer'].getvalue())
        self.write_meta(zip)
        zip.close()
        buffer.flush()
        zip_stream = buffer.getvalue()
        buffer.close()
        return(zip_stream)
# === uninas/modules/modules/cnn.py (cogsys-tuebingen/uninas, MIT) ===
import torch
import torch.nn as nn
class SqueezeModule(nn.Module):
def forward(self, x: torch.Tensor) -> torch.Tensor:
return x.squeeze()
class GapSqueezeModule(nn.Module):
""" global average pooling and squeezing """
def __init__(self):
super().__init__()
self.gap = nn.AdaptiveAvgPool2d(1)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.gap(x).squeeze()
class PaddingToValueModule(nn.Module):
def __init__(self, to_value: int, dim=-1):
super().__init__()
self.to_value = to_value
self.dim = dim
def forward(self, x: torch.Tensor) -> torch.Tensor:
shape = list(x.shape)
shape[self.dim] = self.to_value - shape[self.dim]
return torch.cat([x, torch.zeros(shape, dtype=x.dtype, device=x.device)], dim=self.dim)
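# Hedged usage sketch (not part of the original module): the tensor shape below is an
# arbitrary example, chosen only to show what the two modules above do to an activation map.
if __name__ == "__main__":
    x = torch.randn(8, 32, 7, 7)                        # (batch, channels, height, width)
    pooled = GapSqueezeModule()(x)                       # global average pool, then squeeze -> (8, 32)
    padded = PaddingToValueModule(to_value=64)(pooled)   # zero-pad the last dimension from 32 up to 64
    print(pooled.shape, padded.shape)                    # torch.Size([8, 32]) torch.Size([8, 64])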
# === arcade/gui/examples/anchor_widgets.py (akapkotel/arcade, MIT) ===
"""
Example shows how to use UIAnchorWidget to position widgets on screen.
Dummy widgets indicate hovered, pressed and clicked.
"""
import arcade
from arcade.gui import UIManager
from arcade.gui.widgets import UIDummy
from arcade.gui.widgets.layout import UIAnchorLayout
class UIMockup(arcade.Window):
def __init__(self):
super().__init__(800, 600, "UI Mockup", resizable=True)
self.manager = UIManager()
self.manager.enable()
arcade.set_background_color(arcade.color.DARK_BLUE_GRAY)
anchor = self.manager.add(UIAnchorLayout())
anchor.add(
child=UIDummy(color=arcade.color.RED),
anchor_x="center_x",
anchor_y="top",
)
anchor.add(
child=UIDummy(color=arcade.color.BLUE),
anchor_x="right",
anchor_y="center_y",
)
anchor.add(
child=UIDummy(color=arcade.color.GREEN),
anchor_x="center_x",
anchor_y="center_y",
)
anchor.add(
child=UIDummy(color=arcade.color.YELLOW),
anchor_x="left",
anchor_y="bottom",
)
anchor.add(
child=UIDummy(color=arcade.color.ORANGE),
anchor_x="left",
align_x=20,
anchor_y="center_y",
)
anchor.add(
child=UIDummy(color=arcade.color.ORANGE),
anchor_x="right",
align_x=-40,
anchor_y="bottom",
align_y=40,
)
def on_draw(self):
self.clear()
self.manager.draw()
window = UIMockup()
arcade.run()
# === tools/e2e_inference.py (nanit/deep-high-resolution-net.pytorch, MIT) ===
import os
import glob
import pickle
import sys
import tensorflow as tf
from numba import cuda
from python_tools.OSUtils import ensure_dir
from offline_predict import get_boxes_from_detection_predictions_data, convert_boxes_to_bboxes, predict_on_image_list, load_skeleton_model
DETECTION_RESEARCH_FOLDER = os.path.expanduser('~/nanit/tf-models/research/')
sys.path.append(DETECTION_RESEARCH_FOLDER)
from object_detection.nanit_model_predict import run_on_dataset, label_map_util, extract_labels_names_from_label_map
# input images folder
IMAGES_FOLDER = os.path.expanduser('~/nanit/skeleton_data_phase1/sms_videos/')
# Detection Model
DETECTION_MODEL_DIR = os.path.expanduser('~/nanit/model_train_phase1/export')
DETECTION_LABEL_PATH = os.path.expanduser('~/nanit/detection_unified/detection_unified_label_map.pbtxt')
# Skeleton Model
SKELETON_CONFIG_FILE_NAME = '../experiments/nanit_mpii/hrnet/w32_256x256_nanit_skeleton.yaml'
SKELETON_TORCH_SCRIPT_MODEL_PATH = '../export/skeleton_model_phase2_and_homography_images_plus_aug_dropout.pth'
# Outputs
OUTPUT_FOLDER = os.path.expanduser('~/nanit/skeleton_data/output/sms_videos/')
DETECTION_IMAGES_SAVE_PATH = os.path.join(OUTPUT_FOLDER, 'detection_images')
SKELETON_IMAGES_SAVE_PATH = os.path.join(OUTPUT_FOLDER, 'skeleton_images')
SAVE_DEBUG_IMAGES = False
def detection_inference(image_paths):
category_index = label_map_util.create_category_index_from_labelmap(DETECTION_LABEL_PATH, use_display_name=True)
detection_predictions_filepath = os.path.join(OUTPUT_FOLDER, 'detection_predictions.pkl')
saved_model_path = os.path.join(DETECTION_MODEL_DIR, 'saved_model')
detection_model = tf.saved_model.load(saved_model_path)
detection_predictions = run_on_dataset(detection_model,
image_paths,
None, # test_image_gt
category_index,
False, # USE_GT
save_flag=SAVE_DEBUG_IMAGES,
save_path=DETECTION_IMAGES_SAVE_PATH)
with open(detection_predictions_filepath, 'wb') as f:
pickle.dump(detection_predictions, f)
print('Detection Predictions saved to: {}'.format(detection_predictions_filepath))
print('Release GPU Memory (After Detection Predictions)')
device = cuda.get_current_device()
device.reset()
return detection_predictions
def skeleton_inference(detection_predictions, image_paths):
boxes = get_boxes_from_detection_predictions_data(detection_predictions)
babies_bboxes, heads_bboxes = convert_boxes_to_bboxes(boxes)
gt_data = {}
homography_matrix_data = {}
pose_model, cfg = load_skeleton_model(SKELETON_CONFIG_FILE_NAME, SKELETON_TORCH_SCRIPT_MODEL_PATH)
skeleton_predictions, _ = predict_on_image_list(image_paths, babies_bboxes, heads_bboxes, gt_data, homography_matrix_data,
pose_model, cfg, SKELETON_IMAGES_SAVE_PATH, SAVE_DEBUG_IMAGES)
skeleton_predictions_filepath = os.path.join(OUTPUT_FOLDER, 'skeleton_predictions.pkl')
with open(skeleton_predictions_filepath, 'wb') as f:
pickle.dump(skeleton_predictions, f)
print('Skeleton Detected {} / {}'.format(len(skeleton_predictions), len(image_paths)))
return skeleton_predictions
def main():
image_paths = glob.glob(os.path.join(IMAGES_FOLDER, '**/*'))
image_paths = [p for p in image_paths if p.endswith('.png') or p.endswith('.jpg')]
detection_predictions = detection_inference(image_paths)
skeleton_predictions = skeleton_inference(detection_predictions, image_paths)
print('Done Skeleton End-to-End Predictions')
if __name__ == '__main__':
ensure_dir(DETECTION_IMAGES_SAVE_PATH)
ensure_dir(SKELETON_IMAGES_SAVE_PATH)
main()
# === analytics/extract/bare/funds-explorer/scrap_ranking.py (vicmattos/data-invest, MIT) ===
#!/usr/bin/python
import os
import csv
import time
from datetime import datetime
import requests
from bs4 import BeautifulSoup
url = 'https://www.fundsexplorer.com.br/ranking'
# Data Cleansing
# 'R$' => ''
# '%' => ''
# '.0' => ''
# '.' => ''
# ',' => '.'
# 'N/A' => ''
print("Starting...{}".format(datetime.now()))
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")
data = []
table = soup.find(id="table-ranking")
table_head = table.find('thead')
rows = table_head.find_all('tr')
for row in rows:
cols = row.find_all('th')
colsd = [ele.get_text(separator=" ").strip() for ele in cols]
data.append([ele for ele in colsd])
table_body = table.find('tbody')
rows = table_body.find_all('tr')
for row in rows:
cols = row.find_all('td')
colsd = [ele.text.replace('R$','').replace('%','').replace('.0','').replace('.','').replace('N/A','').replace(',','.').strip() for ele in cols]
data.append([ele for ele in colsd])
out_dir = 'out/'
os.makedirs(out_dir, exist_ok=False)
file = open(out_dir+"fii.csv", "w")
wtr = csv.writer(file, delimiter=';', lineterminator='\n')
for x in data : wtr.writerow(x)
file.close()
print("Finish...{}".format(datetime.now()))
time.sleep(1)
# === stentseg/apps/_3DPointSelector.py (almarklein/stentseg, BSD-3-Clause) ===
""" Module 3D Point Selector
Provides functionality to view slices and to select points in multiplanar reconstructions.
"""
import os, time, sys
import numpy as np
import visvis as vv
from visvis.utils.pypoints import Point, Pointset, Aarray
import OpenGL.GL as gl
import OpenGL.GLU as glu
class VolViewer:
""" VolViewer. View (CT) volume while scrolling through slices x,y or z depending on the direction chosen
"""
def __init__(self, vol, direction, axes=None, clim=None):
self.direction = direction
# Store vol and init
if self.direction == 0:
self.vol = vol
elif self.direction == 1:
self.vol = np.transpose(vol,(1,0,2))
self.vol.origin = (vol.origin[1],vol.origin[0],vol.origin[2])
self.vol.sampling = (vol.sampling[1],vol.sampling[0],vol.sampling[2])
elif self.direction == 2:
self.vol = np.transpose(vol,(2,0,1))
self.vol.origin = (vol.origin[2],vol.origin[0],vol.origin[1])
self.vol.sampling = (vol.sampling[2],vol.sampling[0],vol.sampling[1])
else:
            raise ValueError('No valid input for direction, only 0, 1 or 2 is possible')
self.slice = 0
# Prepare figure and axex
if axes is None:
self.a = vv.gca()
else:
self.a = axes
self.f = vv.gcf()
# Create slice in 2D texture
if clim:
self.t = vv.imshow(self.vol[self.round_slice,:,:],clim = clim, axes=self.a)
else:
self.t = vv.imshow(self.vol[self.round_slice,:,:],axes=self.a)
# Bind
self.a.eventScroll.Bind(self.on_scroll)
self.eventPositionUpdate = vv.events.BaseEvent(self)
axes.eventMouseDown.Bind(self.on_click)
# Fig properties
self.a.bgcolor = [0,0,0]
self.a.axis.visible = False
self.a.showAxis = False
@property
def round_slice(self):
return int(self.slice + 0.5)
def on_scroll(self, event):
self.slice += int(event.verticalSteps)
if self.slice > (self.vol.shape[0]-1):
self.slice = (self.vol.shape[0]-1)
if self.slice < 0:
self.slice = 0
self.show()
return True
def on_click(self, event):
# get current mouse position
self._refpos = [round(event.x2d,1), round(event.y2d,1)]
#print(self._refpos)
self.eventPositionUpdate.Fire()
return self._refpos
def show(self):
self.t.SetData(self.vol[self.round_slice,:,:])
def GetCurrentSlice(self):
ctslice = self.slice
CurrentSlice = round(self.vol.origin[0] + ctslice * self.vol.sampling[0],1)
return CurrentSlice
def SetCurrentSlice(self, slicenr):
ctslice = (slicenr - self.vol.origin[0])/ self.vol.sampling[0]
self.slice = ctslice
self.show()
class PointSelect3D:
""" A helper class for 3d point select. Use the select3dpoint function to
perform manual point selection.
"""
def __init__(self, vol, a_transversal, a_coronal, a_sagittal, a_MIP, a_text, nr_of_stents, clim=None):
self.nr_of_stents = nr_of_stents
self.f = vv.gcf()
self.vol = vol
# Create empty list of endpoints
self.endpoints = []
self.endpoints = ['xx,yy,zz'] * nr_of_stents * 2
self.endpointsindex = 0
# Create text objects
self._labelcurrent = vv.Label(a_text)
self._labelx = vv.Label(a_text)
self._labelxslice = vv.Label(a_text)
self._labely = vv.Label(a_text)
self._labelyslice = vv.Label(a_text)
self._labelz = vv.Label(a_text)
self._labelzslice = vv.Label(a_text)
self._labelcurrent.position = -250,10
self._labelx.position = -250,35
self._labelxslice.position = -200,35
self._labely.position = -250,55
self._labelyslice.position = -200,55
self._labelz.position = -250,75
self._labelzslice.position = -200,75
self._labelendpointstext =[]
self._labelendpointstext.append(vv.Label(a_text))
self._labelendpointstext[0].position = 100,-5
self._labelendpointstext.append(vv.Label(a_text))
self._labelendpointstext[1].position = 230,-5
for i in range(2,self.nr_of_stents+2):
self._labelendpointstext.append(vv.Label(a_text))
self._labelendpointstext[i].position = 40,15+(20*(i-2))
self._labelendpoints = []
for i in range(0,self.nr_of_stents * 2,2):
self._labelendpoints.append(vv.Label(a_text))
self._labelendpoints[i].position = 100,15+(20*(i/2)),50,20
self._labelendpoints.append(vv.Label(a_text))
self._labelendpoints[i+1].position = 230,15+(20*(i/2)),50,20
# Create Select button
self._select = False
self._butselect = vv.PushButton(a_text)
self._butselect.position = -110,150
self._butselect.text = 'Select'
# Create Back button
self._back = False
self._butback = vv.PushButton(a_text)
self._butback.position = 10,150
self._butback.text = 'Back'
# Create Close button
self._finished = False
self._butclose = vv.PushButton(a_text)
self._butclose.position = -50,180
self._butclose.text = 'Finish'
# Get short name for sampling
if isinstance(vol, Aarray):
self._sam = sam = vol.sampling
else:
self._sam = None
sam = (1,1,1)
# Display the slices and 3D MIP
self.b1 = VolViewer(vol, 0, axes=a_transversal, clim=clim)
self.b2 = VolViewer(vol, 1, axes=a_coronal, clim=clim)
self.b3 = VolViewer(vol, 2, axes=a_sagittal, clim=clim)
renderstyle = 'mip'
a_MIP.daspect = 1,1,-1
self.b4 = vv.volshow(vol, clim=(0,2500), renderStyle = renderstyle, axes=a_MIP)
c = vv.ClimEditor(a_MIP)
c.position = (10, 50)
# set axis settings
for a in [a_transversal, a_coronal, a_sagittal, a_MIP]:
a.bgcolor = [0,0,0]
a.axis.visible = False
a.showAxis = True
# get current slice number
Zslice = self.b1.GetCurrentSlice()
Yslice = self.b2.GetCurrentSlice()
Xslice = self.b3.GetCurrentSlice()
size = vol.shape
# create lines for position of x,y and z slices
origin = vol.origin
Zrange = (origin[0], (size[0]*sam[0])+origin[0])
Xrange = (origin[1], (size[1]*sam[1])+origin[1])
Yrange = (origin[2], (size[2]*sam[2])+origin[2])
self.l11 = vv.Line(a_transversal,[(Yslice,Xrange[0]),(Yslice,Xrange[1])])
self.l12 = vv.Line(a_transversal,[(Yrange[0],Xslice),(Yrange[1],Xslice)])
self.l21 = vv.Line(a_coronal,[(Zslice,Zrange[0]),(Zslice,Zrange[1])])
self.l22 = vv.Line(a_coronal,[(Yrange[0],Xslice),(Yrange[1],Xslice)])
self.l31 = vv.Line(a_sagittal, [(Zslice,Zrange[0]),(Zslice,Zrange[1])])
self.l32 = vv.Line(a_sagittal, [(Xrange[0],Yslice),(Xrange[1],Yslice)])
# change color of the lines
for i in [self.l11,self.l12,self.l21,self.l22,self.l31,self.l32]:
i.lc = 'g'
# create a point in the MIP figure for the current position
self.mippoint = vv.Line(a_MIP, [(Zslice,Xslice,Yslice)])
self.mippoint.ms = 'o'
self.mippoint.mw = 5
self.mippoint.mc = 'g'
self.mippoint.alpha = 0.9
# Get list of all range wobjects
self._volviewers = [self.b1, self.b2, self.b3]
# Bind events
fig = a_text.GetFigure()
fig.eventClose.Bind(self._OnFinish)
self._butclose.eventPress.Bind(self._OnFinish)
self._butselect.eventPress.Bind(self._OnSelect)
self._butback.eventPress.Bind(self._OnBack)
for r in self._volviewers:
r.eventPositionUpdate.Bind(self._OnMouseClickAxis)
for s in range(len(self._labelendpoints)):
self._labelendpoints[s].eventMouseDown.Bind(self._OnMouseClickEndpoint)
# Almost done
self._SetTexts()
self.updatePosition()
def _OnMouseClickEndpoint(self,event):
index = self._labelendpoints.index(event.owner)
self.endpointsindex = index
self.updateText()
def _OnMouseClickAxis(self,event):
# Get ranges of wobject that fired the event
rangex, rangey = event.owner._refpos[0], event.owner._refpos[1]
# Update slices in onther wobjects
if event.owner is self.b1:
self.b2.SetCurrentSlice(rangey)
self.b3.SetCurrentSlice(rangex)
elif event.owner is self.b2:
self.b1.SetCurrentSlice(rangey)
self.b3.SetCurrentSlice(rangex)
elif event.owner is self.b3:
self.b1.SetCurrentSlice(rangey)
self.b2.SetCurrentSlice(rangex)
else:
print('unknown owner! %s' % repr(event.owner))
def _SetTexts(self):
# Get short names for labels
lx, ly, lz = self._labelx, self._labely, self._labelz
# Apply texts
self._labelcurrent.text = 'Current Position:'
lx.text = 'X: '
ly.text = 'Y: '
lz.text = 'Z: '
self._labelendpointstext[0].text = 'StartPoints'
self._labelendpointstext[1].text = 'EndPoints'
for i in range(2,(self.nr_of_stents)+2):
self._labelendpointstext[i].text = 'Stent %1d:' % int(i-1)
for i in range(self.nr_of_stents*2):
self._labelendpoints[i].text = self.endpoints[i]
for i in range(self.nr_of_stents*2):
if i == self.endpointsindex:
self._labelendpoints[i].textColor = 'b'
else:
self._labelendpoints[i].textColor = 'k'
def _OnSelect(self, event):
Position = self.updatePosition()
if self.endpointsindex <= len(self.endpoints)-1:
self.endpoints[self.endpointsindex] = Position
self.endpointsindex += 1
self.updateText()
#print(self.endpoints)
#print('Current position = ' + str(Position))
def _OnBack(self, event):
if not(self.endpointsindex <0):
self.endpoints[self.endpointsindex] = 'xx,yy,zz'
self.updateText()
print(self.endpoints)
print('Back Pressed')
def _OnFinish(self, event):
self._finished = True
return self.endpoints
print('Finish Pressed')
def updatePosition(self):
# get current slice numbers
Zslice = self.b1.GetCurrentSlice()
Yslice = self.b2.GetCurrentSlice()
Xslice = self.b3.GetCurrentSlice()
# update lines
self.l11.SetXdata([Xslice,Xslice])
self.l12.SetYdata([Yslice,Yslice])
self.l21.SetXdata([Xslice,Xslice])
self.l22.SetYdata([Zslice,Zslice])
self.l31.SetXdata([Yslice,Yslice])
self.l32.SetYdata([Zslice,Zslice])
# update Point
self.mippoint.SetXdata([Xslice])
self.mippoint.SetYdata([Yslice])
self.mippoint.SetZdata([Zslice])
# update current slice text
self._labelxslice.text = str(Xslice)
self._labelyslice.text = str(Yslice)
self._labelzslice.text = str(Zslice)
# return Position
Position = (Xslice, Yslice, Zslice)
return Position
def updateText(self):
for i in range(self.nr_of_stents*2):
self._labelendpoints[i].text = str(self.endpoints[i])
if i == self.endpointsindex:
self._labelendpoints[i].textColor = 'b'
else:
self._labelendpoints[i].textColor = 'k'
def Run(self):
vv.processEvents()
self.updatePosition()
def select3dpoints(vol, nr_of_stents, fig=None, clim=None):
""" Manually select 3d points in a volume. In the given figure (or a new
figure if None), three axes are created that display the transversal,
sagittal and coronal slices of the volume. The user can then use the mouse
to scroll to the correct slice and select the current position as an
endpoint of a stent.
Input: Number of stents to select start- and endpoints for.
"""
# Create figure
if fig is None:
fig = vv.figure()
figCleanup = True
else:
fig.Clear()
figCleanup = False
# Create four axes and a wibject to attach text labels to
fig.position = 0, 22, 750, 700
fig.title = '3D Point Selector'
a1 = vv.subplot(321)
a2 = vv.subplot(322)
a3 = vv.subplot(323)
a4 = vv.subplot(324)
a5 = vv.Wibject(fig)
# x-richting, y-richting, x-breedte?, y-breedte?
a5.position = 0.5, 0.7, 0.5, 0.5
# Set settings
for a in [a1, a2, a3, a4]:
a.showAxis = False
# Create PointSelect instance
pointselect3d = PointSelect3D(vol, a1, a3, a2, a4, a5, nr_of_stents, clim)
# Enter a mainloop
while not pointselect3d._finished:
pointselect3d.Run()
time.sleep(0.01)
# Clean up figure (close if we opened it)
fig.Clear()
fig.DrawNow()
if figCleanup:
fig.Destroy()
# Done (return points)
Startpoints = []
Endpoints = []
for i in range(nr_of_stents):
if isinstance(pointselect3d.endpoints[i*2],tuple):
Startpoints.append(pointselect3d.endpoints[i*2])
if isinstance(pointselect3d.endpoints[(i*2)+1],tuple):
Endpoints.append(pointselect3d.endpoints[(i*2)+1])
return Startpoints, Endpoints
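# Hedged usage sketch (not part of the original module): a real CT volume is replaced by a
# small random Aarray; the Aarray construction, sampling/origin values and the clim window
# are placeholder assumptions made only to illustrate how select3dpoints is called.
if __name__ == '__main__':
    dummy_vol = Aarray(np.random.randint(0, 1000, (64, 128, 128)).astype('float32'),
                       sampling=(1.0, 0.5, 0.5), origin=(0.0, 0.0, 0.0))
    start_points, end_points = select3dpoints(dummy_vol, nr_of_stents=2, clim=(0, 1000))
    print('Start points:', start_points)
    print('End points:', end_points)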
# === crossasr/estimator.py (mhilmiasyrofi/CrossASRv2, MIT) ===
class Estimator:
def __init__(self, name:str):
self.name = name
def getName(self) -> str :
return self.name
def setName(self, name:str):
self.name = name
def fit(self, X:[str], y:[int]):
raise NotImplementedError()
def predict(self, X:[str]):
raise NotImplementedError()
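# Hedged sketch (not part of the original file): a minimal concrete estimator showing how the
# abstract interface above is meant to be filled in. The majority-class logic is purely
# illustrative and not something CrossASR ships.
class MajorityClassEstimator(Estimator):
    def fit(self, X:[str], y:[int]):
        # remember the most frequent label seen during training
        self._majority = max(set(y), key=y.count)
    def predict(self, X:[str]):
        # predict the remembered label for every input text
        return [self._majority for _ in X]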
# === app/api/V1/views/auth_endpoints.py (eduhmik/Store-Manager-api, MIT) ===
from flask import Flask, request, jsonify, Blueprint, json, make_response
from flask_restplus import Resource, reqparse, Api, Namespace, fields
from ..models.user_model import User
api = Namespace('Register Endpoint', description='A collection of register endpoints for the user model')
ns = Namespace('Users Endpoints', description='Users endpoints to fetch all users and delete them')
ns2 = Namespace('Login_endpoint', description='Login endpoints for the user model')
ns3 = Namespace('Logout Endpoint', description='An endpoint to logout')
parser = reqparse.RequestParser()
parser.add_argument('username', help = 'This field cannot be blank')
parser.add_argument('email', help = 'This field cannot be blank', required = True)
parser.add_argument('phone', help = 'This field cannot be blank')
parser.add_argument('role', help = 'This field cannot be blank')
parser.add_argument('password', help = 'This field cannot be blank', required = True)
login_fields = api.model('Login', {
'email': fields.String,
'password': fields.String
})
"""user login"""
@ns2.route('')
class UserLogin(Resource):
@ns2.expect(login_fields)
def post(self):
args = parser.parse_args()
email = args['email']
password = args['password']
try:
            current_user = User.get_single_user(self, email)
if current_user == 'not found':
return make_response(jsonify({
'status': 'success',
'message': 'User does not exist, sign up!'
}), 200)
if current_user and User.verify_hash(password, current_user['password']):
role = current_user['role']
email = current_user['email']
auth_token = User.encode_auth_token(email, role)
if auth_token:
return make_response(jsonify({
'status' : 'ok',
'message' : 'Logged in successfully',
'auth_token': auth_token.decode()
}), 200)
else:
return make_response(jsonify({
'message' : 'Incorrect email or password',
'status' : 'fail'
}), 400)
except Exception as e:
return make_response(jsonify({
'message' : str(e),
'status' : 'failed'
}), 500)
registration_fields = api.model('Registration', {
'username' : fields.String,
'email': fields.String,
'phone' : fields.String,
'role': fields.String,
'password': fields.String
})
"""user regitration"""
@api.route('')
class UserRegistration(Resource):
@api.expect(registration_fields)
def post(self):
args = parser.parse_args()
username = args['username']
email = args['email']
phone = args['phone']
role = args['role']
password = args['password']
found_email = User.get_single_user(self, email)
if found_email == 'not found':
try:
new_user = User(email, User.generate_hash(password), username, role, phone)
created_user = new_user.create_user()
return make_response(jsonify({
'status': 'ok',
'message': 'User created successfully',
'users': created_user
}), 201)
except Exception as e:
return make_response(jsonify({
'message' : str(e),
'status' : 'failed'
}), 500)
return make_response(jsonify({
'status': 'fail',
'message' : 'Email already exists, please log in'
}))
"""fetch all users"""
@ns.route('')
class AllUsers(Resource):
@ns.doc(security='apikey')
def get(self):
users_list = User.get_all_users(self)
return make_response(jsonify({
'message': 'Get all users successful',
'status': 'ok',
'users': users_list
}), 200)
"""user logout"""
@ns3.route('')
class UserLogoutAccess(Resource):
@ns3.doc(security='apikey')
def post(self):
pass
# === wolfram/wolfram.py (Wyn10/aikaterna-cogs, MIT) ===
import os
import aiohttp
from discord.ext import commands
import xml.etree.ElementTree as ET
from cogs.utils.dataIO import dataIO
from .utils import checks
from .utils.chat_formatting import escape_mass_mentions
from .utils.chat_formatting import box
from __main__ import send_cmd_help
class Wolfram:
def __init__(self, bot):
self.bot = bot
self.settings = dataIO.load_json("data/wolfram/settings.json")
@commands.command(pass_context=True, name="wolfram", aliases=["ask"])
async def _wolfram(self, ctx, *arguments: str):
"""
Ask Wolfram Alpha any question
"""
api_key = self.settings["WOLFRAM_API_KEY"]
if api_key:
url = "http://api.wolframalpha.com/v2/query?"
query = " ".join(arguments)
payload = {"input": query, "appid": api_key}
headers = {"user-agent": "Red-cog/1.0.0"}
conn = aiohttp.TCPConnector(verify_ssl=False)
session = aiohttp.ClientSession(connector=conn)
async with session.get(url, params=payload, headers=headers) as r:
result = await r.text()
session.close()
root = ET.fromstring(result)
a = []
for pt in root.findall(".//plaintext"):
if pt.text:
a.append(pt.text.capitalize())
if len(a) < 1:
message = "There is as yet insufficient data for a meaningful answer."
else:
message = "\n".join(a[0:3])
else:
message = (
"No API key set for Wolfram Alpha. Get one at http://products.wolframalpha.com/api/"
)
message = escape_mass_mentions(message)
await self.bot.say(box(message))
@commands.command(pass_context=True, name="setwolframapi", aliases=["setwolfram"])
@checks.is_owner()
async def _setwolframapi(self, ctx, key: str):
"""
Set the api-key
"""
if key:
self.settings["WOLFRAM_API_KEY"] = key
dataIO.save_json("data/wolfram/settings.json", self.settings)
await self.bot.say("Key set.")
else:
await send_cmd_help(ctx)
def check_folder():
if not os.path.exists("data/wolfram"):
print("Creating data/wolfram folder...")
os.makedirs("data/wolfram")
def check_file():
data = {}
data["WOLFRAM_API_KEY"] = False
f = "data/wolfram/settings.json"
if not dataIO.is_valid_json(f):
print("Creating default settings.json...")
dataIO.save_json(f, data)
def setup(bot):
check_folder()
check_file()
n = Wolfram(bot)
bot.add_cog(n)
# === Servers/ProbabilisticServer.py (MartinHex/master-thesis, MIT) ===
from abc import ABC,abstractmethod
from Servers.ABCServer import ABCServer
class ProbabilisticServer(ABCServer):
@abstractmethod
def sample_model(self):
pass
# === fastapi_crud/fastapicrud.py (miikapo/fastapi-crud, Apache-2.0) ===
from fastapi import Depends
from sqlalchemy.ext.asyncio import AsyncEngine
from sqlalchemy.orm import sessionmaker
from typing import AsyncGenerator
from fastapi_crud.session import Session
from fastapi_crud.router import ModelRouter
from fastapi_crud.types import Model
class FastapiCRUD:
def __init__(self, engine: AsyncEngine) -> None:
self._session_maker = sessionmaker(engine, class_=Session)
self.session: Session = Depends(self._session)
async def _session(self) -> AsyncGenerator[Session, None]:
async with self._session_maker() as session:
yield session
def create_router(self, model: Model) -> ModelRouter:
return ModelRouter(model, self.session)
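# Hedged usage sketch (not part of the original module): the SQLite URL and the item model are
# placeholders, and mounting the returned router assumes that ModelRouter behaves like (or
# wraps) a FastAPI APIRouter.
def build_example_app(item_model: Model):
    from fastapi import FastAPI
    from sqlalchemy.ext.asyncio import create_async_engine
    engine = create_async_engine("sqlite+aiosqlite:///./example.db")
    crud = FastapiCRUD(engine)
    app = FastAPI()
    app.include_router(crud.create_router(item_model))  # assumption: the router is mountable
    return app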
# === migrations/versions/98f3e3ad195c_update_blog_to_add_a_title.py (Josephat-n/myBlog, MIT) ===
"""update blog to add a title.
Revision ID: 98f3e3ad195c
Revises: 2d98c5165674
Create Date: 2019-12-02 22:58:10.377423
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '98f3e3ad195c'
down_revision = '2d98c5165674'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('blogs', sa.Column('title', sa.String(length=255), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('blogs', 'title')
# ### end Alembic commands ###
901066d43a75d83ccca11050c805f23f07e56a57 | 2,691 | py | Python | message/views.py | ThusharaX/mumbleapi | 8435fe9d86869cce81961f42c9860fa3810c171b | [
"Apache-2.0"
] | 187 | 2021-04-24T14:49:44.000Z | 2022-03-31T14:25:22.000Z | message/views.py | ThusharaX/mumbleapi | 8435fe9d86869cce81961f42c9860fa3810c171b | [
"Apache-2.0"
] | 119 | 2021-04-24T18:08:43.000Z | 2022-01-09T00:57:19.000Z | message/views.py | ThusharaX/mumbleapi | 8435fe9d86869cce81961f42c9860fa3810c171b | [
"Apache-2.0"
] | 174 | 2021-04-24T15:57:23.000Z | 2022-03-11T02:09:04.000Z | from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework import status
from users.models import UserProfile
from .serializers import MessageSerializer , ThreadSerializer
from .models import UserMessage , Thread
from django.db.models import Q
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def read_message(request, pk):
try:
thread = Thread.objects.get(id=pk)
messages = thread.messages.all()
un_read = thread.messages.filter(is_read=False)
for msg in un_read:
msg.is_read = True
msg.save()
serializer = MessageSerializer(messages, many=True)
return Response(serializer.data)
except Exception as e:
return Response({'details': f"{e}"},status=status.HTTP_204_NO_CONTENT)
@api_view(['POST'])
@permission_classes((IsAuthenticated,))
def CreateThread(request):
sender = request.user.userprofile
recipient_id = request.data.get('recipient_id')
    if recipient_id is not None:
        try:
            recipient = UserProfile.objects.get(id=recipient_id)
            thread,created = Thread.objects.get_or_create(sender=sender,reciever=recipient)
serializer = ThreadSerializer(thread, many=False)
return Response(serializer.data)
except UserProfile.DoesNotExist:
            return Response({'detail':'User with that id does not exist'})
else:
return Response({'details':'Recipient id not found'})
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def get_messages(request):
user = request.user.userprofile
threads = Thread.objects.filter(Q(sender=user)|Q(reciever=user))
serializer = ThreadSerializer(threads, many=True)
return Response(serializer.data)
@api_view(['POST'])
@permission_classes((IsAuthenticated,))
def create_message(request):
sender = request.user.userprofile
data = request.data
thread_id = data.get('thread_id')
if thread_id:
message = data.get('message')
thread= Thread.objects.get(id=thread_id)
if thread:
if message is not None:
message = UserMessage.objects.create(thread=thread,sender=sender,body=message)
message.save()
serializer = ThreadSerializer(thread, many=False)
return Response(serializer.data)
else:
return Response({'details':'Content for message required'})
else:
return Response({'details':'Thread not found'})
else:
        return Response({'details':'Please provide other user id'})
9010af2b84b0a8a7a8c133b624651330f5e4d485 | 3,691 | py | Python | widgets/dialogs/transactions_editor.py | redstorm45/money_analyst | 9ccf8aa4cd7bad7aff21a82ce4219406009f126a | [
"Apache-2.0"
] | null | null | null | widgets/dialogs/transactions_editor.py | redstorm45/money_analyst | 9ccf8aa4cd7bad7aff21a82ce4219406009f126a | [
"Apache-2.0"
] | null | null | null | widgets/dialogs/transactions_editor.py | redstorm45/money_analyst | 9ccf8aa4cd7bad7aff21a82ce4219406009f126a | [
"Apache-2.0"
] | null | null | null | import PyQt5.QtWidgets as Qtw
import PyQt5.QtCore as QtCore
from widgets.labels import LabelsWidget
DATE_FORMAT = 'yyyy-MM-dd'
class TransactionDialog(Qtw.QDialog):
"""
A dialog used to edit a transaction
"""
def __init__(self, parent, model_cat, desc='', category=0, amount=0, date=''):
super(TransactionDialog, self).__init__(parent)
self.validated = False
self.validated_data = None
self.model_cat = model_cat
self.all_labels = model_cat.get_labels()
self.setModal(True)
edit_desc_label = Qtw.QLabel("Description:", self)
self.edit_desc = Qtw.QLineEdit(self)
edit_cat_label = Qtw.QLabel("Catégorie:", self)
self.edit_cat = Qtw.QComboBox(self)
self.edit_cat.insertItems(0, self.model_cat.get_names())
self.edit_cat.setInsertPolicy(self.edit_cat.NoInsert)
edit_amount_label = Qtw.QLabel("Montant (centimes):", self)
self.edit_amount = Qtw.QLineEdit('0', self)
self.edit_amount.textChanged.connect(self.updateAmountHint)
self.edit_amount_hint = Qtw.QLabel('soit: 0,00€', self)
self.edit_date = Qtw.QCalendarWidget(self)
self.edit_labels = LabelsWidget(self, self.all_labels)
buttons_widget = Qtw.QWidget(self)
buttons_layout = Qtw.QHBoxLayout()
cancel_button = Qtw.QPushButton("Annuler", buttons_widget)
cancel_button.clicked.connect(self.reject)
validate_button = Qtw.QPushButton("Valider", buttons_widget)
validate_button.clicked.connect(self.validate)
buttons_layout.addWidget(cancel_button)
buttons_layout.addWidget(validate_button)
buttons_widget.setLayout(buttons_layout)
layout = Qtw.QGridLayout()
layout.addWidget(edit_desc_label, 0, 0)
layout.addWidget(self.edit_desc, 1, 0)
layout.addWidget(edit_cat_label, 2, 0)
layout.addWidget(self.edit_cat, 3, 0)
layout.addWidget(edit_amount_label, 4, 0)
layout.addWidget(self.edit_amount, 5, 0)
layout.addWidget(self.edit_amount_hint, 6, 0)
layout.addWidget(self.edit_date, 0, 1, 7, 1)
layout.addWidget(self.edit_labels, 7, 0, 1, 2)
layout.addWidget(buttons_widget, 8, 0, 1, 2)
self.setLayout(layout)
def updateAmountHint(self):
try:
val = int(self.edit_amount.text())
S = 'soit: ' + str(val//100) + ',' + str(val%100).zfill(2) + '€'
self.edit_amount_hint.setText(S)
except ValueError:
self.edit_amount_hint.setText('soit: ?')
def setData(self, desc, category, amount, date, labels):
self.edit_desc.setText(desc)
self.edit_cat.setCurrentText(self.model_cat.get_name_for_id(category))
self.edit_amount.setText(str(amount))
self.edit_date.setSelectedDate(QtCore.QDate.fromString(date, DATE_FORMAT))
self.edit_labels.set_labels(labels)
def validate(self):
if self.edit_desc.text() == '':
box = Qtw.QMessageBox()
box.setText('Rajoutez une description à la transaction')
box.exec_()
return
try:
amount = int(self.edit_amount.text())
except ValueError:
box = Qtw.QMessageBox()
box.setText('Montant invalide')
box.exec_()
return
self.validated = True
cat_id = self.model_cat.get_id_for_name(self.edit_cat.currentText())
date = self.edit_date.selectedDate().toString(DATE_FORMAT)
labels = self.edit_labels.labels.copy()
self.validated_data = (self.edit_desc.text(), cat_id, amount, date, labels)
self.accept()
# === octue/cloud/pub_sub/service.py (octue/octue-sdk-python, MIT) ===
import base64
import concurrent.futures
import json
import logging
import sys
import time
import traceback as tb
import uuid
from google.api_core import retry
from google.cloud import pubsub_v1
import octue.exceptions
import twined.exceptions
from octue.cloud.credentials import GCPCredentialsManager
from octue.cloud.pub_sub import Subscription, Topic
from octue.cloud.pub_sub.logging import GooglePubSubHandler
from octue.mixins import CoolNameable
from octue.resources.manifest import Manifest
from octue.utils.encoders import OctueJSONEncoder
from octue.utils.exceptions import create_exceptions_mapping
from octue.utils.objects import get_nested_attribute
logger = logging.getLogger(__name__)
OCTUE_NAMESPACE = "octue.services"
ANSWERS_NAMESPACE = "answers"
# Switch message batching off by setting max_messages to 1. This minimises latency and is recommended for
# microservices publishing single messages in a request-response sequence.
BATCH_SETTINGS = pubsub_v1.types.BatchSettings(max_bytes=10 * 1000 * 1000, max_latency=0.01, max_messages=1)
EXCEPTIONS_MAPPING = create_exceptions_mapping(
globals()["__builtins__"], vars(twined.exceptions), vars(octue.exceptions)
)
class Service(CoolNameable):
"""A Twined service that can be used in two modes:
* As a server accepting questions (input values and manifests), running them through its app, and responding to the
requesting service with the results of the analysis.
* As a requester of answers from another Service in the above mode.
Services communicate entirely via Google Pub/Sub and can ask and/or respond to questions from any other Service that
has a corresponding topic on Google Pub/Sub.
:param octue.resources.service_backends.ServiceBackend backend: the object representing the type of backend the service uses
:param str|None service_id: a string UUID optionally preceded by the octue services namespace "octue.services."
:param callable|None run_function: the function the service should run when it is called
:return None:
"""
def __init__(self, backend, service_id=None, run_function=None):
if service_id is None:
self.id = f"{OCTUE_NAMESPACE}.{str(uuid.uuid4())}"
elif not service_id:
raise ValueError(f"service_id should be None or a non-falsey value; received {service_id!r} instead.")
else:
if service_id.startswith(OCTUE_NAMESPACE):
self.id = service_id
else:
self.id = f"{OCTUE_NAMESPACE}.{service_id}"
self.backend = backend
self.run_function = run_function
self._credentials = GCPCredentialsManager(backend.credentials_environment_variable).get_credentials()
self.publisher = pubsub_v1.PublisherClient(credentials=self._credentials, batch_settings=BATCH_SETTINGS)
super().__init__()
def __repr__(self):
return f"<{type(self).__name__}({self.name!r})>"
def serve(self, timeout=None, delete_topic_and_subscription_on_exit=False):
"""Start the Service as a server, waiting to accept questions from any other Service using Google Pub/Sub on
the same Google Cloud Platform project. Questions are responded to asynchronously.
:param float|None timeout: time in seconds after which to shut down the service
:param bool delete_topic_and_subscription_on_exit: if `True`, delete the service's topic and subscription on exit
:return None:
"""
topic = Topic(name=self.id, namespace=OCTUE_NAMESPACE, service=self)
topic.create(allow_existing=True)
subscriber = pubsub_v1.SubscriberClient(credentials=self._credentials)
subscription = Subscription(
name=self.id,
topic=topic,
namespace=OCTUE_NAMESPACE,
project_name=self.backend.project_name,
subscriber=subscriber,
expiration_time=None,
)
subscription.create(allow_existing=True)
future = subscriber.subscribe(subscription=subscription.path, callback=self.answer)
logger.debug("%r is waiting for questions.", self)
with subscriber:
try:
future.result(timeout=timeout)
except (TimeoutError, concurrent.futures.TimeoutError, KeyboardInterrupt):
future.cancel()
if delete_topic_and_subscription_on_exit:
topic.delete()
subscription.delete()
def answer(self, question, timeout=30):
"""Answer a question (i.e. run the Service's app to analyse the given data, and return the output values to the
asker). Answers are published to a topic whose name is generated from the UUID sent with the question, and are
in the format specified in the Service's Twine file.
:param dict|Message question:
:param float|None timeout: time in seconds to keep retrying sending of the answer once it has been calculated
:raise Exception: if any exception arises during running analysis and sending its results
:return None:
"""
data, question_uuid, forward_logs = self.parse_question(question)
topic = self.instantiate_answer_topic(question_uuid)
if forward_logs:
analysis_log_handler = GooglePubSubHandler(publisher=self.publisher, topic=topic)
else:
analysis_log_handler = None
try:
analysis = self.run_function(
analysis_id=question_uuid,
input_values=data["input_values"],
input_manifest=data["input_manifest"],
analysis_log_handler=analysis_log_handler,
)
if analysis.output_manifest is None:
serialised_output_manifest = None
else:
serialised_output_manifest = analysis.output_manifest.serialise()
self.publisher.publish(
topic=topic.path,
data=json.dumps(
{
"type": "result",
"output_values": analysis.output_values,
"output_manifest": serialised_output_manifest,
"message_number": topic.messages_published,
},
cls=OctueJSONEncoder,
).encode(),
retry=retry.Retry(deadline=timeout),
)
topic.messages_published += 1
logger.info("%r responded to question %r.", self, question_uuid)
except BaseException as error: # noqa
self.send_exception_to_asker(topic, timeout)
raise error
def parse_question(self, question):
"""Parse a question in the Google Cloud Pub/Sub or Google Cloud Run format.
:param dict|Message question:
:return (dict, str, bool):
"""
try:
# Parse Google Cloud Pub/Sub question format.
data = json.loads(question.data.decode())
question.ack()
logger.info("%r received a question.", self)
except Exception:
# Parse Google Cloud Run question format.
data = json.loads(base64.b64decode(question["data"]).decode("utf-8").strip())
question_uuid = get_nested_attribute(question, "attributes.question_uuid")
forward_logs = bool(int(get_nested_attribute(question, "attributes.forward_logs")))
return data, question_uuid, forward_logs
def instantiate_answer_topic(self, question_uuid, service_id=None):
"""Instantiate the answer topic for the given question UUID for the given service ID.
:param str question_uuid:
:param str|None service_id: the ID of the service to ask the question to
:return octue.cloud.pub_sub.topic.Topic:
"""
return Topic(
name=".".join((service_id or self.id, ANSWERS_NAMESPACE, question_uuid)),
namespace=OCTUE_NAMESPACE,
service=self,
)
def ask(
self,
service_id,
input_values=None,
input_manifest=None,
subscribe_to_logs=True,
allow_local_files=False,
timeout=30,
):
"""Ask a serving Service a question (i.e. send it input values for it to run its app on). The input values must
be in the format specified by the serving Service's Twine file. A single-use topic and subscription are created
before sending the question to the serving Service - the topic is the expected publishing place for the answer
from the serving Service when it comes, and the subscription is set up to subscribe to this.
:param str service_id: the UUID of the service to ask the question to
:param any input_values: the input values of the question
:param octue.resources.manifest.Manifest|None input_manifest: the input manifest of the question
:param bool subscribe_to_logs: if `True`, subscribe to logs from the remote service and handle them with the local log handlers
:param bool allow_local_files: if `True`, allow the input manifest to contain references to local files - this should only be set to `True` if the serving service will have access to these local files
:param float|None timeout: time in seconds to keep retrying sending the question
:return (octue.cloud.pub_sub.subscription.Subscription, str): the response subscription and question UUID
"""
if not allow_local_files:
if (input_manifest is not None) and (not input_manifest.all_datasets_are_in_cloud):
raise octue.exceptions.FileLocationError(
"All datasets of the input manifest and all files of the datasets must be uploaded to the cloud "
"before asking a service to perform an analysis upon them. The manifest must then be updated with "
"the new cloud locations."
)
question_topic = Topic(name=service_id, namespace=OCTUE_NAMESPACE, service=self)
if not question_topic.exists():
raise octue.exceptions.ServiceNotFound(f"Service with ID {service_id!r} cannot be found.")
question_uuid = str(uuid.uuid4())
response_topic = self.instantiate_answer_topic(question_uuid, service_id)
response_topic.create(allow_existing=False)
response_subscription = Subscription(
name=response_topic.name,
topic=response_topic,
namespace=OCTUE_NAMESPACE,
project_name=self.backend.project_name,
subscriber=pubsub_v1.SubscriberClient(credentials=self._credentials),
)
response_subscription.create(allow_existing=False)
if input_manifest is not None:
input_manifest = input_manifest.serialise()
self.publisher.publish(
topic=question_topic.path,
data=json.dumps({"input_values": input_values, "input_manifest": input_manifest}).encode(),
question_uuid=question_uuid,
forward_logs=str(int(subscribe_to_logs)),
retry=retry.Retry(deadline=timeout),
)
logger.info("%r asked a question %r to service %r.", self, question_uuid, service_id)
return response_subscription, question_uuid
def wait_for_answer(self, subscription, service_name="REMOTE", timeout=30):
"""Wait for an answer to a question on the given subscription, deleting the subscription and its topic once
the answer is received.
:param octue.cloud.pub_sub.subscription.Subscription subscription: the subscription for the question's answer
:param str service_name: an arbitrary name to refer to the service subscribed to by (used for labelling its remote log messages)
:param float|None timeout: how long to wait for an answer before raising a TimeoutError
:raise TimeoutError: if the timeout is exceeded
:return dict: dictionary containing the keys "output_values" and "output_manifest"
"""
subscriber = pubsub_v1.SubscriberClient(credentials=self._credentials)
message_handler = OrderedMessageHandler(
message_puller=self._pull_message,
subscriber=subscriber,
subscription=subscription,
service_name=service_name,
)
with subscriber:
try:
return message_handler.handle_messages(timeout=timeout)
finally:
subscription.delete()
subscription.topic.delete()
def send_exception_to_asker(self, topic, timeout=30):
"""Serialise and send the exception being handled to the asker.
:param octue.cloud.pub_sub.topic.Topic topic:
:param float|None timeout: time in seconds to keep retrying sending of the exception
:return None:
"""
exception_info = sys.exc_info()
exception = exception_info[1]
exception_message = f"Error in {self!r}: {exception}"
traceback = tb.format_list(tb.extract_tb(exception_info[2]))
self.publisher.publish(
topic=topic.path,
data=json.dumps(
{
"type": "exception",
"exception_type": type(exception).__name__,
"exception_message": exception_message,
"traceback": traceback,
"message_number": topic.messages_published,
}
).encode(),
retry=retry.Retry(deadline=timeout),
)
topic.messages_published += 1
def _pull_message(self, subscriber, subscription, timeout):
"""Pull a message from the subscription, raising a `TimeoutError` if the timeout is exceeded before succeeding.
:param octue.cloud.pub_sub.subscription.Subscription subscription: the subscription the message is expected on
:param float|None timeout: how long to wait in seconds for the message before raising a TimeoutError
:raise TimeoutError|concurrent.futures.TimeoutError: if the timeout is exceeded
:return dict: message containing data
"""
start_time = time.perf_counter()
while True:
no_message = True
attempt = 1
while no_message:
logger.debug("Pulling messages from Google Pub/Sub: attempt %d.", attempt)
pull_response = subscriber.pull(
request={"subscription": subscription.path, "max_messages": 1},
retry=retry.Retry(),
)
try:
answer = pull_response.received_messages[0]
no_message = False
except IndexError:
logger.debug("Google Pub/Sub pull response timed out early.")
attempt += 1
if timeout is not None and (time.perf_counter() - start_time) > timeout:
raise TimeoutError(
f"No message received from topic {subscription.topic.path!r} after {timeout} seconds.",
)
continue
subscriber.acknowledge(request={"subscription": subscription.path, "ack_ids": [answer.ack_id]})
logger.debug("%r received a message related to question %r.", self, subscription.topic.path.split(".")[-1])
return json.loads(answer.message.data.decode())
class OrderedMessageHandler:
"""A handler for Google Pub/Sub messages that ensures messages are handled in the order they were sent.
:param callable message_puller: function that pulls a message from the subscription
:param google.pubsub_v1.services.subscriber.client.SubscriberClient subscriber: a Google Pub/Sub subscriber
:param octue.cloud.pub_sub.subscription.Subscription subscription: the subscription messages are pulled from
    :param str service_name: an arbitrary name used to refer to the subscribed-to service (used for labelling its remote log messages)
:param dict|None message_handlers: a mapping of message handler names to callables that handle each type of message
:return None:
"""
def __init__(self, message_puller, subscriber, subscription, service_name="REMOTE", message_handlers=None):
self.message_puller = message_puller
self.subscriber = subscriber
self.subscription = subscription
self.service_name = service_name
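        # Buffer for messages that arrive out of order, keyed by their message number.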
self._waiting_messages = {}
self._previous_message_number = -1
self._message_handlers = message_handlers or {
"log_record": self._handle_log_message,
"exception": self._handle_exception,
"result": self._handle_result,
}
def handle_messages(self, timeout=30):
"""Pull messages and handle them in the order they were sent until a result is returned by a message handler,
then return that result.
:param float|None timeout: how long to wait for an answer before raising a `TimeoutError`
:raise TimeoutError: if the timeout is exceeded before receiving the final message
:return dict:
"""
start_time = time.perf_counter()
pull_timeout = None
while True:
if timeout is not None:
run_time = time.perf_counter() - start_time
if run_time > timeout:
raise TimeoutError(
f"No final answer received from topic {self.subscription.topic.path!r} after {timeout} seconds.",
)
pull_timeout = timeout - run_time
message = self.message_puller(self.subscriber, self.subscription, timeout=pull_timeout)
self._waiting_messages[message["message_number"]] = message
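            # Handle any messages that are now consecutive; a KeyError means the next expected message hasn't arrived yet.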
try:
while self._waiting_messages:
message = self._waiting_messages.pop(self._previous_message_number + 1)
result = self._handle_message(message)
if result is not None:
return result
except KeyError:
pass
def _handle_message(self, message):
"""Pass a message to its handler and update the previous message number.
:param dict message:
:return dict|None:
"""
self._previous_message_number += 1
try:
return self._message_handlers[message["type"]](message)
except KeyError:
logger.warning("Received a message of unknown type %r.", message["type"])
def _handle_log_message(self, message):
"""Deserialise the message into a log record and pass it to the local log handlers, adding `[REMOTE] to the
start of the log message.
:param dict message:
:return None:
"""
record = logging.makeLogRecord(message["log_record"])
record.msg = f"[{self.service_name}] {record.msg}"
logger.handle(record)
def _handle_exception(self, message):
"""Raise the exception from the responding service that is serialised in `data`.
:param dict message:
:raise Exception:
:return None:
"""
exception_message = "\n\n".join(
(
message["exception_message"],
f"The following traceback was captured from the remote service {self.service_name!r}:",
"".join(message["traceback"]),
)
)
try:
raise EXCEPTIONS_MAPPING[message["exception_type"]](exception_message)
# Allow unknown exception types to still be raised.
except KeyError:
raise type(message["exception_type"], (Exception,), {})(exception_message)
def _handle_result(self, message):
"""Convert the result to the correct form, deserialising the output manifest if it is present in the message.
:param dict message:
:return dict:
"""
logger.info("Received an answer to question %r.", self.subscription.topic.path.split(".")[-1])
if message["output_manifest"] is None:
output_manifest = None
else:
output_manifest = Manifest.deserialise(message["output_manifest"], from_string=True)
return {"output_values": message["output_values"], "output_manifest": output_manifest}
| 43.162791 | 208 | 0.653556 | 19,222 | 0.941516 | 0 | 0 | 0 | 0 | 0 | 0 | 8,752 | 0.428683 |
90124bdf01042879c8d23bc0de43bbfc19264166 | 1,978 | py | Python | Tareas/DistanciaGrafos.py | A01746540/SEM9Algoritmos | 2aaf1a344413dfbece77022a5b34e0c5318aa5e5 | [
"MIT"
] | null | null | null | Tareas/DistanciaGrafos.py | A01746540/SEM9Algoritmos | 2aaf1a344413dfbece77022a5b34e0c5318aa5e5 | [
"MIT"
] | null | null | null | Tareas/DistanciaGrafos.py | A01746540/SEM9Algoritmos | 2aaf1a344413dfbece77022a5b34e0c5318aa5e5 | [
"MIT"
] | null | null | null | from collections import defaultdict
class Graph:
metro = ['El Rosario', 'Instituto del Petroleo', 'Tacuba', 'Hidalgo', 'Tacubaya', 'Deportivo 18 de Marzo',
'Centro Medico', 'Mixcoac', 'Balderas', 'Bellas Artes', 'Guerrero', 'Martin Carrera', 'Zapata',
'Chabacano',
'Salto del Agua', 'Garibaldi', 'La Raza', 'Pino Suarez', 'Consulado', 'Candelaria', 'Ermita',
'Santa Anita', 'Oceania', 'Morelos',
'San Lazaro', 'Jamaica', 'Atlalilco', 'Pantitlan']
def __init__(self):
self.graph = defaultdict(list)
def addEdge(self, u, v):
        # add an edge from u to every vertex in v
for val in v:
self.graph[u].append(val)
def BFS(self, s):
        # run a breadth-first search from source s
        d = [0] * 100
d[s] = 0
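        # standard BFS: expand the frontier one level at a time, recording hop counts in d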
queue = []
visited = [False] * (max(self.graph) + 1)
queue.append(s)
visited[s] = True
while queue:
s = queue.pop(0)
print(s, end=" ")
for v in self.graph[s]:
if visited[v] == False:
queue.append(v)
visited[v] = True
d[v] = d[s] + 1
print("\nNodo inicial: El Rosario")
for i in range(28):
print(f"Desde el Rosario hasta {self.metro[i]} es {d[i]}")
g = Graph()
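# Build the metro graph; vertex indices correspond to the station names in Graph.metro.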
g.addEdge(0, [1, 2])
g.addEdge(1, [14, 15])
g.addEdge(2, [3, 4])
g.addEdge(3, [11, 12])
g.addEdge(4, [5, 6, 9])
g.addEdge(5, [8, 9])
g.addEdge(6, [7])
g.addEdge(7, [25])
g.addEdge(8, [10, 21, 25, 26])
g.addEdge(9, [3, 10])
g.addEdge(11, [10, 21])
g.addEdge(12, [13, 14])
g.addEdge(13, [11])
g.addEdge(14, [15, 17])
g.addEdge(15, [16])
g.addEdge(17, [16, 18, 19])
g.addEdge(18, [23])
g.addEdge(19, [13, 20])
g.addEdge(20, [18, 23])
g.addEdge(21, [10, 22])
g.addEdge(22, [19, 20])
g.addEdge(23, [24])
g.addEdge(24, [8, 22])
g.addEdge(25, [27])
g.addEdge(26, [24, 27])
print("BFT:")
g.BFS(0) | 25.688312 | 110 | 0.517695 | 1,336 | 0.67543 | 0 | 0 | 0 | 0 | 0 | 0 | 464 | 0.23458 |
9012c72f255c969953043607b7f84aeb3ccb4764 | 9,807 | py | Python | octopus_deploy_swagger_client/models/artifact_resource.py | cvent/octopus-deploy-api-client | 0e03e842e1beb29b132776aee077df570b88366a | [
"Apache-2.0"
] | null | null | null | octopus_deploy_swagger_client/models/artifact_resource.py | cvent/octopus-deploy-api-client | 0e03e842e1beb29b132776aee077df570b88366a | [
"Apache-2.0"
] | null | null | null | octopus_deploy_swagger_client/models/artifact_resource.py | cvent/octopus-deploy-api-client | 0e03e842e1beb29b132776aee077df570b88366a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Octopus Server API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2019.6.7+Branch.tags-2019.6.7.Sha.aa18dc6809953218c66f57eff7d26481d9b23d6a
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ArtifactResource(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'space_id': 'str',
'filename': 'str',
'source': 'str',
'server_task_id': 'str',
'created': 'datetime',
'log_correlation_id': 'str',
'last_modified_on': 'datetime',
'last_modified_by': 'str',
'links': 'dict(str, str)'
}
attribute_map = {
'id': 'Id',
'space_id': 'SpaceId',
'filename': 'Filename',
'source': 'Source',
'server_task_id': 'ServerTaskId',
'created': 'Created',
'log_correlation_id': 'LogCorrelationId',
'last_modified_on': 'LastModifiedOn',
'last_modified_by': 'LastModifiedBy',
'links': 'Links'
}
def __init__(self, id=None, space_id=None, filename=None, source=None, server_task_id=None, created=None, log_correlation_id=None, last_modified_on=None, last_modified_by=None, links=None): # noqa: E501
"""ArtifactResource - a model defined in Swagger""" # noqa: E501
self._id = None
self._space_id = None
self._filename = None
self._source = None
self._server_task_id = None
self._created = None
self._log_correlation_id = None
self._last_modified_on = None
self._last_modified_by = None
self._links = None
self.discriminator = None
if id is not None:
self.id = id
if space_id is not None:
self.space_id = space_id
self.filename = filename
if source is not None:
self.source = source
if server_task_id is not None:
self.server_task_id = server_task_id
if created is not None:
self.created = created
if log_correlation_id is not None:
self.log_correlation_id = log_correlation_id
if last_modified_on is not None:
self.last_modified_on = last_modified_on
if last_modified_by is not None:
self.last_modified_by = last_modified_by
if links is not None:
self.links = links
@property
def id(self):
"""Gets the id of this ArtifactResource. # noqa: E501
:return: The id of this ArtifactResource. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ArtifactResource.
:param id: The id of this ArtifactResource. # noqa: E501
:type: str
"""
self._id = id
@property
def space_id(self):
"""Gets the space_id of this ArtifactResource. # noqa: E501
:return: The space_id of this ArtifactResource. # noqa: E501
:rtype: str
"""
return self._space_id
@space_id.setter
def space_id(self, space_id):
"""Sets the space_id of this ArtifactResource.
:param space_id: The space_id of this ArtifactResource. # noqa: E501
:type: str
"""
self._space_id = space_id
@property
def filename(self):
"""Gets the filename of this ArtifactResource. # noqa: E501
:return: The filename of this ArtifactResource. # noqa: E501
:rtype: str
"""
return self._filename
@filename.setter
def filename(self, filename):
"""Sets the filename of this ArtifactResource.
:param filename: The filename of this ArtifactResource. # noqa: E501
:type: str
"""
if filename is None:
raise ValueError("Invalid value for `filename`, must not be `None`") # noqa: E501
self._filename = filename
@property
def source(self):
"""Gets the source of this ArtifactResource. # noqa: E501
:return: The source of this ArtifactResource. # noqa: E501
:rtype: str
"""
return self._source
@source.setter
def source(self, source):
"""Sets the source of this ArtifactResource.
:param source: The source of this ArtifactResource. # noqa: E501
:type: str
"""
self._source = source
@property
def server_task_id(self):
"""Gets the server_task_id of this ArtifactResource. # noqa: E501
:return: The server_task_id of this ArtifactResource. # noqa: E501
:rtype: str
"""
return self._server_task_id
@server_task_id.setter
def server_task_id(self, server_task_id):
"""Sets the server_task_id of this ArtifactResource.
:param server_task_id: The server_task_id of this ArtifactResource. # noqa: E501
:type: str
"""
self._server_task_id = server_task_id
@property
def created(self):
"""Gets the created of this ArtifactResource. # noqa: E501
:return: The created of this ArtifactResource. # noqa: E501
:rtype: datetime
"""
return self._created
@created.setter
def created(self, created):
"""Sets the created of this ArtifactResource.
:param created: The created of this ArtifactResource. # noqa: E501
:type: datetime
"""
self._created = created
@property
def log_correlation_id(self):
"""Gets the log_correlation_id of this ArtifactResource. # noqa: E501
:return: The log_correlation_id of this ArtifactResource. # noqa: E501
:rtype: str
"""
return self._log_correlation_id
@log_correlation_id.setter
def log_correlation_id(self, log_correlation_id):
"""Sets the log_correlation_id of this ArtifactResource.
:param log_correlation_id: The log_correlation_id of this ArtifactResource. # noqa: E501
:type: str
"""
self._log_correlation_id = log_correlation_id
@property
def last_modified_on(self):
"""Gets the last_modified_on of this ArtifactResource. # noqa: E501
:return: The last_modified_on of this ArtifactResource. # noqa: E501
:rtype: datetime
"""
return self._last_modified_on
@last_modified_on.setter
def last_modified_on(self, last_modified_on):
"""Sets the last_modified_on of this ArtifactResource.
:param last_modified_on: The last_modified_on of this ArtifactResource. # noqa: E501
:type: datetime
"""
self._last_modified_on = last_modified_on
@property
def last_modified_by(self):
"""Gets the last_modified_by of this ArtifactResource. # noqa: E501
:return: The last_modified_by of this ArtifactResource. # noqa: E501
:rtype: str
"""
return self._last_modified_by
@last_modified_by.setter
def last_modified_by(self, last_modified_by):
"""Sets the last_modified_by of this ArtifactResource.
:param last_modified_by: The last_modified_by of this ArtifactResource. # noqa: E501
:type: str
"""
self._last_modified_by = last_modified_by
@property
def links(self):
"""Gets the links of this ArtifactResource. # noqa: E501
:return: The links of this ArtifactResource. # noqa: E501
:rtype: dict(str, str)
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this ArtifactResource.
:param links: The links of this ArtifactResource. # noqa: E501
:type: dict(str, str)
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ArtifactResource, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ArtifactResource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.940171 | 207 | 0.597634 | 9,407 | 0.959213 | 0 | 0 | 5,276 | 0.537983 | 0 | 0 | 4,896 | 0.499235 |
9012e5a8ed3f5ec28adee161865cc14545390e2d | 1,691 | py | Python | lab04/submited/utils.py | Battleman/InternetAnalyticsW | 005e5de6c0e591be6dc303ec46cc82249e70f666 | [
"MIT"
] | null | null | null | lab04/submited/utils.py | Battleman/InternetAnalyticsW | 005e5de6c0e591be6dc303ec46cc82249e70f666 | [
"MIT"
] | null | null | null | lab04/submited/utils.py | Battleman/InternetAnalyticsW | 005e5de6c0e591be6dc303ec46cc82249e70f666 | [
"MIT"
] | null | null | null | # ######################
# Some useful utilities.
# ######################
import json, os, pickle
def listPrettyPrint(l, n):
"""Prints a list l on n columns to improve readability"""
if(n == 5):
for a,b,c,d,e in zip(l[::5],l[1::5],l[2::5],l[3::5],l[4::5]):
print('{:<22}{:<22}{:<22}{:<22}{:<}'.format(a,b,c,d,e))
if(n == 4):
for a,b,c,d in zip(l[::4],l[1::4],l[2::4],l[3::4]):
print('{:<30}{:<30}{:<30}{:<}'.format(a,b,c,d))
if(n == 3):
for a,b,c in zip(l[::3],l[1::3],l[2::3]):
print('{:<30}{:<30}{:<}'.format(a,b,c))
if(n == 2):
for a,b in zip(l[::2],l[1::2]):
print('{:<40}{:<}'.format(a,b))
if(len(l)%n != 0): #print remaining
for i in range(len(l)%n):
print(l[-(len(l)%n):][i], end='\t')
def save_json(objects, path):
"""
Save a list of objects as JSON (.txt).
"""
# Remove the file if it exists
if os.path.exists(path):
os.remove(path)
for obj in objects:
# 'a' stands for 'append' to the end of the file
# '+' to create the file if it doesn't exist
with open(path, 'a+') as f:
f.write(json.dumps(obj))
f.write('\n')
def load_json(path):
"""
Read a JSON from a text file. Expect a list of objects.
"""
with open(path) as f:
lines = f.readlines()
return [json.loads(s) for s in lines]
def save_pkl(obj, path):
"""
Save an object to path.
"""
with open(path, 'wb') as f:
pickle.dump(obj, f)
def load_pkl(path):
"""
Load a pickle from path.
"""
with open(path, 'rb') as f:
return pickle.load(f)
| 25.621212 | 69 | 0.474867 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 575 | 0.340035 |
9012f44f49fd46b9f38512bc6891494632f15f28 | 489 | py | Python | primes/factorisation.py | miloszlakomy/algutils | f83c330a0ca31cdac536de811f447820c70ecb38 | [
"MIT"
] | null | null | null | primes/factorisation.py | miloszlakomy/algutils | f83c330a0ca31cdac536de811f447820c70ecb38 | [
"MIT"
] | null | null | null | primes/factorisation.py | miloszlakomy/algutils | f83c330a0ca31cdac536de811f447820c70ecb38 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from algutils.primes import cached_primes
def factorise(n):
if n <= 0:
raise ValueError("n must be a positive integer")
ps = cached_primes.get_primes_list(min_lim=int(n**.5) + 1)
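    # Trial-divide by primes up to sqrt(n); any factor left over greater than 1 is itself prime.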
ret = {}
for p in ps:
if n == 1:
break
if p**2 > n: # n is prime
break
if n % p == 0:
n //= p
v = 1
while n % p == 0:
n //= p
v += 1
ret[p] = v
if n > 1: # n is prime
ret[n] = 1
return ret
| 13.971429 | 60 | 0.492843 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.155419 |
901370bb81cfb113acd130dc5b682b50dbdb76b2 | 1,133 | py | Python | NLP/DuSQL-Baseline/text2sql/models/grammar/__init__.py | pkulzb/Research | 88da4910a356f1e95e1e1e05316500055533683d | [
"Apache-2.0"
] | 53 | 2020-03-31T16:20:53.000Z | 2021-11-16T11:48:38.000Z | NLP/DuSQL-Baseline/text2sql/models/grammar/__init__.py | pkulzb/Research | 88da4910a356f1e95e1e1e05316500055533683d | [
"Apache-2.0"
] | 1 | 2020-04-06T08:10:12.000Z | 2020-04-06T08:10:12.000Z | NLP/DuSQL-Baseline/text2sql/models/grammar/__init__.py | pkulzb/Research | 88da4910a356f1e95e1e1e05316500055533683d | [
"Apache-2.0"
] | 53 | 2020-04-01T01:59:08.000Z | 2022-03-14T07:59:58.000Z | # -*- coding:utf8 -*-
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""grammar model"""
from collections import namedtuple
DecoderInputsWrapper = namedtuple("DecoderInputsWrapper", "input action gmr_mask")
DecoderDynamicVocab = namedtuple("DecoderDynamicVocab",
"table table_len column column_len value value_len column2table_mask")
from text2sql.models.grammar.nets import grammar_output
from text2sql.models.grammar.infer_decoder import GrammarInferDecoder
from text2sql.models.grammar.dynamic_decode import decode_with_grammar
| 41.962963 | 103 | 0.770521 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 774 | 0.683142 |
9015d694e84b3f0cc392aae6d053e1e708a338df | 627 | py | Python | verifai/samplers/__init__.py | jst-qaml/VerifAI | d91bc1289d720c055a36fa0e1ad9f68b986ca1a4 | [
"BSD-3-Clause"
] | 1 | 2020-07-27T13:32:01.000Z | 2020-07-27T13:32:01.000Z | verifai/samplers/__init__.py | shromonag/VerifAI | ace214d1c3282ed5ea63ee3f52457e35f54ebb62 | [
"BSD-3-Clause"
] | null | null | null | verifai/samplers/__init__.py | shromonag/VerifAI | ace214d1c3282ed5ea63ee3f52457e35f54ebb62 | [
"BSD-3-Clause"
] | null | null | null | from .domain_sampler import SamplingError, SplitSampler
from .feature_sampler import FeatureSampler, LateFeatureSampler
from .halton import HaltonSampler
from .cross_entropy import (CrossEntropySampler, ContinuousCrossEntropySampler,
DiscreteCrossEntropySampler)
from .random_sampler import RandomSampler
from .bayesian_optimization import BayesOptSampler
from .simulated_annealing import SimulatedAnnealingSampler
# only import ScenicSampler if Scenic is installed
try:
import scenic
except ModuleNotFoundError:
pass # do not attempt to import ScenicSampler
else:
from .scenic_sampler import ScenicSampler
| 36.882353 | 79 | 0.845295 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 90 | 0.143541 |
90171f9ff5db50b3b195ffb3fb8c3f04d8941fed | 159 | py | Python | sb3_training/gym-iotmarket/setup.py | prasoonpatidar/multiagentRL-resource-sharing | e63ba7fc3c7ab019e9fd109cd45b739e3322152f | [
"MIT"
] | null | null | null | sb3_training/gym-iotmarket/setup.py | prasoonpatidar/multiagentRL-resource-sharing | e63ba7fc3c7ab019e9fd109cd45b739e3322152f | [
"MIT"
] | null | null | null | sb3_training/gym-iotmarket/setup.py | prasoonpatidar/multiagentRL-resource-sharing | e63ba7fc3c7ab019e9fd109cd45b739e3322152f | [
"MIT"
] | null | null | null | from setuptools import setup
setup(name='gym_iotmarket',
version='0.0.1',
install_requires=['gym','scipy','numpy'] # And any other dependencies
) | 26.5 | 76 | 0.685535 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 69 | 0.433962 |
901749f4d47b3e2fe3385b589ae5e10eae1bbab0 | 13,064 | py | Python | mcd2c/cfile/__init__.py | Asdew95/mcd2c | bcec95276a45b4bfd90ece5569246bd5a247368b | [
"Zlib"
] | 3 | 2020-08-18T19:11:39.000Z | 2021-06-13T12:16:41.000Z | mcd2c/cfile/__init__.py | Asdew95/mcd2c | bcec95276a45b4bfd90ece5569246bd5a247368b | [
"Zlib"
] | null | null | null | mcd2c/cfile/__init__.py | Asdew95/mcd2c | bcec95276a45b4bfd90ece5569246bd5a247368b | [
"Zlib"
] | 2 | 2021-06-13T12:16:47.000Z | 2021-12-07T18:52:43.000Z | # Inspired by https://github.com/cogu/cfile
c_indent_char = ' '
def set_indent_char(char):
global c_indent_char
c_indent_char = char
class blank:
def __init__(self, num=1):
self.indent = 0 #Irrelevant, kept because it simplifies sequences
self.num = num
def __str__(self):
# Sequences automatically insert one line break for each element, so
# we substract one line break to account for that
return (self.num - 1) * '\n'
# line and its subclasses can be used as container classes for sequences, which
# can span multiple lines. When used on its own though it's a single line
class line:
def __init__(self, elem, indent=0):
self.elem = elem
self._indent = 0
self.indent = indent
@property
def indent(self):
return self._indent
@indent.setter
def indent(self, val):
if hasattr(self.elem, 'indent'):
self.elem.indent = val
self._indent = val
def __str__(self):
return f'{c_indent_char * self.indent}{self.elem}'
class statement(line):
def __str__(self):
return super().__str__() + ';'
class returnval(line):
def __str__(self):
return f'{c_indent_char * self.indent}return {self.elem};'
class typedef(line):
def __init__(self, elem, name, indent=0):
super().__init__(elem, indent)
self.name = name
def __str__(self):
return f'{c_indent_char * self.indent}typedef {self.elem} {self.name};'
class linecomment(line):
def __str__(self):
return f'{c_indent_char * self.indent}// {self.elem}'
class include(line):
def __init__(self, path, sys=False, indent=0):
super().__init__(
f'#include <{path}>' if sys else f'#include "{path}"', indent
)
class preprocessor(line):
directive = ''
def __init__(self, val, indent=0):
super().__init__(f'#{self.directive} {val}', indent)
class define(preprocessor):
directive = 'define'
class indef(preprocessor):
directive = 'ifndef'
class endif(line):
def __init__(self, indent=0):
super().__init__('#endif', indent)
from collections.abc import MutableSequence
# Group of elements at the same indentation level
class sequence(MutableSequence):
def __init__(self, elems=None, indent=0):
self.elems = [] if elems is None else elems
self._indent = indent
self.indent = indent
def __getitem__(self, key):
return self.elems[key]
def __setitem__(self, key, item):
if(isinstance(item, str)):
item = line(item)
item.indent = self.indent
self.elems[key] = item
def __delitem__(self, key):
del self.elems[key]
def __len__(self):
return len(self.elems)
def insert(self, key, item):
if(isinstance(item, str)):
item = line(item)
item.indent = self.indent
self.elems.insert(key, item)
@property
def indent(self):
return self._indent
@indent.setter
def indent(self, val):
for elem in self.elems:
elem.indent = val
self._indent = val
def __str__(self):
return '\n'.join([str(elem) for elem in self.elems])
#Like sequence, but joins on space instead of newline
class linesequence(sequence):
def __setitem__(self, key, item):
if(isinstance(item, str)):
item = line(item)
item.indent = self.indent if(isinstance(item, sequence)) else 0
self.elems[key] = item
def insert(self, key, item):
if(isinstance(item, str)):
item = line(item)
item.indent = self.indent if(isinstance(item, sequence)) else 0
self.elems.insert(key, item)
@property
def indent(self):
return self._indent
@indent.setter
def indent(self, val):
for elem in self.elems:
elem.indent = val if(isinstance(elem, sequence)) else 0
self._indent = val
def __str__(self):
i = c_indent_char * self.indent
return i + ' '.join([str(elem) for elem in self.elems])
# Common for block comments and block scope items
class _block(sequence):
def __init__(self, elems=None, inner_indent=1, indent=1):
self._inner_indent = inner_indent
super().__init__(elems, indent)
@property
def inner_indent(self):
return self._inner_indent
@inner_indent.setter
def inner_indent(self, val):
for elem in self.elems:
elem.indent = self._indent + val
self._inner_indent = val
@property
def indent(self):
return self._indent
@indent.setter
def indent(self, val):
for elem in self.elems:
elem.indent = val + self._inner_indent
self._indent = val
# Curly bracket {} grouped elements, optionally at different indentation level
# Does not indent first line, that's expected to be done by a wrapping class
# such as line, statement, or typedef
class block(_block):
def __str__(self):
return f'{{\n{super().__str__()}\n{self.indent * c_indent_char}}}'
# Similar to block but with block comment /* */ delimiters instead of {}
# Doesn't need to be wrapped in anything to get indentation correct
class blockcomment(_block):
def __str__(self):
i = self.indent * c_indent_char
return f'{i}/*\n{super().__str__()}\n{i}*/'
class blocktype(block):
keyword = ''
def __init__(self, name=None, elems=None, inner_indent=1, indent=0):
super().__init__(indent=indent, inner_indent=inner_indent, elems=elems)
self.name = name
def __str__(self):
if self.name:
return f'{self.keyword} {self.name} {super().__str__()}'
return f'{self.keyword} {super().__str__()}'
class struct(blocktype):
keyword = 'struct'
class union(blocktype):
keyword = 'union'
class enum(blocktype):
keyword = 'enum'
def __str__(self):
inner = ',\n'.join([str(elem) for elem in self.elems])
i = self.indent * c_indent_char
if self.name:
return f'{self.keyword} {self.name} {{\n{inner}\n{i}}}'
return f'{self.keyword} {{\n{inner}\n{i}}}'
class commablock(blocktype):
def __str__(self):
for elem in self.elems:
elem.indent = self.indent + self._inner_indent
inner = ',\n'.join([str(elem) for elem in self.elems])
return f'{{\n{inner}\n{self.indent * c_indent_char}}}'
class conditional(block):
keyword = ''
def __init__(self, condition, elems=None, inner_indent=1, indent=0):
super().__init__(indent=indent, inner_indent=inner_indent, elems=elems)
self.condition = condition
def __str__(self):
i = self.indent * c_indent_char
return f'{i}{self.keyword}({self.condition}) {super().__str__()}'
class _unspacedconditional(block):
keyword = ''
def __init__(self, condition, elems=None, inner_indent=1, indent=0):
super().__init__(indent=indent, inner_indent=inner_indent, elems=elems)
self.condition = condition
def __str__(self):
return f'{self.keyword}({self.condition}) {super().__str__()}'
class ifcond(conditional):
keyword = 'if'
class nospace_ifcond(_unspacedconditional):
keyword = 'if'
class elifcond(_unspacedconditional):
keyword = 'else if'
class elsecond(block):
keyword = 'else'
def __str__(self):
return f'{self.keyword} {super().__str__()}'
class switch(conditional):
keyword = 'switch'
def __str__(self):
s = ''
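        # Cases marked as fall-through with no body run straight into the next label, so skip the newline for them.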
for elem in self.elems[:-1]:
s += str(elem) if elem.fall and not len(elem) else f'{elem}\n'
s += str(self.elems[-1])
i = self.indent * c_indent_char
return f'{i}{self.keyword}({self.condition}) {{\n{s}\n{i}}}'
class case(_block):
def __init__(self, val, elems=None, fall=False, inner_indent=1, indent=0):
super().__init__(elems, inner_indent, indent)
self.val = val
self.fall = fall
def __str__(self):
o = self.indent * c_indent_char
i = (self.indent + self.inner_indent) * c_indent_char
if self.fall:
return f'{o}case {self.val}:\n{super().__str__()}'
return f'{o}case {self.val}:\n{super().__str__()}\n{i}break;'
class defaultcase(_block):
def __init__(self, elems=None, fall=True, inner_indent=1, indent=0):
super().__init__(elems, inner_indent, indent)
self.fall = fall
def __str__(self):
o = self.indent * c_indent_char
i = (self.indent + self.inner_indent) * c_indent_char
if self.fall:
return f'{o}default:\n{super().__str__()}'
return f'{o}default:\n{super().__str__()}\n{i}break;'
class inlineif(statement):
keyword = 'if'
def __init__(self, condition, elem, indent=0):
super().__init__(elem, indent)
self.condition = condition
def __str__(self):
i = c_indent_char * self.indent
return i + f'{self.keyword}({self.condition}) {self.elem}'
@property
def indent(self):
return self._indent
@indent.setter
def indent(self, val):
self._indent = val
class forloop(block):
keyword = 'for'
def __init__(self, vars=None, cond=None, post=None, elems=None,
inner_indent=1, indent=0):
super().__init__(elems, inner_indent, indent)
self.vars = '' if vars is None else vars
self.cond = '' if cond is None else cond
self.post = '' if post is None else post
def __str__(self):
l1 = f'{self.vars}; {self.cond}' if self.cond else self.vars + ';'
l2 = f'{l1}; {self.post}' if self.post else l1 + ';'
i = self.indent * c_indent_char
return f'{i}{self.keyword}({l2}) {super().__str__()}'
class variable:
def __init__(self, name, typename=None, array=0):
self.name = name
self.typename = typename
self.array = array
@property
def decl(self):
return variabledecl(self.name, self.typename, self.array)
def __str__(self):
return str(self.name)
class variabledecl(variable):
def __str__(self):
if self.array:
return f'{self.typename} {self.name}[{self.array}]'
return f'{self.typename} {self.name}'
class monop:
op = ''
def __init__(self, val, preop = True):
self.val = val
self.preop = preop
def __str__(self):
if self.preop:
return f'{self.op}{self.val}'
        return f'{self.val}{self.op}'
class defop(monop):
op = '*'
class refop(monop):
op = '&'
class incop(monop):
op = '++'
class decop(monop):
op = '--'
class operator:
op = ''
def __init__(self, lvalue, rvalue):
self.lvalue = lvalue
self.rvalue = rvalue
def __str__(self):
return f'{self.lvalue} {self.op} {self.rvalue}'
class assign(operator):
op = '='
class addop(operator):
op = '+'
class subop(operator):
op = '-'
class mulop(operator):
op = '*'
class addeq(operator):
op = '+='
class subeq(operator):
op = '-='
class noteq(operator):
op = '!='
class eqeq(operator):
op = '=='
class lth(operator):
op = '<'
class ltheq(operator):
op = '<='
class gth(operator):
op = '>'
class gtheq(operator):
op = '>='
class wrap:
def __init__(self, val, invert=False):
self.val = val
self.invert = invert
def __str__(self):
if self.invert:
return f'!({self.val})'
return f'({self.val})'
class fcall(MutableSequence):
def __init__(self, name, typename, args=None):
self.name = name
self.typename = typename
self.args = [] if args is None else list(args)
def __getitem__(self, key):
return self.args[key]
def __setitem__(self, key, item):
self.args[key] = item
def __delitem__(self, key):
del self.args[key]
def __len__(self):
return len(self.args)
def insert(self, key, item):
self.args.insert(key, item)
@property
def decl(self):
        return fdecl(self.name, self.typename, [a.decl for a in self.args])
def __str__(self):
a = ', '.join([str(arg) for arg in self.args])
return f'{self.name}({a})'
class fdecl(fcall):
def __str__(self):
a = ', '.join([str(arg) for arg in self.args])
return f'{self.typename} {self.name}({a})'
class _file(sequence):
def __init__(self, path, elems=None):
self.path = path
super().__init__(elems)
class cfile(_file):
pass
import os
class hfile(_file):
def __init__(self, path, elems=None, guard=None):
super().__init__(path, elems)
if guard is None:
bn = os.path.basename(path)
self.guard = f'{os.path.splitext(bn)[0].upper()}_H'
else:
self.guard = guard
def __str__(self):
t = sequence([indef(self.guard), define(self.guard), blank(2)])
t.extend(self)
t.append(endif())
t.append(blank())
return str(t)
| 26.770492 | 79 | 0.606782 | 12,102 | 0.926363 | 0 | 0 | 1,380 | 0.105634 | 0 | 0 | 2,448 | 0.187385 |
90179942b479c02a62c0f83dc133fc47dd16d363 | 906 | py | Python | typed_python/compiler/tests/subclass_of_test.py | APrioriInvestments/nativepython | 94e6b09d788e49cbe34b9b0d3c948218d7a8dcc5 | [
"Apache-2.0"
] | 52 | 2019-04-12T18:07:56.000Z | 2019-10-07T10:26:19.000Z | typed_python/compiler/tests/subclass_of_test.py | APrioriInvestments/nativepython | 94e6b09d788e49cbe34b9b0d3c948218d7a8dcc5 | [
"Apache-2.0"
] | 135 | 2019-04-15T12:52:56.000Z | 2019-10-08T18:39:58.000Z | typed_python/compiler/tests/subclass_of_test.py | APrioriInvestments/nativepython | 94e6b09d788e49cbe34b9b0d3c948218d7a8dcc5 | [
"Apache-2.0"
] | 1 | 2019-04-12T13:03:38.000Z | 2019-04-12T13:03:38.000Z | from typed_python import Entrypoint, SubclassOf, Class, Final, Function, ListOf
class A(Class):
pass
class B(A):
pass
class C(B, Final):
pass
def test_can_cast_subclass_of_correctly():
@Function
def f(c: SubclassOf(C)):
return "C"
@f.overload
def f(c: SubclassOf(B)):
return "B"
@f.overload
def f(c: SubclassOf(A)):
return "A"
def checkIt():
assert f(C) == "C", f(C)
assert f(B) == "B", f(B)
assert f(A) == "A", f(A)
checkIt()
Entrypoint(checkIt)()
@Entrypoint
def checkItList(x):
res = ListOf(str)()
for cls in x:
res.append(f(cls))
return res
assert checkItList(ListOf(SubclassOf(A))([A, B, C])) == ["A", "B", "C"]
assert checkItList(ListOf(SubclassOf(B))([B, C])) == ["B", "C"]
assert checkItList(ListOf(SubclassOf(C))([C])) == ["C"]
| 18.12 | 79 | 0.540839 | 71 | 0.078366 | 0 | 0 | 312 | 0.344371 | 0 | 0 | 36 | 0.039735 |
9017a7257730a81fd99b9ead002708bc4ceba13e | 1,442 | py | Python | experiments/toy.py | jcrickmer/pyvision | 5aea7cd9a85d7d26196c375275e7bf00c27a8ac8 | [
"MIT"
] | 53 | 2015-03-10T06:20:50.000Z | 2021-06-07T07:34:02.000Z | experiments/toy.py | jcrickmer/pyvision | 5aea7cd9a85d7d26196c375275e7bf00c27a8ac8 | [
"MIT"
] | 1 | 2016-11-20T14:28:38.000Z | 2016-11-20T14:28:38.000Z | experiments/toy.py | jcrickmer/pyvision | 5aea7cd9a85d7d26196c375275e7bf00c27a8ac8 | [
"MIT"
] | 56 | 2015-02-10T20:49:42.000Z | 2021-04-03T05:41:09.000Z | from vision import *
from vision.track import alearn, interpolation
from vision import visualize
from vision.toymaker import *
import os
import multiprocessing
g = Geppetto()
b = Rectangle()
b = b.linear((300,300), 100)
b = b.linear((0,300), 200)
b = b.linear((300,0), 300)
g.add(b)
path = b.groundtruth()
pathdict = dict((x.frame, x) for x in path)
start = 0
stop = 299
given = [pathdict[start], pathdict[stop]]
id = "toy"
pool = multiprocessing.Pool(24)
root = os.path.dirname(os.path.abspath(__file__))
for _ in range(1):
print "Given frames are:", ", ".join(str(x.frame) for x in given)
print "Simulating with {0} clicks".format(len(given))
askingfor = alearn.pick(g, given, pool = pool, skip = 1,
bgskip = 10, bgsize = 5e3, plot = "tmp/",
errortube = 100000)
print "Requested frame {0}".format(askingfor)
print "Visualizing path with {0} clicks".format(len(given))
vit = visualize.highlight_path(g, interpolation.LinearFill(given))
base = "{0}/visualize/{1}/clicks{2}/wants{3}".format(root, id,
len(given),
askingfor)
try:
os.makedirs(base)
except:
pass
visualize.save(vit, lambda x: "{0}/{1}.jpg".format(base, x))
given.append(pathdict[askingfor])
given.sort(key = lambda x: x.frame)
| 30.041667 | 71 | 0.585298 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 168 | 0.116505 |
90192683a9596914db24fa7e2c76ff1a12788127 | 355 | py | Python | angr/engines/soot/statements/goto.py | Kyle-Kyle/angr | 345b2131a7a67e3a6ffc7d9fd475146a3e12f837 | [
"BSD-2-Clause"
] | 6,132 | 2015-08-06T23:24:47.000Z | 2022-03-31T21:49:34.000Z | angr/engines/soot/statements/goto.py | Kyle-Kyle/angr | 345b2131a7a67e3a6ffc7d9fd475146a3e12f837 | [
"BSD-2-Clause"
] | 2,272 | 2015-08-10T08:40:07.000Z | 2022-03-31T23:46:44.000Z | angr/engines/soot/statements/goto.py | Kyle-Kyle/angr | 345b2131a7a67e3a6ffc7d9fd475146a3e12f837 | [
"BSD-2-Clause"
] | 1,155 | 2015-08-06T23:37:39.000Z | 2022-03-31T05:54:11.000Z |
import logging
from .base import SimSootStmt
l = logging.getLogger('angr.engines.soot.statements.goto')
class SimSootStmt_Goto(SimSootStmt):
def _execute(self):
jmp_target = self._get_bb_addr_from_instr(instr=self.stmt.target)
self._add_jmp_target(target=jmp_target,
condition=self.state.solver.true)
| 25.357143 | 73 | 0.704225 | 245 | 0.690141 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.098592 |
901943c57d651786afa2ce40b989408f3ebb4e7f | 979 | py | Python | game/entity/player.py | fisher60/pyweek-2021 | 294b45d768a7e0d85ac67dc4b12384e68fc4f399 | [
"MIT"
] | 8 | 2021-03-27T21:20:28.000Z | 2021-03-31T08:09:26.000Z | game/entity/player.py | fisher60/pyweek-2021 | 294b45d768a7e0d85ac67dc4b12384e68fc4f399 | [
"MIT"
] | 49 | 2021-03-27T21:18:08.000Z | 2021-04-03T02:53:53.000Z | game/entity/player.py | fisher60/pyweek-2021 | 294b45d768a7e0d85ac67dc4b12384e68fc4f399 | [
"MIT"
] | 1 | 2021-04-02T21:58:39.000Z | 2021-04-02T21:58:39.000Z | import arcade
from ..constants import TILE_SIZE, PLAYER_SCALING
from ..utils import Vector
class PlayerInventory:
keys: int = 0
class Player(arcade.Sprite):
def __init__(self, *args, **kwargs):
super().__init__(
"game/assets/sprites/square.png", PLAYER_SCALING, *args, **kwargs
)
self.inventory: PlayerInventory = PlayerInventory()
@property
def position(self) -> Vector:
return Vector(int(self.center_x), int(self.center_y))
def update(self):
...
def handle_user_input(self, key: int, modifiers: int):
"""
Handle events passed from the MainWindow.
:return:
"""
if key == arcade.key.UP:
self.center_y += TILE_SIZE
elif key == arcade.key.DOWN:
self.center_y -= TILE_SIZE
elif key == arcade.key.LEFT:
self.center_x -= TILE_SIZE
elif key == arcade.key.RIGHT:
self.center_x += TILE_SIZE
| 25.763158 | 77 | 0.597549 | 881 | 0.899898 | 0 | 0 | 105 | 0.107252 | 0 | 0 | 114 | 0.116445 |
901a2b9dfc7f70764388119a93640679584d1a0d | 112 | py | Python | scikits/datasmooth/__init__.py | jjstickel/scikit-datasmooth | 976ab86998d1648506684360ab9d65b8a3ccf078 | [
"BSD-3-Clause"
] | 7 | 2015-06-18T15:34:28.000Z | 2021-09-14T13:04:08.000Z | scikits/datasmooth/__init__.py | jjstickel/scikit-datasmooth | 976ab86998d1648506684360ab9d65b8a3ccf078 | [
"BSD-3-Clause"
] | 2 | 2018-03-10T03:46:39.000Z | 2018-11-27T18:55:13.000Z | scikits/datasmooth/__init__.py | jjstickel/scikit-datasmooth | 976ab86998d1648506684360ab9d65b8a3ccf078 | [
"BSD-3-Clause"
] | 6 | 2015-03-29T07:36:18.000Z | 2020-09-15T16:25:06.000Z | __version__ = '0.7.1'
try:
from regularsmooth import *
except ImportError:
from .regularsmooth import *
| 18.666667 | 32 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.0625 |
901cb3458a6cbbc2527b80d00c06a9a4f1e903b2 | 2,389 | py | Python | code/tfidf/tfidf.py | vadlamak/strata-teaching-the-elephant-to-read | 5f3963c90c520ac1b7b41d21939230ef5df6414f | [
"Apache-2.0"
] | null | null | null | code/tfidf/tfidf.py | vadlamak/strata-teaching-the-elephant-to-read | 5f3963c90c520ac1b7b41d21939230ef5df6414f | [
"Apache-2.0"
] | 1 | 2021-03-26T00:26:00.000Z | 2021-03-26T00:26:00.000Z | code/tfidf/tfidf.py | vadlamak/strata-teaching-the-elephant-to-read | 5f3963c90c520ac1b7b41d21939230ef5df6414f | [
"Apache-2.0"
] | null | null | null | import math
import string
from itertools import groupby
from operator import itemgetter
from nltk.corpus import stopwords
from nltk.tokenize import wordpunct_tokenize
N = 10788.0 # Number of documents, in float to make division work.
class TermMapper(object):
def __init__(self):
if 'stopwords' in self.params:
with open(self.params['stopwords'], 'r') as excludes:
self._stopwords = set(line.strip() for line in excludes)
else:
self._stopwords = None
self.curdoc = None
def __call__(self, key, value):
if value.startswith('='*34):
self.curdoc = int(value.strip("=").strip())
else:
for word in self.tokenize(value):
if not word in self.stopwords:
yield (word, self.curdoc), 1
def normalize(self, word):
word = word.lower()
if word not in string.punctuation:
return word
def tokenize(self, sentence):
for word in wordpunct_tokenize(sentence):
word = self.normalize(word)
if word: yield word
@property
def stopwords(self):
if not self._stopwords:
self._stopwords = stopwords.words('english')
return self._stopwords
class UnitMapper(object):
def __call__(self, key, value):
term, docid = key
yield term, (docid, value, 1)
class IDFMapper(object):
def __call__(self, key, value):
term, docid = key
tf, n = value
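        # idf = ln(total number of documents / number of documents containing the term)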
idf = math.log(N/n)
yield (term, docid), idf*tf
class SumReducer(object):
def __call__(self, key, values):
yield key, sum(values)
class BufferReducer(object):
def __call__(self, key, values):
term = key
values = list(values)
n = sum(g[2] for g in values)
for g in values:
yield (term, g[0]), (g[1], n)
class IdentityReducer(object):
def __call__(self, key, values):
for value in values:
yield key, value
def runner(job):
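    # Three chained passes: per-document term counts, then document frequencies, then tf-idf weights.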
job.additer(TermMapper, SumReducer, combiner=SumReducer)
job.additer(UnitMapper, BufferReducer)
job.additer(IDFMapper, IdentityReducer)
def starter(prog):
excludes = prog.delopt("stopwords")
if excludes: prog.addopt("param", "stopwords="+excludes)
if __name__ == "__main__":
import dumbo
dumbo.main(runner, starter)
| 25.688172 | 72 | 0.612809 | 1,776 | 0.743407 | 1,010 | 0.422771 | 154 | 0.064462 | 0 | 0 | 134 | 0.05609 |
901d3f9a5542e6bed0daf35f7574ccf1740c36b8 | 1,046 | py | Python | game-watch-api/games/admin.py | fouadsan/game_watch | ca38d283ef8f55499ea520eb52a78ebfac8a77a4 | [
"MIT"
] | null | null | null | game-watch-api/games/admin.py | fouadsan/game_watch | ca38d283ef8f55499ea520eb52a78ebfac8a77a4 | [
"MIT"
] | null | null | null | game-watch-api/games/admin.py | fouadsan/game_watch | ca38d283ef8f55499ea520eb52a78ebfac8a77a4 | [
"MIT"
] | null | null | null | from django.contrib import admin
from admin_interface.models import Theme as Th
from .models import Genre, Platform, Screenshot, Artwork, Mode, PlayerPerspective, Engine, Theme, Game
admin.site.unregister(Th)
admin.site.register(Genre)
admin.site.register(Platform)
admin.site.register(Mode)
admin.site.register(PlayerPerspective)
admin.site.register(Engine)
admin.site.register(Theme)
admin.site.register(Screenshot)
admin.site.register(Artwork)
class GameAdmin(admin.ModelAdmin):
fields = ('name', 'genre', 'poster',
'platforms', 'release_date', 'is_popular', 'description', 'rating', 'developer', 'publisher', 'game_modes', 'game_engines', 'player_perspective', 'themes', 'storyline', 'screenshots', 'artworks')
list_display = ('name', 'id', 'genre', 'poster_tag',
'release_date', 'is_released', 'get_users')
def get_users(self, obj):
return "\n".join([f'#{u.id}' for u in obj.users.all()])
get_users.short_description = 'users favorite'
admin.site.register(Game, GameAdmin)
| 32.6875 | 209 | 0.711281 | 554 | 0.529637 | 0 | 0 | 0 | 0 | 0 | 0 | 286 | 0.273423 |
901eab214940948112dfada28e162b16759ac77e | 589 | py | Python | plot-wbgt.py | tanupoo/tools-pubsec | 3dfea4b677226395eff89f90aebec3105ba4f4d5 | [
"MIT"
] | 1 | 2021-05-27T21:10:04.000Z | 2021-05-27T21:10:04.000Z | plot-wbgt.py | tanupoo/pubsec-tools | 3dfea4b677226395eff89f90aebec3105ba4f4d5 | [
"MIT"
] | null | null | null | plot-wbgt.py | tanupoo/pubsec-tools | 3dfea4b677226395eff89f90aebec3105ba4f4d5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import json
import matplotlib.pyplot as plt
result = json.load(sys.stdin)
x = result["hour"]
y = result["wbgt"]
fig = plt.figure(figsize=(8, 4))
ax1 = fig.add_subplot(1,1,1)
ax1.set_xlabel("hour")
ax1.set_ylabel("wbgt")
ax1.set_xticks(list(range(0,24,1)))
ax1.set_yticks(list(range(15,41,5)))
ax1.set_yticks(list(range(15,41,1)), minor=True)
ax1.set_xlim(1,24)
ax1.set_ylim(15,40)
ax1.grid(b=True, axis="x", which="major")
ax1.grid(b=True, axis="y", which="major")
ax1.grid(b=True, axis="y", which="minor")
ax1.plot(x,y)
plt.tight_layout()
plt.show()
| 21.035714 | 48 | 0.696095 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.127334 |
90204f12090cacf498331393d685af08f22c49b2 | 1,005 | py | Python | day_07/day_07.py | GuillaumeGandon/advent-of-code-2015 | ff4201a9a27d1ca7f687a613eeec72dd12fe1487 | [
"MIT"
] | null | null | null | day_07/day_07.py | GuillaumeGandon/advent-of-code-2015 | ff4201a9a27d1ca7f687a613eeec72dd12fe1487 | [
"MIT"
] | null | null | null | day_07/day_07.py | GuillaumeGandon/advent-of-code-2015 | ff4201a9a27d1ca7f687a613eeec72dd12fe1487 | [
"MIT"
] | null | null | null | from functools import cache
def split_row(row):
instructions, output = row.split(' -> ')
return output, tuple(instructions.split(' '))
@cache
def solve(key):
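    # Recursively resolve a wire's signal; @cache memoises so each wire is only evaluated once.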
if key.isdigit():
return int(key)
else:
instructions = circuit[key]
if len(instructions) == 1:
return solve(instructions[0])
elif len(instructions) == 2:
gate, wire_or_signal = instructions
return 65535 - solve(wire_or_signal)
else:
a, gate, b = instructions
if gate == 'AND':
return solve(a) & solve(b)
elif gate == 'OR':
return solve(a) | solve(b)
elif gate == 'LSHIFT':
return solve(a) << int(b)
else:
return solve(a) >> int(b)
circuit = dict(map(split_row, open('input').read().splitlines()))
print(f"Answer part one: {solve('a')}")
solve.cache_clear()
circuit['b'] = ('16076',)
print(f"Answer part two: {solve('a')}")
| 25.769231 | 65 | 0.536318 | 0 | 0 | 0 | 0 | 661 | 0.657711 | 0 | 0 | 107 | 0.106468 |
90215693db46543c286a97842122238df6972cc0 | 523 | py | Python | noteout/tests/test_nb_only.py | stefanv/noteout | b76b35c675fa1221be35835d56c2937e3f56b317 | [
"BSD-2-Clause"
] | 3 | 2021-08-14T19:35:37.000Z | 2021-08-23T16:53:51.000Z | noteout/tests/test_nb_only.py | stefanv/noteout | b76b35c675fa1221be35835d56c2937e3f56b317 | [
"BSD-2-Clause"
] | 1 | 2021-11-23T18:40:45.000Z | 2021-11-23T20:40:48.000Z | noteout/tests/test_nb_only.py | stefanv/noteout | b76b35c675fa1221be35835d56c2937e3f56b317 | [
"BSD-2-Clause"
] | 1 | 2021-11-23T18:33:58.000Z | 2021-11-23T18:33:58.000Z | """ Test nb-only filter
"""
from io import StringIO
from noteout.nb_only import NbonlyFilter as nnbo
from .tutils import (read_md, assert_json_equal, filter_doc)
def test_nb_only():
content = """/
Some text [notebook only]{.nb-only}more text.
::: nb-only
Only in notebook.
:::
More text.
"""
doc = read_md(StringIO(content))
filtered = filter_doc(doc, nnbo)
exp_content = """/
Some text more text.
More text.
"""
exp_doc = read_md(StringIO(exp_content))
assert_json_equal(filtered, exp_doc)
| 17.433333 | 60 | 0.692161 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 169 | 0.323136 |
9021b355ad9a734bb50dcef4f74291f2233e5506 | 1,386 | py | Python | spectrocrunch/utils/comparable.py | woutdenolf/spectrocrunch | fde4b6e0f462f464ce7af6a942b355d3d8f39f77 | [
"MIT"
] | 3 | 2018-04-16T15:51:36.000Z | 2019-12-16T11:21:05.000Z | spectrocrunch/utils/comparable.py | woutdenolf/spectrocrunch | fde4b6e0f462f464ce7af6a942b355d3d8f39f77 | [
"MIT"
] | null | null | null | spectrocrunch/utils/comparable.py | woutdenolf/spectrocrunch | fde4b6e0f462f464ce7af6a942b355d3d8f39f77 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import operator
class Comparable(object):
@property
def _repr(self):
"""Unique representation of an instance"""
return "{}{}".format(type(self).__name__, id(self))
def _cmpkey(self, other):
return self._repr
    def _sortkey(self, other):
        return self._cmpkey(other)
def __repr__(self):
return self._repr
def __str__(self):
return repr(self)
def encode(self, *args, **kwargs):
return str(self).encode(*args, **kwargs)
def _compareop(self, other, op, key):
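        # Compare via the named key method; if the other object lacks it, compare against the object itself.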
a = getattr(self, key)(other)
try:
b = getattr(other, key)(self)
except:
return op(a, other)
else:
return op(a, b)
def _sort(self, other, op):
return self._compareop(other, op, "_sortkey")
def _compare(self, other, op):
return self._compareop(other, op, "_cmpkey")
def __lt__(self, other):
return self._sort(other, operator.lt)
def __le__(self, other):
return self._sort(other, operator.le)
def __ge__(self, other):
return self._sort(other, operator.ge)
def __gt__(self, other):
return self._sort(other, operator.gt)
def __eq__(self, other):
return self._compare(other, operator.eq)
def __ne__(self, other):
return self._compare(other, operator.ne)
| 23.491525 | 59 | 0.591631 | 1,342 | 0.968254 | 0 | 0 | 141 | 0.101732 | 0 | 0 | 90 | 0.064935 |
9021bc3863fa351375d2c840439601cf105a7273 | 3,241 | py | Python | backend/fastspider.py | wakeblade/weiboSpiderAndCook | b5ca0708abd0a938eb0ac611d037a5d3daf9384f | [
"MIT"
] | 1 | 2020-02-08T16:22:19.000Z | 2020-02-08T16:22:19.000Z | backend/fastspider.py | wakeblade/weiboWordCount | b5ca0708abd0a938eb0ac611d037a5d3daf9384f | [
"MIT"
] | null | null | null | backend/fastspider.py | wakeblade/weiboWordCount | b5ca0708abd0a938eb0ac611d037a5d3daf9384f | [
"MIT"
] | null | null | null | # !/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
from gevent import monkey
monkey.patch_all()
from gevent.queue import Queue
"""
import requests
import time
import random
proxies=[]
with open('./ips.txt') as f:
proxies = [line.split('@')[0] for line in f]
def randomProxy(proxies):
ip = random.choice(proxies)
return {'https':ip,'http':ip}
class Task:
def __init__(self,url=None,method='get',params=None,data=None,cookie=None):
self.url=url
self.method=method
self.params=params
self.data=data
self.cookie=cookie
def __str__(self):
return str(self.__dict__)
class Spider:
methods ={
'get':requests.get,
'post':requests.post,
'put':requests.put,
'delete':requests.delete,
'head':requests.head
}
config ={
        'ERROR_DELAY':10, # back-off delay factor (s) applied after a failed request
        'PAGE_DELAY':1, # delay factor (s) between successive pages
        'RANDOM_SEED':3, # upper bound of the random extra delay
}
def __init__(self,header=None,proxy=None,timeout=None,config=None):
self.header=header
self.proxy=proxy
self.timeout=timeout
if config:
            self.config.update(config)
def __str__(self):
return str(self.__dict__)
def url(self,url):
task =Task(url)
return self.task(task)
def task(self,task):
if task.url==None:
            raise ValueError('Error: the spider task url must not be empty!')
self.method ='get' if task.method==None else task.method
kwargs={'url':task.url}
if self.header:
kwargs['headers']=self.header
if self.proxy:
kwargs['proxies']=self.proxy
if self.timeout:
kwargs['timeout']=self.timeout
if task.params:
kwargs['params']=task.params
if task.cookie:
kwargs['cookies']=task.cookie
if task.data:
kwargs['data']=task.data
#print("\n{} \n- {}\n".format(self,task))
delay=random.randint(0,self.config['RANDOM_SEED'])
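        # Retry with a fresh random proxy on failure, sleeping a little longer after each failed attempt.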
while True:
try:
res = self.methods[self.method](**kwargs)
except Exception as e:
#print(e)
kwargs['proxies']=randomProxy(proxies)
                print('(delay {}s)==={}==={}'.format(str(delay),kwargs['proxies'],task.url))
delay+=1
time.sleep(delay*self.config['ERROR_DELAY'])
else:
time.sleep(delay*self.config['PAGE_DELAY'])
break
return res
"""
url = 'http://icanhazip.com'
header ={
'Accept':'*/*',
'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8',
'Accept-Language':'zh-CN',
'Accept-Encoding':'gzip, deflate',
#'Connection': 'Keep-Alive',
'Connection': 'close',
'Cache-Control': 'no-cache',
'User-Agent':'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.01)'
}
task1=Task(url=url,method='post')
task2=Task(url)
#spider = Spider(header,randomProxy(proxies),(2,2))
spider = Spider()
#print(spider.task(task2).text)
print(spider.url(url).text)
#t = timeit.timeit(stmt='spider(task1.proxies(randomProxy(proxies)))',setup='from __main__ import spider,task1,randomProxy,proxies',number=10)
#print(t)
""" | 28.429825 | 142 | 0.573588 | 2,194 | 0.667884 | 0 | 0 | 0 | 0 | 0 | 0 | 1,222 | 0.371994 |
9021ca74c4cfd208803fde68aec8f4729d95dd36 | 1,156 | py | Python | setup.py | loic-simon/asyncode | 5f9873acf93f1a3ae6d4ca0b3dfc55acc7598969 | [
"MIT"
] | 1 | 2021-12-22T16:09:52.000Z | 2021-12-22T16:09:52.000Z | setup.py | loic-simon/asyncode | 5f9873acf93f1a3ae6d4ca0b3dfc55acc7598969 | [
"MIT"
] | null | null | null | setup.py | loic-simon/asyncode | 5f9873acf93f1a3ae6d4ca0b3dfc55acc7598969 | [
"MIT"
] | null | null | null | import setuptools
version = "1.0.0"
with open("README.md", "r", encoding="utf-8") as fh:
readme = fh.read()
setuptools.setup(
name="asyncode",
version=version,
author="Loïc Simon",
author_email="loic.simon@espci.org",
description="Emulating Python's interactive interpreter in asynchronous contexts",
long_description=readme,
long_description_content_type="text/markdown",
url="https://github.com/loic-simon/asyncode",
py_modules=["asyncode"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: AsyncIO",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Interpreters",
],
install_requires=[],
python_requires='>=3.5',
)
# python3 setup.py sdist bdist_wheel
# twine upload dist/*
| 31.243243 | 86 | 0.634948 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 689 | 0.595506 |
9021dafddba10ec136860876afbe1b58c1dcc7f4 | 1,926 | py | Python | test_image.py | GSM-Festival-2021/Prettier-photo-plus-Server | 98c30d5c8379491d12a4cfd4ed880fd800c40a7c | [
"MIT"
] | null | null | null | test_image.py | GSM-Festival-2021/Prettier-photo-plus-Server | 98c30d5c8379491d12a4cfd4ed880fd800c40a7c | [
"MIT"
] | null | null | null | test_image.py | GSM-Festival-2021/Prettier-photo-plus-Server | 98c30d5c8379491d12a4cfd4ed880fd800c40a7c | [
"MIT"
] | null | null | null | function solution(x1, y1, x2, y2) {
const x1 = 8;
const y1 = 4;
const x2 = 8;
const y2 = 10;
let soundSum = 0;
  // Case where the two speakers are so close together that the volume exceeds 5
if (Math.abs(x1 - x2) + Math.abs(y1 - y2) < 4) return -1;
if (3 < x1 && x1 < 13 && 3 < x2 && x2 < 13 && 3 < y1 && y1 < 13 && 3 < y2 && y2 < 13) {
    // If neither speaker touches a wall, each speaker contributes a volume of 80.
soundSum += checkWall(x1, y1)
soundSum += checkWall(x2, y2)
soundSum += 160;
return soundSum;
} else {function solution() {
const x1 = 8;
const y1 = 4;
const x2 = 8;
const y2 = 10;
let room = Array.from(Array(15), () => new Array(15).fill(0));
let roomSize = 15;
let xLocation = 0;
let yLocation = 0;
let soundSum = 0;
soundCounting(x1, x2);
console.log(room);
if (Math.abs(x1 - x2) + Math.abs(y1 - y2) < 4) return -1;
if (3 < x1 < 13 && 3 < x2 < 13 && 3 < y1 < 13 && 3 < y2 < 13) {
soundCounting(x1, x2);
console.log(room);
} else {
return -1;
}
function wallSoundCounting() {
    // ah, no idea (left unimplemented)
}
function soundCounting(x, y) {
    // if the sound reaches a wall
    // if the speaker areas overlap
    // if there is no overlapping area at all
soundSum += 80;
for (let i = 0; i < 9; i++) {
for (let j = 0; j < 9; j++) {
room[x - 4 + i][y - 4 + j]++;
}
}
}
}
  // Case where the sound reaches a wall and the volume exceeds 5
return -1;
}
function checkWall(x, y) {
let cnt = 0;
  // if the sound reaches the wall
if (6 > x || x > 10 ) {
cnt += wallSoundCounting(x);
}
if (6 > y || y > 10) {
cnt += wallSoundCounting(y);
}
return cnt;
}
function wallSoundCounting(wallLocation) {
let cnt = 0;
switch (wallLocation) {
case 4:
case 12:
cnt += 16;
break;
case 5:
case 11:
cnt += 9;
break;
}
return cnt;
}
} | 21.164835 | 91 | 0.450156 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9022b8901ebe6c1ee9599a4efe5b224353a4bd15 | 8,328 | py | Python | crops/command_line/crops-cropstr.py | jjavier-bm/crops | 658a98f9c168cc27b3f967e7a60a0df896ef5ac6 | [
"BSD-3-Clause"
] | null | null | null | crops/command_line/crops-cropstr.py | jjavier-bm/crops | 658a98f9c168cc27b3f967e7a60a0df896ef5ac6 | [
"BSD-3-Clause"
] | 5 | 2020-07-17T08:45:22.000Z | 2022-03-11T13:39:26.000Z | crops/command_line/crops-cropstr.py | jjavier-bm/crops | 658a98f9c168cc27b3f967e7a60a0df896ef5ac6 | [
"BSD-3-Clause"
] | 1 | 2020-07-07T15:42:07.000Z | 2020-07-07T15:42:07.000Z | """==========
This script will remove a number of residues from a sequence file
in agreement to the intervals and other details supplied.
"""
from crops.about import __prog__, __description__, __author__, __date__, __version__
import argparse
import os
from crops.io import check_path
from crops.io import outpathgen
from crops.io import parsers as cin
from crops.io import taggers as ctg
from crops.core import ops as cop
from crops import command_line as ccl
logger=None
def create_argument_parser():
"""Create a parser for the command line arguments used in crops-renumber"""
parser = argparse.ArgumentParser(prog=__prog__, formatter_class=argparse.RawDescriptionHelpFormatter,
description=__description__+' ('+__prog__+') v.'+__version__+'\n'+__doc__)
parser.add_argument("input_seqpath",nargs=1, metavar="Sequence_filepath",
help="Input sequence filepath.")
parser.add_argument("input_strpath",nargs=1, metavar="Structure_filepath",
help="Input structure filepath or dir. If a directory is inserted, it will act on all structure files in such directory.")
parser.add_argument("input_database",nargs=1, metavar="Intervals_database",
help="Input intervals database filepath.")
parser.add_argument("-o","--outdir",nargs=1,metavar="Output_Directory",
help="Set output directory path. If not supplied, default is the one containing the input sequence.")
sections=parser.add_mutually_exclusive_group(required=False)
sections.add_argument("-t","--terminals",action='store_true',default=False,
help="Ignore interval discontinuities and only crop the ends off.")
sections.add_argument("-u","--uniprot_threshold", nargs=2, metavar=("Uniprot_ratio_threshold","Sequence_database"),
help='Act if SIFTS database is used as intervals source AND %% residues from single Uniprot sequence is above threshold. Threshold: [MIN,MAX)=[0,100). Database path: uniclust##_yyyy_mm_consensus.fasta-path or server-only. The latter requires internet connexion.')
parser.add_argument('--version', action='version', version='%(prog)s '+ __version__)
return parser
def main():
parser = create_argument_parser()
args = parser.parse_args()
global logger
logger = ccl.crops_logger(level="info")
logger.info(ccl.welcome())
inseq=check_path(args.input_seqpath[0],'file')
indb=check_path(args.input_database[0],'file')
instr=check_path(args.input_strpath[0])
if args.uniprot_threshold is not None:
insprot=check_path(args.uniprot_threshold[1]) if args.uniprot_threshold != 'server-only' else 'server-only'
else:
insprot=None
minlen=float(args.uniprot_threshold[0]) if args.uniprot_threshold is not None else 0.0
targetlbl=ctg.target_format(indb,terms=args.terminals, th=minlen)
infixlbl=ctg.infix_gen(indb,terms=args.terminals)
if args.outdir is None:
outdir=check_path(os.path.dirname(inseq),'dir')
else:
outdir=check_path(os.path.join(args.outdir[0],''),'dir')
###########################################
logger.info('Parsing sequence file '+inseq)
seqset=cin.parseseqfile(inseq)
logger.info('Done')
logger.info('Parsing structure file '+instr)
strset, fileset=cin.parsestrfile(instr)
logger.info('Done')
logger.info('Parsing interval database file '+indb)
if len(seqset)>0:
intervals=cin.import_db(indb,pdb_in=seqset)
else:
raise ValueError('No chains were imported from sequence file.')
logger.info('Done\n')
if insprot is not None and minlen>0.0:
logger.info('Parsing uniprot sequence file '+insprot)
uniprotset={}
for seqncid, seqnc in seqset.items():
for monomerid, monomer in seqnc.imer.items():
if 'uniprot' in intervals[seqncid][monomerid].tags:
for key in intervals[seqncid][monomerid].tags['uniprot']:
if key.upper() not in uniprotset:
uniprotset[key.upper()]=None
uniprotset=cin.parseseqfile(insprot, uniprot=uniprotset)['uniprot']
logger.info('Done\n')
###########################################
gseqset={}
logger.info('Renumbering structure(s)...')
for key, structure in strset.items():
if key in seqset:
newstructure,gseqset[key]=cop.renumber_pdb(seqset[key],structure,seqback=True)
outstr=outpathgen(outdir,subdir=key,filename=key+infixlbl["renumber"]+os.path.splitext(instr)[1],mksubdir=True)
#newstructure.write_pdb(outstr)
newstructure.write_minimal_pdb(outstr)
logger.info('Done\n')
logger.info('Cropping renumbered structure(s)...')
outseq=os.path.join(outdir,os.path.splitext(os.path.basename(inseq))[0]+infixlbl["croprenum"]+os.path.splitext(os.path.basename(inseq))[1])
for key, S in gseqset.items():
newS=S.deepcopy()
if key in intervals:
if insprot is not None and minlen>0.0:
newinterval={}
for key2,monomer in S.imer.items():
if key2 in intervals[key]:
if insprot is not None and minlen>0.0:
newinterval[key2]=intervals[key][key2].deepcopy()
newinterval[key2].tags['description']+=' - Uniprot threshold'
newinterval[key2].subint=[]
unilbl=' uniprot chains included: '
for unicode,uniintervals in intervals[key][key2].tags['uniprot'].items():
if 100*uniintervals.n_elements()/uniprotset.imer[unicode].length()>=minlen:
newinterval[key2]=newinterval[key2].union(intervals[key][key2].intersection(uniintervals))
unilbl+=unicode +'|'
monomer=cop.crop_seq(monomer,newinterval[key2],targetlbl+unilbl,terms=args.terminals)
else:
monomer=cop.crop_seq(monomer,intervals[key][key2],targetlbl,terms=args.terminals)
newS.imer[key2]=monomer.deepcopy()
else:
logger.warning('Chain-name '+key+'_'+str(key2)+' not found in database. Cropping not performed.')
outseq=outpathgen(outdir,subdir=key,filename=key+infixlbl["croprenum"]+os.path.splitext(os.path.basename(inseq))[1])
monomer.dump(outseq)
if 'cropmap' in monomer.info:
outmap=outpathgen(outdir,subdir=key,filename=key+infixlbl["croprenum"]+'.cropmap')
monomer.dumpmap(outmap)
if insprot is not None and minlen>0.0:
cropped_str=cop.crop_pdb(strset[key],newS,original_id=True)
else:
cropped_str=cop.crop_pdb(strset[key],newS,original_id=True)
outstr=outpathgen(outdir,subdir=key,filename=key+infixlbl["crop"]+os.path.splitext(instr)[1],mksubdir=True)
#cropped_str.write_pdb(outstr)
cropped_str.write_minimal_pdb(outstr)
if insprot is not None and minlen>0.0:
cropped_str2=cop.crop_pdb(strset[key],newS,original_id=False)
else:
cropped_str2=cop.crop_pdb(strset[key],newS,original_id=False)
outstr=outpathgen(outdir,subdir=key,filename=key+infixlbl["croprenum"]+os.path.splitext(instr)[1],mksubdir=True)
#cropped_str2.write_pdb(outstr)
cropped_str2.write_minimal_pdb(outstr)
else:
logger.warning('PDB-ID '+key.upper()+' not found in database. Cropping not performed.')
for key2,monomer in newS.imer.items():
outseq=outpathgen(outdir,subdir=key,filename=key+os.path.splitext(os.path.basename(inseq))[1])
monomer.dump(outseq)
logger.info('Done\n')
return
if __name__ == "__main__":
import sys
import traceback
try:
main()
logger.info(ccl.ok())
sys.exit(0)
except Exception as e:
if not isinstance(e, SystemExit):
msg = "".join(traceback.format_exception(*sys.exc_info()))
logger.critical(msg)
sys.exit(1)
| 47.862069 | 289 | 0.635687 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,908 | 0.229107 |
90251fea4bf1c0681bcedcffe3e8a599e9d53e72 | 13,189 | py | Python | DSDM_Assignment2_final.py | antonyjames1996/time-series-analysis-agrotech | 1e2abfe07f0e82c7a6f5cc01a268826fb2d29635 | [
"MIT"
] | null | null | null | DSDM_Assignment2_final.py | antonyjames1996/time-series-analysis-agrotech | 1e2abfe07f0e82c7a6f5cc01a268826fb2d29635 | [
"MIT"
] | null | null | null | DSDM_Assignment2_final.py | antonyjames1996/time-series-analysis-agrotech | 1e2abfe07f0e82c7a6f5cc01a268826fb2d29635 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[95]:
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn.neighbors import LocalOutlierFactor
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.multioutput import MultiOutputRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import StackingRegressor
from sklearn.svm import LinearSVR
from sklearn.metrics import r2_score
from sklearn.ensemble import StackingRegressor
from sklearn.feature_selection import RFECV
# from xgboost.sklearn import XGBClassifier
data = pd.ExcelFile('Data.xlsx')
plants = pd.read_excel(data, 'plants')
flight = pd.read_excel(data, 'flight dates')
planting = pd.read_excel(data, 'planting')
weather = pd.read_excel(data, 'weather')
# In[2]:
# Renaming the plants data columns
plants = plants.rename(columns = {'Batch Number': 'batch_number', 'Plant Date': 'plant_date', 'Class': 'class',
'Fresh Weight (g)': 'fresh_weight', 'Head Weight (g)': 'head_weight',
'Radial Diameter (mm)': 'radial_diameter', 'Polar Diameter (mm)': 'polar_diameter',
'Diameter Ratio': 'diameter_ratio', 'Leaves': 'leaves', 'Density (kg/L)': 'density',
'Leaf Area (cm^2)': 'leaf_area', 'Square ID': 'square_id',
'Check Date': 'check_date', 'Flight Date': 'flight_date', 'Remove': 'remove'})
plants.describe()
# In[3]:
# Dropping the wrong garbage data after row 1822
planting = planting.iloc[0:1821, :]
planting = planting.drop(columns = ['Column2', 'Column3', 'Column1', 'Column4'])
planting
# In[4]:
# Removing all the non-null values from the 'Remove column'
plants = plants[plants['remove'].isnull()]
# Dropping the remove column from the dataset
plants = plants.drop(columns = ['remove'])
# Dropping the leaves column
plants = plants.drop(columns = ['leaves'])
# In[5]:
# the number of NaN values in the plants plant_date
plants['plant_date'].isna().sum()
# In[6]:
# rename the flights data columns
flight = flight.rename(columns = {'Batch Number': 'batch_number', 'Flight Date': 'flight_date'})
# In[7]:
# Merging the plants and flight data on 'batch_number'
df_merge = pd.merge(plants, flight, how = 'left', on = 'batch_number')
dd1 = df_merge.loc[: , df_merge.columns != 'flight_date_x']
dd2 = df_merge.drop('flight_date_y', axis = 1)
dd1 = dd1.rename(columns = {'flight_date_y': 'flight_date'})
dd2 = dd2.rename(columns = {'flight_date_x': 'flight_date'})
dd1.update(dd2)
df_merge = dd1
# In[8]:
### Dropping the NaN values of the flight_date, head_weight, radial_diameter, polar_diameter
plant = df_merge.dropna(subset = ['flight_date', 'head_weight', 'radial_diameter', 'polar_diameter'])
# In[9]:
plant = plant.copy()
# In[10]:
plant
# In[11]:
### dropping the rows with Null values in plant_date
plant.dropna(subset = ['plant_date'], inplace = True)
# In[12]:
### Making a new variable 'flight_time' which tells the number of days from the 'plant_date'
plant['flight_time'] = plant['flight_date'] - plant['plant_date']
plant['flight_time'] = plant['flight_time'].astype('timedelta64[D]')
# In[13]:
plant['check_time'] = plant['check_date'] - plant['plant_date']
plant['check_time'] = plant['check_time'].astype('timedelta64[D]')
# In[14]:
plant['check_flight_time'] = plant['check_date'] - plant['flight_date']
plant['check_flight_time'] = plant['check_flight_time'].astype('timedelta64[D]')
# In[15]:
### dropping all the Null values in the plants
plant.dropna(inplace=True)
# In[16]:
### changing the plant-date and check-date to date time format
plant['plant_date']= pd.to_datetime(plant['plant_date'])
plant['check_date']= pd.to_datetime(plant['check_date'])
# In[17]:
### renaming the columns of the weather data
weather = weather.rename(columns = {'Unnamed: 0': 'weather_date', 'Solar Radiation [avg]': 'solar_radiation',
'Precipitation [sum]': 'precipitation', 'Wind Speed [avg]': 'wind_speed_avg',
'Wind Speed [max]': 'wind_speed_max', 'Battery Voltage [last]': 'battery_voltage',
'Leaf Wetness [time]': 'leaf_wetness', 'Air Temperature [avg]': 'air_temp_avg',
'Air Temperature [max]': 'air_temp_max', 'Air Temperature [min]': 'air_temp_min',
'Relative Humidity [avg]': 'relative_humidity', 'Dew Point [avg]': 'dew_point_avg',
'Dew Point [min]': 'dew_point_min', 'ET0 [result]': 'eto_result'})
# In[18]:
### dropping the duplpicates in the weather dataset
weather = weather.drop_duplicates(subset = ['weather_date'])
# In[19]:
### changing the weather-date to date time format
weather['weather_date']= pd.to_datetime(weather['weather_date'])
# In[20]:
for x,(i, j) in enumerate(zip(plant.plant_date, plant.check_date)):
df_subset = weather[(weather['weather_date']>i) & (weather['weather_date']< j)]
plant.at[x, 'avg_precipitation'] = (df_subset['precipitation'].mean())
plant.at[x, 'std_precipitation'] = (df_subset['precipitation'].std())
plant.at[x, 'avg_solar_rad'] = df_subset['solar_radiation'].mean()
plant.at[x, 'std_solar_rad'] = df_subset['solar_radiation'].std()
plant.at[x, 'avg_wind_speed'] = df_subset['wind_speed_avg'].mean()
plant.at[x, 'std_wind_speed'] = df_subset['wind_speed_avg'].std()
plant.at[x, 'avg_air_temp'] = df_subset['air_temp_avg'].mean()
plant.at[x, 'std_air_temp'] = df_subset['air_temp_avg'].std()
plant.at[x, 'avg_leaf_wetness'] = df_subset['leaf_wetness'].mean()
plant.at[x, 'std_leaf_wetness'] = df_subset['leaf_wetness'].std()
plant.at[x, 'avg_relative_humidity'] = df_subset['relative_humidity'].mean()
plant.at[x, 'std_relative_humidity'] = df_subset['relative_humidity'].std()
plant.at[x, 'avg_dew_point'] = df_subset['dew_point_avg'].mean()
plant.at[x, 'std_dew_point'] = df_subset['dew_point_avg'].std()
# In[21]:
### dropping the rows with Null values again if any
plant = plant.dropna()
# In[22]:
plant = plant[['plant_date', 'flight_date', 'check_date','batch_number', 'class', 'density',
'leaf_area','square_id',
'flight_time', 'check_time', 'check_flight_time',
'avg_precipitation', 'std_precipitation', 'avg_solar_rad', 'std_solar_rad',
'avg_wind_speed','std_wind_speed', 'avg_air_temp', 'std_air_temp',
'avg_leaf_wetness', 'std_leaf_wetness', 'avg_relative_humidity','std_relative_humidity',
'avg_dew_point','std_dew_point' ,'fresh_weight', 'diameter_ratio',
'head_weight', 'radial_diameter', 'polar_diameter']]
# In[23]:
plant
# In[97]:
### Exploratory Data Analysis
### Plant data analysis
plant_data = plant[['batch_number', 'class',
'density', 'leaf_area','square_id', 'flight_time', 'check_time' ,'fresh_weight',
'diameter_ratio', 'head_weight', 'radial_diameter', 'polar_diameter']]
plant_data.hist(figsize = (16,10))
# plt.savefig("plant_hist.pdf", format="pdf", bbox_inches="tight")
# plt.show()
# In[98]:
### plant_data heatmap
fig = plt.figure(figsize = (10,10))
sns.heatmap(plant_data.corr(), vmax = 0.6, square = True)
# plt.savefig("plant_heatmap.pdf", format="pdf", bbox_inches="tight")
# plt.show()
# In[106]:
sns.jointplot(x = "radial_diameter",y = "polar_diameter", data=plant, hue="class");
# plt.savefig("radial_polar.pdf", format="pdf", bbox_inches="tight")
# plt.suptitle("Joint plot between Fresh Weight and Head Weight", y = 0)
# plt.show()
# In[107]:
sns.jointplot(x = "fresh_weight", y = "head_weight", data=plant
, hue="class");
plt.suptitle("Joint plot between Fresh Weight and Head Weight", y = 0)
# plt.savefig("fresh_weight_head_weight.pdf", format="pdf", bbox_inches="tight")
# plt.show()
# In[108]:
sns.scatterplot(data = plant, x="check_time", y="density", hue="class")
plt.title('Scatterplot between check_time - density',loc='center' ,y=-0.3)
plt.xlabel('check_time')
plt.ylabel('density')
# plt.savefig("check_time_density.pdf", format="pdf", bbox_inches="tight")
# plt.show()
# In[109]:
sns.pairplot(plant[['batch_number', 'class', 'flight_time' ,
'head_weight', 'radial_diameter', 'polar_diameter']])
# plt.savefig("plant_pairplot.pdf", format="pdf", bbox_inches="tight")
# plt.show()
# In[110]:
### weather data analysis
weather.hist(figsize = (16,10))
# plt.savefig("weather_histplot.pdf", format="pdf", bbox_inches="tight")
# plt.show()
# In[111]:
fig = plt.figure(figsize = (10,10))
sns.heatmap(weather.corr(), vmax = .8, square = True)
# plt.savefig("weather_heatmap.pdf", format="pdf", bbox_inches="tight")
# plt.show()
# In[37]:
X = plant.iloc[:, 3:-5]
y = plant.iloc[:, -3:]
# In[43]:
X = X.to_numpy()
y = y.to_numpy()
# In[45]:
### detection of Outliers
outliers = LocalOutlierFactor()
out = outliers.fit_predict(X)
# masking out by selecting all rows that are not outliers
mask = out != -1
X, y = X[mask, :], y[mask]
print(X.shape, y.shape)
# In[47]:
# Split the data into train, test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
# In[127]:
X_plant = plant.iloc[:, 3:11]
y_plant = plant.iloc[:, -3:]
X_weather = plant.iloc[:, 11:25]
y_weather = plant.iloc[:, -3:]
# In[128]:
X_plant_train, X_plant_test, y_plant_train, y_plant_test = train_test_split(X_plant, y_plant, test_size=0.33,
random_state=42)
X_weather_train, X_weather_test, y_weather_train, y_weather_test = train_test_split(X_weather, y_weather,
test_size=0.33, random_state=42)
# In[132]:
# Model 1 : Linear Regression
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print('Linear Regression model score:' ,r2_score(y_test, y_pred, multioutput='variance_weighted'))
# In[129]:
# Model 1.1 : Linear Regression using just plants data
model = LinearRegression()
model.fit(X_plant_train, y_plant_train)
y_pred = model.predict(X_plant_test)
print('Linear Regression model score with plants:' ,r2_score(y_plant_test, y_pred, multioutput='variance_weighted'))
# In[130]:
# Model 1.2 : Linear Regression using just weather data
model = LinearRegression()
model.fit(X_weather_train, y_weather_train)
y_pred = model.predict(X_weather_test)
print('Linear Regression model score with weather:' ,r2_score(y_weather_test, y_pred, multioutput='variance_weighted'))
# In[135]:
# Model 2 : Random Forest
model = RandomForestRegressor()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print('Random Forest model score:' ,r2_score(y_test, y_pred, multioutput='variance_weighted'))
# In[137]:
feat_importances = pd.Series(model.feature_importances_, index=plant.iloc[:, 3:-5].columns)
feat_importances.nlargest(10).plot(kind='barh')
# plt.savefig("feature_imp_all.pdf", format="pdf", bbox_inches="tight")
# plt.show()
# In[138]:
# Model 2.1 : Random Forest using just plant data
model = RandomForestRegressor()
model.fit(X_plant_train, y_plant_train)
y_pred = model.predict(X_plant_test)
print('Random Forest model score with plants:' ,r2_score(y_plant_test, y_pred, multioutput='variance_weighted'))
# In[140]:
feat_importances = pd.Series(model.feature_importances_, index=plant.iloc[:, 3:11].columns)
feat_importances.nlargest(10).plot(kind='barh')
# plt.savefig("feature_imp_plant.pdf", format="pdf", bbox_inches="tight")
# plt.show()
# In[141]:
# Model 2.2 : Random Forest using just weather data
model = RandomForestRegressor()
model.fit(X_weather_train, y_weather_train)
y_pred = model.predict(X_weather_test)
print('Random Forest model score with weather:' ,r2_score(y_weather_test, y_pred, multioutput='variance_weighted'))
# In[142]:
feat_importances = pd.Series(model.feature_importances_, index=plant.iloc[:, 11:25].columns)
feat_importances.nlargest(10).plot(kind='barh')
# plt.savefig("feature_imp_weather.pdf", format="pdf", bbox_inches="tight")
# plt.show()
# In[94]:
# list(plant.iloc[:, 3:-5].columns.values)
# In[ ]:
# In[93]:
# # Model 2 : Random Forest
# model = RandomForestRegressor()
# model.fit(X_train, y_train)
# y_pred = model.predict(X_test)
# print('model score:' ,r2_score(y_test, y_pred, multioutput='variance_weighted'))
# In[ ]:
# 'batch_number' ,'density' , 'leaf_area' , 'check_time', 'std_precipitation', 'avg_solar_rad',
# 'std_solar_rad', 'std_air_temp', 'avg_relative_humidity', 'std_relative_humidity', 'avg_dew_point',
# 'std_dew_point'
# In[91]:
# Model 3 : Gradient Boosting
reg = MultiOutputRegressor(GradientBoostingRegressor())
reg.fit(X_train, y_train)
print('Gradient Boosting score', reg.score(X_test, y_test))
# In[ ]:
| 26.116832 | 119 | 0.679506 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,832 | 0.518007 |
902524521dfdcde30b78e79a7e608392d647a998 | 6,127 | py | Python | modules/msa/msa/contrib/uniqauth/views.py | haoyutan/MSA-Framework | 7c5553b244347f26029729161e15e60b0cc805f5 | [
"MIT"
] | 2 | 2016-11-22T11:44:52.000Z | 2017-08-29T02:38:01.000Z | modules/msa/msa/contrib/uniqauth/views.py | haoyutan/MSA-Framework | 7c5553b244347f26029729161e15e60b0cc805f5 | [
"MIT"
] | null | null | null | modules/msa/msa/contrib/uniqauth/views.py | haoyutan/MSA-Framework | 7c5553b244347f26029729161e15e60b0cc805f5 | [
"MIT"
] | null | null | null | from django.contrib.auth import authenticate
from msa.utils.ipware import get_ip
from msa.views import LoggedAPIView
from rest_framework import status
from rest_framework.authentication import TokenAuthentication, BasicAuthentication
from rest_framework.authtoken.models import Token
from rest_framework.permissions import AllowAny, IsAuthenticated, IsAdminUser
from rest_framework.response import Response
from .serializers import *
class Register(LoggedAPIView):
authentication_classes = ()
permission_classes = (AllowAny,)
serializer_class = RegisterSerializer
def post(self, request):
pp = self.serializer_class(data=request.data)
if pp.is_valid():
username = pp.validated_data['username']
password = pp.validated_data['password']
if Account.objects.count() <= 0:
user = User.objects.create_superuser(username=username, password=password, email=None)
else:
user = User.objects.create_user(username=username, password=password, email=None)
account = Account(user=user)
#account.save()
password_history = PasswordHistory(account=account, ip=get_ip(request), password=password)
password_history.save()
return Response(status=status.HTTP_201_CREATED)
else:
raise BadRequest(pp.errors)
class LogIn(LoggedAPIView):
authentication_classes = ()
permission_classes = (AllowAny,)
serializer_class = LogInSerializer
def get(self, request):
pp = self.serializer_class(data=request.GET)
if pp.is_valid():
username = pp.validated_data['username']
password = pp.validated_data['password']
user = authenticate(username=username, password=password)
if user is not None:
token, created = Token.objects.get_or_create(user=user)
'''
if not created:
token.created = timezone.now()
token.save()
'''
account = Account.objects.get(user=user)
access_log = AccessLog(account=account, ip=get_ip(request), token=token)
access_log.save()
return Response({'token': token.key})
else:
raise Unauthorized()
else:
raise BadRequest(pp.errors)
class Verify(LoggedAPIView):
authentication_classes = (TokenAuthentication, BasicAuthentication)
permission_classes = (IsAuthenticated,)
serializer_class = None
def get(self, request):
return Response(status=status.HTTP_200_OK)
class Password(LoggedAPIView):
authentication_classes = (TokenAuthentication, BasicAuthentication)
permission_classes = (IsAuthenticated,)
serializer_class = PasswordSerializer
def post(self, request):
pp = self.serializer_class(data=request.data)
if pp.is_valid():
username = pp.validated_data['username']
password_old = pp.validated_data['password_old']
password_new = pp.validated_data['password_new']
user = authenticate(username=username, password=password_old)
if user is not None:
user.set_password(password_new)
user.save()
account = Account.objects.get(user=user)
#account.update = timezone.now()
account.save()
password_history = PasswordHistory(account=account, ip=get_ip(request), password=password_new)
password_history.save()
user.auth_token.delete()
return Response(status=status.HTTP_202_ACCEPTED)
else:
raise Unauthorized()
else:
raise BadRequest(pp.errors)
class Detail(LoggedAPIView):
authentication_classes = (TokenAuthentication, BasicAuthentication)
permission_classes = (IsAuthenticated,)
serializer_class = AccountSerializer
def get(self, request):
return Response(self.serializer_class(Account.objects.get(user=request.user)).data)
class Misc(LoggedAPIView):
authentication_classes = (TokenAuthentication, BasicAuthentication)
permission_classes = (IsAuthenticated,)
serializer_class = MiscSerializer
def post(self, request):
pp = self.serializer_class(data=request.data)
if pp.is_valid():
account = Account.objects.get(user=request.user)
if account.misc:
misc = json.loads(account.misc)
else:
misc = dict()
misc[pp.validated_data['field']] = pp.validated_data['value']
account.misc = json.dumps(misc)
account.save()
return Response(status=status.HTTP_201_CREATED)
else:
raise BadRequest(pp.errors)
class AdminList(LoggedAPIView):
authentication_classes = (TokenAuthentication, BasicAuthentication)
permission_classes = (IsAdminUser,)
serializer_class = AccountSerializer
def get(self, request):
return Response(self.serializer_class(Account.objects.all(), many=True).data)
class AdminReset(LoggedAPIView):
authentication_classes = (TokenAuthentication, BasicAuthentication)
permission_classes = (IsAdminUser,)
serializer_class = AdminResetSerializer
def put(self, request):
pp = self.serializer_class(data=request.data)
if pp.is_valid():
username = pp.validated_data['username']
password = pp.validated_data['password']
user = User.objects.get(username=username)
user.set_password(password)
user.save()
account = Account.objects.get(user=user)
#account.update = timezone.now()
account.save()
password_history = PasswordHistory(account=account, ip=get_ip(request), password=password)
password_history.save()
user.auth_token.delete()
return Response(status=status.HTTP_202_ACCEPTED)
else:
raise BadRequest(pp.errors)
| 37.588957 | 110 | 0.647299 | 5,668 | 0.925086 | 0 | 0 | 0 | 0 | 0 | 0 | 337 | 0.055002 |
9025967f892fe7e0100f3ff33e467fab11a11531 | 4,830 | py | Python | dcm2niix.py | rhancockn/dcm2bids | 16597eeb20edfa0ec707c9bd0bf8468d94e0c925 | [
"MIT"
] | null | null | null | dcm2niix.py | rhancockn/dcm2bids | 16597eeb20edfa0ec707c9bd0bf8468d94e0c925 | [
"MIT"
] | null | null | null | dcm2niix.py | rhancockn/dcm2bids | 16597eeb20edfa0ec707c9bd0bf8468d94e0c925 | [
"MIT"
] | null | null | null | #!/use/bin/python
import tempfile
import os
import dicom
import pandas
import json
import numpy as np
from os.path import join
import glob
import errno
import shutil
class dcm2niix(object):
"""A wrapper for the dcm2niix command
"""
def __init__(self, row, bids_dir, intent = None):
self.intent = intent
#Dicom keys of interest
self.keys=['RepetitionTime', 'AcquisitionMatrix', 'EchoTime', 'EchoTrainLength','FlipAngle', 'Manufacturer', 'ManufacturerModelName', 'MagneticFieldStrength', 'DeviceSerialNumber', 'SoftwareVersions', 'InversionTime', 'PixelBandwidth', 'ScanOptions', 'InPlanePhaseEncodingDirection']
self.wd = os.getcwd()
self.row = row
self.bids_basename = join(bids_dir, 'sub-' + row.PatientID, row.target_path)
files = glob.glob(join(row.DICOMPath, 'IM-*-0001.dcm'))
self.dcm = dicom.read_file(files[0])
#def __del__(self):
# os.rmdir(self.tempdir)
def _make_dicom_json(self):
self.json_dict = {}
keys = np.intersect1d(self.keys,self.dcm.dir())
for k in keys:
self.json_dict[k] = self.dcm.get(k)
if self.dcm.has_key((0x19,0x1028)):
self.json_dict['EffectiveEchoSpacing'] = 1.0/(self.dcm[0x19,0x1028].value*self.dcm.AcquisitionMatrix[0])
self.json_dict['TotalReadoutTime'] = 1.0/self.dcm[0x19,0x1028].value
if self.dcm.has_key((0x19,0x1029)):
self.json_dict['SliceTiming'] = self.dcm[0x19,0x1029].value
self.json_dict['PulseSequenceDetails'] = self.dcm[0x18,0x24].value
if self.dcm.has_key((0x20,0x4000)):
self.json_dict['PulseSequenceDetails'] = self.json_dict['PulseSequenceDetails'] + ' ' + self.dcm[0x20,0x4000].value
if self.dcm.has_key((0x51,0x100f)):
self.json_dict['ReceiveCoilName'] = self.dcm[0x51,0x100f].value
self.json_dict['TaskName'] = self.row.task
self.json_dict['PhaseEncodingDirectionPositive'] = self.row.PhaseEncodingDirectionPositive
#add the list of intent scans, if any.
if self.intent:
self.json_dict['IntendedFor'] = self.intent
def _convert(self):
self.tempdir = tempfile.mkdtemp()
cmd = 'dcm2niix -b y -o . -z y -x n -f out "%s"' % self.row.DICOMPath
os.chdir(self.tempdir)
err = os.system(cmd)
if err != 0:
raise Exception('Error converting DICOM %s' % self.row.DICOMPath)
os.chdir(self.wd)
def _copy(self):
bids_dir = os.path.dirname(self.bids_basename)
self._mkdir_p(bids_dir)
#the magnitudes from both echoes are in the same directory
#dcm2niix splits the echoes. Copy them appropriately
if self.row.type == 'magnitude':
if os.path.isfile(join(self.tempdir,'out.nii.gz')):
shutil.copyfile(join(self.tempdir,'out.nii.gz'), self.bids_basename + '1.nii.gz')
if os.path.isfile(join(self.tempdir,'_e2out.nii.gz')):
shutil.copyfile(join(self.tempdir,'_e2out.nii.gz'), self.bids_basename + '2.nii.gz')
if os.path.isfile(join(self.tempdir,'out.bids')):
json_fname = self.bids_basename + '1.json'
shutil.copyfile(join(self.tempdir,'out.bids'), json_fname)
self._update_json(json_fname)
json_fname = self.bids_basename + '2.json'
shutil.copyfile(join(self.tempdir,'_e2out.bids'), json_fname)
self._update_json(json_fname)
elif self.row.type in ['phasediff', 'magnitude2']:
shutil.copyfile(glob.glob(join(self.tempdir,'*out.nii.gz'))[0], self.bids_basename + '.nii.gz')
json_fname = self.bids_basename + '.json'
shutil.copyfile(glob.glob(join(self.tempdir,'*out.bids'))[0], json_fname)
self._update_json(json_fname)
#anything but a single magnitude directory should produce one out.nii.gz/out.bids pair
else:
imgs = glob.glob(join(self.tempdir, '*out*.nii.gz'))
if len(imgs) > 1:
raise Exception('More out.nii.gz files than expected')
shutil.copyfile(join(self.tempdir,'out.nii.gz'), self.bids_basename + '.nii.gz')
json_fname = self.bids_basename + '.json'
shutil.copyfile(join(self.tempdir,'out.bids'), json_fname)
self._update_json(json_fname)
if self.row.type == 'dwi':
shutil.copyfile(join(self.tempdir,'out.bval'), self.bids_basename + '.bval')
shutil.copyfile(join(self.tempdir,'out.bvec'), self.bids_basename + '.bvec')
def _update_json(self, fname):
fp=open(fname, 'r+')
meta = json.load(fp)
orig_keys = np.intersect1d(self.json_dict.keys(), meta.keys())
for k in np.setdiff1d(self.json_dict.keys(),meta.keys()):
meta[k]=self.json_dict[k]
for k in orig_keys:
meta['o'+k] = self.json_dict[k]
fp.seek(0)
json.dump(meta,fp,indent=2)
fp.close()
def _mkdir_p(self,path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def process(self):
self._make_dicom_json()
self._convert()
self._copy()
os.system('chmod 2550 %s*' % self.bids_basename)
shutil.rmtree(self.tempdir)
| 32.2 | 285 | 0.695238 | 4,654 | 0.963561 | 0 | 0 | 0 | 0 | 0 | 0 | 1,238 | 0.256315 |
902da8cbd33808618399125bb013b3cfef957b80 | 4,479 | py | Python | src/sss/genkey.py | foundriesio/plug-and-trust-ssscli | f77c65d5b3de649d7db1c023ee41d871f77cd224 | [
"Apache-2.0"
] | null | null | null | src/sss/genkey.py | foundriesio/plug-and-trust-ssscli | f77c65d5b3de649d7db1c023ee41d871f77cd224 | [
"Apache-2.0"
] | null | null | null | src/sss/genkey.py | foundriesio/plug-and-trust-ssscli | f77c65d5b3de649d7db1c023ee41d871f77cd224 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2018-2020 NXP
# SPDX-License-Identifier: Apache-2.0
#
#
"""License text"""
import logging
from . import sss_api as apis
from .keystore import KeyStore
from .keyobject import KeyObject
from .getkey import Get
from .util import get_ecc_cypher_type
log = logging.getLogger(__name__)
class Generate:
"""
Generate key pair/public key of ecc/rsa
"""
def __init__(self, session_obj):
"""
Constructor
:param session_obj: Instance of session
"""
self._session = session_obj
self._ctx_ks = KeyStore(self._session)
self._ctx_key = KeyObject(self._ctx_ks)
self.key_obj_mode = apis.kKeyObject_Mode_Persistent
def gen_ecc_public(self, key_id, curve_type, file_name, policy, encode_format=""): # pylint: disable=too-many-arguments
"""
Generate ecc public key
:param key_id: Key index
:param curve_type: ECC curve type
:param file_name: File name to store public key
:param policy: Policy to be applied
:param encode_format: File format to store public key
:return: Status
"""
if file_name[-4:] != '.pem' and file_name[-4:] != '.der':
log.error("Unsupported file type. File type should be in pem or der format")
return apis.kStatus_SSS_Fail
status = self.gen_ecc_pair(key_id, curve_type, policy)
if status != apis.kStatus_SSS_Success:
return status
get = Get(self._session)
status = get.get_key(key_id, file_name, encode_format)
return status
def gen_ecc_pair(self, key_id, curve_type, policy):
"""
Generate ecc key pair
:param key_id: Key index
:param curve_type: ECC curve type
:param policy: Policy to be applied
:return: Status
"""
cypher_type, key_size = get_ecc_cypher_type(curve_type)
key_type = apis.kSSS_KeyPart_Pair
if key_size == 0:
log.error("curve type not supported")
return apis.kStatus_SSS_Fail
status = self._gen_key_pair(key_id, key_size, key_type, cypher_type, policy)
return status
def gen_rsa_public(self, key_id, key_size, file_name, policy):
"""
Generate rsa public key
:param key_id: Key index
:param key_size: Key size to generate
:param file_name: File name to store public key
:param policy: Policy to be applied
:return: Status
"""
if file_name[-4:] != '.pem' and file_name[-4:] != '.der':
log.error("Unsupported file type. File type should be in pem or der format")
return apis.kStatus_SSS_Fail
status = self.gen_rsa_pair(key_id, key_size, policy)
if status != apis.kStatus_SSS_Success:
return status
get = Get(self._session)
status = get.get_key(key_id, file_name)
return status
def gen_rsa_pair(self, key_id, key_size, policy):
"""
Generate rsa key pair
:param key_id: Key index
:param key_size: RSA key size to generate
:param policy: Policy to be applied
:return: Status
"""
key_type = apis.kSSS_KeyPart_Pair
cypher_type = apis.kSSS_CipherType_RSA_CRT
status = self._gen_key_pair(key_id, key_size, key_type, cypher_type, policy)
return status
def _gen_key_pair(self, key_id, key_size, key_type, cypher_type, policy): # pylint: disable=too-many-arguments
"""
Generate key pair
:param key_id: Key index
:param key_size: Key size
:param key_type: Key type
:param cypher_type: Cypher type
:param policy: Policy to be applied
:return: Status
"""
# Key length calculation based on key bit length
# if modulus of key_bit_len is non zero, then allocate extra byte
if (key_size % 8) != 0:
key_len = (key_size / 8) + 1
else:
key_len = key_size / 8
status = self._ctx_key.allocate_handle(key_id, key_type, cypher_type, int(key_len),
self.key_obj_mode)
if status != apis.kStatus_SSS_Success:
return status
status = self._ctx_ks.generate_key(self._ctx_key, key_size, policy)
if status != apis.kStatus_SSS_Success:
return status
status = self._ctx_ks.save_key_store()
return status
| 33.177778 | 124 | 0.621121 | 4,179 | 0.933021 | 0 | 0 | 0 | 0 | 0 | 0 | 1,780 | 0.39741 |
902e9315b6728c26e93d095508b7d9dca413b5b0 | 979 | py | Python | Python/uds/uds_client.py | kaehsu/template-bash | f8a8a4babb8537622a4e4246701761a9832d6aeb | [
"MIT"
] | null | null | null | Python/uds/uds_client.py | kaehsu/template-bash | f8a8a4babb8537622a4e4246701761a9832d6aeb | [
"MIT"
] | null | null | null | Python/uds/uds_client.py | kaehsu/template-bash | f8a8a4babb8537622a4e4246701761a9832d6aeb | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# To communicate with UDS server by nc: "echo -e "string\c" | sudo nc -q 1 -U /var/run/uds_led"
import socket
serverAddress = '/tmp/portex_tmp'
def main():
try:
while True:
message = input(
'Enter the message send to server ("Quit" to quit): ')
if message:
if message == 'Quit':
raise SystemExit
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(serverAddress)
sock.send(message.encode('utf-8'))
#r = sock.recv(1024)
print('Receiving message "{}" from server.\n'.format(
sock.recv(1024).decode()))
sock.close()
else:
print('You have to enter something.....\n')
continue
except KeyboardInterrupt:
print('\n')
# sock.close()
if __name__ == '__main__':
main()
| 28.794118 | 95 | 0.507661 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 324 | 0.33095 |
902fb2aef8b4515b1af62bd380980dd14457df65 | 502 | py | Python | apps/workspaces/migrations/0007_workspacegeneralsettings_auto_map_employees.py | fylein/fyle-xero-api | ba81af058dc413fc801d4cf7d1a8961bd42df469 | [
"MIT"
] | null | null | null | apps/workspaces/migrations/0007_workspacegeneralsettings_auto_map_employees.py | fylein/fyle-xero-api | ba81af058dc413fc801d4cf7d1a8961bd42df469 | [
"MIT"
] | 6 | 2020-12-24T10:24:02.000Z | 2021-11-30T05:04:53.000Z | apps/workspaces/migrations/0007_workspacegeneralsettings_auto_map_employees.py | fylein/fyle-xero-api | ba81af058dc413fc801d4cf7d1a8961bd42df469 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.3 on 2021-02-19 18:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('workspaces', '0006_workspacegeneralsettings_import_categories'),
]
operations = [
migrations.AddField(
model_name='workspacegeneralsettings',
name='auto_map_employees',
field=models.CharField(help_text='Auto Map Employees from Xero to Fyle', max_length=50, null=True),
),
]
| 26.421053 | 111 | 0.663347 | 409 | 0.814741 | 0 | 0 | 0 | 0 | 0 | 0 | 192 | 0.38247 |
903025199c8cb18d7b43068916c16d96cb4139f2 | 2,967 | py | Python | 0x06-python-classes/100-singly_linked_list.py | Trice254/alx-higher_level_programming | b49b7adaf2c3faa290b3652ad703914f8013c67c | [
"MIT"
] | null | null | null | 0x06-python-classes/100-singly_linked_list.py | Trice254/alx-higher_level_programming | b49b7adaf2c3faa290b3652ad703914f8013c67c | [
"MIT"
] | null | null | null | 0x06-python-classes/100-singly_linked_list.py | Trice254/alx-higher_level_programming | b49b7adaf2c3faa290b3652ad703914f8013c67c | [
"MIT"
] | null | null | null | #!/usr/bin/python3
"""
Module 100-singly_linked_list
Defines class Node (with private data and next_node)
Defines class SinglyLinkedList (with private head and public sorted_insert)
"""
class Node:
"""
class Node definition
Args:
data (int): private
next_node : private; can be None or Node object
Functions:
__init__(self, data, next_node=None)
data(self)
data(self, value)
next_node(self)
next_node(self, value)
"""
def __init__(self, data, next_node=None):
"""
Initializes node
Attributes:
data (int): private
next_node : private; can be None or Node object
"""
self.data = data
self.next_node = next_node
@property
def data(self):
""""
Getter
Return: data
"""
return self.__data
@data.setter
def data(self, value):
"""
Setter
Args:
value: sets data to value if int
"""
if type(value) is not int:
raise TypeError("data must be an integer")
else:
self.__data = value
@property
def next_node(self):
""""
Getter
Return: next_node
"""
return self.__next_node
@next_node.setter
def next_node(self, value):
"""
Setter
Args:
value: sets next_node if value is next_node or None
"""
if type(value) is not Node and value is not None:
raise TypeError("next_node must be a Node object")
else:
self.__next_node = value
class SinglyLinkedList:
"""
class SinglyLinkedList definition
Args:
head: private
Functions:
__init__(self)
sorted_insert(self, value)
"""
def __init__(self):
"""
Initializes singly linked list
Attributes:
head: private
"""
self.__head = None
def __str__(self):
"""
String representation of singly linked list needed to print
"""
string = ""
tmp = self.__head
while tmp is not None:
string += str(tmp.data)
tmp = tmp.next_node
if tmp is not None:
string += "\n"
return string
def sorted_insert(self, value):
"""
Inserts new nodes into singly linked list in sorted order
Args:
value: int data for node
"""
new = Node(value)
if self.__head is None:
self.__head = new
return
tmp = self.__head
if new.data < tmp.data:
new.next_node = self.__head
self.__head = new
return
while (tmp.next_node is not None) and (new.data > tmp.next_node.data):
tmp = tmp.next_node
new.next_node = tmp.next_node
tmp.next_node = new
return
| 23.179688 | 78 | 0.532524 | 2,770 | 0.933603 | 0 | 0 | 860 | 0.289855 | 0 | 0 | 1,462 | 0.492754 |
90303a8de55d76b20b74b604783236c6d15111a5 | 310 | py | Python | BI-IOS/semester-project/webapp/beecon/campaigns/urls.py | josefdolezal/fit-cvut | 6b6abea4232b946246d33290718d6c5007926b63 | [
"MIT"
] | 20 | 2016-05-15T10:39:53.000Z | 2022-03-29T00:06:06.000Z | BI-IOS/semester-project/webapp/beecon/campaigns/urls.py | josefdolezal/fit-cvut | 6b6abea4232b946246d33290718d6c5007926b63 | [
"MIT"
] | 3 | 2017-05-27T16:44:01.000Z | 2019-01-02T21:02:59.000Z | BI-IOS/semester-project/webapp/beecon/campaigns/urls.py | josefdolezal/fit-cvut | 6b6abea4232b946246d33290718d6c5007926b63 | [
"MIT"
] | 11 | 2018-08-22T21:16:32.000Z | 2021-04-10T22:42:34.000Z | from django.conf.urls import url
from . import views
app_name = 'campaigns'
urlpatterns = [
url( r'^$', views.JsonView.response, name='index' ),
url( r'^(?P<app_code>[a-zA-Z0-9]+)/info/$', views.info, name='info' ),
url( r'^(?P<app_code>[a-zA-Z0-9]+)/services/$', views.services, name='services' ),
]
| 25.833333 | 84 | 0.632258 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 117 | 0.377419 |
9032381dcc04f711d03772a06dc91a54a4d1b366 | 5,802 | py | Python | models/joint_representation.py | ybCliff/VideoCaptioning | 93fc3b095c970e51e1e24909163a827df98d6ef3 | [
"MIT"
] | 3 | 2020-05-16T23:59:57.000Z | 2021-06-14T01:59:41.000Z | models/joint_representation.py | ybCliff/VideoCaptioning | 93fc3b095c970e51e1e24909163a827df98d6ef3 | [
"MIT"
] | null | null | null | models/joint_representation.py | ybCliff/VideoCaptioning | 93fc3b095c970e51e1e24909163a827df98d6ef3 | [
"MIT"
] | 3 | 2020-05-17T00:01:01.000Z | 2020-07-28T18:04:05.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
import math
class Gated_Sum(nn.Module):
def __init__(self, opt):
super(Gated_Sum, self).__init__()
hidden_size = opt['dim_hidden']
nf = opt.get('num_factor', 512)
self.hidden_size = hidden_size
self.num_feats = len(opt['modality']) - sum(opt['skip_info'])
#self.emb_weight = Parameter(torch.Tensor(self.num_feats * hidden_size, hidden_size))
#self.emb_bias = Parameter(torch.Tensor(self.num_feats * hidden_size))
self.weight_a = Parameter(torch.Tensor(self.num_feats * hidden_size, nf))
self.weight_b = Parameter(torch.Tensor(nf, self.num_feats))
self.weight_c = Parameter(torch.Tensor(nf, hidden_size))
self.bias = Parameter(torch.Tensor(self.num_feats * hidden_size))
self.dropout = nn.Dropout(0.5)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
weight.data.uniform_(-stdv, stdv)
def get_gated_result(self, weight, bias, feats, index):
assert len(feats) == self.num_feats
#assert len(feats.shape)
#ew = self.emb_weight.chunk(self.num_feats, 0)
#eb = self.emb_bias.chunk(self.num_feats, 0)
w = weight.chunk(self.num_feats, 0)
b = bias.chunk(self.num_feats, 0)
res = []
for i in range(self.num_feats):
#if i == index:
# emb = F.linear(feats[i], ew[i], eb[i])
res.append(F.linear(self.dropout(feats[i]), w[i], b[i]))
#res.append(F.linear(feats[i], w[i], b[i]))
gated_result = F.sigmoid(torch.stack(res, 0).sum(0)) * feats[index]
#gated_result = F.sigmoid(torch.stack(res, 0).sum(0)) * emb
return gated_result
def forward(self, encoder_outputs):
bsz, seq_len, _ = encoder_outputs[0].shape
feats = [item.contiguous().view(bsz * seq_len, -1) for item in encoder_outputs]
#feats = [self.dropout(item.contiguous().view(bsz * seq_len, -1)) for item in encoder_outputs]
gated_results = []
for i in range(self.num_feats):
tag = torch.zeros(self.num_feats, 1).to(feats[0].device)
tag[i] = 1
#query = feats[i].mean(0).unsqueeze(0).repeat(self.num_feats, 1) # [3, dim]
#key = torch.stack(feats, 1).mean(0) # [3, dim]
#tag = F.cosine_similarity(query, key).unsqueeze(1)
weight_mid = torch.mm(self.weight_b, tag)
weight_mid = torch.diag(weight_mid.squeeze(1))
weight = torch.mm(torch.mm(self.weight_a, weight_mid), self.weight_c)
gated_results.append(self.get_gated_result(weight, self.bias, feats, i))
gated_results = torch.stack(gated_results, 0).sum(0)
gated_results = gated_results.contiguous().view(bsz, seq_len, self.hidden_size)
return gated_results
class Joint_Representaion_Learner(nn.Module):
def __init__(self, feats_size, opt):
super(Joint_Representaion_Learner, self).__init__()
self.encoder_type = opt['encoder_type']
self.decoder_type = opt['decoder_type']
self.addition = opt.get('addition', False)
self.temporal_concat = opt.get('temporal_concat', False)
self.opt = opt
self.att = None
if opt['multi_scale_context_attention']:
from models.rnn import Multi_Scale_Context_Attention
self.att = Multi_Scale_Context_Attention(opt)
if opt.get('gated_sum', False):
self.att = Gated_Sum(opt)
self.bn_list = []
if not opt['no_encoder_bn']:
if self.addition:
feats_size = [feats_size[0]]
print(self.addition)
print(feats_size)
for i, item in enumerate(feats_size):
tmp_module = nn.BatchNorm1d(item)
self.bn_list.append(tmp_module)
self.add_module("bn%d"%(i), tmp_module)
def forward(self, encoder_outputs, encoder_hiddens):
if (self.decoder_type != 'ENSEMBLE' and self.encoder_type == 'GRU' and not self.opt.get('two_stream', False)) \
or self.encoder_type == 'IEL' \
or (self.encoder_type == 'IPE' and self.opt.get('MSLSTM', False)):
if isinstance(encoder_hiddens[0], tuple):
hx = []
cx = []
for h in encoder_hiddens:
hx.append(h[0])
cx.append(h[1])
encoder_hiddens = (torch.stack(hx, dim=0).mean(0), torch.stack(cx, dim=0).mean(0))
else:
encoder_hiddens = torch.stack(encoder_hiddens, dim=0).mean(0)
if self.att is not None:
encoder_outputs = self.att(encoder_outputs)
if self.addition:
assert isinstance(encoder_outputs, list)
encoder_outputs = torch.stack(encoder_outputs, dim=0).mean(0)
#encoder_outputs = torch.stack(encoder_outputs, dim=0).max(0)[0]
encoder_outputs = encoder_outputs if isinstance(encoder_outputs, list) else [encoder_outputs]
if len(self.bn_list):
assert len(encoder_outputs) == len(self.bn_list)
for i in range(len(encoder_outputs)):
batch_size, seq_len, _ = encoder_outputs[i].shape
encoder_outputs[i] = self.bn_list[i](encoder_outputs[i].contiguous().view(batch_size * seq_len, -1)).view(batch_size, seq_len, -1)
if self.temporal_concat:
assert isinstance(encoder_outputs, list)
encoder_outputs = torch.cat(encoder_outputs, dim=1)
#print(encoder_outputs.shape)
return encoder_outputs, encoder_hiddens | 39.739726 | 146 | 0.601344 | 5,763 | 0.993278 | 0 | 0 | 0 | 0 | 0 | 0 | 1,032 | 0.17787 |
9034fc76134be07855830d17f0d402a691811b26 | 2,489 | py | Python | scream/monorepo.py | r-kells/scream | 3f5d325cd05a0f3eccc4b579b4929be49029ab09 | [
"MIT"
] | 26 | 2018-11-29T13:33:25.000Z | 2021-11-22T18:45:19.000Z | scream/monorepo.py | r-kells/scream | 3f5d325cd05a0f3eccc4b579b4929be49029ab09 | [
"MIT"
] | 14 | 2019-01-20T00:07:13.000Z | 2020-07-15T13:19:29.000Z | scream/monorepo.py | r-kells/scream | 3f5d325cd05a0f3eccc4b579b4929be49029ab09 | [
"MIT"
] | 2 | 2019-02-25T17:31:47.000Z | 2020-01-22T22:10:41.000Z | import collections
from scream.files import Docs, Scream, Tox
class Monorepo(object):
def __init__(self, root_dir):
self.root_dir = root_dir
self.config = Scream(self.root_dir)
def sync(self):
"""Used internally ensure monorepo maintains certain standards.
"""
self.config = Scream(self.root_dir)
Tox(self.config.packages).write(self.root_dir)
Docs(self.config.packages).write(self.root_dir)
def validate_mono_repo(self):
all_pypi_packages = self.get_all_pypi_packages()
warn_unpinned = self.warn_unpinned_packages(all_pypi_packages)
warn_dependency_conflict = self.warn_dependency_conflict(all_pypi_packages)
for package in self.config.packages:
self.intersect_warning(package.package_name, "has unpinned dependencies",
warn_unpinned, package.other_dependencies)
self.intersect_warning(package.package_name, "more than 1 package has a different version for",
warn_dependency_conflict, package.other_dependencies)
def warn_unpinned_packages(self, pypi_packages):
to_report_packages = []
for p in pypi_packages:
if "==" not in p:
to_report_packages.append(p)
return to_report_packages
def warn_dependency_conflict(self, pypi_packages):
to_report_packages = []
counts = version_counter(pypi_packages)
for p in pypi_packages:
if len(counts[(p.split("==")[0])]) > 1:
to_report_packages.append(p)
return to_report_packages
def get_all_pypi_packages(self):
p = []
for package in self.config.packages:
p.extend(package.other_dependencies)
return p
@staticmethod
def intersect_warning(name, description, list1, list2):
intersect = set(list1).intersection(set(list2))
if intersect:
print("Warning: Package {name} {description}: {intersect}.".format(
name=name,
description=description,
intersect=', '.join(intersect)
))
def version_counter(pypi_packages):
results = collections.defaultdict(set)
for p in pypi_packages:
try:
name, version = p.split("==")
except Exception:
name = p.split("==")[0]
version = 'LATEST'
results[name].update([version])
return results
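# Behaviour sketch of version_counter (hypothetical pins, for illustration only):
#   version_counter(['requests==2.22.0', 'requests==2.25.1', 'numpy'])
#   -> {'requests': {'2.22.0', '2.25.1'}, 'numpy': {'LATEST'}}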
| 34.09589 | 107 | 0.627561 | 2,107 | 0.846525 | 0 | 0 | 361 | 0.145038 | 0 | 0 | 232 | 0.09321 |
9036fe41e979c0dcc374002d6e7021871b3e9ae0 | 9,837 | py | Python | src/oddball/oddball.py | stillarrow/NetworkAnomalyDetection | f41fd6b6d7cb302dc2eaa4c0f8d7b04b7ed4fd3c | [
"MIT"
] | 1 | 2021-05-12T09:37:47.000Z | 2021-05-12T09:37:47.000Z | src/oddball/oddball.py | stillarrow/NetworkAnomalyDetection | f41fd6b6d7cb302dc2eaa4c0f8d7b04b7ed4fd3c | [
"MIT"
] | null | null | null | src/oddball/oddball.py | stillarrow/NetworkAnomalyDetection | f41fd6b6d7cb302dc2eaa4c0f8d7b04b7ed4fd3c | [
"MIT"
] | null | null | null | '''
Python3 implementation of oddball
@author:
Tao Yu (gloooryyt@gmail.com)
'''
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import LocalOutlierFactor
# feature dictionary which format is {node i's id:Ni, Ei, Wi, λw,i}
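# A hypothetical instance of that structure, for illustration only (values are made up):
#   featureDict = {0: [4, 6, 9.5, 3.2],   # node 0: N=4, E=6, W=9.5, lambda_w=3.2
#                  1: [3, 3, 2.0, 1.1]}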
def star_or_clique(featureDict):
N = []
E = []
for key in featureDict.keys():
N.append(featureDict[key][0])
E.append(featureDict[key][1])
# E=CN^α => log on both sides => logE=logC+αlogN
# regard as y=b+wx to do linear regression
# here the base of log is 2
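# so the fitted intercept/slope below translate back as C = 2**b and alpha = w;
# with hypothetical fit values b = 1.0 and w = 1.5 this would mean E ~= 2 * N**1.5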
y_train = np.log2(E)
y_train = np.array(y_train)
y_train = y_train.reshape(len(E), 1)
x_train = np.log2(N)
x_train = np.array(x_train)
x_train = x_train.reshape(len(N), 1)
model = LinearRegression()
model.fit(x_train, y_train)
w = model.coef_[0][0]
b = model.intercept_[0]
C = 2**b
alpha = w
outlineScoreDict = {}
for key in featureDict.keys():
yi = featureDict[key][1]
xi = featureDict[key][0]
outlineScore = (max(yi, C*(xi**alpha))/min(yi, C*(xi**alpha)))*np.log(abs(yi-C*(xi**alpha))+1)
outlineScoreDict[key] = outlineScore
return outlineScoreDict
def heavy_vicinity(featureDict):
W = []
E = []
for key in featureDict.keys():
W.append(featureDict[key][2])
E.append(featureDict[key][1])
# W=CE^β => log on both sides => logW=logC+βlogE
# regard as y=b+wx to do linear regression
# here the base of log is 2
y_train = np.log2(W)
y_train = np.array(y_train)
y_train = y_train.reshape(len(W), 1)
x_train = np.log2(E)
x_train = np.array(x_train)
x_train = x_train.reshape(len(E), 1)
model = LinearRegression()
model.fit(x_train, y_train)
w = model.coef_[0][0]
b = model.intercept_[0]
C = 2**b
beta = w
outlineScoreDict = {}
for key in featureDict.keys():
yi = featureDict[key][2]
xi = featureDict[key][1]
outlineScore = (max(yi, C*(xi**beta))/min(yi, C*(xi**beta)))*np.log(abs(yi-C*(xi**beta))+1)
outlineScoreDict[key] = outlineScore
return outlineScoreDict
def dominant_edge(featureDict):
Lambda_w_i = []
W = []
for key in featureDict.keys():
Lambda_w_i.append(featureDict[key][3])
W.append(featureDict[key][2])
#λ=CW^γ => log on both sides => logλ=logC+γlogW
#regard as y=b+wx to do linear regression
#here the base of log is 2
y_train = np.log2(Lambda_w_i)
y_train = np.array(y_train)
y_train = y_train.reshape(len(Lambda_w_i), 1)
x_train = np.log2(W)
x_train = np.array(x_train)
x_train = x_train.reshape(len(W), 1)
model = LinearRegression()
model.fit(x_train, y_train)
w = model.coef_[0][0]
b = model.intercept_[0]
C = 2 ** b
beta = w
outlineScoreDict = {}
for key in featureDict.keys():
yi = featureDict[key][3]
xi = featureDict[key][2]
outlineScore = (max(yi, C * (xi ** beta)) / min(yi, C * (xi ** beta))) * np.log(abs(yi - C * (xi ** beta)) + 1)
outlineScoreDict[key] = outlineScore
return outlineScoreDict
def star_or_clique_withLOF(featureDict):
N = []
E = []
for key in featureDict.keys():
N.append(featureDict[key][0])
E.append(featureDict[key][1])
# E=CN^α => log on both sides => logE=logC+αlogN
# regard as y=b+wx to do linear regression
# here the base of log is 2
y_train = np.log2(E)
y_train = np.array(y_train)
y_train = y_train.reshape(len(E), 1)
x_train = np.log2(N)
x_train = np.array(x_train)
x_train = x_train.reshape(len(N), 1) # the order in x_train and y_train is the same as which in featureDict.keys() now
# prepare data for LOF
xAndyForLOF = []
for index in range(len(N)):
tempArray = np.array([x_train[index][0], y_train[index][0]])
xAndyForLOF.append(tempArray)
xAndyForLOF = np.array(xAndyForLOF)
model = LinearRegression()
model.fit(x_train, y_train)
w = model.coef_[0][0]
b = model.intercept_[0]
C = 2**b
alpha = w
print('alpha={}'.format(alpha))
# LOF algorithm
clf = LocalOutlierFactor(n_neighbors=20)
clf.fit(xAndyForLOF)
LOFScoreArray = -clf.negative_outlier_factor_
outScoreDict = {}
count = 0 # Used to take LOFScore in sequence from LOFScoreArray
# get the maximum outLine
maxOutLine = 0
for key in featureDict.keys():
yi = featureDict[key][1]
xi = featureDict[key][0]
outlineScore = (max(yi, C*(xi**alpha))/min(yi, C*(xi**alpha)))*np.log(abs(yi-C*(xi**alpha))+1)
if outlineScore > maxOutLine:
maxOutLine = outlineScore
print('maxOutLine={}'.format(maxOutLine))
# get the maximum LOFScore
maxLOFScore = 0
for ite in range(len(N)):
if LOFScoreArray[ite] > maxLOFScore:
maxLOFScore = LOFScoreArray[ite]
print('maxLOFScore={}'.format(maxLOFScore))
for key in featureDict.keys():
yi = featureDict[key][1]
xi = featureDict[key][0]
outlineScore = (max(yi, C*(xi**alpha))/min(yi, C*(xi**alpha)))*np.log(abs(yi-C*(xi**alpha))+1)
LOFScore = LOFScoreArray[count]
count += 1
outScore = outlineScore/maxOutLine + LOFScore/maxLOFScore
outScoreDict[key] = outScore
return outScoreDict
def heavy_vicinity_withLOF(featureDict):
W = []
E = []
for key in featureDict.keys():
W.append(featureDict[key][2])
E.append(featureDict[key][1])
# W=CE^β => log on both sides => logW=logC+βlogE
# regard as y=b+wx to do linear regression
# here the base of log is 2
y_train = np.log2(W)
y_train = np.array(y_train)
y_train = y_train.reshape(len(W), 1)
x_train = np.log2(E)
x_train = np.array(x_train)
x_train = x_train.reshape(len(E), 1) # the order in x_train and y_train is the same as which in featureDict.keys() now
# prepare data for LOF
xAndyForLOF = []
for index in range(len(W)):
tempArray = np.array([x_train[index][0], y_train[index][0]])
xAndyForLOF.append(tempArray)
xAndyForLOF = np.array(xAndyForLOF)
model = LinearRegression()
model.fit(x_train, y_train)
w = model.coef_[0][0]
b = model.intercept_[0]
C = 2**b
beta = w
print('beta={}'.format(beta))
# LOF algorithm
clf = LocalOutlierFactor(n_neighbors=20)
clf.fit(xAndyForLOF)
LOFScoreArray = -clf.negative_outlier_factor_
outScoreDict = {}
count = 0 # Used to take LOFScore in sequence from LOFScoreArray
# get the maximum outLine
maxOutLine = 0
for key in featureDict.keys():
yi = featureDict[key][2]
xi = featureDict[key][1]
outlineScore = (max(yi, C*(xi**beta))/min(yi, C*(xi**beta)))*np.log(abs(yi-C*(xi**beta))+1)
if outlineScore > maxOutLine:
maxOutLine = outlineScore
print('maxOutLine={}'.format(maxOutLine))
# get the maximum LOFScore
maxLOFScore = 0
for ite in range(len(W)):
if LOFScoreArray[ite] > maxLOFScore:
maxLOFScore = LOFScoreArray[ite]
print('maxLOFScore={}'.format(maxLOFScore))
for key in featureDict.keys():
yi = featureDict[key][2]
xi = featureDict[key][1]
outlineScore = (max(yi, C*(xi**beta))/min(yi, C*(xi**beta)))*np.log(abs(yi-C*(xi**beta))+1)
LOFScore = LOFScoreArray[count]
count += 1
outScore = outlineScore/maxOutLine + LOFScore/maxLOFScore
outScoreDict[key] = outScore
return outScoreDict
def dominant_edge_withLOF(featureDict):
Lambda_w_i = []
W = []
for key in featureDict.keys():
Lambda_w_i.append(featureDict[key][3])
W.append(featureDict[key][2])
# λ=CW^γ => log on both sides => logλ=logC+γlogW
# regard as y=b+wx to do linear regression
# here the base of log is 2
y_train = np.log2(Lambda_w_i)
y_train = np.array(y_train)
y_train = y_train.reshape(len(Lambda_w_i), 1)
x_train = np.log2(W)
x_train = np.array(x_train)
x_train = x_train.reshape(len(W), 1) # the order in x_train and y_train is the same as which in featureDict.keys() now
# prepare data for LOF
xAndyForLOF = []
for index in range(len(W)):
tempArray = np.array([x_train[index][0], y_train[index][0]])
xAndyForLOF.append(tempArray)
xAndyForLOF = np.array(xAndyForLOF)
model = LinearRegression()
model.fit(x_train, y_train)
w = model.coef_[0][0]
b = model.intercept_[0]
C = 2**b
gamma = w
print('gamma={}'.format(gamma))
# LOF algorithm
clf = LocalOutlierFactor(n_neighbors=20)
clf.fit(xAndyForLOF)
LOFScoreArray = -clf.negative_outlier_factor_
outScoreDict = {}
count = 0 # Used to take LOFScore in sequence from LOFScoreArray
# get the maximum outLine
maxOutLine = 0
for key in featureDict.keys():
yi = featureDict[key][3]
xi = featureDict[key][2]
outlineScore = (max(yi, C*(xi**gamma))/min(yi, C*(xi**gamma)))*np.log(abs(yi-C*(xi**gamma))+1)
if outlineScore > maxOutLine:
maxOutLine = outlineScore
print('maxOutLine={}'.format(maxOutLine))
# get the maximum LOFScore
maxLOFScore = 0
for ite in range(len(W)):
if LOFScoreArray[ite] > maxLOFScore:
maxLOFScore = LOFScoreArray[ite]
print('maxLOFScore={}'.format(maxLOFScore))
for key in featureDict.keys():
yi = featureDict[key][3]
xi = featureDict[key][2]
outlineScore = (max(yi, C*(xi**gamma))/min(yi, C*(xi**gamma)))*np.log(abs(yi-C*(xi**gamma))+1)
LOFScore = LOFScoreArray[count]
count += 1
outScore = outlineScore/maxOutLine + LOFScore/maxLOFScore
outScoreDict[key] = outScore
return outScoreDict
| 31.630225 | 125 | 0.620718 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,655 | 0.167952 |
9037bc76d22fa05dd0f3bfed5e08c4fd3d0cc516 | 538 | py | Python | tbase/common/logger.py | iminders/TradeBaselines | 26eb87f2bcd5f6ff479149219b38b17002be6a40 | [
"MIT"
] | 16 | 2020-03-19T15:12:28.000Z | 2021-12-20T06:02:32.000Z | tbase/common/logger.py | iminders/TradeBaselines | 26eb87f2bcd5f6ff479149219b38b17002be6a40 | [
"MIT"
] | 14 | 2020-03-23T03:57:00.000Z | 2021-12-20T05:53:33.000Z | tbase/common/logger.py | iminders/TradeBaselines | 26eb87f2bcd5f6ff479149219b38b17002be6a40 | [
"MIT"
] | 7 | 2020-03-25T00:30:18.000Z | 2021-01-31T18:45:09.000Z | import logging
import os
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s %(filename)s[%(lineno)d] %(levelname)s %(message)s')
logger = logging.getLogger()
dir_name = os.path.join("/tmp", "tbase")
if not os.path.exists(dir_name):
os.makedirs(dir_name)
handler = logging.FileHandler(os.path.join(dir_name, "tbase.log"))
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s %(filename)s[%(lineno)d] %(levelname)s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
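# Illustrative usage (added for demonstration, not part of the original file):
# other modules are expected to import this module-level `logger`, which writes
# to both the console and /tmp/tbase/tbase.log.
if __name__ == '__main__':
    logger.info("logger configured; log file at %s",
                os.path.join(dir_name, "tbase.log"))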
| 26.9 | 76 | 0.723048 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 152 | 0.282528 |
9037ec295216061bb4362e12f89dff7a2894c6f3 | 3,125 | py | Python | profesionales/migrations/0005_auto_20190928_2338.py | cluster311/ggg | 262173c66fe40ada30083d439a79f16f841f5772 | [
"BSD-3-Clause"
] | 6 | 2020-03-16T02:51:16.000Z | 2020-11-10T00:58:01.000Z | profesionales/migrations/0005_auto_20190928_2338.py | cluster311/ggg | 262173c66fe40ada30083d439a79f16f841f5772 | [
"BSD-3-Clause"
] | 204 | 2019-09-19T02:00:57.000Z | 2022-02-10T10:48:52.000Z | profesionales/migrations/0005_auto_20190928_2338.py | cluster311/ggg | 262173c66fe40ada30083d439a79f16f841f5772 | [
"BSD-3-Clause"
] | 3 | 2019-09-16T22:59:24.000Z | 2022-03-21T22:52:44.000Z | # Generated by Django 2.2.4 on 2019-09-28 23:38
import address.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('address', '0002_auto_20160213_1726'),
('profesionales', '0004_auto_20190927_0004'),
]
operations = [
migrations.AlterModelOptions(
name='profesional',
options={'permissions': [('can_view_tablero', 'Puede ver los tableros de comandos sobre profesionales')], 'verbose_name_plural': 'Profesionales'},
),
migrations.RemoveField(
model_name='profesional',
name='departamento',
),
migrations.RemoveField(
model_name='profesional',
name='dni',
),
migrations.RemoveField(
model_name='profesional',
name='domicilio',
),
migrations.RemoveField(
model_name='profesional',
name='localidad',
),
migrations.RemoveField(
model_name='profesional',
name='telefono',
),
migrations.AddField(
model_name='profesional',
name='direccion',
field=address.models.AddressField(null=True, on_delete=django.db.models.deletion.SET_NULL, to='address.Address'),
),
migrations.AddField(
model_name='profesional',
name='fecha_nacimiento',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='profesional',
name='nacionalidad',
field=models.CharField(choices=[('argentina', 'argentina'), ('boliviana', 'boliviana'), ('brasilera', 'brasilera'), ('chilena', 'chilena'), ('colombiana', 'colombiana'), ('ecuatoriana', 'ecuatoriana'), ('paraguaya', 'paraguaya'), ('peruana', 'peruana'), ('uruguaya', 'uruguaya'), ('venezolana', 'venezolana'), ('otra', 'otra')], default='argentina', max_length=50),
),
migrations.AddField(
model_name='profesional',
name='numero_documento',
field=models.CharField(blank=True, help_text='Deje en blanco si está indocumentado', max_length=30, null=True),
),
migrations.AddField(
model_name='profesional',
name='sexo',
field=models.CharField(choices=[('masculino', 'masculino'), ('femenino', 'femenino'), ('otro', 'otro')], default='masculino', max_length=20),
),
migrations.AddField(
model_name='profesional',
name='tipo_documento',
field=models.CharField(choices=[('DNI', 'DNI'), ('LC', 'LC'), ('LE', 'LE'), ('PASAPORTE', 'PASAPORTE'), ('OTRO', 'OTRO')], default='DNI', max_length=20),
),
migrations.AlterField(
model_name='profesional',
name='apellidos',
field=models.CharField(max_length=30),
),
migrations.AlterField(
model_name='profesional',
name='nombres',
field=models.CharField(max_length=50),
),
]
| 38.580247 | 377 | 0.57824 | 2,978 | 0.952655 | 0 | 0 | 0 | 0 | 0 | 0 | 1,007 | 0.322137 |
903b18672626ab4ad3a5e3aded8aea7688abd5d5 | 757 | py | Python | standards/tests/models.py | GROCCAD/groccad | 1d461043030bebe277d74b1c9df9877436baa270 | [
"MIT"
] | 1 | 2022-03-05T03:11:51.000Z | 2022-03-05T03:11:51.000Z | standards/tests/models.py | rocdata/rocserver | 1d461043030bebe277d74b1c9df9877436baa270 | [
"MIT"
] | null | null | null | standards/tests/models.py | rocdata/rocserver | 1d461043030bebe277d74b1c9df9877436baa270 | [
"MIT"
] | null | null | null | from functools import partial
from django.db import models
from standards.fields import CharIdField
# MODEL FIXTURES
################################################################################
class CharIdModel(models.Model):
field = CharIdField()
class CharIdModelWithPrefix(models.Model):
field = CharIdField(prefix='WP', length=10)
class NullableCharIdModel(models.Model):
field = CharIdField(blank=True, null=True)
class PrimaryKeyCharIdModel(models.Model):
id = CharIdField(primary_key=True)
class RelatedToCharIdModel(models.Model):
char_fk = models.ForeignKey('PrimaryKeyCharIdModel', models.CASCADE)
class CharIdChildModel(PrimaryKeyCharIdModel):
pass
class CharIdGrandchildModel(CharIdChildModel):
pass
| 23.65625 | 80 | 0.698811 | 540 | 0.713342 | 0 | 0 | 0 | 0 | 0 | 0 | 123 | 0.162483 |
903bd4a9af5949595a7b7528cd44f5048565dedd | 10,070 | py | Python | torchsrc/models/fcn32s_BN.py | yuankaihuo/MedPhysics | 94d8c5357b76658b9b161b541a1f195c6550ce55 | [
"Apache-2.0"
] | null | null | null | torchsrc/models/fcn32s_BN.py | yuankaihuo/MedPhysics | 94d8c5357b76658b9b161b541a1f195c6550ce55 | [
"Apache-2.0"
] | null | null | null | torchsrc/models/fcn32s_BN.py | yuankaihuo/MedPhysics | 94d8c5357b76658b9b161b541a1f195c6550ce55 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# https://github.com/shelhamer/fcn.berkeleyvision.org/blob/master/surgery.py
def get_upsample_filter(size):
"""Make a 2D bilinear kernel suitable for upsampling"""
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
filter = (1 - abs(og[0] - center) / factor) * \
(1 - abs(og[1] - center) / factor)
return torch.from_numpy(filter).float()
class FCN32s_BN(nn.Module):
def __init__(self, n_class=21, nodeconv=False):
super(FCN32s_BN, self).__init__()
self.nodeconv = nodeconv
self.conv1 = nn.Sequential(
# conv1
nn.Conv2d(3, 64, 3, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, 3, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True), # 1/2
)
self.conv2 = nn.Sequential(
# conv2
nn.Conv2d(64, 128, 3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 128, 3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True), # 1/4
)
self.conv3 = nn.Sequential(
# conv3
nn.Conv2d(128, 256, 3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, 3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, 3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True), # 1/8
)
self.conv4 = nn.Sequential(
# conv4
nn.Conv2d(256, 512, 3, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 3, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 3, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True), # 1/16
)
self.conv5 = nn.Sequential(
# conv5
nn.Conv2d(512, 512, 3, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 3, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 3, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True), # 1/32
)
self.classifier = nn.Sequential(
# fc6
nn.Conv2d(512, 1024, 7, padding=1),
nn.BatchNorm2d(1024),
nn.ReLU(inplace=True),
nn.Dropout2d(),
# fc7
nn.Conv2d(1024, 1024, 1, padding=1),
nn.BatchNorm2d(1024),
nn.ReLU(inplace=True),
nn.Dropout2d(),
# score_fr
nn.Conv2d(1024, n_class, 1, padding=1),
)
self.maxPool_fc = nn.Sequential(
nn.MaxPool2d(2, stride=2, ceil_mode=True),
nn.BatchNorm2d(512),
)
self.upscore = nn.Sequential(
nn.ConvTranspose2d(n_class,n_class,4,stride=2,padding=1,output_padding=0,bias=True),
nn.BatchNorm2d(n_class),
)
self.upscore4 = nn.Sequential(
nn.ConvTranspose2d(n_class,n_class,4,stride=2,padding=1,output_padding=0,bias=True),
nn.BatchNorm2d(n_class),
)
self.upscore3 = nn.Sequential(
nn.ConvTranspose2d(n_class,n_class,4,stride=2,padding=1,output_padding=0,bias=True),
nn.BatchNorm2d(n_class),
)
self.upscore2 = nn.Sequential(
nn.ConvTranspose2d(n_class,n_class,4,stride=2,padding=1,output_padding=0,bias=True),
nn.BatchNorm2d(n_class),
)
self.upscore1 = nn.Sequential(
nn.ConvTranspose2d(n_class,n_class,4,stride=2,padding=1,output_padding=0,bias=True),
nn.BatchNorm2d(n_class),
)
self.score4 = nn.Sequential(
# torch.nn.Conv2d(in_channels, out_channels, kernel_size,
# stride=1, padding=0, dilation=1,
# groups=1, bias=True)
# batch x 1 x 28 x 28 -> batch x 512
nn.Conv2d(512, n_class, 1, stride=1, padding=0),
nn.BatchNorm2d(n_class),
)
self.score3 = nn.Sequential(
nn.Conv2d(256, n_class, 1, stride=1, padding=0),
nn.BatchNorm2d(n_class),
)
self.score2 = nn.Sequential(
nn.Conv2d(128, n_class, 1, stride=1, padding=0),
nn.BatchNorm2d(n_class),
)
self.score1 = nn.Sequential(
nn.Conv2d(64, n_class, 1, stride=1, padding=0),
nn.BatchNorm2d(n_class),
)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0, 0.01)
if m.bias is not None:
m.bias.data.zero_()
if isinstance(m, nn.ConvTranspose2d):
m.weight.data.normal_(0, 0.01)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
#print("input size = %s"%(str(x.size())))
hc1 = self.conv1(x)
#print("conv1 size = %s"%(str(hc1.size())))
hc2 = self.conv2(hc1)
#print("conv2 size = %s"%(str(hc2.size())))
hc3 = self.conv3(hc2)
#print("conv3 size = %s"%(str(hc3.size())))
hc4 = self.conv4(hc3)
#print("conv4 size = %s"%(str(hc4.size())))
hc5 = self.conv5(hc4)
#print("conv5 size = %s"%(str(hc5.size())))
hc5_f = self.maxPool_fc(hc5)
hc5_f = hc5_f.view(-1,8*8*512)
ha = self.classifier(hc5)
# #print("classifer size = %s"%(str(ha.size())))
hs4 = self.score4(hc4)
hd4 = self.upscore4(ha)
hf4 = torch.add(hs4, hd4)
# #print("deconv4 size = %s"%(str(hf4.size())))
hs3 = self.score3(hc3)
hd3 = self.upscore3(hf4)
hf3 = torch.add(hs3, hd3)
# #print("deconv3 size = %s"%(str(hf3.size())))
hs2 = self.score2(hc2)
hd2 = self.upscore2(hf3)
hf2 = torch.add(hs2, hd2)
# #print("deconv2 size = %s"%(str(hf2.size())))
hs1 = self.score1(hc1)
hd1 = self.upscore1(hf2)
hf1 = torch.add(hs1, hd1)
# #print("deconv1 size = %s"%(str(hf1.size())))
h = self.upscore(hf1)
# #print("output size = %s"%(str(h.size())))
return h
def copy_params_from_vgg16(self, vgg16, copy_classifier=True, copy_fc8=True, init_upscore=True):
        self.conv1[0].weight.data = vgg16.features[0].weight.data
        self.conv1[0].bias.data = vgg16.features[0].bias.data
        self.conv1[3].weight.data = vgg16.features[2].weight.data
        self.conv1[3].bias.data = vgg16.features[2].bias.data
        self.conv2[0].weight.data = vgg16.features[5].weight.data
        self.conv2[0].bias.data = vgg16.features[5].bias.data
        self.conv2[3].weight.data = vgg16.features[7].weight.data
        self.conv2[3].bias.data = vgg16.features[7].bias.data
        self.conv3[0].weight.data = vgg16.features[10].weight.data
        self.conv3[0].bias.data = vgg16.features[10].bias.data
        self.conv3[3].weight.data = vgg16.features[12].weight.data
        self.conv3[3].bias.data = vgg16.features[12].bias.data
        self.conv3[6].weight.data = vgg16.features[14].weight.data
        self.conv3[6].bias.data = vgg16.features[14].bias.data
        self.conv4[0].weight.data = vgg16.features[17].weight.data
        self.conv4[0].bias.data = vgg16.features[17].bias.data
        self.conv4[3].weight.data = vgg16.features[19].weight.data
        self.conv4[3].bias.data = vgg16.features[19].bias.data
        self.conv4[6].weight.data = vgg16.features[21].weight.data
        self.conv4[6].bias.data = vgg16.features[21].bias.data
        self.conv5[0].weight.data = vgg16.features[24].weight.data
        self.conv5[0].bias.data = vgg16.features[24].bias.data
        self.conv5[3].weight.data = vgg16.features[26].weight.data
        self.conv5[3].bias.data = vgg16.features[26].bias.data
        self.conv5[6].weight.data = vgg16.features[28].weight.data
        self.conv5[6].bias.data = vgg16.features[28].bias.data
if copy_classifier:
for i in [0, 3]:
l1 = vgg16.classifier[i]
l2 = self.classifier[i]
l2.weight.data = l1.weight.data.view(l2.weight.size())
l2.bias.data = l1.bias.data.view(l2.bias.size())
n_class = self.classifier[6].weight.size()[0]
if copy_fc8:
l1 = vgg16.classifier[6]
l2 = self.classifier[6]
l2.weight.data = l1.weight.data[:n_class, :].view(l2.weight.size())
l2.bias.data = l1.bias.data[:n_class]
if init_upscore:
# initialize upscore layer
c1, c2, h, w = self.upscore.weight.data.size()
assert c1 == c2 == n_class
assert h == w
weight = get_upsample_filter(h)
self.upscore.weight.data = \
weight.view(1, 1, h, w).repeat(c1, c2, 1, 1)
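# Illustrative smoke test (added for demonstration, not part of the original
# file). The flatten in forward(), view(-1, 8*8*512), assumes the pooled conv5
# map is 8x8, i.e. a 512x512 input; the class count below is arbitrary.
if __name__ == '__main__':
    net = FCN32s_BN(n_class=21)
    dummy_scores = net(torch.randn(1, 3, 512, 512))
    print(dummy_scores.size())  # expected: (1, 21, 512, 512)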
| 36.751825 | 101 | 0.523932 | 9,497 | 0.943098 | 0 | 0 | 0 | 0 | 0 | 0 | 987 | 0.098014 |
903c7397f31fe34f15318c7f6541642d7c880c26 | 1,067 | py | Python | src/schema/models.py | prashant0079/metabolic_assignment | 9660ef06e6015833e3c64de9c3fe34927c85ba49 | [
"MIT"
] | null | null | null | src/schema/models.py | prashant0079/metabolic_assignment | 9660ef06e6015833e3c64de9c3fe34927c85ba49 | [
"MIT"
] | 1 | 2021-09-05T15:39:56.000Z | 2021-09-05T20:26:39.000Z | src/schema/models.py | prashant0079/metabolic_assignment | 9660ef06e6015833e3c64de9c3fe34927c85ba49 | [
"MIT"
] | null | null | null | from pydantic import BaseModel
from typing import List
# The models in this module are used by the API for type validation;
# FastAPI relies on Pydantic to validate request and response data.
class GeographySchema(BaseModel):
id: int
short_name: str
name: str
class EntrySchema(BaseModel):
id: str
unit: str
geography_id: int
product_name: str
class Config:
orm_mode = True
class IndicatorSchema(BaseModel):
id: int
method: str
category: str
indicator: str
unit: str
class Config:
orm_mode = True
class ImpactSchema(BaseModel):
id: int
indicator_id: int
entry_id: int
coefficient: float
class Config:
orm_mode = True
class ImpactSchemaExtended(BaseModel):
id: int
indicator: IndicatorSchema
entry: EntrySchema
coefficient: float
class EntrySchemaExtended(BaseModel):
id: str
product_name: str
geography: GeographySchema
unit: str
impact: List[ImpactSchema]
class Config:
orm_mode = True
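# Illustrative usage (added for demonstration; the field values are invented):
# pydantic validates and coerces incoming data against these schemas, e.g. a
# numeric string passed for geography_id is converted to int.
if __name__ == '__main__':
    example_entry = EntrySchema(id='e-1', unit='kg', geography_id='7',
                                product_name='demo product')
    print(example_entry.geography_id)  # -> 7 (coerced to int)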
| 17.209677 | 58 | 0.68135 | 841 | 0.788191 | 0 | 0 | 0 | 0 | 0 | 0 | 149 | 0.139644 |
903c8cc3beaefd54c725e04177cee3cf91f69504 | 348 | py | Python | myproject/myapp/admin.py | wasit7/cs459_2018 | 78243bbc939fcc2ed7528df8c14ad75e4b78d9a2 | [
"BSD-2-Clause"
] | null | null | null | myproject/myapp/admin.py | wasit7/cs459_2018 | 78243bbc939fcc2ed7528df8c14ad75e4b78d9a2 | [
"BSD-2-Clause"
] | null | null | null | myproject/myapp/admin.py | wasit7/cs459_2018 | 78243bbc939fcc2ed7528df8c14ad75e4b78d9a2 | [
"BSD-2-Clause"
] | null | null | null | from django.contrib import admin
from .models import Product
class ProductAdmin(admin.ModelAdmin):
fields = ('name', 'price', 'category', 'image')
list_display = ('name', 'price', 'category', 'image')
list_filter = ('category', 'price', )
list_editable = ('price', 'category', 'image', )
admin.site.register(Product, ProductAdmin) | 34.8 | 57 | 0.678161 | 242 | 0.695402 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.29023 |
903de14b44d3f2b0857467165c0169f5dac5d5b8 | 404 | py | Python | app.py | 923310233/APk-Down-Load | e7c3e4fdfbf9f7d8398d91ce0c5c028dfc685f3a | [
"MIT"
] | 2 | 2021-07-28T07:06:55.000Z | 2021-07-28T07:08:19.000Z | app.py | 923310233/APk-Down-Load | e7c3e4fdfbf9f7d8398d91ce0c5c028dfc685f3a | [
"MIT"
] | null | null | null | app.py | 923310233/APk-Down-Load | e7c3e4fdfbf9f7d8398d91ce0c5c028dfc685f3a | [
"MIT"
] | null | null | null | import subprocess
with open("app_list.csv", "r") as f:
    lines = f.readlines()
for line in lines:
    print(line.strip())
    command = "node app.js " + line.strip()
    display = subprocess.run(command, stdout=subprocess.PIPE, shell=True)
# display = subprocess.run(["sudo","-u",username,"tshark", "-r", pcapname, "-Y", display_filter[sp]], stdout=subprocess.PIPE)
# display_in_list = display.stdout.split() | 31.076923 | 126 | 0.680693 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 199 | 0.492574 |
903e7c7c7eb9a7d02da0c1871291e12b6246e93e | 20,260 | py | Python | shoptimizer_api/optimizers_builtin/title_word_order_optimizer_test.py | astivi/shoptimizer | e9e415650b2b8fc07e4ae68c741e692b538e4a2c | [
"Apache-2.0"
] | null | null | null | shoptimizer_api/optimizers_builtin/title_word_order_optimizer_test.py | astivi/shoptimizer | e9e415650b2b8fc07e4ae68c741e692b538e4a2c | [
"Apache-2.0"
] | null | null | null | shoptimizer_api/optimizers_builtin/title_word_order_optimizer_test.py | astivi/shoptimizer | e9e415650b2b8fc07e4ae68c741e692b538e4a2c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for title_word_order_optimizer."""
from absl.testing import parameterized
import unittest.mock as mock
from optimizers_builtin import title_word_order_optimizer
from test_data import requests_bodies
from util import app_util
import constants
# GPC ID IS 201
_PROPER_GPC_CATEGORY_EN = 'Apparel & Accessories > Jewelry > Watches'
# GPC ID is 201
_PROPER_GPC_CATEGORY_JA = ('ファッション・アクセサリー > '
'ジュエリー > 腕時計')
# GPC ID is 5598
_GPC_CATEGORY_LEVEL_4_JA = ('ファッション・アクセサリー > '
'衣料品 > アウター > '
'コート・ジャケット')
_MAX_WMM_MOVE_THRESHOLD_EN = 25
_MAX_WMM_MOVE_THRESHOLD_JA = 12
@mock.patch(
'optimizers_builtin.title_word_order_optimizer._GCP_STRING_TO_ID_MAPPING_CONFIG_FILE_NAME',
'gpc_string_to_id_mapping_{}_test')
@mock.patch(
'optimizers_builtin.title_word_order_optimizer._TITLE_WORD_ORDER_CONFIG_FILE_NAME',
'title_word_order_config_{}_test')
@mock.patch(
'optimizers_builtin.title_word_order_optimizer._TITLE_WORD_ORDER_BLOCKLIST_FILE_NAME',
'title_word_order_blocklist_{}_test')
@mock.patch(
'optimizers_builtin.title_word_order_optimizer._TITLE_WORD_ORDER_OPTIONS_FILE_NAME',
'title_word_order_options_test')
class TitleWordOrderOptimizerTest(parameterized.TestCase):
def setUp(self):
super(TitleWordOrderOptimizerTest, self).setUp()
app_util.setup_test_app()
self.optimizer = title_word_order_optimizer.TitleWordOrderOptimizer()
def test_process_copies_highest_performing_keyword_to_front_of_title(self):
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title': 'Some title with heavy_keyword in the middle',
'googleProductCategory': _PROPER_GPC_CATEGORY_EN,
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_EN)
product = optimized_data['entries'][0]['product']
expected_title = ('[heavy_keyword] Some title with heavy_keyword in the '
'middle')
self.assertEqual(expected_title, product['title'])
self.assertEqual(1, optimization_result.num_of_products_optimized)
def test_process_copies_multiple_performing_keywords_to_front_of_title(self):
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title': 'Some title with multiple keywords heavy_keyword '
'heavy_keyword_2 in the middle',
'googleProductCategory': _PROPER_GPC_CATEGORY_EN,
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_EN)
product = optimized_data['entries'][0]['product']
expected_title = (
'[heavy_keyword_2][heavy_keyword] Some title with multiple keywords '
'heavy_keyword heavy_keyword_2 in the middle')
self.assertEqual(expected_title, product['title'])
self.assertEqual(1, optimization_result.num_of_products_optimized)
def test_process_copies_multiple_performing_keywords_to_front_of_title_in_descending_order_of_weight(
self):
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title':
'Some title with multiple keywords keyword2 keyword1 in the '
'middle',
'googleProductCategory': _PROPER_GPC_CATEGORY_EN,
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_EN)
product = optimized_data['entries'][0]['product']
expected_title = (
'[keyword1][keyword2] Some title with multiple keywords keyword2 '
'keyword1 in the middle')
self.assertEqual(expected_title, product['title'])
self.assertEqual(1, optimization_result.num_of_products_optimized)
def test_process_copies_at_most_three_performing_keywords_to_front_of_title(
self):
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title': 'Some title with multiple keywords keyword2 keyword1 '
'heavy_keyword heavy_keyword_2 in the middle',
'googleProductCategory': _PROPER_GPC_CATEGORY_EN,
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_EN)
product = optimized_data['entries'][0]['product']
expected_title = (
'[keyword1][keyword2][heavy_keyword_2] Some title with multiple '
'keywords keyword2 keyword1 heavy_keyword heavy_keyword_2 in the '
'middle')
self.assertEqual(expected_title, product['title'])
self.assertEqual(1, optimization_result.num_of_products_optimized)
def test_process_does_not_modify_title_when_the_google_product_category_is_not_in_the_config(
self):
original_title = 'Some title with heavy_keyword in the middle'
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
            'title': original_title,
'googleProductCategory': 'DIY用品 > DIY小物類',
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_EN)
product = optimized_data['entries'][0]['product']
self.assertEqual(original_title, product['title'])
self.assertEqual(0, optimization_result.num_of_products_optimized)
def test_process_does_not_modify_title_when_the_google_product_category_is_in_the_config_but_no_keywords(
self):
original_title = 'Some title with no target keywords in the middle'
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
            'title': original_title,
'googleProductCategory': _PROPER_GPC_CATEGORY_EN,
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_EN)
product = optimized_data['entries'][0]['product']
self.assertEqual(original_title, product['title'])
self.assertEqual(0, optimization_result.num_of_products_optimized)
def test_process_moves_keyword_if_title_more_than_max_title_length(self):
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title':
'a' * (title_word_order_optimizer._MAX_TITLE_LENGTH -
len(' heavy_keyword')) + ' heavy_keyword',
'googleProductCategory':
_PROPER_GPC_CATEGORY_EN,
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_EN)
product = optimized_data['entries'][0]['product']
expected_title = '[heavy_keyword] ' + 'a' * (
title_word_order_optimizer._MAX_TITLE_LENGTH - len(' heavy_keyword'))
self.assertEqual(expected_title, product['title'])
self.assertEqual(1, optimization_result.num_of_products_optimized)
def test_process_skips_one_character_wmm_keyword(self):
original_title = 'a' * _MAX_WMM_MOVE_THRESHOLD_EN + (
('Some title with single a character keyword'))
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title': original_title,
'googleProductCategory': _PROPER_GPC_CATEGORY_EN
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_EN)
product = optimized_data['entries'][0]['product']
self.assertEqual(original_title, product['title'])
self.assertEqual(0, optimization_result.num_of_products_optimized)
@parameterized.named_parameters([{
'testcase_name':
'partial_match',
'original_title':
'a' * _MAX_WMM_MOVE_THRESHOLD_JA + '有名ブランドTシャツ',
'expected_title':
'a' * _MAX_WMM_MOVE_THRESHOLD_JA + '有名ブランドTシャツ'
}, {
'testcase_name':
'accurate_match',
'original_title':
'a' * _MAX_WMM_MOVE_THRESHOLD_JA + ' 有名ブランドシャツ',
'expected_title':
'[シャツ] ' + 'a' * _MAX_WMM_MOVE_THRESHOLD_JA +
' 有名ブランドシャツ'
}])
def test_wmm_keyword_is_copied_only_with_accurate_match(
self, original_title, expected_title):
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title': original_title,
'googleProductCategory': _PROPER_GPC_CATEGORY_JA
})
optimized_data, _ = self.optimizer.process(original_data,
constants.LANGUAGE_CODE_JA)
product = optimized_data['entries'][0]['product']
self.assertEqual(expected_title, product['title'])
@parameterized.named_parameters([{
'testcase_name': 'one_word_excluded_then_added_back',
'original_title':
'レッド・スニーカー、ブランド: '
'カイナ、モデル:エオファース、色:レッド',
'expected_title':
'[カイナ][エオファース] '
'レッド・スニーカー、ブランド: '
'カイナ、モデル:エオファース、色:レッド'
}, {
'testcase_name':
'keyword_kaina_already_in_first_12_char_no_change_to_title',
'original_title':
'レッド・、カイナ,スニーカー,ブランド:、色:レッド',
'expected_title':
'レッド・、カイナ,スニーカー,ブランド:、色:レッド'
}, {
'testcase_name':
'keyword_kaina_right_at_the_limit_of_12_char_no_change_to_title',
'original_title':
'レッド・レッド1,カイナ,ブランド:、色:レッド',
'expected_title':
'レッド・レッド1,カイナ,ブランド:、色:レッド'
}, {
'testcase_name':
'keyword_kaina_is_partially_in_the_first_12_char_and_partially_out_we_copy_it_to_front_title',
'original_title':
'レッド2・レッド1,カイナ,ブランド:、色:レッド',
'expected_title':
'[カイナ] '
'レッド2・レッド1,カイナ,ブランド:、色:レッド'
}, {
'testcase_name':
'keyword_kaina_is_right_out_of_the_12_chars_we_copy_it_to_front_title',
'original_title':
'レッド21・レッド12,カイナ,ブランド:、色:レッド',
'expected_title':
'[カイナ] '
'レッド21・レッド12,カイナ,ブランド:、色:レッド'
}])
def test_scenario_jp_wmm_keyword_in_first_12_char_of_title(
self, original_title, expected_title):
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title': original_title,
'googleProductCategory': _PROPER_GPC_CATEGORY_JA
})
optimized_data, _ = self.optimizer.process(original_data,
constants.LANGUAGE_CODE_JA)
product = optimized_data['entries'][0]['product']
self.assertEqual(expected_title, product['title'])
@mock.patch(
'optimizers_builtin.title_word_order_optimizer.TitleWordOrderOptimizer._optimization_includes_description',
return_value=True)
def test_wmm_keyword_in_description_is_copied_to_title_when_options_toggle_is_on(
self, _):
description = 'とても良い カイナ とても良い'
original_title = ('レッド・スニーカー、ブランド: '
'色:レッド')
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title': original_title,
'description': description,
'googleProductCategory': _PROPER_GPC_CATEGORY_JA
})
optimized_data, _ = self.optimizer.process(original_data,
constants.LANGUAGE_CODE_JA)
product = optimized_data['entries'][0]['product']
expected_title = ('[カイナ] '
'レッド・スニーカー、ブランド: '
'色:レッド')
self.assertEqual(expected_title, product['title'])
@mock.patch(
'optimizers_builtin.title_word_order_optimizer.TitleWordOrderOptimizer._optimization_includes_description',
return_value=False)
def test_wmm_keyword_in_description_is_not_copied_when_options_toggle_is_off(
self, _):
description = 'とても良い カイナ とても良い'
original_title = ('レッド・スニーカー、ブランド: '
'、色:レッド')
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title': original_title,
'description': description,
'googleProductCategory': _PROPER_GPC_CATEGORY_JA
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_JA)
product = optimized_data['entries'][0]['product']
self.assertEqual(original_title, product['title'])
self.assertEqual(0, optimization_result.num_of_products_optimized)
@parameterized.named_parameters([{
'testcase_name': 'wmm_word_in_product_type_should_move_to_front_title',
'original_title': 'レッド・スニーカー、ブランド: '
'モデル:エオファース、色:レッド',
'product_types': ['シャツ'],
'expected_title': '[シャツ][エオファース] '
'レッド・スニーカー、ブランド: '
'モデル:エオファース、色:レッド'
}, {
'testcase_name': 'wmm_word_in_product_type_list_move_to_front_title',
'original_title': 'レッド・スニーカー、ブランド: '
'モデル:エオファース、色:レッド',
'product_types': ['シャツ', 'セーター', 'ジャケット'],
'expected_title': '[シャツ][エオファース] '
'レッド・スニーカー、ブランド: '
'モデル:エオファース、色:レッド'
}])
@mock.patch(
'optimizers_builtin.title_word_order_optimizer.TitleWordOrderOptimizer._optimization_includes_product_types',
return_value=True)
def test_wmm_keyword_in_product_types_is_copied_to_title_when_options_toggle_is_on(
self, _, original_title, product_types, expected_title):
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title': original_title,
'productTypes': product_types,
'googleProductCategory': _PROPER_GPC_CATEGORY_JA
})
optimized_data, _ = self.optimizer.process(original_data,
constants.LANGUAGE_CODE_JA)
product = optimized_data['entries'][0]['product']
self.assertEqual(expected_title, product['title'])
@mock.patch(
'optimizers_builtin.title_word_order_optimizer.TitleWordOrderOptimizer._optimization_includes_product_types',
return_value=False)
def test_wmm_keyword_in_product_types_is_not_copied_to_title_when_options_toggle_is_off(
self, _):
original_title = ('レッド・スニーカー、ブランド: '
'色:レッド')
product_types = ['シャツ']
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title': original_title,
'productTypes': product_types,
'googleProductCategory': _PROPER_GPC_CATEGORY_JA
})
optimized_data, _ = self.optimizer.process(original_data,
constants.LANGUAGE_CODE_JA)
product = optimized_data['entries'][0]['product']
self.assertEqual(original_title, product['title'])
@parameterized.named_parameters([{
'testcase_name':
'japanese_title',
'original_title':
'a' * _MAX_WMM_MOVE_THRESHOLD_JA + 'タイトルブロック'
}, {
'testcase_name': 'check_case_insensitive',
'original_title': 'a' * _MAX_WMM_MOVE_THRESHOLD_JA + 'Title Block'
}])
def test_wmm_keyword_in_blocklist_is_not_copied_to_front(
self, original_title):
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title': original_title,
'googleProductCategory': _PROPER_GPC_CATEGORY_JA
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_JA)
product = optimized_data['entries'][0]['product']
self.assertEqual(original_title, product['title'])
self.assertEqual(0, optimization_result.num_of_products_optimized)
@mock.patch(
'optimizers_builtin.title_word_order_optimizer.TitleWordOrderOptimizer._get_optimization_level',
return_value=title_word_order_optimizer._OptimizationLevel.AGGRESSIVE)
def test_keywords_in_gpc_level_3_is_copied_to_front_when_gpc_level_is_deeper_than_3_and_optimization_level_is_aggressive(
self, _):
original_title = '寒い冬からあなたを守る!モデル:ジャケット、カラー:ブラック、防寒仕様ダウンジャケット'
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title': original_title,
'googleProductCategory': _GPC_CATEGORY_LEVEL_4_JA
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_JA)
product = optimized_data['entries'][0]['product']
expected_title = f'[防寒][ダウンジャケット] {original_title}'
self.assertEqual(expected_title, product['title'])
self.assertEqual(1, optimization_result.num_of_products_optimized)
@mock.patch(
'optimizers_builtin.title_word_order_optimizer.TitleWordOrderOptimizer._get_optimization_level',
return_value=title_word_order_optimizer._OptimizationLevel.STANDARD)
def test_optimization_is_skipped_when_gpc_level_is_deeper_than_3_and_optimization_level_is_standard(
self, _):
original_title = '寒い冬からあなたを守る!モデル:ジャケット、カラー:ブラック、防寒仕様ダウンジャケット'
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title': original_title,
'googleProductCategory': _GPC_CATEGORY_LEVEL_4_JA
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_JA)
product = optimized_data['entries'][0]['product']
self.assertEqual(original_title, product['title'])
self.assertEqual(0, optimization_result.num_of_products_optimized)
def test_process_interprets_valid_gpc_id_and_copies_performant_keyword(self):
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title': 'Some title with heavy_keyword in the middle',
'googleProductCategory': '201',
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_EN)
product = optimized_data['entries'][0]['product']
expected_title = ('[heavy_keyword] Some title with heavy_keyword in the '
'middle')
self.assertEqual(expected_title, product['title'])
self.assertEqual(1, optimization_result.num_of_products_optimized)
def test_process_ignores_invalid_gpc_id_and_does_nothing(self):
original_title = 'Some title with heavy_keyword in the middle'
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title': original_title,
'googleProductCategory': '202',
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_EN)
product = optimized_data['entries'][0]['product']
self.assertEqual(original_title, product['title'])
self.assertEqual(0, optimization_result.num_of_products_optimized)
def test_promo_text_dont_get_move_to_the_front(self):
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title':
'寒い冬からあなたを守る!モデル:ジャケット、[送料無料] , カイナ ,カラー:ブラック、防寒仕様ダウンジャケット',
'googleProductCategory':
_PROPER_GPC_CATEGORY_JA,
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_JA)
product = optimized_data['entries'][0]['product']
expected_title = ('[カイナ] '
'寒い冬からあなたを守る!モデル:ジャケット、[送料無料]'
' , カイナ '
',カラー:ブラック、防寒仕様ダウンジャケット')
self.assertEqual(expected_title, product['title'])
self.assertEqual(1, optimization_result.num_of_products_optimized)
| 40.846774 | 123 | 0.696249 | 19,988 | 0.911861 | 0 | 0 | 20,562 | 0.938047 | 0 | 0 | 7,923 | 0.361451 |
903ed7280655c7a88f5f5eb4e9a427e26a17d12e | 4,035 | py | Python | contracts/models.py | sivanagarajumolabanti/IPFS | 9ae01ce09c97660ca312aad7d612bbc8eb8146e7 | [
"MIT"
] | 1 | 2019-08-27T04:20:06.000Z | 2019-08-27T04:20:06.000Z | contracts/models.py | sivanagarajumolabanti/IPFS | 9ae01ce09c97660ca312aad7d612bbc8eb8146e7 | [
"MIT"
] | null | null | null | contracts/models.py | sivanagarajumolabanti/IPFS | 9ae01ce09c97660ca312aad7d612bbc8eb8146e7 | [
"MIT"
] | null | null | null | from django.contrib.auth.models import User
from django.db import models
from django.utils.timezone import now
class Vendor(models.Model):
user = models.ManyToManyField(User)
name = models.CharField(max_length=30, null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
class File(models.Model):
file = models.FileField(upload_to='documents/')
def __str__(self):
return self.file.name
class Contract(models.Model):
approved = 1
pending = 0
    vendor_approved = 2
    STATUS_CHOICES = (
        (approved, 'Approved'),
        (vendor_approved, 'Vendors Approved'),
(pending, 'Pending'),
)
smart_choices = ((1, 'Yes'), (0, 'No'))
name = models.CharField(max_length=255)
user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
vendor = models.ForeignKey(Vendor, on_delete=models.CASCADE, null=True)
amount = models.DecimalField(null=True, max_digits=10, decimal_places=2)
installments = models.IntegerField(null=True)
amount_paid = models.DecimalField(null=True, max_digits=10, decimal_places=2)
status = models.CharField(max_length=2,
choices=STATUS_CHOICES, default=0, null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
    validity = models.DateField(default=now)
comments = models.TextField(null=True, blank=True)
smart_contract = models.BooleanField(max_length=2, choices=smart_choices, default=0)
files = models.ManyToManyField(File, related_name='files')
hash_key = models.CharField(max_length=255, blank=True)
def __str__(self):
return self.name
class Approvals(models.Model):
LEVEL_CHOICES = (
('0', 'Contract'),
('1', 'Sow'),
('2', 'Invoice'),
)
Approvals = ((1, 'Yes'), (0, 'No'))
contracts = models.ForeignKey(Contract, on_delete=models.CASCADE, null=True)
user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
status = models.BooleanField(max_length=2, choices=Approvals, default=0)
comments = models.TextField(null=True, blank=True)
contract_level = models.CharField(max_length=2,
choices=LEVEL_CHOICES, default=0, null=True, blank=True)
def __str__(self):
return self.contracts.name
class DocuSign(models.Model):
contract = models.ForeignKey(Contract, on_delete=models.CASCADE, null=True)
user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
envelope = models.CharField(max_length=255, null=True, blank=True)
document_name = models.CharField(max_length=255, null=True, blank=True)
files = models.FileField(upload_to='media/', null=True, blank=True)
def __str__(self):
return self.contract.name
class IPFSModel(models.Model):
name = models.CharField(max_length=255)
hashkey = models.CharField(max_length=255)
size = models.CharField(max_length=255)
    def __str__(self):
return self.name
class Sow(models.Model):
contract = models.ForeignKey(Contract, on_delete=models.CASCADE,null=True)
smart_choices = ((1, 'Yes'), (0, 'No'))
smart_contract = models.BooleanField(max_length=2, choices=smart_choices, default=0)
file = models.FileField(upload_to='documents/')
def __str__(self):
return self.contract.name
class Invoice(models.Model):
amount = models.DecimalField(null=True, max_digits=10, decimal_places=2)
contract = models.ForeignKey(Contract, on_delete=models.CASCADE,null=True)
file = models.FileField(upload_to='documents/')
user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
smart_choices = (('1', 'Declined'), ('0', 'Approved'), ('2', 'Created'))
status = models.CharField(max_length=2, choices=smart_choices, default='2', null=True, blank=True)
def __str__(self):
return self.contract.name
| 36.351351 | 102 | 0.691698 | 3,900 | 0.966543 | 0 | 0 | 0 | 0 | 0 | 0 | 189 | 0.04684 |
903f6e6ec0a321ed686c231ab9ebc657c40c7407 | 1,500 | py | Python | models/DenseNet.py | Apollo1840/DeepECG | 5132b5fc8f6b40c4b2f175cd5e56c4aec128ab3e | [
"MIT"
] | 2 | 2020-11-16T10:50:56.000Z | 2020-11-23T12:31:30.000Z | models/DenseNet.py | Apollo1840/DeepECG | 5132b5fc8f6b40c4b2f175cd5e56c4aec128ab3e | [
"MIT"
] | null | null | null | models/DenseNet.py | Apollo1840/DeepECG | 5132b5fc8f6b40c4b2f175cd5e56c4aec128ab3e | [
"MIT"
] | 1 | 2020-08-05T00:23:54.000Z | 2020-08-05T00:23:54.000Z | from keras.models import Sequential
from keras.layers import Dense, Dropout
def denseNet(input_dim, output_dim=4):
model = Sequential()
model.add(Dense(1024, input_shape=(input_dim,), kernel_initializer='normal', activation='relu'))
model.add(Dense(1024, kernel_initializer='normal', activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1024, kernel_initializer='normal', activation='relu'))
model.add(Dense(1024, kernel_initializer='normal', activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(512, kernel_initializer='normal', activation='relu'))
model.add(Dense(512, kernel_initializer='normal', activation='relu'))
model.add(Dense(512, kernel_initializer='normal', activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(256, kernel_initializer='normal', activation='relu'))
model.add(Dense(256, kernel_initializer='normal', activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(128, kernel_initializer='normal', activation='relu'))
model.add(Dense(128, kernel_initializer='normal', activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, kernel_initializer='normal', activation='relu'))
model.add(Dense(64, kernel_initializer='normal', activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(output_dim, kernel_initializer='normal', activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
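# Illustrative usage (added for demonstration; the input dimension below is an
# arbitrary placeholder and is not taken from the original project):
if __name__ == '__main__':
    demo_model = denseNet(input_dim=187, output_dim=4)
    demo_model.summary()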
| 41.666667 | 100 | 0.719333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 241 | 0.160667 |
9040b2be08c9dcba639583373b5f0c4c01de3091 | 13,242 | py | Python | openstackclient/tests/unit/volume/v3/fakes.py | mydevice/python-openstackclient | 4891bb38208fdcd1a2ae60e47b056841e14fbdf7 | [
"Apache-2.0"
] | 262 | 2015-01-29T20:10:49.000Z | 2022-03-23T01:59:23.000Z | openstackclient/tests/unit/volume/v3/fakes.py | mydevice/python-openstackclient | 4891bb38208fdcd1a2ae60e47b056841e14fbdf7 | [
"Apache-2.0"
] | 5 | 2015-01-21T02:37:35.000Z | 2021-11-23T02:26:00.000Z | openstackclient/tests/unit/volume/v3/fakes.py | mydevice/python-openstackclient | 4891bb38208fdcd1a2ae60e47b056841e14fbdf7 | [
"Apache-2.0"
] | 194 | 2015-01-08T07:39:27.000Z | 2022-03-30T13:51:23.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from unittest import mock
import uuid
from cinderclient import api_versions
from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes
from openstackclient.tests.unit import fakes
from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes
from openstackclient.tests.unit import utils
from openstackclient.tests.unit.volume.v2 import fakes as volume_v2_fakes
class FakeVolumeClient(object):
def __init__(self, **kwargs):
self.auth_token = kwargs['token']
self.management_url = kwargs['endpoint']
self.api_version = api_versions.APIVersion('3.0')
self.attachments = mock.Mock()
self.attachments.resource_class = fakes.FakeResource(None, {})
self.groups = mock.Mock()
self.groups.resource_class = fakes.FakeResource(None, {})
self.group_snapshots = mock.Mock()
self.group_snapshots.resource_class = fakes.FakeResource(None, {})
self.group_types = mock.Mock()
self.group_types.resource_class = fakes.FakeResource(None, {})
self.messages = mock.Mock()
self.messages.resource_class = fakes.FakeResource(None, {})
self.volumes = mock.Mock()
self.volumes.resource_class = fakes.FakeResource(None, {})
self.volume_types = mock.Mock()
self.volume_types.resource_class = fakes.FakeResource(None, {})
class TestVolume(utils.TestCommand):
def setUp(self):
super().setUp()
self.app.client_manager.volume = FakeVolumeClient(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN
)
self.app.client_manager.identity = identity_fakes.FakeIdentityv3Client(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN
)
self.app.client_manager.compute = compute_fakes.FakeComputev2Client(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN,
)
# TODO(stephenfin): Check if the responses are actually the same
FakeVolume = volume_v2_fakes.FakeVolume
FakeVolumeType = volume_v2_fakes.FakeVolumeType
class FakeVolumeGroup:
"""Fake one or more volume groups."""
@staticmethod
def create_one_volume_group(attrs=None):
"""Create a fake group.
:param attrs: A dictionary with all attributes of group
:return: A FakeResource object with id, name, status, etc.
"""
attrs = attrs or {}
group_type = attrs.pop('group_type', None) or uuid.uuid4().hex
volume_types = attrs.pop('volume_types', None) or [uuid.uuid4().hex]
# Set default attribute
group_info = {
'id': uuid.uuid4().hex,
'status': random.choice([
'available',
]),
'availability_zone': f'az-{uuid.uuid4().hex}',
'created_at': '2015-09-16T09:28:52.000000',
'name': 'first_group',
'description': f'description-{uuid.uuid4().hex}',
'group_type': group_type,
'volume_types': volume_types,
'volumes': [f'volume-{uuid.uuid4().hex}'],
'group_snapshot_id': None,
'source_group_id': None,
'project_id': f'project-{uuid.uuid4().hex}',
}
# Overwrite default attributes if there are some attributes set
group_info.update(attrs)
group = fakes.FakeResource(
None,
group_info,
loaded=True)
return group
@staticmethod
def create_volume_groups(attrs=None, count=2):
"""Create multiple fake groups.
:param attrs: A dictionary with all attributes of group
:param count: The number of groups to be faked
:return: A list of FakeResource objects
"""
groups = []
for n in range(0, count):
groups.append(FakeVolumeGroup.create_one_volume_group(attrs))
return groups
class FakeVolumeGroupSnapshot:
"""Fake one or more volume group snapshots."""
@staticmethod
def create_one_volume_group_snapshot(attrs=None, methods=None):
"""Create a fake group snapshot.
:param attrs: A dictionary with all attributes
:param methods: A dictionary with all methods
:return: A FakeResource object with id, name, description, etc.
"""
attrs = attrs or {}
# Set default attribute
group_snapshot_info = {
'id': uuid.uuid4().hex,
'name': f'group-snapshot-{uuid.uuid4().hex}',
'description': f'description-{uuid.uuid4().hex}',
'status': random.choice(['available']),
'group_id': uuid.uuid4().hex,
'group_type_id': uuid.uuid4().hex,
'project_id': uuid.uuid4().hex,
}
# Overwrite default attributes if there are some attributes set
group_snapshot_info.update(attrs)
group_snapshot = fakes.FakeResource(
None,
group_snapshot_info,
methods=methods,
loaded=True)
return group_snapshot
@staticmethod
def create_volume_group_snapshots(attrs=None, count=2):
"""Create multiple fake group snapshots.
:param attrs: A dictionary with all attributes of group snapshot
:param count: The number of group snapshots to be faked
:return: A list of FakeResource objects
"""
group_snapshots = []
for n in range(0, count):
group_snapshots.append(
FakeVolumeGroupSnapshot.create_one_volume_group_snapshot(attrs)
)
return group_snapshots
class FakeVolumeGroupType:
"""Fake one or more volume group types."""
@staticmethod
def create_one_volume_group_type(attrs=None, methods=None):
"""Create a fake group type.
:param attrs: A dictionary with all attributes of group type
:param methods: A dictionary with all methods
:return: A FakeResource object with id, name, description, etc.
"""
attrs = attrs or {}
# Set default attribute
group_type_info = {
'id': uuid.uuid4().hex,
'name': f'group-type-{uuid.uuid4().hex}',
'description': f'description-{uuid.uuid4().hex}',
'is_public': random.choice([True, False]),
'group_specs': {},
}
# Overwrite default attributes if there are some attributes set
group_type_info.update(attrs)
group_type = fakes.FakeResource(
None,
group_type_info,
methods=methods,
loaded=True)
return group_type
@staticmethod
def create_volume_group_types(attrs=None, count=2):
"""Create multiple fake group types.
:param attrs: A dictionary with all attributes of group type
:param count: The number of group types to be faked
:return: A list of FakeResource objects
"""
group_types = []
for n in range(0, count):
group_types.append(
FakeVolumeGroupType.create_one_volume_group_type(attrs)
)
return group_types
class FakeVolumeMessage:
"""Fake one or more volume messages."""
@staticmethod
def create_one_volume_message(attrs=None):
"""Create a fake message.
:param attrs: A dictionary with all attributes of message
:return: A FakeResource object with id, name, status, etc.
"""
attrs = attrs or {}
# Set default attribute
message_info = {
'created_at': '2016-02-11T11:17:37.000000',
'event_id': f'VOLUME_{random.randint(1, 999999):06d}',
'guaranteed_until': '2016-02-11T11:17:37.000000',
'id': uuid.uuid4().hex,
'message_level': 'ERROR',
'request_id': f'req-{uuid.uuid4().hex}',
'resource_type': 'VOLUME',
'resource_uuid': uuid.uuid4().hex,
'user_message': f'message-{uuid.uuid4().hex}',
}
# Overwrite default attributes if there are some attributes set
message_info.update(attrs)
message = fakes.FakeResource(
None,
message_info,
loaded=True)
return message
@staticmethod
def create_volume_messages(attrs=None, count=2):
"""Create multiple fake messages.
:param attrs: A dictionary with all attributes of message
:param count: The number of messages to be faked
:return: A list of FakeResource objects
"""
messages = []
for n in range(0, count):
messages.append(FakeVolumeMessage.create_one_volume_message(attrs))
return messages
@staticmethod
def get_volume_messages(messages=None, count=2):
"""Get an iterable MagicMock object with a list of faked messages.
If messages list is provided, then initialize the Mock object with the
list. Otherwise create one.
:param messages: A list of FakeResource objects faking messages
:param count: The number of messages to be faked
:return An iterable Mock object with side_effect set to a list of faked
messages
"""
if messages is None:
            messages = FakeVolumeMessage.create_volume_messages(count)
return mock.Mock(side_effect=messages)
class FakeVolumeAttachment:
"""Fake one or more volume attachments."""
@staticmethod
def create_one_volume_attachment(attrs=None):
"""Create a fake volume attachment.
:param attrs: A dictionary with all attributes of volume attachment
:return: A FakeResource object with id, status, etc.
"""
attrs = attrs or {}
attachment_id = uuid.uuid4().hex
volume_id = attrs.pop('volume_id', None) or uuid.uuid4().hex
server_id = attrs.pop('instance', None) or uuid.uuid4().hex
# Set default attribute
attachment_info = {
'id': attachment_id,
'volume_id': volume_id,
'instance': server_id,
'status': random.choice([
'attached',
'attaching',
'detached',
'reserved',
'error_attaching',
'error_detaching',
'deleted',
]),
'attach_mode': random.choice(['ro', 'rw']),
'attached_at': '2015-09-16T09:28:52.000000',
'detached_at': None,
'connection_info': {
'access_mode': 'rw',
'attachment_id': attachment_id,
'auth_method': 'CHAP',
'auth_password': 'AcUZ8PpxLHwzypMC',
'auth_username': '7j3EZQWT3rbE6pcSGKvK',
'cacheable': False,
'driver_volume_type': 'iscsi',
'encrypted': False,
'qos_specs': None,
'target_discovered': False,
'target_iqn':
f'iqn.2010-10.org.openstack:volume-{attachment_id}',
'target_lun': '1',
'target_portal': '192.168.122.170:3260',
'volume_id': volume_id,
},
}
# Overwrite default attributes if there are some attributes set
attachment_info.update(attrs)
attachment = fakes.FakeResource(
None,
attachment_info,
loaded=True)
return attachment
@staticmethod
def create_volume_attachments(attrs=None, count=2):
"""Create multiple fake volume attachments.
:param attrs: A dictionary with all attributes of volume attachment
:param count: The number of volume attachments to be faked
:return: A list of FakeResource objects
"""
attachments = []
for n in range(0, count):
attachments.append(
FakeVolumeAttachment.create_one_volume_attachment(attrs))
return attachments
@staticmethod
def get_volume_attachments(attachments=None, count=2):
"""Get an iterable MagicMock object with a list of faked volumes.
If attachments list is provided, then initialize the Mock object with
the list. Otherwise create one.
:param attachments: A list of FakeResource objects faking volume
attachments
:param count: The number of volume attachments to be faked
:return An iterable Mock object with side_effect set to a list of faked
volume attachments
"""
if attachments is None:
attachments = FakeVolumeAttachment.create_volume_attachments(count)
return mock.Mock(side_effect=attachments)
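# Illustrative usage (added for demonstration, not part of the original
# fixtures): unit tests typically stub the fake volume client with these
# helpers, e.g. a single faked attachment with a fixed status.
if __name__ == '__main__':
    demo_attachment = FakeVolumeAttachment.create_one_volume_attachment(
        {'status': 'attached'})
    print(demo_attachment.id, demo_attachment.status)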
| 33.953846 | 79 | 0.614258 | 12,117 | 0.915043 | 0 | 0 | 10,167 | 0.767784 | 0 | 0 | 5,710 | 0.431204 |
9040cb412be761146b6669d9fd4eade5a3ac0512 | 12,287 | py | Python | gammapy/cube/tests/test_core.py | grburgess/gammapy | 609e460698caca7223afeef5e71826c7b32728d1 | [
"BSD-3-Clause"
] | 3 | 2019-01-28T12:21:14.000Z | 2019-02-10T19:58:07.000Z | gammapy/cube/tests/test_core.py | grburgess/gammapy | 609e460698caca7223afeef5e71826c7b32728d1 | [
"BSD-3-Clause"
] | null | null | null | gammapy/cube/tests/test_core.py | grburgess/gammapy | 609e460698caca7223afeef5e71826c7b32728d1 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from numpy.testing import assert_allclose
from astropy.coordinates import Angle
from astropy.tests.helper import pytest, assert_quantity_allclose
from astropy.units import Quantity
from astropy.wcs import WCS
from ...utils.testing import requires_dependency, requires_data
from ...datasets import FermiGalacticCenter
from ...image import make_header
from ...irf import EnergyDependentTablePSF
from ...spectrum.powerlaw import power_law_evaluate
from .. import SkyCube, compute_npred_cube, convolve_cube
@requires_data('gammapy-extra')
@requires_dependency('scipy')
class TestSkyCube(object):
def setup(self):
self.sky_cube = FermiGalacticCenter.diffuse_model()
assert self.sky_cube.data.shape == (30, 21, 61)
def test_init(self):
name = 'Axel'
data = self.sky_cube.data
wcs = self.sky_cube.wcs
energy = self.sky_cube.energy
sky_cube = SkyCube(name, data, wcs, energy)
assert sky_cube.data.shape == (30, 21, 61)
def test_read_write(self, tmpdir):
filename = str(tmpdir / 'sky_cube.fits')
self.sky_cube.writeto(filename)
sky_cube = SkyCube.read(filename)
assert sky_cube.data.shape == (30, 21, 61)
def test_pix2world(self):
# Corner pixel with index [0, 0, 0]
lon, lat, energy = self.sky_cube.pix2world(0, 0, 0)
assert_quantity_allclose(lon, Quantity(344.75, 'deg'))
assert_quantity_allclose(lat, Quantity(-5.25, 'deg'))
assert_quantity_allclose(energy, Quantity(50, 'MeV'))
def test_world2pix(self):
lon = Quantity(344.75, 'deg')
lat = Quantity(-5.25, 'deg')
energy = Quantity(50, 'MeV')
x, y, z = self.sky_cube.world2pix(lon, lat, energy)
assert_allclose((x, y, z), (0, 0, 0))
def test_pix2world2pix(self):
# Test round-tripping
pix = 2.2, 3.3, 4.4
world = self.sky_cube.pix2world(*pix)
pix2 = self.sky_cube.world2pix(*world)
assert_allclose(pix2, pix)
# Check array inputs
pix = [2.2, 2.2], [3.3, 3.3], [4.4, 4.4]
world = self.sky_cube.pix2world(*pix)
pix2 = self.sky_cube.world2pix(*world)
assert_allclose(pix2, pix)
@pytest.mark.xfail
def test_flux_scalar(self):
# Corner pixel with index [0, 0, 0]
lon = Quantity(344.75, 'deg') # pixel 0
lat = Quantity(-5.25, 'deg') # pixel 0
energy = Quantity(50, 'MeV') # slice 0
actual = self.sky_cube.flux(lon, lat, energy)
expected = self.sky_cube.data[0, 0, 0]
assert_quantity_allclose(actual, expected)
# Galactic center position
        lon = Quantity(0, 'deg')  # between pixel 11 and 12 in ds9 viewer
        lat = Quantity(0, 'deg')  # between pixel 30 and 31 in ds9 viewer
energy = Quantity(528.9657943133443, 'MeV') # slice 10 in ds9 viewer
actual = self.sky_cube.flux(lon, lat, energy)
# Compute expected value by interpolating 4 neighbors
# Use data axis order: energy, lat, lon
# and remember that numpy starts counting at 0 whereas FITS start at 1
s = self.sky_cube.data
expected = s[9, 10:12, 29:31].mean()
# TODO: why are these currently inconsistent by a few % !?
# actual = 9.67254380e-07
# expected = 10.13733026e-07
assert_quantity_allclose(actual, expected)
def test_flux_mixed(self):
# Corner pixel with index [0, 0, 0]
lon = Quantity([344.75, 344.75], 'deg') # pixel 0 twice
lat = Quantity([-5.25, -5.25], 'deg') # pixel 0 twice
energy = Quantity(50, 'MeV') # slice 0
actual = self.sky_cube.flux(lon, lat, energy)
expected = self.sky_cube.data[0, 0, 0]
assert_quantity_allclose(actual, expected)
def test_flux_array(self):
pix = [2, 2], [3, 3], [4, 4]
world = self.sky_cube.pix2world(*pix)
actual = self.sky_cube.flux(*world)
expected = self.sky_cube.data[4, 3, 2]
# Quantity([3.50571123e-07, 2], '1 / (cm2 MeV s sr)')
assert_quantity_allclose(actual, expected)
def test_integral_flux_image(self):
# For a very small energy band the integral flux should be roughly
# differential flux times energy bin width
lon, lat, energy = self.sky_cube.pix2world(0, 0, 0)
denergy = 0.001 * energy
energy_band = Quantity([energy, energy + denergy])
dflux = self.sky_cube.flux(lon, lat, energy)
expected = dflux * denergy
actual = Quantity(self.sky_cube.integral_flux_image(energy_band).data[0, 0],
'1 / (cm2 s sr)')
assert_quantity_allclose(actual, expected, rtol=1e-3)
# Test a wide energy band
energy_band = Quantity([1, 10], 'GeV')
image = self.sky_cube.integral_flux_image(energy_band)
actual = image.data.sum()
# TODO: the reference result is not verified ... just pasted from the test output.
expected = 5.2481972772213124e-02
assert_allclose(actual, expected)
# Test integral flux for energy bands with units.
energy_band_check = Quantity([1000, 10000], 'MeV')
new_image = self.sky_cube.integral_flux_image(energy_band_check)
assert_allclose(new_image.data, image.data)
assert new_image.wcs.axis_type_names == ['GLON', 'GLAT']
# TODO: fix this test.
    # It's currently failing. Don't know which number (if any) is correct.
# E x: array(7.615363001210512e-05)
# E y: array(0.00015230870989335428)
@pytest.mark.xfail
def test_solid_angle(self):
actual = self.sky_cube.solid_angle[10][30]
expected = Quantity(self.sky_cube.wcs.wcs.cdelt[:-1].prod(), 'deg2')
assert_quantity_allclose(actual, expected, rtol=1e-4)
def test_coordinates(self):
coordinates = self.sky_cube.coordinates()
lon = coordinates.data.lon
lat = coordinates.data.lat
assert lon.shape == (21, 61)
assert lat.shape == (21, 61)
assert_allclose(lon[0, 0], Angle("344d45m00s"))
assert_allclose(lat[0, 0], Angle(" -5d15m00s"))
assert_allclose(lon[0, -1], Angle("14d45m00s"))
assert_allclose(lat[0, -1], Angle("-5d15m00s"))
assert_allclose(lon[-1, 0], Angle("344d45m00s"))
assert_allclose(lat[-1, 0], Angle("4d45m00s"))
assert_allclose(lon[-1, -1], Angle("14d45m00s"))
assert_allclose(lat[-1, -1], Angle("4d45m00s"))
@pytest.mark.xfail
@requires_dependency('scipy.interpolate.RegularGridInterpolator')
@requires_dependency('reproject')
def test_compute_npred_cube():
# A quickly implemented check - should be improved
filenames = FermiGalacticCenter.filenames()
sky_cube = SkyCube.read(filenames['diffuse_model'])
exposure_cube = SkyCube.read(filenames['exposure_cube'])
counts_cube = FermiGalacticCenter.counts()
energy_bounds = Quantity([10, 30, 100, 500], 'GeV')
sky_cube = sky_cube.reproject_to(exposure_cube)
npred_cube = compute_npred_cube(sky_cube,
exposure_cube,
energy_bounds)
expected_sum = counts_cube.data.sum()
actual_sum = np.nan_to_num(npred_cube.data).sum()
# Check npredicted is same order of magnitude of true counts
assert_allclose(expected_sum, actual_sum, rtol=1)
# PSF convolve the npred cube
psf = EnergyDependentTablePSF.read(FermiGalacticCenter.filenames()['psf'])
npred_cube_convolved = convolve_cube(npred_cube, psf, offset_max=Angle(3, 'deg'))
actual_convolved_sum = npred_cube_convolved.data.sum()
# Check sum is the same after convolution
assert_allclose(actual_sum, actual_convolved_sum, rtol=0.1)
# Test shape
expected = ((len(energy_bounds) - 1, exposure_cube.data.shape[1],
exposure_cube.data.shape[2]))
actual = npred_cube_convolved.data.shape
assert_allclose(actual, expected)
def make_test_cubes(energies, nxpix, nypix, binsz):
"""Makes exposure and spectral cube for tests.
Parameters
----------
energies : `~astropy.units.Quantity`
Quantity 1D array of energies of cube layers
nxpix : int
Number of pixels in x-spatial direction
nypix : int
Number of pixels in y-spatial direction
binsz : float
Spatial resolution of cube, in degrees per pixel
Returns
-------
exposure_cube : `~gammapy.sky_cube.SkyCube`
Cube of uniform exposure = 1 cm^2 s
sky_cube : `~gammapy.sky_cube.SkyCube`
Cube of differential fluxes in units of cm^-2 s^-1 GeV^-1 sr^-1
"""
header = make_header(nxpix, nypix, binsz)
header['NAXIS'] = 3
header['NAXIS3'] = len(energies)
header['CDELT3'] = 1
header['CRVAL3'] = 1
header['CRPIX3'] = 1
wcs = WCS(header)
data_array = np.ones((len(energies), 10, 10))
exposure_cube = SkyCube(data=Quantity(data_array, 'cm2 s'),
wcs=wcs, energy=energies)
flux = power_law_evaluate(energies.value, 1, 2, 1)
flux = Quantity(flux, '1/(cm2 s GeV sr)')
flux_array = np.zeros_like(data_array)
for i in np.arange(len(flux)):
flux_array[i] = flux.value[i] * data_array[i]
sky_cube = SkyCube(data=Quantity(flux_array, flux.unit),
wcs=wcs, energy=energies)
return exposure_cube, sky_cube
@requires_dependency('scipy.interpolate.RegularGridInterpolator')
@requires_dependency('reproject')
def test_analytical_npred_cube():
    # Analytical check: g=2, N=1 gives integral flux 0.5 between 1 and 2
    # (arbitrary units of energy).
    # Exposure = 1, so the solid angle is the only factor which varies.
    # Result should be 0.5 * 1 * solid_angle_array from integrating analytically
energies = Quantity([1, 2], 'MeV')
exposure_cube, sky_cube = make_test_cubes(energies, 10, 10, 1)
solid_angle_array = exposure_cube.solid_angle
# Expected npred counts (so no quantity)
expected = 0.5 * solid_angle_array.value
# Integral resolution is 1 as this is a true powerlaw case
npred_cube = compute_npred_cube(sky_cube, exposure_cube,
energies, integral_resolution=1)
actual = npred_cube.data[0]
assert_allclose(actual, expected)
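# Illustrative numerical cross-check (not part of the original test suite): the
# analytic value relied on above, \int_1^2 E^-2 dE = 1 - 1/2 = 0.5, can be
# reproduced with a simple trapezoidal integration.
def _powerlaw_integral_check():
    import numpy as np
    energy = np.linspace(1, 2, 100001)
    return np.trapz(energy ** -2, energy)  # approximately 0.5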
@requires_dependency('scipy.interpolate.RegularGridInterpolator')
@requires_dependency('reproject')
def test_convolve_cube():
filenames = FermiGalacticCenter.filenames()
sky_cube = SkyCube.read(filenames['diffuse_model'])
exposure_cube = SkyCube.read(filenames['exposure_cube'])
energy_bounds = Quantity([10, 30, 100, 500], 'GeV')
sky_cube = sky_cube.reproject_to(exposure_cube)
npred_cube = compute_npred_cube(sky_cube,
exposure_cube,
energy_bounds)
# PSF convolve the npred cube
psf = EnergyDependentTablePSF.read(FermiGalacticCenter.filenames()['psf'])
npred_cube_convolved = convolve_cube(npred_cube, psf, offset_max=Angle(5, 'deg'))
expected = npred_cube.data.sum()
actual = npred_cube_convolved.data.sum()
assert_allclose(actual, expected, rtol=1e-2)
@pytest.mark.xfail
@requires_dependency('scipy')
@requires_dependency('reproject')
def test_reproject_cube():
# TODO: a better test can probably be implemented here to avoid
# repeating code
filenames = FermiGalacticCenter.filenames()
sky_cube = SkyCube.read(filenames['diffuse_model'])
exposure_cube = SkyCube.read(filenames['exposure_cube'])
original_cube = Quantity(np.nan_to_num(sky_cube.data.value),
sky_cube.data.unit)
sky_cube = sky_cube.reproject_to(exposure_cube)
reprojected_cube = Quantity(np.nan_to_num(sky_cube.data.value),
sky_cube.data.unit)
# 0.5 degrees per pixel in diffuse model
# 2 degrees in reprojection reference
# sum of reprojected should be 1/16 of sum of original if flux-preserving
expected = 0.0625 * original_cube.sum()
actual = reprojected_cube.sum()
assert_quantity_allclose(actual, expected, rtol=1e-2)
| 39.763754 | 90 | 0.657524 | 5,918 | 0.481647 | 0 | 0 | 10,180 | 0.828518 | 0 | 0 | 3,179 | 0.258729 |
90429ee16f26834b4fd4e1ca6831ceabda97033d | 298 | py | Python | api/applications/migrations/0042_merge_20201213_0228.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | 3 | 2019-05-15T09:30:39.000Z | 2020-04-22T16:14:23.000Z | api/applications/migrations/0042_merge_20201213_0228.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | 85 | 2019-04-24T10:39:35.000Z | 2022-03-21T14:52:12.000Z | api/applications/migrations/0042_merge_20201213_0228.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | 1 | 2021-01-17T11:12:19.000Z | 2021-01-17T11:12:19.000Z | # Generated by Django 2.2.16 on 2020-12-13 02:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("applications", "0041_goodonapplication_is_precedent"),
("applications", "0041_goodonapplicationdocument"),
]
operations = []
| 21.285714 | 64 | 0.694631 | 212 | 0.711409 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.486577 |
90459d8bfe26d007178d66a09649931906768496 | 5,829 | py | Python | web_app/ca_modules/make_utils.py | Lockers13/codagio | cfe9325cb3c207f7728db3c287439ce761ffea14 | [
"MIT"
] | 2 | 2021-01-16T13:42:14.000Z | 2021-03-03T19:36:47.000Z | web_app/ca_modules/make_utils.py | Lockers13/codagio | cfe9325cb3c207f7728db3c287439ce761ffea14 | [
"MIT"
] | null | null | null | web_app/ca_modules/make_utils.py | Lockers13/codagio | cfe9325cb3c207f7728db3c287439ce761ffea14 | [
"MIT"
] | null | null | null | ### A module containing various utilities used at various points throughout the processes of submitting and analyzing problems ###
import os
import json
import subprocess
import hashlib
import sys
import random
import string
from .output_processor import process_output
from . import code_templates
def make_file(path, code, problem_data):
"""Function to create script that is used for verification and profiling purposes
Returns nothing, writes to disk"""
def write_prequel(file_obj):
for line in ctemps["IMPORTS"]:
file_obj.write("{0}\n".format(line))
file_obj.write("\n")
def write_sequel(file_obj, fname):
if input_type == "file":
if init_data is not None:
text_to_write = ctemps["TEMPLATE_CODE_FILE_WITH_DATA"]
else:
text_to_write = ctemps["TEMPLATE_CODE_FILE"]
elif input_type == "default": ### CHANGE 'auto' TO 'default' AFTER PROBLEM UPLOAD VIEW IS CLEANED !!!
if is_inputs:
if is_init_data:
text_to_write = ctemps["TEMPLATE_CODE_DEFAULT_WITH_INPUT_AND_DATA"]
else:
text_to_write = ctemps["TEMPLATE_CODE_DEFAULT"]
elif is_init_data:
text_to_write = ctemps["TEMPLATE_CODE_DEFAULT"]
for line in text_to_write:
if "template_function" in line:
line = line.replace("template_function", str(fname))
file_obj.write("{0}\n".format(line))
ctemps = code_templates.get_ctemp_dict()
program_text = code
input_type = list(problem_data["metadata"]["input_type"].keys())[0]
main_function = problem_data["metadata"]["main_function"]
init_data = problem_data["init_data"]
is_init_data = problem_data["metadata"]["init_data"]
is_inputs = problem_data["metadata"]["inputs"]
with open(path, 'w') as f:
write_prequel(f)
for line in program_text:
split_line = line.split()
if len(split_line) > 0 and line.split()[0] == "def":
func_name = line.split()[1].split("(")[0]
if func_name == main_function:
fname = func_name
f.write("{0}\n".format(line))
if not line.endswith("\n"):
f.write("\n")
write_sequel(f, fname)
def gen_sample_outputs(filename, problem_data, init_data=None, input_type="default"):
"""Utility function invoked whenever a reference problem is submitted
    Returns a list of outputs that are subsequently stored in the DB as a field associated with the given problem"""
inputs = problem_data["inputs"]
platform = sys.platform.lower()
SAMPUP_TIMEOUT = "8"
SAMPUP_MEMOUT = "1000"
timeout_cmd = "gtimeout {0}".format(SAMPUP_TIMEOUT) if platform == "darwin" else "timeout {0} -m {1}".format(SAMPUP_TIMEOUT, SAMPUP_MEMOUT) if platform == "linux" or platform == "linux2" else ""
base_cmd = "{0} python".format(timeout_cmd)
outputs = []
if input_type == "default":
programmatic_inputs = inputs
if inputs is not None:
for inp in programmatic_inputs:
input_arg = json.dumps(inp)
output = process_output(base_cmd, filename, input_arg=input_arg, init_data=init_data)
### uncomment below line for debugging
# print("CSO =>", cleaned_split_output)
outputs.append(output)
else:
output = process_output(base_cmd, filename, init_data=init_data)
### uncomment below line for debugging
# print("CSO =>", cleaned_split_output)
outputs.append(output)
elif input_type == "file":
for script in inputs:
output = process_output(base_cmd, filename, input_arg=script, init_data=init_data)
### uncomment below line for debugging
# print("CSO =>", cleaned_split_output)
outputs.append(output)
try:
os.remove(script)
except:
pass
return outputs
def get_code_from_file(path):
with open(path, 'r') as f:
return f.read().splitlines()
def generate_input(input_type, input_length, num_tests):
"""Self-explanatory utility function that generates test input for a submitted reference problem based on metadata specifications
Returns jsonified list of inputs"""
def random_string(length):
rand_string = ''.join(random.choice(string.ascii_letters) for i in range(length))
return rand_string
global_inputs = []
for i in range(num_tests):
if input_type == "integer":
inp_list = [random.randint(1, 1000) for x in range(input_length)]
elif input_type == "float":
inp_list = [round(random.uniform(0.0, 1000.0), 2) for x in range(input_length)]
elif input_type == "string":
inp_list = [random_string(random.randint(1, 10)) for x in range(input_length)]
global_inputs.append(inp_list)
return global_inputs
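### Illustrative sketch (not part of the original module): what generate_input
### returns for made-up arguments -- `num_tests` lists, each holding
### `input_length` random values of the requested type.
def _example_generate_input():
    sample = generate_input("integer", 5, 2)
    # e.g. [[421, 7, 903, 58, 612], [33, 870, 15, 499, 201]]
    return sample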
def handle_uploaded_file_inputs(processed_data):
input_dict = {"files": {}}
count = 0
### add below for loop for multiple files
# for count, file_obj in enumerate(processed_data.get("target_file")):
input_dict["files"]["file_{0}".format(count+1)] = ""
file_obj = processed_data.get("target_file")
with open("file_{0}.py".format(count+1), 'w') as g:
for chunk in file_obj.chunks():
decoded_chunk = chunk.decode("utf-8")
input_dict["files"]["file_{0}".format(count+1)] += decoded_chunk
g.write(decoded_chunk)
return input_dict
def json_reorder(hashmap):
new_hm = {}
for k in sorted(hashmap, key=lambda item: (len(item), item), reverse=False):
new_hm[k] = hashmap[k]
return new_hm | 40.2 | 198 | 0.632699 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,573 | 0.269858 |
904821f621f97dceeec43eb063d81e21fa90c37c | 21,136 | py | Python | wazimap/data/utils.py | AssembleOnline/wazimap | 1b8b68fb231b768047eee1b20ed180e4820a2890 | [
"MIT"
] | 1 | 2019-01-14T15:37:03.000Z | 2019-01-14T15:37:03.000Z | wazimap/data/utils.py | Bhanditz/wazimap | fde22a0874020cf0ae013aeec7ab55b7c5a70b27 | [
"MIT"
] | null | null | null | wazimap/data/utils.py | Bhanditz/wazimap | fde22a0874020cf0ae013aeec7ab55b7c5a70b27 | [
"MIT"
] | null | null | null | from __future__ import division
from collections import OrderedDict
from sqlalchemy import create_engine, MetaData, func
from sqlalchemy.orm import sessionmaker, class_mapper
from django.conf import settings
from django.db.backends.base.creation import TEST_DATABASE_PREFIX
from django.db import connection
if settings.TESTING:
# Hack to ensure the sqlalchemy database name matches the Django one
# during testing
url = settings.DATABASE_URL
parts = url.split("/")
# use the test database name
db_name = connection.settings_dict.get('TEST', {}).get('NAME')
if db_name is None:
db_name = TEST_DATABASE_PREFIX + parts[-1]
parts[-1] = db_name
url = '/'.join(parts)
_engine = create_engine(url)
else:
_engine = create_engine(settings.DATABASE_URL)
# See http://docs.sqlalchemy.org/en/latest/core/constraints.html#constraint-naming-conventions
naming_convention = {
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
}
_metadata = MetaData(bind=_engine, naming_convention=naming_convention)
_Session = sessionmaker(bind=_engine)
def get_session():
return _Session()
class LocationNotFound(Exception):
pass
class Location(object):
'''
Simple object to represent a location in the South African
context.
'''
def __init__(self, address, province_code, ward_code, ward_no,
municipality, coordinates):
self.address = address
self.province_code = province_code
# Northern Province is now called Limpopo
if self.province_code == 'NP':
self.province_code = 'LIM'
self.ward_code = ward_code
self.ward_no = ward_no
self.municipality = municipality
self.latitude = coordinates[0]
self.longitude = coordinates[1]
def __repr__(self):
return 'Location(address="%s", ward_code="%s", ' \
'municipality="%s", province_code="%s", ' \
'latitude=%s, longitude=%s, ward_no=%s)' \
% (self.address, self.ward_code, self.municipality,
self.province_code, self.latitude, self.longitude,
self.ward_no)
def capitalize(s):
"""
Capitalize the first char of a string, without
affecting the rest of the string.
This differs from `str.capitalize` since the latter
also lowercases the rest of the string.
"""
if not s:
return s
return ''.join([s[0].upper(), s[1:]])
def percent(num, denom, places=2):
if denom == 0:
return 0
else:
return round(num / denom * 100, places)
def ratio(num, denom, places=2):
if denom == 0:
return 0
else:
return round(num / denom, places)
def add_metadata(data, table):
if 'metadata' not in data:
data['metadata'] = {}
# this might be a SQLAlchemy model that is linked back to
# a data table
if hasattr(table, 'data_tables'):
table = table.data_tables[0]
data['metadata']['table_id'] = table.id
if table.universe:
data['metadata']['universe'] = table.universe
if table.year:
data['metadata']['year'] = table.year
# dictionaries that merge_dicts will merge
MERGE_KEYS = set(['values', 'numerators', 'error'])
def collapse_categories(data, categories, key_order=None):
if key_order:
collapsed = OrderedDict((key, {'name': key}) for key in key_order)
else:
collapsed = {}
metadata = None
if 'metadata' in data:
metadata = data['metadata']
del data['metadata']
# level 1: iterate over categories in data
for fields in data.values():
new_category_name = categories[fields['name']]
# ignore items with a None category
if new_category_name is None:
continue
collapsed.setdefault(new_category_name, {'name': new_category_name})
new_fields = collapsed[new_category_name]
# level 2: iterate over measurement objects in category
for measurement_key, measurement_objects in fields.iteritems():
if measurement_key == 'name':
continue
new_fields.setdefault(measurement_key, {})
new_measurement_objects = new_fields[measurement_key]
# level 3: iterate over data points in measurement objects
for datapoint_key, datapoint_value in measurement_objects.iteritems():
try:
new_measurement_objects.setdefault(datapoint_key, 0)
new_measurement_objects[datapoint_key] += float(datapoint_value)
except (ValueError, TypeError):
new_measurement_objects[datapoint_key] = datapoint_value
if metadata is not None:
collapsed['metadata'] = metadata
return collapsed
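def _example_collapse_categories():
    '''
    Illustrative sketch, not part of the original module (and Python 2 like the
    rest of this file): the made-up 'No' and 'Maybe' answers are folded into a
    single 'Other' category and their values are summed.
    '''
    data = {
        'yes': {'name': 'Yes', 'values': {'this': 10.0}},
        'no': {'name': 'No', 'values': {'this': 5.0}},
        'maybe': {'name': 'Maybe', 'values': {'this': 1.0}},
    }
    categories = {'Yes': 'Yes', 'No': 'Other', 'Maybe': 'Other'}
    # returns {'Yes': {... 'values': {'this': 10.0}}, 'Other': {... 'values': {'this': 6.0}}}
    return collapse_categories(data, categories)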
def calculate_median(objects, field_name):
'''
Calculates the median where obj.total is the distribution count and
getattr(obj, field_name) is the distribution segment.
Note: this function assumes the objects are sorted.
'''
total = 0
for obj in objects:
total += obj.total
half = total / 2.0
counter = 0
for i, obj in enumerate(objects):
counter += obj.total
if counter > half:
if counter - half == 1:
# total must be even (otherwise counter - half ends with .5)
return (float(getattr(objects[i - 1], field_name)) +
float(getattr(obj, field_name))) / 2.0
return float(getattr(obj, field_name))
elif counter == half:
# total must be even (otherwise half ends with .5)
return (float(getattr(obj, field_name)) +
float(getattr(objects[i + 1], field_name))) / 2.0
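def _example_calculate_median():
    '''
    Illustrative sketch, not part of the original module: a tiny, already-sorted
    distribution where `total` is the count in each segment and `age` is the
    made-up segment value. With counts 2 + 3 + 6 = 11 the middle observation
    falls in the last segment, so the function returns 30.0.
    '''
    from collections import namedtuple
    Row = namedtuple('Row', ['age', 'total'])
    rows = [Row(age=10, total=2), Row(age=20, total=3), Row(age=30, total=6)]
    return calculate_median(rows, 'age')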
def calculate_median_stat(stats):
'''
Calculates the stat (key) that lies at the median for stat data from the
output of get_stat_data.
Note: this function assumes the objects are sorted.
'''
total = 0
keys = [k for k in stats.iterkeys() if k != 'metadata']
total = sum(stats[k]['numerators']['this'] for k in keys)
half = total / 2.0
counter = 0
for key in keys:
counter += stats[key]['numerators']['this']
if counter >= half:
return key
def merge_dicts(this, other, other_key):
'''
Recursively merges 'other' dict into 'this' dict. In particular
it merges the leaf nodes specified in MERGE_KEYS.
'''
for key, values in this.iteritems():
if key in MERGE_KEYS:
if key in other:
values[other_key] = other[key]['this']
elif isinstance(values, dict):
merge_dicts(values, other[key], other_key)
def group_remainder(data, num_items=4, make_percentage=True,
remainder_name="Other"):
'''
This function assumes data is an OrderedDict instance. It iterates
over the dict items, grouping items with index >= num_items - 1 together
under key remainder_name. If make_percentage = True, the 'values' dict
contains percentages and the 'numerators' dict the totals. Otherwise
'values' contains the totals.
'''
num_key = 'numerators' if make_percentage else 'values'
total_all = dict((k, 0.0) for k in data.values()[0][num_key].keys())
total_other = total_all.copy()
other_dict = {
"name": remainder_name,
"error": {"this": 0.0},
"numerator_errors": {"this": 0.0},
num_key: total_other,
}
cutoff = num_items - 2
for i, (key, values) in enumerate(data.items()):
if key == 'metadata':
continue
for k, v in values[num_key].iteritems():
total_all[k] += v
if i > cutoff:
del data[key]
data.setdefault(remainder_name, other_dict)
for k, v in values[num_key].iteritems():
total_other[k] += v
if make_percentage:
for key, values in data.iteritems():
if key != 'metadata':
values['values'] = dict((k, percent(v, total_all[k]))
for k, v in values['numerators'].iteritems())
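def _example_group_remainder():
    '''
    Illustrative sketch, not part of the original module (and Python 2 like the
    rest of this file): with num_items=3 only the first two made-up entries are
    kept; the rest are folded into 'Other' and percentages are filled in,
    giving A 50%, B 30% and Other 20%.
    '''
    data = OrderedDict([
        ('A', {'name': 'A', 'numerators': {'this': 50.0}}),
        ('B', {'name': 'B', 'numerators': {'this': 30.0}}),
        ('C', {'name': 'C', 'numerators': {'this': 15.0}}),
        ('D', {'name': 'D', 'numerators': {'this': 5.0}}),
    ])
    group_remainder(data, num_items=3)
    return data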
def get_objects_by_geo(db_model, geo, session, fields=None, order_by=None,
only=None, exclude=None, data_table=None):
""" Get rows of statistics from the stats mode +db_model+ for a particular
geography, summing over the 'total' field and grouping by +fields+. Filters
to include +only+ and ignore +exclude+, if given.
"""
data_table = data_table or db_model.data_tables[0]
if fields is None:
fields = [c.key for c in class_mapper(db_model).attrs if c.key not in ['geo_code', 'geo_level', 'geo_version', 'total']]
fields = [getattr(db_model, f) for f in fields]
objects = session\
.query(func.sum(db_model.total).label('total'), *fields)\
.group_by(*fields)\
.filter(db_model.geo_code == geo.geo_code)\
.filter(db_model.geo_level == geo.geo_level)\
.filter(db_model.geo_version == geo.version)
if only:
for k, v in only.iteritems():
objects = objects.filter(getattr(db_model, k).in_(v))
if exclude:
for k, v in exclude.iteritems():
objects = objects.filter(getattr(db_model, k).notin_(v))
if order_by is not None:
attr = order_by
is_desc = False
if order_by[0] == '-':
is_desc = True
attr = attr[1:]
if attr == 'total':
if is_desc:
attr = attr + ' DESC'
else:
attr = getattr(db_model, attr)
if is_desc:
attr = attr.desc()
objects = objects.order_by(attr)
objects = objects.all()
if len(objects) == 0:
raise LocationNotFound("%s for geography %s version '%s' not found"
% (db_model.__table__.name, geo.geoid, geo.version))
return objects
def get_stat_data(fields, geo, session, order_by=None,
percent=True, total=None, table_fields=None,
table_name=None, only=None, exclude=None, exclude_zero=False,
recode=None, key_order=None, table_dataset=None,
percent_grouping=None, slices=None):
"""
This is our primary helper routine for building a dictionary suitable for
a place's profile page, based on a statistic.
It sums over the data for ``fields`` in the database for the place identified by
``geo`` and calculates numerators and values. If multiple fields are given,
it creates nested result dictionaries.
Control the rows that are included or ignored using ``only``, ``exclude`` and ``exclude_zero``.
    The field values can be recoded using ``recode`` and re-ordered using ``key_order``.
:param fields: the census field to build stats for. Specify a list of fields to build
nested statistics. If multiple fields are specified, then the values
of parameters such as ``only``, ``exclude`` and ``recode`` will change.
These must be fields in `api.models.census.census_fields`, e.g. 'highest educational level'
:type fields: str or list
    :param geo: the geography object
:param dbsession session: sqlalchemy session
:param str order_by: field to order by, or None for default, eg. '-total'
:param bool percent: should we calculate percentages, or just sum raw values?
:param list percent_grouping: when calculating percentages, which fields should rows be grouped by?
Default: none of them -- calculate each entry as a percentage of the
whole dataset. Ignored unless ``percent`` is ``True``.
:param list table_fields: list of fields to use to find the table, defaults to `fields`
:param int total: the total value to use for percentages, or None to total columns automatically
:param str table_name: override the table name, otherwise it's calculated from the fields and geo_level
:param list only: only include these field values. If ``fields`` has many items, this must be a dict
mapping field names to a list of strings.
:type only: dict or list
:param exclude: ignore these field values. If ``fields`` has many items, this must be a dict
mapping field names to a list of strings. Field names are checked
before any recoding.
:type exclude: dict or list
:param bool exclude_zero: ignore fields that have a zero or null total
:param recode: function or dict to recode values of ``key_field``. If ``fields`` is a singleton,
then the keys of this dict must be the values to recode from, otherwise
they must be the field names and then the values. If this is a lambda,
it is called with the field name and its value as arguments.
:type recode: dict or lambda
:param key_order: ordering for keys in result dictionary. If ``fields`` has many items,
this must be a dict from field names to orderings.
The default ordering is determined by ``order``.
:type key_order: dict or list
:param str table_dataset: dataset used to help find the table if ``table_name`` isn't given.
:param list slices: return only a slice of the final data, by choosing a single value for each
field in the field list, as specified in the slice list.
:return: (data-dictionary, total)
"""
from .tables import FieldTable
if not isinstance(fields, list):
fields = [fields]
n_fields = len(fields)
many_fields = n_fields > 1
if order_by is None:
order_by = fields[0]
if only is not None:
if not isinstance(only, dict):
if many_fields:
raise ValueError("If many fields are given, then only must be a dict. I got %s instead" % only)
else:
only = {fields[0]: set(only)}
if exclude is not None:
if not isinstance(exclude, dict):
if many_fields:
raise ValueError("If many fields are given, then exclude must be a dict. I got %s instead" % exclude)
else:
exclude = {fields[0]: set(exclude)}
if key_order:
if not isinstance(key_order, dict):
if many_fields:
raise ValueError("If many fields are given, then key_order must be a dict. I got %s instead" % key_order)
else:
key_order = {fields[0]: key_order}
else:
key_order = {}
if recode:
if not isinstance(recode, dict) or not many_fields:
recode = dict((f, recode) for f in fields)
table_fields = table_fields or fields
# get the table and the model
if table_name:
data_table = FieldTable.get(table_name)
else:
data_table = FieldTable.for_fields(table_fields, table_dataset)
if not data_table:
ValueError("Couldn't find a table that covers these fields: %s" % table_fields)
objects = get_objects_by_geo(data_table.model, geo, session, fields=fields, order_by=order_by,
only=only, exclude=exclude, data_table=data_table)
if total is not None and many_fields:
raise ValueError("Cannot specify a total if many fields are given")
if total and percent_grouping:
raise ValueError("Cannot specify a total if percent_grouping is given")
if total is None and percent and data_table.total_column is None:
# The table doesn't support calculating percentages, but the caller
# has asked for a percentage without providing a total value to use.
# Either specify a total, or specify percent=False
raise ValueError("Asking for a percent on table %s that doesn't support totals and no total parameter specified." % data_table.id)
# sanity check the percent grouping
if percent:
if percent_grouping:
for field in percent_grouping:
if field not in fields:
raise ValueError("Field '%s' specified in percent_grouping must be in the fields list." % field)
# re-order percent grouping to be same order as in the field list
percent_grouping = [f for f in fields if f in percent_grouping]
else:
percent_grouping = None
denominator_key = getattr(data_table, 'denominator_key')
root_data = OrderedDict()
running_total = 0
group_totals = {}
grand_total = -1
def get_recoded_key(recode, field, key):
recoder = recode[field]
if isinstance(recoder, dict):
return recoder.get(key, key)
else:
return recoder(field, key)
def get_data_object(obj):
""" Recurse down the list of fields and return the
final resting place for data for this stat. """
data = root_data
for i, field in enumerate(fields):
key = getattr(obj, field)
if recode and field in recode:
key = get_recoded_key(recode, field, key)
else:
key = capitalize(key)
# enforce key ordering the first time we see this field
if (not data or data.keys() == ['metadata']) and field in key_order:
for fld in key_order[field]:
data[fld] = OrderedDict()
# ensure it's there
if key not in data:
data[key] = OrderedDict()
data = data[key]
# default values for intermediate fields
if data is not None and i < n_fields - 1:
data['metadata'] = {'name': key}
# data is now the dict where the end value is going to go
if not data:
data['name'] = key
data['numerators'] = {'this': 0.0}
return data
# run the stats for the objects
for obj in objects:
if not obj.total and exclude_zero:
continue
if denominator_key and getattr(obj, data_table.fields[-1]) == denominator_key:
grand_total = obj.total
# don't include the denominator key in the output
continue
# get the data dict where these values must go
data = get_data_object(obj)
if not data:
continue
if obj.total is not None:
data['numerators']['this'] += obj.total
running_total += obj.total
else:
# TODO: sanity check this is the right thing to do for multiple fields with
# nested nulls -- does aggregating over nulls treat them as zero, or should we
# treat them as null?
data['numerators']['this'] = None
if percent_grouping:
if obj.total is not None:
group_key = tuple()
for field in percent_grouping:
key = getattr(obj, field)
if recode and field in recode:
# Group by recoded keys
key = get_recoded_key(recode, field, key)
group_key = group_key + (key,)
data['_group_key'] = group_key
group_totals[group_key] = group_totals.get(group_key, 0) + obj.total
if grand_total == -1:
grand_total = running_total if total is None else total
# add in percentages
def calc_percent(data):
for key, data in data.iteritems():
if not key == 'metadata':
if 'numerators' in data:
if percent:
if '_group_key' in data:
total = group_totals[data.pop('_group_key')]
else:
total = grand_total
if total is not None and data['numerators']['this'] is not None:
perc = 0 if total == 0 else (data['numerators']['this'] / total * 100)
data['values'] = {'this': round(perc, 2)}
else:
data['values'] = {'this': None}
else:
data['values'] = dict(data['numerators'])
data['numerators']['this'] = None
else:
calc_percent(data)
calc_percent(root_data)
if slices:
for v in slices:
root_data = root_data[v]
add_metadata(root_data, data_table)
return root_data, grand_total
def create_debug_dump(data, geo_level, name):
import os
import json
debug_dir = os.path.join(os.path.dirname(__file__), 'debug')
if not os.path.exists(debug_dir):
os.mkdir(debug_dir)
with open(os.path.join(debug_dir, '%s_%s.json' % (name, geo_level)), 'w') as f:
f.write(json.dumps(data, indent=4))
| 36.758261 | 138 | 0.607116 | 1,027 | 0.04859 | 0 | 0 | 0 | 0 | 0 | 0 | 7,870 | 0.37235 |
9048acfcee11de068839ac11bcc199658e3bb1fe | 9,913 | py | Python | ovis/analysis/gradients.py | vlievin/ovis | 71f05a5f5219b2df66a9cdbd5a5339e0e179597b | [
"MIT"
] | 10 | 2020-08-06T22:25:11.000Z | 2022-03-07T13:10:15.000Z | ovis/analysis/gradients.py | vlievin/ovis | 71f05a5f5219b2df66a9cdbd5a5339e0e179597b | [
"MIT"
] | 2 | 2021-06-08T22:15:24.000Z | 2022-03-12T00:45:59.000Z | ovis/analysis/gradients.py | vlievin/ovis | 71f05a5f5219b2df66a9cdbd5a5339e0e179597b | [
"MIT"
] | null | null | null | from time import time
from typing import *
import torch
from booster import Diagnostic
from torch import Tensor
from tqdm import tqdm
from .utils import cosine, percentile, RunningMean, RunningVariance
from ..estimators import GradientEstimator
from ..models import TemplateModel
def get_grads_from_tensor(model: TemplateModel, loss: Tensor, output: Dict[str, Tensor], tensor_id: str, mc: int, iw: int):
"""
Compute the gradients given a `tensor` on which was called `tensor.retain_graph()`
Assumes `tensor` to have `tensor.shape[0] == bs * iw * mc`
:param model: VAE model
:param loss: loss value
:param output: model's output: dict
:param tensor_id: key of the tensor in the model output
:param mc: number of outer Monte-Carlo samples
:param iw: number of inner Importance-Weighted samples
:return: gradient: Tensor of shape [D,] where D is the number of elements in `tensor`
"""
assert tensor_id in output.keys(), f"Tensor_id = `{tensor_id}` not in model's output"
model.zero_grad()
loss.sum().backward(create_graph=True, retain_graph=True)
# get the tensor of interest
    tensors = output[tensor_id] if isinstance(output[tensor_id], list) else [output[tensor_id]]
bs = tensors[0].shape[0] // (mc * iw)
# get the gradients, flatten and concat across the feature dimension
gradients = [p.grad for p in tensors]
assert not any(
[g is None for g in gradients]), f"{sum([int(g is None) for g in gradients])} tensors have no gradients. " \
f"Use `tensor.retain_graph()` in your model to enable gradients. " \
f"tensor_id = `{tensor_id}`"
# compute gradients estimate for each individual grads
# sum individual gradients because x_expanded = x.expand(bs, mc, iw)
gradients = torch.cat([g.view(bs, mc * iw, -1).sum(1) for g in gradients], 1)
# return an MC average of the grads
return gradients.mean(0)
def get_grads_from_parameters(model: TemplateModel, loss: Tensor, key_filter: str = ''):
"""
Return the gradients for the parameters matching the `key_filter`
:param model: VAE model
:param loss: loss value
:param key_filter: filter value (comma separated values accepted (e.g. "A,b"))
:return: Tensor of shape [D,] where `D` is the number of parameters
"""
key_filters = key_filter.split(',')
params = [p for k, p in model.named_parameters() if any([(_key in k) for _key in key_filters])]
assert len(params) > 0, f"No parameters matching filter = `{key_filters}`"
model.zero_grad()
# backward individual gradients \nabla L[i]
loss.mean().backward(create_graph=True, retain_graph=True)
# gather gradients for each parameter and concat such that each element across the dim 1 is a parameter
grads = [p.grad.view(-1) for p in params if p.grad is not None]
return torch.cat(grads, 0)
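def _example_grads_from_parameters():
    """
    Illustrative sketch, not part of the original module: gather the flattened
    gradient of the weight matrix of a toy linear model. The model, data and
    key_filter below are made-up stand-ins for a real VAE and mini-batch.
    """
    toy_model = torch.nn.Linear(3, 2)
    x = torch.randn(4, 3)
    loss = toy_model(x).pow(2).sum(1)  # one loss term per sample
    grads = get_grads_from_parameters(toy_model, loss, key_filter='weight')
    return grads.shape  # torch.Size([6]) for the 2x3 weight matrix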
def get_gradients_statistics(estimator: GradientEstimator,
model: TemplateModel,
x: Tensor,
mc_samples: int = 100,
key_filter: str = 'inference_network',
oracle_grad: Optional[Tensor] = None,
return_grads: bool = False,
compute_dsnr: bool = True,
samples_per_batch: Optional[int] = None,
eps: float = 1e-15,
tqdm: Callable = tqdm,
**config: Dict) -> Tuple[Diagnostic, Dict]:
"""
Compute the gradients and return the statistics (Variance, Magnitude, SNR, DSNR)
If an `oracle` gradient is available: compute the cosine similarity with the oracle and the gradient estimate (direction)
    The Magnitude, Variance and SNR are defined parameter-wise. All return values are averaged over the D parameters with
    Variance > eps. For instance, the returned SNR is
    * SNR = 1/D \sum_d SNR_d
    Each MC sample is computed sequentially and the mini-batch `x` will be split into chunks
    if a value `samples_per_batch` is specified and if `samples_per_batch < x.size(0) * mc * iw`.
:param estimator: Gradient Estimator
:param model: VAE model
:param x: mini-batch of observations
:param mc_samples: number of Monte-Carlo samples
:param key_filter: key matching parameters names in the model
:param oracle_grad: true direction of the gradients [Optional]
:param return_grads: return all gradients in the `meta` output directory if set to `True`
:param compute_dsnr: compute the Directional SNR if set to `True`
:param samples_per_batch: max. number of individual samples `bs * mc * iw` per mini-batch [Optional]
:param eps: minimum Variance value used for filtering
:param config: config dictionary for the estimator
:param tqdm: custom `tqdm` function
:return: output : Diagnostic = {'grads' : {'variance': ..,
'magnitude': ..,
'snr': ..,
'dsnr' ..,
'direction': cosine similarity with the oracle,
'keep_ratio' : ratio of parameter-wise gradients > epsilon}}
'snr': {'percentiles', 'mean', 'min', 'max'}
},
meta : additional data including the gradients values if `return_grads`
"""
_start = time()
grads_dsnr = None
grads_mean = RunningMean()
grads_variance = RunningVariance()
if oracle_grad is not None:
grads_dir = RunningMean()
all_grads = None
# compute each MC sample sequentially
for i in tqdm(range(mc_samples), desc="Gradients Analysis"):
        # compute number of chunks based on the capacity `samples_per_batch`
if samples_per_batch is None:
chuncks = 1
else:
bs = x.size(0)
mc = estimator.config['mc']
iw = estimator.config['iw']
# infer number of chunks
total_samples = bs * mc * iw
chuncks = max(1, -(-total_samples // samples_per_batch)) # ceiling division
# compute mini-batch gradient by chunck if `x` is large
gradients = RunningMean()
for k, x_ in enumerate(x.chunk(chuncks, dim=0)):
model.eval()
model.zero_grad()
# forward, backward to compute the gradients
loss, diagnostics, output = estimator(model, x_, backward=False, **config)
# gather mini-batch gradients
if 'tensor:' in key_filter:
tensor_id = key_filter.replace("tensor:", "")
gradients_ = get_grads_from_tensor(model, loss, output, tensor_id, estimator.mc, estimator.iw)
else:
gradients_ = get_grads_from_parameters(model, loss, key_filter=key_filter)
# move to cpu
gradients_ = gradients_.detach().cpu()
# update average
gradients.update(gradients_, k=x_.size(0))
# gather statistics
with torch.no_grad():
gradients = gradients()
if return_grads or compute_dsnr:
all_grads = gradients[None] if all_grads is None else torch.cat([all_grads, gradients[None]], 0)
grads_mean.update(gradients)
grads_variance.update(gradients)
# compute the statistics
with torch.no_grad():
# compute statistics for each data point `x_i`
grads_variance = grads_variance()
grads_mean = grads_mean()
# compute signal-to-noise ratio. see `tighter variational bounds are not necessarily better` (eq. 4)
grad_var_sqrt = grads_variance.pow(0.5)
clipped_variance_sqrt = grad_var_sqrt.clamp(min=eps)
grads_snr = grads_mean.abs() / (clipped_variance_sqrt)
# compute DSNR, see `tighter variational bounds are not necessarily better` (eq. 12)
if compute_dsnr:
u = all_grads.mean(0, keepdim=True)
u /= u.norm(dim=1, keepdim=True, p=2)
g_parallel = u * (u * all_grads).sum(1, keepdim=True)
g_perpendicular = all_grads - g_parallel
grads_dsnr = g_parallel.norm(dim=1, p=2) / (eps + g_perpendicular.norm(dim=1, p=2))
# compute grad direction: cosine similarity between the gradient estimate and the oracle
if oracle_grad is not None:
grads_dir = cosine(grads_mean, oracle_grad, dim=-1)
# reinitialize grads
model.zero_grad()
# reduce fn: keep only parameter with variance > 0
mask = (grads_variance > eps).float()
_reduce = lambda x: (x * mask).sum() / mask.sum()
output = Diagnostic({'grads': {
'variance': _reduce(grads_variance),
'magnitude': _reduce(grads_mean.abs()),
'snr': _reduce(grads_snr),
'dsnr': grads_dsnr.mean() if grads_dsnr is not None else 0.,
'keep_ratio': mask.sum() / torch.ones_like(mask).sum()
},
'snr': {
'p25': percentile(grads_snr, q=0.25), 'p50': percentile(grads_snr, q=0.50),
'p75': percentile(grads_snr, q=0.75), 'p5': percentile(grads_snr, q=0.05),
'p95': percentile(grads_snr, q=0.95), 'min': grads_snr.min(),
'max': grads_snr.max(), 'mean': grads_snr.mean()}
})
if oracle_grad is not None:
output['grads']['direction'] = grads_dir.mean()
# additional data: raw grads, and mean,var,snr for each parameter separately
meta = {
'grads': all_grads,
'expected': grads_mean,
'magnitude': grads_mean.abs(),
'var': grads_variance,
'snr': grads_snr,
}
return output, meta
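def _example_snr_from_samples():
    """
    Illustrative sketch, not part of the original module: given a matrix of
    per-sample gradient estimates of shape [mc_samples, n_params], compute the
    parameter-wise SNR = |mean| / std and average it over the parameters whose
    variance exceeds eps, mirroring the aggregation above. The random tensor is
    a stand-in for real gradients.
    """
    eps = 1e-15
    all_grads = torch.randn(100, 5) + 0.5  # 100 MC samples of 5 parameters
    grads_mean = all_grads.mean(0)
    grads_variance = all_grads.var(0)
    snr = grads_mean.abs() / grads_variance.clamp(min=eps).pow(0.5)
    mask = (grads_variance > eps).float()
    return (snr * mask).sum() / mask.sum()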
| 42.545064 | 125 | 0.611924 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,509 | 0.454857 |
904a907ab750687eb1de030da0541431f23b5d88 | 1,081 | py | Python | Sem-09-T1-Q5.py | daianasousa/Semana-09 | decfc9b47931ae4f5a4f30a0d26b931ecd548f59 | [
"MIT"
] | null | null | null | Sem-09-T1-Q5.py | daianasousa/Semana-09 | decfc9b47931ae4f5a4f30a0d26b931ecd548f59 | [
"MIT"
] | null | null | null | Sem-09-T1-Q5.py | daianasousa/Semana-09 | decfc9b47931ae4f5a4f30a0d26b931ecd548f59 | [
"MIT"
] | null | null | null | def carrega_cidades():
resultado = []
with open('cidades.csv', 'r', encoding='utf-8') as arquivo:
for linha in arquivo:
uf, ibge, nome, dia, mes, pop = linha.split(';')
resultado.append(
(uf, int(ibge), nome, int(dia), int(mes), int(pop))
)
arquivo.close()
return resultado
def main():
mes = int(input('Mês: '))
populacao = int(input('População: '))
cidades = carrega_cidades()
meses = ('JANEIRO', 'FEVEREIRO' ,'MARÇO' ,'ABRIL' ,'MAIO' ,'JUNHO' ,'JULHO' ,'AGOSTO' ,'SETEMBRO' , 'OUTUBRO', 'NOVEMBRO', 'DEZEMBRO')
for i in range(12):
meses[i]
print(f'CIDADES COM MAIS DE {populacao} HABITANTES E ANIVERSÁRIO EM {meses[mes-1]}:')
for dados in cidades:
if dados[-1] > populacao and dados[-2] == mes:
nome = dados[2]
dia = dados[3]
uf = dados[0]
pop = dados[-1]
print(f'{nome}({uf}) tem {pop} habitantes e faz aniversário em {dia} de {meses[mes-1].lower()}.')
if __name__ == '__main__':
main() | 30.885714 | 138 | 0.543941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 331 | 0.304508 |
5f3a8ec38dd614e2783df50d617c5b8f3ca8b0f8 | 1,428 | py | Python | data_split.py | CodeDogandCat/ChineseGrammarErrorDiagnose | 4e1ec745ae938f742c6afb0e88b08ea50c6028cb | [
"Apache-2.0"
] | null | null | null | data_split.py | CodeDogandCat/ChineseGrammarErrorDiagnose | 4e1ec745ae938f742c6afb0e88b08ea50c6028cb | [
"Apache-2.0"
] | null | null | null | data_split.py | CodeDogandCat/ChineseGrammarErrorDiagnose | 4e1ec745ae938f742c6afb0e88b08ea50c6028cb | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import re
# from pyltp import Segmentor
import jieba.posseg as pseg
import jieba
import os
import sys
import json
import math
# import kenlm
import nltk
from collections import Counter
def dataSplit(inputpath, count):
(filepath, tempfilename) = os.path.split(inputpath)
(filename, extension) = os.path.splitext(tempfilename)
outputlist = []
for i in range(count):
outputpath = os.path.join('./word/', filename + "_" + str(i) + extension)
print(outputpath)
outputlist.append(outputpath)
outputfiles = []
for path in outputlist:
output = open(path, encoding='utf-8', mode='w+')
outputfiles.append(output)
print('open input')
fin = open(inputpath, encoding='utf-8')
print('read input')
    lines = fin.readlines()  # read all lines from the file
print('calculate lines')
total = len(lines)
sclice = math.floor(total / count)
i = 0
while i < count - 1:
print("write file " + str(i))
outputfiles[i].writelines(lines[i * sclice:(i + 1) * sclice])
outputfiles[i].close()
print("write file " + str(i) + " is ok~~ ")
i += 1
print("write file " + str(i))
outputfiles[i].writelines(lines[i * sclice:])
outputfiles[i].close()
print("write file " + str(i) + " is ok~~ ")
print("all is ok~~")
# dataSplit('TNewsSegafter2.txt', 32)
dataSplit('TNewsSegafter1.txt', 32)
| 28 | 81 | 0.621148 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 315 | 0.218447 |
5f3bba72b50ee67716dbeda71e53db5b079da28f | 2,435 | py | Python | Code/Python/pract_fund1_sol.py | kunal-mulki/Materials | b76bba123002972e4063b9b24cd5dc3d980e16e9 | [
"MIT"
] | 27 | 2016-12-07T17:38:41.000Z | 2021-06-28T06:19:49.000Z | Code/Python/pract_fund1_sol.py | kunal-mulki/Materials | b76bba123002972e4063b9b24cd5dc3d980e16e9 | [
"MIT"
] | 27 | 2016-05-28T21:32:24.000Z | 2016-12-08T16:47:09.000Z | Code/Python/pract_fund1_sol.py | NYUDataBootcamp/Materials | b76bba123002972e4063b9b24cd5dc3d980e16e9 | [
"MIT"
] | 50 | 2016-10-12T11:04:50.000Z | 2021-06-01T23:24:45.000Z | """
Practice problems, Python fundamentals 1 -- Solutions
@authors: Balint Szoke, Daniel Csaba
@date: 06/02/2017
"""
#-------------------------------------------------------
# 1) Solution
good_string = "Sarah's code"
#or
good_string = """Sarah's code"""
#-------------------------------------------------------
# 2) Solution
i = 1234
list(str(i))
#-------------------------------------------------------
# 3) Solution
year = '2016'
next_year = str(int(year) + 1)
#-------------------------------------------------------
# 4) Solution
x, y = 3, 'hello'
print(x, y)
z = x
x = y
y = z
print(x, y)
#-------------------------------------------------------
# 5) Solution
name = 'Jones'
print(name.upper())
#-------------------------------------------------------
# 6) Solution
name = 'Ulysses'
print(name.count('s'))
#-------------------------------------------------------
# 7) Solution
long_string = 'salamandroid'
long_string = long_string.replace('a', '*')
print(long_string)
#-------------------------------------------------------
# 8) Solution
ll = [1, 2, 3, 4, 5]
ll.reverse()
print(ll)
#ll.pop(1)
# or better
ll.pop(ll.index(4))
print(ll)
ll.append(1.5)
print(ll)
ll.sort()
print(ll)
#%% #-------------------------------------------------------
# 9) Solution
number = "32,054.23"
number_no_comma = number.replace(',', '')
number_float = float(number_no_comma)
print(number_float)
#or
print(float(number.replace(',', '')))
#-------------------------------------------------------
# 10) Solution
firstname_lastname = 'john_doe'
firstname, lastname = firstname_lastname.split('_')
Firstname = firstname.capitalize()
Lastname = lastname.capitalize()
print(Firstname, Lastname)
#-------------------------------------------------------
# 11-12) Solution
l = [0, 1, 2, 4, 5]
index = l.index(4)
l.insert(index, 3)
print(l)
#-------------------------------------------------------
# 13) Solution
s = 'www.example.com'
s = s.lstrip('w.')
s = s.rstrip('.c')
# or in a single line
(s.lstrip('w.')).rstrip('.com')
#-------------------------------------------------------
# 14) Solution
link = 'https://play.spotify.com/collection/albums'
splitted_link = link.rsplit('/', 1)
print(splitted_link[0])
#or
link.rsplit('/', 1)[0]
#-------------------------------------------------------
# 15) Solution
amount = "32.054,23"
ms = amount.maketrans(',.', '.,')
amount = amount.translate(ms)
print(amount)
| 21.936937 | 62 | 0.433265 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,398 | 0.574127 |
5f3df5f78e78d0ee2fc42ec4cf3a85208b508f67 | 7,178 | py | Python | eos/old_scott_ANEOS_conversion.py | ScottHull/FDPS_SPH | 6db11d599d433f889da100e78c17d6f65365ceda | [
"MIT"
] | null | null | null | eos/old_scott_ANEOS_conversion.py | ScottHull/FDPS_SPH | 6db11d599d433f889da100e78c17d6f65365ceda | [
"MIT"
] | null | null | null | eos/old_scott_ANEOS_conversion.py | ScottHull/FDPS_SPH | 6db11d599d433f889da100e78c17d6f65365ceda | [
"MIT"
] | null | null | null | """
This is a python script that converts u(rho, T), P(rho, T), Cs(rho,T), S(rho, T)
to T(rho, u), P(rho, u), Cs(rho, u), S(rho, u), which is more useful for SPH calculations
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
import numpy as np
import pandas as pd
import csv
import sys
from scipy.interpolate import interp1d
from scipy import interpolate
def emptyLineIndices(f):
empty_lines = [0]
with open(f, 'r') as infile:
reader = csv.reader(infile)
next(reader) # drop header row
for index, row in enumerate(reader):
if len(row) == 0:
empty_lines.append(index)
infile.close()
return empty_lines
def chunkFile(f, empty_lines):
densities = []
d = {}
with open(f, 'r') as infile:
reader = csv.reader(infile)
headers = next(reader)
reader = list(reader)
for index, line in enumerate(empty_lines):
temp_dict = {}
for i in headers:
temp_dict.update({i: []})
if (index + 1) != len(empty_lines):
min, max = empty_lines[index] + 1, empty_lines[index + 1] - 1
trimmed_reader = reader[min:max]
for row in trimmed_reader:
for index2, i in enumerate(row):
header = headers[index2]
temp_dict[header].append(reformat(i))
density = reformat(temp_dict['Pressure (Pa)'][0])
densities.append(density)
d.update({density: temp_dict})
return d
def reformat(number):
if isinstance(number, str):
if '-101' in str(number):
new_num = float(number.split('-')[0]) * (10**(-101))
return new_num
else:
return float(number)
else:
return number
def recalculateEnergies(d, grid_number, min_energy, delta):
"""
For each density sample, we want the same exponential energy grid
:param d:
:param grid_number:
:param min_energy:
:param delta:
:return:
"""
densities = d.keys()
new_energies = []
for i in range(0, grid_number):
new_energy = min_energy * (delta**i)
new_energies.append(new_energy)
for i in densities:
d[i].update({'Energy (J/kg)': new_energies})
return d
nu = 120  # number of grid points for the internal energy (exponential grid)
infile_path = 'granite.table.csv'
empty_lines = emptyLineIndices(f=infile_path)
sorted_dict = chunkFile(f=infile_path, empty_lines=empty_lines)
densities = sorted_dict.keys()
infile_df = pd.read_csv(infile_path)
energy = [reformat(i) for i in list(infile_df['Energy (J/kg)'])]
min_energy = min(energy)
max_energy = max(energy)
delta = (min_energy / max_energy)**(1/(nu-1))
sorted_dict = recalculateEnergies(d=sorted_dict, grid_number=nu, min_energy=min_energy, delta=delta)
for i in densities:
energies = sorted_dict[i]['Energy (J/kg)']
temperatures = sorted_dict[i]['Temperature (K)']
pressures = sorted_dict[i]['Pressure (Pa)']
sound_speeds = sorted_dict[i]['Sound speed (m/s)']
entropies = sorted_dict[i]['Entropy (J/kg/K)']
f_temperature = interpolate.interp1d(energies, temperatures, kind='linear', fill_value='extrapolate')
sorted_dict[i].update({'Temperature (K)': f_temperature(energies)})
f_pressure = interpolate.interp1d(temperatures, pressures, kind='linear', fill_value='extrapolate')
sorted_dict[i].update({'Pressure (Pa)': f_pressure(sorted_dict[i]['Temperature (K)'])})
f_soundspeed = interpolate.interp1d(temperatures, sound_speeds, kind='linear', fill_value='extrapolate')
sorted_dict[i].update({'Sound speed (m/s)': f_soundspeed(sorted_dict[i]['Temperature (K)'])})
f_entropy = interpolate.interp1d(temperatures, entropies, kind='linear', fill_value='extrapolate')
sorted_dict[i].update({'Entropy (J/kg/K)': f_entropy(sorted_dict[i]['Temperature (K)'])})
# infile_df = pd.read_csv(infile_path)
#
# density = sorted(list(set([reformat(i) for i in list(infile_df['Density (kg/m3)'])]))) # remove duplicates, then sort
# temperature = sorted(list(set([reformat(i) for i in list(infile_df['Temperature (K)'])])))
# energy = [reformat(i) for i in list(infile_df['Energy (J/kg)'])]
# pressure = [reformat(i) for i in list(infile_df['Pressure (Pa)'])]
# sound_speed = [reformat(i) for i in list(infile_df['Sound speed (m/s)'])]
# entropy = [reformat(i) for i in list(infile_df['Entropy (J/kg/K)'])]
#
# min_energy = min(energy)
# max_energy = max(energy)
# delta = (min_energy / max_energy)**(1 / (nu - 1))
#
# new_energy = [min_energy * (delta**i) for i in range(0, nu)]
#
# new_temperature = []
# new_pressure = []
# new_sound_speed = []
# new_entropy = []
#
# for m in range(0, nu):
#
# # internal energy
# f_temperature = interpolate.interp1d(energy[m:], temperature[m:], kind='linear', fill_value='extrapolate')
# new_temperature.append(f_temperature(new_energy))
#
# # pressure
# f_pressure = interpolate.interp1d(temperature[m:], pressure[m:], kind='linear', fill_value='extrapolate')
# new_pressure.append(f_pressure(new_temperature[m]))
#
# # sound speed
# f_soundspeed = interpolate.interp1d(temperature[m:], sound_speed[m:], kind='linear', fill_value='extrapolate')
# new_sound_speed.append(f_soundspeed(new_temperature[m]))
#
# # entropy
# f_entropy = interpolate.interp1d(temperature[m:], entropy[m:], kind='linear', fill_value='extrapolate')
# new_entropy.append(f_entropy(new_temperature[m]))
#
# new_temperature = np.array(new_temperature)
# new_pressure = np.array(new_pressure)
# new_sound_speed = np.array(new_sound_speed)
# new_entropy = np.array(new_entropy)
#
# for m in range(0, len(density), int(len(density)/6)):
#
# ax = [0, 0, 0, 0]
#
# fig = plt.figure(figsize = (10,6.128))
#
# ax[0] = fig.add_subplot(221)
# ax[1] = fig.add_subplot(222)
# ax[2] = fig.add_subplot(223)
# ax[3] = fig.add_subplot(224)
#
# ax[0].semilogy(np.array(temperature) * 1e-3, np.array(energy[m:]) * 1e-6, '--', label="original ANEOS")
# ax[0].semilogy(new_temperature[m:] * 1e-3, np.array(new_energy[m:]) * 1e-6, '-.', label="modified")
# ax[1].semilogy(np.array(temperature) * 1e-3, np.array(pressure[m:]) * 1e-6,'--', new_temperature[m:] * 1e-3, new_pressure[m:] * 1e-6,'-.')
# ax[2].plot(np.array(temperature) * 1e-3, np.array(sound_speed[m:]) * 1e-3,'--', new_temperature[m:] * 1e-3, new_sound_speed[m:] * 1e-3,'-.')
# ax[3].plot(np.array(temperature) * 1e-3, np.array(entropy[m:]) * 1e-3,'--', new_temperature[m:] * 1e-3, new_entropy[m:] * 1e-3,'-.')
#
# ax[0].legend(frameon=False)
#
# ax[0].set_ylabel('Energy (MJ/kg)', fontsize=10)
# ax[1].set_ylabel('Pressure (MPa)', fontsize=10)
# ax[2].set_ylabel('Sound Speed (km/s)', fontsize=10)
# ax[3].set_ylabel('Entropy (kJ/K/kg)', fontsize=10)
# ax[2].set_xlabel('Temperature ($10^3$ K)', fontsize=10)
# ax[3].set_xlabel('Temperature ($10^3$ K)',fontsize=10)
#
# fig.suptitle("Density: %3.3f kg/m$^3$" %(density[m]))
# # plt.show()
# # fig.savefig("Density" + str(m) + ".png")
| 34.344498 | 146 | 0.636389 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,924 | 0.54667 |
5f3f44af77a5d9949e7fe7c6858624af3b7fa923 | 346 | py | Python | scheduler/post_scheduler/urls.py | Awinja-j/Social-Media-post-Scheduler | 4f95b4bb2ca3f890d3e22bcda859b94ebc483b87 | [
"MIT"
] | 1 | 2021-05-08T08:21:06.000Z | 2021-05-08T08:21:06.000Z | scheduler/post_scheduler/urls.py | Awinja-j/Social-Media-post-Scheduler | 4f95b4bb2ca3f890d3e22bcda859b94ebc483b87 | [
"MIT"
] | null | null | null | scheduler/post_scheduler/urls.py | Awinja-j/Social-Media-post-Scheduler | 4f95b4bb2ca3f890d3e22bcda859b94ebc483b87 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
path('post_posts', views.post_posts),
path('fetch_posts', views.get_posts),
path('fetch_post/<pk>', views.get_post),
path('delete_post/<pk>', views.delete_post),
path('edit_post/<pk>', views.edit_post),
path('search_for_a_post', views.search_for_a_post)
] | 28.833333 | 54 | 0.699422 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 95 | 0.274566 |
5f3fad78b868dac1b90ecb78a5594353e0e31396 | 506 | py | Python | dato-graphlab/src/config.py | warreee/apache-flink_vs_dato-graphlab | cd01cee208461479d3f27489ab45df439b8b9820 | [
"Apache-2.0"
] | null | null | null | dato-graphlab/src/config.py | warreee/apache-flink_vs_dato-graphlab | cd01cee208461479d3f27489ab45df439b8b9820 | [
"Apache-2.0"
] | null | null | null | dato-graphlab/src/config.py | warreee/apache-flink_vs_dato-graphlab | cd01cee208461479d3f27489ab45df439b8b9820 | [
"Apache-2.0"
] | null | null | null | import os
def getDataPath():
return os.getcwd().replace("dato-graphlab/src", "data/")
def getSmall():
return getDataPath() + "sample-small.txt"
def getMedium():
return getDataPath() + "sample-medium.txt"
def getLarge():
return getDataPath() + "sample-large.txt"
def getGoogle():
return getDataPath() + "web-Google.txt"
def getStanford():
return getDataPath() + "web-Stanford.txt"
def getOutputFolder():
return os.getcwd().replace("dato-graphlab/src", "results/")
| 16.322581 | 63 | 0.666008 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 144 | 0.284585 |
5f42caff296a8e9070523febb1d633e533ecbfff | 950 | py | Python | tools.py | chougousui/keyboard_layout_for_mobile | 3bb59169f10ac56fb82cb62be07f821f1ecac22e | [
"MIT"
] | 5 | 2019-06-12T09:29:06.000Z | 2020-12-31T08:53:19.000Z | tools.py | chougousui/keyboard_layout_for_mobile | 3bb59169f10ac56fb82cb62be07f821f1ecac22e | [
"MIT"
] | null | null | null | tools.py | chougousui/keyboard_layout_for_mobile | 3bb59169f10ac56fb82cb62be07f821f1ecac22e | [
"MIT"
] | null | null | null | import numpy as np
def generate_cost_dict():
def inner_func(i, j):
x1 = i % 10
y1 = i // 10
x2 = j % 10
y2 = j // 10
alpha = 5
x_center = 5.5
x_radius = 7.5
y_center = 1
y_radius = 4.5
dist = np.sqrt(47 * 47 * np.square(x1 - x2) + 77 * 77 * np.square(y1 - y2))
force1 = np.exp(-1 * alpha * (
1 / (np.sqrt(np.square(x1 - x_center) + np.square(x_radius / y_radius * (y1 - y_center))) - x_radius) +
1 / x_radius))
force2 = np.exp(-1 * alpha * (
1 / (np.sqrt(np.square(x2 - x_center) + np.square(x_radius / y_radius * (y2 - y_center))) - x_radius) +
1 / x_radius))
res = (force1 + force2) / 2 * dist
return res
cost_dict = np.delete(np.delete(np.fromfunction(
lambda i, j: inner_func(i, j),
(28, 28)), 20, axis=0), 20, axis=1)
return cost_dict
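# Illustrative sketch (not part of the original module): the 28 x 28 grid from
# np.fromfunction has its row/column with index 20 removed, so the returned cost
# matrix covers 27 key positions; entry [i, j] is the movement cost between
# positions i and j of the assumed 10-column keyboard layout.
def _example_cost_dict():
    cost = generate_cost_dict()
    return cost.shape  # (27, 27)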
| 29.6875 | 119 | 0.489474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
5f43a06d91c00b879b94bd9ca11de4d7d8fcab07 | 377 | py | Python | full-stack/backend/django-app/django-jwt-app/settings/urls.py | mp5maker/library | b4d2eea70ae0da9d917285569031edfb4d8ab9fc | [
"MIT"
] | null | null | null | full-stack/backend/django-app/django-jwt-app/settings/urls.py | mp5maker/library | b4d2eea70ae0da9d917285569031edfb4d8ab9fc | [
"MIT"
] | 23 | 2020-08-15T15:18:32.000Z | 2022-02-26T13:49:05.000Z | full-stack/backend/django-app/django-jwt-app/settings/urls.py | mp5maker/library | b4d2eea70ae0da9d917285569031edfb4d8ab9fc | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path, include
from rest_framework_jwt.views import (
obtain_jwt_token,
refresh_jwt_token,
)
urlpatterns = [
path('admin/', admin.site.urls),
path('token-auth/', obtain_jwt_token),
path('token-refresh/', refresh_jwt_token),
path('employee/', include('employee.urls', namespace='employee'))
]
| 22.176471 | 70 | 0.710875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 73 | 0.193634 |
5f45037068a6ca19658fc2ba430b609e4386fc29 | 15,989 | py | Python | models/train_classifier.py | tarcisobraz/disaster-message-clf | 22de03350a0f993005564a1d07a43da6bd989e67 | [
"DOC"
] | null | null | null | models/train_classifier.py | tarcisobraz/disaster-message-clf | 22de03350a0f993005564a1d07a43da6bd989e67 | [
"DOC"
] | null | null | null | models/train_classifier.py | tarcisobraz/disaster-message-clf | 22de03350a0f993005564a1d07a43da6bd989e67 | [
"DOC"
] | null | null | null | #General libs
import sys
import os
import json
from datetime import datetime
import time
#Data wrangling libs
import pandas as pd
import numpy as np
#DB related libs
from sqlalchemy import create_engine
#ML models related libs
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
#Gensim
from gensim.models import KeyedVectors
#Custom Transformers and Estimators
import nlp_estimators
#Model Saver
import dill
#Workspace Utils
from workspace_utils import active_session
#Glove Models dictionary (to be filled in when needed)
glove_models_by_size = {50: None,
100: None,
300: None}
#Train Configurations to be filled in when script is called
train_configs = {}
def get_or_load_glove_model(num_dims):
'''
INPUT
num_dims - int, number of dimensions of the Glove model to be loaded
OUTPUT
glove_model - object, the pre-trained glove model with the specified number of dimensions
This function either retrieves the already-stored glove model or loads and
stores it from file using the train configuration `glove_models_folderpath`
'''
if glove_models_by_size[num_dims] == None:
print('Pre-trained Glove Model with {} dims not found. '\
'\nLoading it from file...'.format(num_dims))
glove_models_by_size[num_dims] = KeyedVectors.load_word2vec_format(
os.path.join(train_configs['glove_models_folderpath'],
'glove.6B.{}d_word2vec.txt'.format(num_dims)),
binary=False)
return glove_models_by_size[num_dims]
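# Illustrative note (not part of the original module): the returned KeyedVectors
# object supports the usual gensim word-vector lookups, and caching it in
# glove_models_by_size avoids re-reading the large text file when several
# pipeline configurations request the same dimensionality.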
def load_data(database_filepath):
'''
INPUT
database_filepath - string, filepath of database from which data will be loaded
OUTPUT
X - numpy array, The raw messages ready to be used to train the pipelines
X_tokenized - numpy array, The tokenized messages ready to be used to train the pipelines
Y - numpy array, The list of categories to which each message belongs
category_columns - pandas series, The names of the categories
categories_tokens - numpy array, The tokenized categories names (to be used by cats_sim feature set)
This function loads and prepares data for the models training
'''
engine = create_engine('sqlite:///' + database_filepath)
messages_df = pd.read_sql_table(con=engine, table_name='Message')
categories_df = pd.read_sql_table(con=engine, table_name='CorpusWide')
messages_tokens = pd.read_sql_table(con=engine, table_name='MessageTokens')
X = messages_df.message.values
X_tokenized = messages_tokens.tokens_str.values
Y_df = categories_df.drop(['message_id', 'message', 'original', 'genre'], axis=1)
Y = Y_df.values
category_columns = Y_df.columns
categories_tokens = np.array([np.array(cat.split('_')) for cat in category_columns])
return X, X_tokenized, Y, category_columns, categories_tokens
def build_estimator_obj(estimator_code):
'''
INPUT
estimator_code - string, the code of the classifier object to be built
OUTPUT
classifier_obj - sklearn estimator, the built classifier object
This function builds a classifier object based on the estimator code received as input.
For unexpected codes, it prints an error and exits the script execution
'''
classifier_obj = None
if estimator_code == 'rf':
classifier_obj = RandomForestClassifier()
elif estimator_code == 'lr':
classifier_obj = LogisticRegression()
else:
print("Invalid Classifier Estimator Code " + estimator_code)
exit(1)
return classifier_obj
def build_classifiers_build_params(classifiers_configs):
'''
INPUT
classifiers_configs - dict, a dictionary containing the configuration for each classifier
OUTPUT
classifiers_params_dict - dict, a dictionary containing the grid params to be used for
each classifier in the training process
This function builds a dictionary with grid params to be used in training process for each
classifier whose configurations were given as input.
It can handle a single classifier or a list of classifiers.
'''
if len(classifiers_configs) > 1:
classifiers_params_list = []
classifiers_params_dict = {}
for classifier in classifiers_configs:
classifier_estimator = classifier['estimator']
classifier_obj = build_estimator_obj(classifier_estimator)
classifier_obj = MultiOutputClassifier(classifier_obj.set_params(**classifier['params']))
classifiers_params_list.append(classifier_obj)
classifiers_params_dict['clf'] = classifiers_params_list
elif len(classifiers_configs) == 1:
classifier = classifiers_configs[0]
classifier_estimator = classifier['estimator']
classifier_obj = build_estimator_obj(classifier_estimator)
classifier_obj = MultiOutputClassifier(classifier_obj)
classifiers_params_dict = {'clf' : [classifier_obj]}
classifiers_params_dict.update(classifier['params'])
print(classifiers_params_dict)
return classifiers_params_dict
def build_model(model_config,classifiers_params,categories_tokens):
'''
INPUT
model_config - dict, a dictionary containing the configuration for a model pipeline
classifiers_configs - dict, a dictionary containing the configuration for each classifier
categories_tokens - numpy array, array containing the tokenized categories names
OUTPUT
grid_search_cv - sklearn GridSearchCV, a grid search CV object containing specifications
on how to train the model based on the input configs
This function builds a Grid Search CV object with specifications for training process for a
given model and its classifiers whose configurations were given as input.
It can handle different feature_sets:
- Local Word2Vec
- Pre-Trained Glove
- Doc2Vec
- Category Similarity
- All Features Sets together
'''
feature_set = model_config['feature_set']
print("Building Model for feature set: {}".format(feature_set))
print("Grid Params: {}".format(model_config['grid_params']))
pipeline = grid_search_params = grid_search_cv = None
jobs = -1
score = 'f1_micro'
def_cv = 3
verbosity_level=10
if feature_set == 'local_w2v':
pipeline = Pipeline([
('local_w2v', nlp_estimators.TfidfEmbeddingTrainVectorizer()),
('clf', MultiOutputClassifier(GaussianNB()))
])
grid_search_params = model_config['grid_params']
elif feature_set == 'glove':
pipeline = Pipeline([
('glove', nlp_estimators.TfidfEmbeddingTrainVectorizer(
get_or_load_glove_model(50))),
('clf', MultiOutputClassifier(GaussianNB()))
])
grid_search_params = {'glove__word2vec_model' :
[get_or_load_glove_model(num_dims) for num_dims in
model_config['grid_params']['glove__num_dims']]}
elif feature_set == 'doc2vec':
pipeline = Pipeline([
('doc2vec', nlp_estimators.Doc2VecTransformer()),
('clf', MultiOutputClassifier(GaussianNB()))
])
grid_search_params = model_config['grid_params']
elif feature_set == 'cats_sim':
pipeline = Pipeline([
('cats_sim', nlp_estimators.CategoriesSimilarity(
categories_tokens=categories_tokens)),
('clf', MultiOutputClassifier(GaussianNB()))
])
grid_search_params = {'cats_sim__word2vec_model' :
[get_or_load_glove_model(num_dims) for num_dims in
model_config['grid_params']['cats_sim__num_dims']]}
elif feature_set == 'all_feats':
pipeline = Pipeline([
('features', FeatureUnion([
('local_w2v', nlp_estimators.TfidfEmbeddingTrainVectorizer(num_dims=50)),
('glove', nlp_estimators.TfidfEmbeddingTrainVectorizer(
get_or_load_glove_model(50)
)),
('doc2vec', nlp_estimators.Doc2VecTransformer(vector_size=50)),
('cats_sim', nlp_estimators.CategoriesSimilarity(categories_tokens=categories_tokens,
word2vec_model=get_or_load_glove_model(50)))
])),
('clf', MultiOutputClassifier(GaussianNB()))
])
grid_search_params = model_config['grid_params']
else:
print("Error: Invalid Feature Set: " + feature_set)
sys.exit(1)
# Adds classifiers params to grid params
grid_search_params.update(classifiers_params)
grid_search_cv = GridSearchCV(estimator=pipeline,
param_grid=grid_search_params,
scoring=score,
cv=def_cv,
n_jobs=jobs,
verbose=verbosity_level)
return grid_search_cv
def evaluate_model(model, X_test, Y_test, category_names):
'''
INPUT
model - sklearn GridSearchCV, the GridSearch containing the model with best performance on the training set
X_test - numpy array, tokenized messages ready to be used to test the fit pipelines
Y_test - numpy array, array containing the tokenized categories names for the test set
category_names - pandas series, the categories names
OUTPUT
test_score - float, the score of the input model on the test data
This function runs the model with best performance on the training set on the test dataset,
printing the precision, recall and f-1 per category and returning the overall prediction score.
'''
print('Best params: %s' % model.best_params_)
# Best training data accuracy
print('Best training score: %.3f' % model.best_score_)
# Predict on test data with best params
Y_pred = model.predict(X_test)
test_score = model.score(X_test, Y_test)
# Test data accuracy of model with best params
print('Test set score for best params: %.3f ' % test_score)
for category_idx in range(len(category_names)):
print(classification_report(y_pred=Y_pred[:,category_idx],
y_true=Y_test[:,category_idx],
labels=[0,1],
target_names=[category_names[category_idx] + '-0',
category_names[category_idx] + '-1']))
return test_score
def save_model(model, model_filepath):
'''
INPUT
model - sklearn Estimator, the model with best performance on the training set
    model_filepath - string, path where model pickle will be saved
This function saves the model with best performance on the training set to a given filepath.
'''
# Output a pickle file for the model
with open(model_filepath,'wb') as f:
dill.dump(model, f)
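# Illustrative counterpart (not part of the original module): a model saved this
# way can be restored later with dill, e.g.
#   with open(model_filepath, 'rb') as f:
#       model = dill.load(f)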
def build_grid_search_results_df(gs_results, gs_name, test_score):
'''
INPUT
gs_results - dict, dictionary containing the results of GridSearchCV training
gs_name - string, the name of the GridSearchCV feature set
test_score - float, the score of the best performing model of the GridSearchCV on the test set
OUTPUT
gs_results_df - pandas DataFrame, a dataframe holding information of the GridSearchCV results
(train and test) for record
This function builds a dataframe with information of the GridSearchCV results
(train and test) for record.
'''
gs_results_df = pd.DataFrame(gs_results)
gs_results_df['grid_id'] = gs_name
gs_results_df['best_model_test_score'] = test_score
gs_results_df['param_set_order'] = np.arange(len(gs_results_df))
return gs_results_df
def run_grid_search():
'''
This function runs the whole model selection phase:
- Load Data from DB
- Build Model
- Run GridSearch
- Save results to file
- Save best model pickle file
'''
start = time.time()
print("Train configuration:")
print(json.dumps(train_configs, indent=4))
print('Loading data...\n DATABASE: {}'.format(train_configs['database_filepath']))
X, X_tokenized, Y, category_names, categories_tokens = load_data(train_configs['database_filepath'])
X_train, X_test, Y_train, Y_test = train_test_split(X_tokenized, Y, test_size=0.25)
classifiers_params = build_classifiers_build_params(train_configs['classifiers'])
print('Running GridSearch on models parameters...')
best_score = 0.0
best_gs = ''
overall_results_df = pd.DataFrame()
for model_config in train_configs['models']:
print('Building model...')
model = build_model(model_config,
classifiers_params,
categories_tokens)
print('Training model...')
model.fit(X_train, Y_train)
print('Evaluating model...')
test_score = evaluate_model(model, X_test, Y_test, category_names)
gs_results_df = build_grid_search_results_df(model.cv_results_,
model_config['feature_set'],
test_score)
overall_results_df = pd.concat([overall_results_df, gs_results_df])
print('Saving model...\n MODEL: {}'.format(
model_config['model_ouput_filepath']))
save_model(model.best_estimator_, model_config['model_ouput_filepath'])
print('Trained model saved!')
# Track best (highest test accuracy) model
if test_score > best_score:
best_score = test_score
best_gs = model_config['feature_set']
output_filepath = train_configs['results_folderpath'] + \
'res-' + train_configs['name'] + '-' + \
datetime.now().strftime('%Y-%m-%d_%H:%M:%S') + \
'.csv'
print('Saving Results...\n FILEPATH: {}'.format(output_filepath))
overall_results_df.to_csv(output_filepath, index=False)
print('\nClassifier with best test set accuracy: %s' % best_gs)
end = time.time()
print("Training Time: " + str(int(end - start)) + "s")
def main():
if len(sys.argv) >= 3:
train_config_filepath, using_udacity_workspace = sys.argv[1:]
# Read train config from file
with open(train_config_filepath, 'r') as f:
global train_configs
train_configs = json.load(f)
if using_udacity_workspace == 1:
with active_session():
run_grid_search()
else:
run_grid_search()
else:
print('Please provide the filepath of train configuration file and '\
' whether or not you are using udacity workspace (0,1) \n\n'\
'Example running local: python train_classifier.py configs/train_config_simple.json 0'\
'\nExample running at Udacity: python train_classifier.py configs/train_config_simple.json 1')
if __name__ == '__main__':
main()
| 37.888626 | 117 | 0.659704 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,550 | 0.409657 |
5f468ef647d08df9b7e435bbbbaaf01ef4277cf4 | 148 | py | Python | src/cortexpy/test/constants.py | karljohanw/cortexpy | 70dcce771136f98edb5250ad8abd2a46bda7f0a6 | [
"Apache-2.0"
] | 2 | 2020-04-08T15:31:12.000Z | 2020-07-01T11:04:47.000Z | src/cortexpy/test/constants.py | karljohanw/cortexpy | 70dcce771136f98edb5250ad8abd2a46bda7f0a6 | [
"Apache-2.0"
] | 9 | 2018-09-12T09:29:43.000Z | 2020-03-15T09:11:25.000Z | src/cortexpy/test/constants.py | karljohanw/cortexpy | 70dcce771136f98edb5250ad8abd2a46bda7f0a6 | [
"Apache-2.0"
] | 1 | 2019-03-29T10:59:13.000Z | 2019-03-29T10:59:13.000Z | import struct
MAX_UINT = 2 ** (struct.calcsize('I') * 8) - 1
MAX_ULONG = 2 ** (struct.calcsize('L') * 8) - 1
UINT8_T = 1
UINT32_T = 4
UINT64_T = 8
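# Illustrative note (not part of the original module): with the common 4-byte
# unsigned int and 8-byte unsigned long sizes these evaluate to
# MAX_UINT == 4294967295 and MAX_ULONG == 18446744073709551615.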
| 18.5 | 47 | 0.614865 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.040541 |
5f47bfe261a0653163329656400b45e38dc2e334 | 2,103 | py | Python | tests/functional_tests/authors/test_authors_login.py | Kaique425/recipes | ab188dbe1ca3891160f65a7858613b8750faa721 | [
"MIT"
] | null | null | null | tests/functional_tests/authors/test_authors_login.py | Kaique425/recipes | ab188dbe1ca3891160f65a7858613b8750faa721 | [
"MIT"
] | null | null | null | tests/functional_tests/authors/test_authors_login.py | Kaique425/recipes | ab188dbe1ca3891160f65a7858613b8750faa721 | [
"MIT"
] | null | null | null | import pytest
from django.contrib.auth.models import User
from django.urls import reverse
from selenium.webdriver.common.by import By
from .base import AuthorBaseFunctionalTest
@pytest.mark.functional_test
class AuthorLoginTest(AuthorBaseFunctionalTest):
def test_user_valid_data_can_login_successfully(self):
password = "testpassword"
user = User.objects.create_user(username="teste123", password=password)
self.browser.get(self.live_server_url + reverse("author:login"))
form = self.get_form()
username_field = self.get_by_id("id_username", form)
password_field = self.get_by_id("id_password", form)
username_field.send_keys(user.username)
password_field.send_keys(password)
form.submit()
body = self.browser.find_element(By.TAG_NAME, "body")
self.assertIn(f"Your logged as {user.username}", body.text)
def test_if_login_form_is_invalid(self):
self.browser.get(self.live_server_url + reverse("author:login"))
form = self.browser.find_element(
By.XPATH, "/html/body/main/div[1]/div/div[2]/form"
)
form.click()
username = self.get_by_id("id_username", form)
password = self.get_by_id("id_password", form)
username.send_keys(" ")
password.send_keys(" ")
form.submit()
self.assertIn(
"Invalid form data.", self.browser.find_element(By.TAG_NAME, "body").text
)
def test_if_login_credentials_is_invalid(self):
self.browser.get(self.live_server_url + reverse("author:login"))
form = self.browser.find_element(
By.XPATH, "/html/body/main/div[1]/div/div[2]/form"
)
form.click()
username = self.get_by_id("id_username", form)
password = self.get_by_id("id_password", form)
username.send_keys("invalid_username")
password.send_keys("invalid_password")
form.submit()
self.assertIn(
"Invalid password or username.",
self.browser.find_element(By.TAG_NAME, "body").text,
)
| 37.553571 | 85 | 0.661912 | 1,893 | 0.900143 | 0 | 0 | 1,922 | 0.913932 | 0 | 0 | 368 | 0.174988 |
5f483bb62aff2e6859980fe0b75abe98dd22d479 | 3,873 | py | Python | fixture/contact.py | piersto/python_training | a5e323f62177e97d31e39449d675192354ec70ed | [
"Apache-2.0"
] | null | null | null | fixture/contact.py | piersto/python_training | a5e323f62177e97d31e39449d675192354ec70ed | [
"Apache-2.0"
] | null | null | null | fixture/contact.py | piersto/python_training | a5e323f62177e97d31e39449d675192354ec70ed | [
"Apache-2.0"
] | null | null | null | from selenium.webdriver.support.ui import Select  # needed for the drop-down fields below
class ContactHelper:
def __init__(self, app):
self.app = app
def submit_specified_user(self):
wd = self.app.wd
wd.find_element_by_xpath("(//input[@name='submit'])[2]").click()
def add_user(self, add_new_contact):
wd = self.app.wd
wd.find_element_by_link_text("add new").click()
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys(add_new_contact.firstname)
wd.find_element_by_name("middlename").clear()
wd.find_element_by_name("middlename").send_keys(add_new_contact.middlename)
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys(add_new_contact.lastname)
wd.find_element_by_name("nickname").clear()
wd.find_element_by_name("nickname").send_keys(add_new_contact.nickname)
wd.find_element_by_name("title").clear()
wd.find_element_by_name("title").send_keys(add_new_contact.title)
wd.find_element_by_name("company").clear()
wd.find_element_by_name("company").send_keys(add_new_contact.company)
wd.find_element_by_name("address").clear()
wd.find_element_by_name("address").send_keys(add_new_contact.address)
wd.find_element_by_name("home").clear()
wd.find_element_by_name("home").send_keys(add_new_contact.homephone)
wd.find_element_by_name("mobile").clear()
wd.find_element_by_name("mobile").send_keys(add_new_contact.mobilephone)
wd.find_element_by_name("work").clear()
wd.find_element_by_name("work").send_keys(add_new_contact.workphone)
wd.find_element_by_name("fax").clear()
wd.find_element_by_name("fax").send_keys(add_new_contact.fax)
wd.find_element_by_name("email").clear()
wd.find_element_by_name("email").send_keys(add_new_contact.email)
wd.find_element_by_name("homepage").clear()
wd.find_element_by_name("homepage").send_keys(add_new_contact.homepage)
wd.find_element_by_name("bday").click()
Select(wd.find_element_by_name("bday")).select_by_visible_text(add_new_contact.birthday)
wd.find_element_by_name("bday").click()
wd.find_element_by_name("bmonth").click()
Select(wd.find_element_by_name("bmonth")).select_by_visible_text(add_new_contact.birthmonth)
wd.find_element_by_name("bmonth").click()
wd.find_element_by_name("byear").click()
wd.find_element_by_name("byear").clear()
wd.find_element_by_name("byear").send_keys(add_new_contact.birthyear)
wd.find_element_by_name("aday").click()
Select(wd.find_element_by_name("aday")).select_by_visible_text(add_new_contact.anniversaryday)
wd.find_element_by_name("aday").click()
wd.find_element_by_name("amonth").click()
Select(wd.find_element_by_name("amonth")).select_by_visible_text(add_new_contact.anniversarymonth)
wd.find_element_by_name("amonth").click()
wd.find_element_by_name("ayear").click()
wd.find_element_by_name("ayear").clear()
wd.find_element_by_name("ayear").send_keys(add_new_contact.anniversaryyear)
wd.find_element_by_name("address2").click()
wd.find_element_by_name("address2").clear()
wd.find_element_by_name("address2").send_keys(add_new_contact.address_2)
wd.find_element_by_name("phone2").clear()
wd.find_element_by_name("phone2").send_keys(add_new_contact.phone_2)
wd.find_element_by_name("notes").clear()
wd.find_element_by_name("notes").send_keys(add_new_contact.notes) | 60.515625 | 110 | 0.674154 | 3,873 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 456 | 0.117738 |
5f4b11817e6c6f5664fb7eebcff8bd3df9ed5773 | 42 | py | Python | varex/__init__.py | weiyi-bitw/varex | 765e8876c0ced480a47c0e523736bd31b7897644 | [
"MIT"
] | null | null | null | varex/__init__.py | weiyi-bitw/varex | 765e8876c0ced480a47c0e523736bd31b7897644 | [
"MIT"
] | null | null | null | varex/__init__.py | weiyi-bitw/varex | 765e8876c0ced480a47c0e523736bd31b7897644 | [
"MIT"
] | null | null | null | from .commons import VCFEntry, LabeledMat
| 21 | 41 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
5f4ba7ea00a9b4ae2bec68e16163449e185187d1 | 2,612 | py | Python | simulation/battery/base_battery.py | BillMakwae/Simulation | 8d0ec274643f23bc0e78c96e50508b60791c11d2 | [
"MIT"
] | 8 | 2020-03-29T01:44:16.000Z | 2022-03-26T23:15:34.000Z | simulation/battery/base_battery.py | BillMakwae/Simulation | 8d0ec274643f23bc0e78c96e50508b60791c11d2 | [
"MIT"
] | 60 | 2020-02-08T22:07:16.000Z | 2022-03-26T23:51:55.000Z | simulation/battery/base_battery.py | BillMakwae/Simulation | 8d0ec274643f23bc0e78c96e50508b60791c11d2 | [
"MIT"
] | 1 | 2021-10-20T20:07:06.000Z | 2021-10-20T20:07:06.000Z | from simulation.common import Storage
from simulation.common import BatteryEmptyError
class BaseBattery(Storage):
def __init__(self, initial_energy, max_current_capacity, max_energy_capacity,
max_voltage, min_voltage, voltage, state_of_charge):
super().__init__()
# Constants
self.max_current_capacity = max_current_capacity # max capacity of battery (Ah)
self.max_energy_capacity = max_energy_capacity # max energy inside battery (Wh)
self.max_voltage = max_voltage # maximum battery voltage (V)
self.min_voltage = min_voltage # battery cut-off voltage (V)
# Variables
self.stored_energy = initial_energy # energy inside battery (Wh)
self.state_of_charge = state_of_charge # battery state of charge
self.voltage = voltage # terminal voltage of the battery (V)
if self.state_of_charge > 0:
self.empty = False # 1 if battery is empty, 0 if battery is not empty
else:
self.empty = True
def update(self, tick):
raise NotImplementedError
def charge(self, energy):
# handles the possibility that adding energy exceeds the max capacity of the battery
if self.stored_energy + energy >= self.max_energy_capacity:
self.stored_energy = self.max_energy_capacity
else:
self.stored_energy += energy
def discharge(self, energy):
# in the case that the required energy is more than what the battery currently stores
if self.stored_energy - energy <= 0:
# currently the remaining energy in the battery just evaporates but this should be changed in the future
self.stored_energy = 0
self.empty = True
# TODO: consider removing exception
raise BatteryEmptyError("ERROR: Battery is empty.\n")
else:
self.stored_energy -= energy
return energy
def is_empty(self):
return self.empty
def get_stored_energy(self):
return self.stored_energy
def get_state_of_charge(self):
return self.state_of_charge
def get_output_voltage(self):
return self.voltage
def __str__(self):
return (f"Battery stored energy: {self.stored_energy:.2f}Wh\n"
f"Battery state of charge: {self.state_of_charge * 100:.1f}%\n"
f"Battery voltage: {self.voltage:.2f}V\n")
| 39.575758 | 116 | 0.61562 | 2,523 | 0.965926 | 0 | 0 | 0 | 0 | 0 | 0 | 776 | 0.29709 |
5f501af017d1618fd9d8ac7f58bef0af07c22038 | 2,757 | py | Python | MLP/Detectar cancer de mama/Cancer_mama_simples.py | alex7alves/Deep-Learning | 7843629d5367f3ea8b15915a7ba3667cf7a65587 | [
"Apache-2.0"
] | null | null | null | MLP/Detectar cancer de mama/Cancer_mama_simples.py | alex7alves/Deep-Learning | 7843629d5367f3ea8b15915a7ba3667cf7a65587 | [
"Apache-2.0"
] | null | null | null | MLP/Detectar cancer de mama/Cancer_mama_simples.py | alex7alves/Deep-Learning | 7843629d5367f3ea8b15915a7ba3667cf7a65587 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 17 21:04:48 2018
@author: Alex Alves
Programa para determinar se um tumor de mama
é benigno (saida 0) ou maligno (saida 1)
"""
import pandas as pa
# Import used to split the data between network training and validation testing
from sklearn.model_selection import train_test_split
import keras
from keras.models import Sequential
from keras.layers import Dense
from sklearn.metrics import confusion_matrix, accuracy_score
entrada = pa.read_csv('entradas-breast.csv')
esperado = pa.read_csv('saidas-breast.csv')
# Training with 75% and validation with 25%
entrada_treinar, entrada_teste, esperado_treinar,esperado_teste =train_test_split(entrada,esperado,test_size=0.25)
# Creating the neural network
detectar_cancer = Sequential()
# Adding the input layer
detectar_cancer.add(Dense(units=16,activation='relu',kernel_initializer='random_uniform',input_dim=30))
# Adding a hidden layer
detectar_cancer.add(Dense(units=16,activation='relu',kernel_initializer='random_uniform'))
# Adding the output layer
detectar_cancer.add(Dense(units=1,activation='sigmoid'))
# Compile the network
# compile(gradient descent optimizer, error function - MSE, network accuracy metric)
# clipvalue -> clips the gradient values to between -0.5 and 0.5
# lr = step size, decay -> step size reduction
otimizar = keras.optimizers.Adam(lr=0.001,decay=0.0001)
# In this case clipvalue hurt performance
#otimizar = keras.optimizers.Adam(lr=0.004,decay=0.0001,clipvalue=0.5)
detectar_cancer.compile(otimizar,loss='binary_crossentropy',metrics=['binary_accuracy'])
#detectar_cancer.compile(optimizer='adam',loss='binary_crossentropy',metrics=['binary_accuracy'])
# Train the network - the error is computed over batches of 10 samples,
# then the weights are updated (stochastic gradient descent, 10 samples at a time)
detectar_cancer.fit(entrada_treinar,esperado_treinar,batch_size=10,epochs=100)
# Getting the weights
pesosCamadaEntrada = detectar_cancer.layers[0].get_weights()
pesosCamadaOculta = detectar_cancer.layers[1].get_weights()
pesosCamadaSaida = detectar_cancer.layers[2].get_weights()
# Running the validation test
# returns the predicted probability for each sample
validar = detectar_cancer.predict(entrada_teste)
# converting to true or false (1 or 0) for comparison
# if greater than 0.5 it is true, otherwise false
validar = (validar > 0.5)
# compares the 2 vectors and computes the network's accuracy
# on the held-out test set
precisao = accuracy_score(esperado_teste,validar)
# Confusion matrix of the network's predictions
acertos = confusion_matrix(esperado_teste,validar)
# Another way to get the result
# returns the loss and the accuracy
resultado = detectar_cancer.evaluate(entrada_teste, esperado_teste)
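# Illustrative addition (not part of the original script): the results computed
# above can be inspected with, e.g.
#   print('Test accuracy: %.4f' % precisao)
#   print(acertos)    # confusion matrix
#   print(resultado)  # [loss, accuracy] returned by evaluate()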
| 33.216867 | 114 | 0.791077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,504 | 0.541982 |
5f50dd9219cff3c1253c4849dd5381638d312cc3 | 1,214 | py | Python | py/py_0736_paths_to_equality.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | py/py_0736_paths_to_equality.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | py/py_0736_paths_to_equality.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | # Solution of;
# Project Euler Problem 736: Paths to Equality
# https://projecteuler.net/problem=736
#
# Define two functions on lattice points:$r(x,y) = (x+1,2y)$$s(x,y) =
# (2x,y+1)$A path to equality of length $n$ for a pair $(a,b)$ is a sequence
# $\Big((a_1,b_1),(a_2,b_2),\ldots,(a_n,b_n)\Big)$, where:$(a_1,b_1) =
# (a,b)$$(a_k,b_k) = r(a_{k-1},b_{k-1})$ or $(a_k,b_k) = s(a_{k-1},b_{k-1})$
# for $k > 1$$a_k \ne b_k$ for $k < n$$a_n = b_n$$a_n = b_n$ is called the
# final value. For example,$(45,90)\xrightarrow{r}
# (46,180)\xrightarrow{s}(92,181)\xrightarrow{s}(184,182)\xrightarrow{s}(368,183)\xrightarrow{s}(736,184)\xrightarrow{r}$$(737,368)\xrightarrow{s}(1474,369)\xrightarrow{r}(1475,738)\xrightarrow{r}(1476,1476)$This
# is a path to equality for $(45,90)$ and is of length 10 with final value
# 1476. There is no path to equality of $(45,90)$ with smaller length. Find
# the unique path to equality for $(45,90)$ with smallest odd length. Enter
# the final value as your answer.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 736
timed.caller(dummy, n, i, prob_id)
| 40.466667 | 213 | 0.651565 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,056 | 0.869852 |
5f548523f9dcf1f62a0e2fe0f345f22d699939d1 | 1,728 | py | Python | codejam/2020-qualification/d.py | Ashindustry007/competitive-programming | 2eabd3975c029d235abb7854569593d334acae2f | [
"WTFPL"
] | 506 | 2018-08-22T10:30:38.000Z | 2022-03-31T10:01:49.000Z | codejam/2020-qualification/d.py | Ashindustry007/competitive-programming | 2eabd3975c029d235abb7854569593d334acae2f | [
"WTFPL"
] | 13 | 2019-08-07T18:31:18.000Z | 2020-12-15T21:54:41.000Z | codejam/2020-qualification/d.py | Ashindustry007/competitive-programming | 2eabd3975c029d235abb7854569593d334acae2f | [
"WTFPL"
] | 234 | 2018-08-06T17:11:41.000Z | 2022-03-26T10:56:42.000Z | #!/usr/bin/env python3
# https://codingcompetitions.withgoogle.com/codejam/round/000000000019fd27/0000000000209a9e
t, b = map(int, input().split())
for _ in range(t):
xs = [None] * b
q, k, k1, k2 = 0, 0, None, None
def query(k):
global q
q += 1
print(k)
r = int(input())
return r
def complement():
global xs
for i in range(b):
if xs[i] == 0:
xs[i] = 1
elif xs[i] == 1:
xs[i] = 0
def solve():
print(''.join(str(x) for x in xs))
assert(input() == 'Y')
while True:
if q > 0 and q % 10 == 0:
if k1 is not None and k2 is not None:
v1 = query(k1+1)
v2 = query(k2+1)
if xs[k1] == v1 and xs[k2] == v2:
pass
elif xs[k1] != v1 and xs[k2] != v2:
complement()
elif xs[k1] != v1:
xs = xs[::-1]
complement()
else:
xs = xs[::-1]
elif k1 is not None:
v1 = query(k1+1)
v1 = query(k1+1)
if xs[k1] != v1:
complement()
else:
v2 = query(k2+1)
v2 = query(k2+1)
if xs[k2] != v2:
xs = xs[::-1]
else:
v1 = query(k+1)
v2 = query(b-k)
xs[k] = v1
xs[b-k-1] = v2
if v1 == v2 and k1 is None:
k1 = k
elif v1 != v2 and k2 is None:
k2 = k
k += 1
if k*2 == b:
solve()
break
| 27 | 91 | 0.358218 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.068287 |
5f54a49151f01f5e6cf35cf93d8091376f6fa1d7 | 431 | py | Python | doping_test/Standard.py | Biewer/Doping-Tests-for-Cyber-Physical-Systems-Tool | eb359ba618f0022dcd403edc99904f3ef2940e65 | [
"MIT"
] | null | null | null | doping_test/Standard.py | Biewer/Doping-Tests-for-Cyber-Physical-Systems-Tool | eb359ba618f0022dcd403edc99904f3ef2940e65 | [
"MIT"
] | null | null | null | doping_test/Standard.py | Biewer/Doping-Tests-for-Cyber-Physical-Systems-Tool | eb359ba618f0022dcd403edc99904f3ef2940e65 | [
"MIT"
] | null | null | null | class Standard(object):
"""Abstract class for representation of Standard LTS"""
def __init__(self):
super(Standard, self).__init__()
def get_any_trace(self, boundary):
# Return any of the traces of standard LTS S
raise NotImplementedError('Abstract method not implemented!')
def get_traces(self, boundary):
# Return all traces up to length `boundary`
raise NotImplementedError('Abstract method not implemented!')
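# Illustrative sketch (not part of the original module): a concrete Standard just
# has to supply both queries, e.g. by enumerating traces held in a plain list.
class ListStandard(Standard):
    def __init__(self, traces):
        super(ListStandard, self).__init__()
        self.traces = traces
    def get_any_trace(self, boundary):
        return next(t for t in self.traces if len(t) <= boundary)
    def get_traces(self, boundary):
        return [t for t in self.traces if len(t) <= boundary]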
| 30.785714 | 63 | 0.756381 | 429 | 0.99536 | 0 | 0 | 0 | 0 | 0 | 0 | 210 | 0.487239 |
5f587bf36e711ee18aa81e26269a6338ac9328eb | 1,388 | py | Python | Stephanie/updater.py | JeremyARussell/stephanie-va | acc894fa69b4e5559308067d525f71f951ecc258 | [
"MIT"
] | 866 | 2017-06-10T19:25:28.000Z | 2022-01-06T18:29:36.000Z | Stephanie/updater.py | JeremyARussell/stephanie-va | acc894fa69b4e5559308067d525f71f951ecc258 | [
"MIT"
] | 54 | 2017-06-11T06:41:19.000Z | 2022-01-10T23:06:03.000Z | Stephanie/updater.py | JeremyARussell/stephanie-va | acc894fa69b4e5559308067d525f71f951ecc258 | [
"MIT"
] | 167 | 2017-06-10T19:32:54.000Z | 2022-01-03T07:01:39.000Z | import requests
from Stephanie.configurer import config
class Updater:
def __init__(self, speaker):
self.speaker = speaker
self.c = config
self.current_version = self.c.config.get("APPLICATION", "version")
self.update_url = "https://raw.githubusercontent.com/SlapBot/va-version-check/master/version.json"
self.requests = requests
self.data = None
def check_for_update(self):
try:
self.data = self.get_update_information()
except Exception:
print("Couldn't access stephanie's version update information.")
return
try:
if str(self.current_version) != str(self.data['version']):
print("Your virtual assistant's version is %s, while the latest one is %s" % (self.current_version, self.data['version']))
if int(self.data['print_status']):
print("Kindly visit the main website of stephanie at www.github.com/slapbot/stephanie-va to update the software to it's latest version.")
if int(self.data['speak_status']):
self.speaker.speak(self.data['message'])
for message in self.data['additional_information']:
print(message)
if self.data['speak_announcement']:
self.speaker.speak(self.data['speak_announcement'])
except Exception:
print("There's some problem in recieving version update information.")
return
def get_update_information(self):
r = self.requests.get(self.update_url)
data = r.json()
return data
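# Illustrative usage sketch (not part of the original module): any object with a
# speak() method can act as the speaker, e.g.
#   Updater(speaker=my_text_to_speech).check_for_update()
# where my_text_to_speech is a hypothetical placeholder for the assistant's
# speech output object.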
| 34.7 | 142 | 0.730548 | 1,329 | 0.957493 | 0 | 0 | 0 | 0 | 0 | 0 | 539 | 0.388329 |
5f591fe59a581e7f936f818cedb0f094b131b698 | 24,533 | py | Python | WORC/featureprocessing/ComBat.py | MStarmans91/WORC | b6b8fc2ccb7d443a69b5ca20b1d6efb65b3f0fc7 | [
"ECL-2.0",
"Apache-2.0"
] | 47 | 2018-01-28T14:08:15.000Z | 2022-03-24T16:10:07.000Z | WORC/featureprocessing/ComBat.py | JZK00/WORC | 14e8099835eccb35d49b52b97c0be64ecca3809c | [
"ECL-2.0",
"Apache-2.0"
] | 13 | 2018-08-28T13:32:57.000Z | 2020-10-26T16:35:59.000Z | WORC/featureprocessing/ComBat.py | JZK00/WORC | 14e8099835eccb35d49b52b97c0be64ecca3809c | [
"ECL-2.0",
"Apache-2.0"
] | 16 | 2017-11-13T10:53:36.000Z | 2022-03-18T17:02:04.000Z | #!/usr/bin/env python
# Copyright 2020 Biomedical Imaging Group Rotterdam, Departments of
# Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import scipy.io as sio
import WORC.IOparser.file_io as wio
import WORC.IOparser.config_io_combat as cio
import numpy as np
import random
import pandas as pd
from WORC.addexceptions import WORCValueError, WORCKeyError
import tempfile
from sys import platform
from WORC.featureprocessing.VarianceThreshold import selfeat_variance
from sklearn.preprocessing import StandardScaler
from neuroCombat import neuroCombat
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from WORC.featureprocessing.Imputer import Imputer
def ComBat(features_train_in, labels_train, config, features_train_out,
features_test_in=None, labels_test=None, features_test_out=None,
VarianceThreshold=True, scaler=False, logarithmic=False):
"""
Apply ComBat feature harmonization.
Based on: https://github.com/Jfortin1/ComBatHarmonization
"""
# Load the config
print('############################################################')
print('# Initializing ComBat. #')
print('############################################################\n')
config = cio.load_config(config)
excluded_features = config['ComBat']['excluded_features']
# If mod, than also load moderating labels
if config['ComBat']['mod'][0] == '[]':
label_names = config['ComBat']['batch']
else:
label_names = config['ComBat']['batch'] + config['ComBat']['mod']
# Load the features for both training and testing, match with batch and mod parameters
label_data_train, image_features_train =\
wio.load_features(features_train_in, patientinfo=labels_train,
label_type=label_names)
feature_labels = image_features_train[0][1]
image_features_train = [i[0] for i in image_features_train]
label_data_train['patient_IDs'] = list(label_data_train['patient_IDs'])
# Exclude features
if excluded_features:
print(f'\t Excluding features containing: {excluded_features}')
# Determine indices of excluded features
included_feature_indices = []
excluded_feature_indices = []
for fnum, i in enumerate(feature_labels):
if not any(e in i for e in excluded_features):
included_feature_indices.append(fnum)
else:
excluded_feature_indices.append(fnum)
# Actually exclude the features
image_features_train_combat = [np.asarray(i)[included_feature_indices].tolist() for i in image_features_train]
feature_labels_combat = np.asarray(feature_labels)[included_feature_indices].tolist()
image_features_train_noncombat = [np.asarray(i)[excluded_feature_indices].tolist() for i in image_features_train]
feature_labels_noncombat = np.asarray(feature_labels)[excluded_feature_indices].tolist()
else:
image_features_train_combat = image_features_train
feature_labels_combat = feature_labels.tolist()
image_features_train_noncombat = []
feature_labels_noncombat = []
# Detect NaNs, otherwise first feature imputation is required
if any(np.isnan(a) for a in np.asarray(image_features_train_combat).flatten()):
print('\t [WARNING] NaNs detected, applying median imputation')
imputer = Imputer(missing_values=np.nan, strategy='median')
imputer.fit(image_features_train_combat)
image_features_train_combat = imputer.transform(image_features_train_combat)
else:
imputer = None
# Apply a scaler to the features
if scaler:
print('\t Fitting scaler on dataset.')
scaler = StandardScaler().fit(image_features_train_combat)
image_features_train_combat = scaler.transform(image_features_train_combat)
# Remove features with a constant value
if VarianceThreshold:
print(f'\t Applying variance threshold on dataset.')
image_features_train_combat, feature_labels_combat, VarSel =\
selfeat_variance(image_features_train_combat, np.asarray([feature_labels_combat]))
feature_labels_combat = feature_labels_combat[0].tolist()
if features_test_in:
label_data_test, image_features_test =\
wio.load_features(features_test_in, patientinfo=labels_test,
label_type=label_names)
image_features_test = [i[0] for i in image_features_test]
label_data_test['patient_IDs'] = list(label_data_test['patient_IDs'])
if excluded_features:
image_features_test_combat = [np.asarray(i)[included_feature_indices].tolist() for i in image_features_test]
image_features_test_noncombat = [np.asarray(i)[excluded_feature_indices].tolist() for i in image_features_test]
else:
image_features_test_combat = image_features_test
image_features_test_noncombat = []
# Apply imputation if required
if imputer is not None:
image_features_test_combat = imputer.transform(image_features_test_combat)
# Apply a scaler to the features
if scaler:
image_features_test_combat = scaler.transform(image_features_test_combat)
# Remove features with a constant value
if VarianceThreshold:
image_features_test_combat = VarSel.transform(image_features_test_combat)
all_features = image_features_train_combat.tolist() + image_features_test_combat.tolist()
all_labels = list()
for i in range(label_data_train['label'].shape[0]):
all_labels.append(label_data_train['label'][i, :, 0].tolist() + label_data_test['label'][i, :, 0].tolist())
all_labels = np.asarray(all_labels)
else:
all_features = image_features_train_combat.tolist()
all_labels = label_data_train['label']
# Convert data to a single array
all_features_matrix = np.asarray(all_features)
all_labels = np.squeeze(all_labels)
# Apply logarithm if required
if logarithmic:
print('\t Taking log10 of features before applying ComBat.')
all_features_matrix = np.log10(all_features_matrix)
# Convert all_labels to dictionary
if len(all_labels.shape) == 1:
# No mod variables
all_labels = {label_data_train['label_name'][0]: all_labels}
else:
all_labels = {k: v for k, v in zip(label_data_train['label_name'], all_labels)}
# Split labels in batch and moderation labels
bat = config['ComBat']['batch']
mod = config['ComBat']['mod']
print(f'\t Using batch variable {bat}, mod variables {mod}.')
batch = [all_labels[l] for l in all_labels.keys() if l in config['ComBat']['batch']]
batch = batch[0]
if config['ComBat']['mod'][0] == '[]':
mod = None
else:
mod = [all_labels[l] for l in all_labels.keys() if l in config['ComBat']['mod']]
# Set parameters for output files
parameters = {'batch': config['ComBat']['batch'],
'mod': config['ComBat']['mod'],
'par': config['ComBat']['par']}
name = 'Image features: ComBat corrected'
panda_labels = ['parameters',
'patient',
'feature_values',
'feature_labels']
feature_labels = feature_labels_combat + feature_labels_noncombat
# Convert all inputs to arrays with right shape
all_features_matrix = np.transpose(all_features_matrix)
if mod is not None:
mod = np.transpose(np.asarray(mod))
# Patients identified with batch -1.0 should be skipped
skipname = 'Image features: ComBat skipped'
ntrain = len(image_features_train_combat)
ndel = 0
print(features_test_out)
for bnum, b in enumerate(batch):
bnum -= ndel
if b == -1.0:
if bnum < ntrain - ndel:
# Training patient
print('train')
pid = label_data_train['patient_IDs'][bnum]
out = features_train_out[bnum]
# Combine ComBat and non-ComBat features
feature_values_temp = list(all_features_matrix[:, bnum]) + list(image_features_train_noncombat[bnum])
# Delete patient for later processing
del label_data_train['patient_IDs'][bnum]
del image_features_train_noncombat[bnum]
del features_train_out[bnum]
image_features_train_combat = np.delete(image_features_train_combat, bnum, 0)
else:
# Test patient
print('test')
pid = label_data_test['patient_IDs'][bnum - ntrain]
out = features_test_out[bnum - ntrain]
# Combine ComBat and non-ComBat features
feature_values_temp = list(all_features_matrix[:, bnum]) + list(image_features_test_noncombat[bnum - ntrain])
# Delete patient for later processing
del label_data_test['patient_IDs'][bnum - ntrain]
del image_features_test_noncombat[bnum - ntrain]
del features_test_out[bnum - ntrain]
image_features_test_combat = np.delete(image_features_test_combat, bnum - ntrain, 0)
# Delete some other variables for later processing
all_features_matrix = np.delete(all_features_matrix, bnum, 1)
if mod is not None:
mod = np.delete(mod, bnum, 0)
batch = np.delete(batch, bnum, 0)
# Notify user
print(f'[WARNING] Skipping patient {pid} as batch variable is -1.0.')
# Sort based on feature label
feature_labels_temp, feature_values_temp =\
zip(*sorted(zip(feature_labels, feature_values_temp)))
# Convert to pandas Series and save as hdf5
panda_data = pd.Series([parameters, pid, feature_values_temp,
feature_labels_temp],
index=panda_labels,
name=skipname
)
print(f'\t Saving image features to: {out}.')
panda_data.to_hdf(out, 'image_features')
ndel += 1
print(features_test_out)
# Run ComBat in Matlab
if config['ComBat']['language'] == 'matlab':
print('\t Executing ComBat through Matlab')
data_harmonized = ComBatMatlab(dat=all_features_matrix,
batch=batch,
command=config['ComBat']['matlab'],
mod=mod,
par=config['ComBat']['par'],
per_feature=config['ComBat']['per_feature'])
elif config['ComBat']['language'] == 'python':
print('\t Executing ComBat through neuroComBat in Python')
data_harmonized = ComBatPython(dat=all_features_matrix,
batch=batch,
mod=mod,
eb=config['ComBat']['eb'],
par=config['ComBat']['par'],
per_feature=config['ComBat']['per_feature'])
else:
raise WORCKeyError(f"Language {config['ComBat']['language']} unknown.")
# Convert values back if logarithm was used
if logarithmic:
data_harmonized = 10 ** data_harmonized
# Convert again to train hdf5 files
feature_values_train_combat = [data_harmonized[:, i] for i in range(len(image_features_train_combat))]
for fnum, i_feat in enumerate(feature_values_train_combat):
# Combine ComBat and non-ComBat features
feature_values_temp = i_feat.tolist() + image_features_train_noncombat[fnum]
# Sort based on feature label
feature_labels_temp, feature_values_temp =\
zip(*sorted(zip(feature_labels, feature_values_temp)))
# Convert to pandas Series and save as hdf5
pid = label_data_train['patient_IDs'][fnum]
panda_data = pd.Series([parameters, pid, feature_values_temp,
feature_labels_temp],
index=panda_labels,
name=name
)
print(f'Saving image features to: {features_train_out[fnum]}.')
panda_data.to_hdf(features_train_out[fnum], 'image_features')
# Repeat for testing if required
if features_test_in:
print(len(image_features_test_combat))
print(data_harmonized.shape[1])
feature_values_test_combat = [data_harmonized[:, i] for i in range(data_harmonized.shape[1] - len(image_features_test_combat), data_harmonized.shape[1])]
for fnum, i_feat in enumerate(feature_values_test_combat):
print(fnum)
# Combine ComBat and non-ComBat features
feature_values_temp = i_feat.tolist() + image_features_test_noncombat[fnum]
# Sort based on feature label
feature_labels_temp, feature_values_temp =\
zip(*sorted(zip(feature_labels, feature_values_temp)))
# Convert to pandas Series and save as hdf5
pid = label_data_test['patient_IDs'][fnum]
panda_data = pd.Series([parameters, pid, feature_values_temp,
feature_labels_temp],
index=panda_labels,
name=name
)
print(f'Saving image features to: {features_test_out[fnum]}.')
panda_data.to_hdf(features_test_out[fnum], 'image_features')
def ComBatPython(dat, batch, mod=None, par=1,
eb=1, per_feature=False, plotting=False):
"""
Run the ComBat Function python script.
par = 0 is non-parametric.
"""
# convert inputs to neuroCombat format.
covars = dict()
categorical_cols = list()
covars['batch'] = batch
if mod is not None:
for i_mod in range(mod.shape[1]):
label = f'mod_{i_mod}'
covars[label] = [m for m in mod[:, i_mod]]
categorical_cols.append(label)
covars = pd.DataFrame(covars)
batch_col = 'batch'
if par == 0:
parametric = False
elif par == 1:
parametric = True
else:
raise WORCValueError(f'Par should be 0 or 1, now {par}.')
if eb == 0:
eb = False
elif eb == 1:
eb = True
else:
raise WORCValueError(f'eb should be 0 or 1, now {eb}.')
if per_feature == 0:
per_feature = False
elif per_feature == 1:
per_feature = True
else:
raise WORCValueError(f'per_feature should be 0 or 1, now {per_feature}.')
# execute ComBat
if not per_feature:
data_harmonized = neuroCombat(dat=dat, covars=covars, batch_col=batch_col,
categorical_cols=categorical_cols,
eb=eb, parametric=parametric)
elif per_feature:
print('\t Executing ComBat per feature.')
data_harmonized = np.zeros(dat.shape)
# Shape: (features, samples)
for i in range(dat.shape[0]):
if eb:
# Copy feature + random noise
random_feature = np.random.rand(dat[i, :].shape[0])
feat_temp = np.asarray([dat[i, :], dat[i, :] + random_feature])
else:
# Just use the single feature
feat_temp = np.asarray([dat[i, :]])
feat_temp = neuroCombat(dat=feat_temp, covars=covars,
batch_col=batch_col,
categorical_cols=categorical_cols,
eb=eb, parametric=parametric)
data_harmonized[i, :] = feat_temp[0, :]
if plotting:
feat1 = dat[i, :]
feat1_harm = data_harmonized[i, :]
print(len(feat1))
feat1_b1 = [f for f, b in zip(feat1, batch[0]) if b == 1.0]
feat1_b2 = [f for f, b in zip(feat1, batch[0]) if b == 2.0]
print(len(feat1_b1))
print(len(feat1_b2))
feat1_harm_b1 = [f for f, b in zip(feat1_harm, batch[0]) if b == 1.0]
feat1_harm_b2 = [f for f, b in zip(feat1_harm, batch[0]) if b == 2.0]
plt.figure()
ax = plt.subplot(2, 1, 1)
ax.scatter(np.ones((len(feat1_b1))), feat1_b1, color='red')
ax.scatter(np.ones((len(feat1_b2))) + 1, feat1_b2, color='blue')
plt.title('Before Combat')
ax = plt.subplot(2, 1, 2)
ax.scatter(np.ones((len(feat1_b1))), feat1_harm_b1, color='red')
ax.scatter(np.ones((len(feat1_b2))) + 1, feat1_harm_b2, color='blue')
plt.title('After Combat')
plt.show()
else:
raise WORCValueError(f'per_feature should be False or True, now {per_feature}.')
return data_harmonized
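# Illustrative usage sketch (not part of the original module): dat is expected as
# a (features x samples) matrix with one batch label per sample, e.g.
#   feats = np.random.rand(20, 8)                   # 20 features, 8 samples
#   batches = np.asarray([1, 1, 1, 1, 2, 2, 2, 2])  # scanner/site per sample
#   harmonized = ComBatPython(dat=feats, batch=batches, mod=None, par=1, eb=0)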
def Synthetictest(n_patients=50, n_features=10, par=1, eb=1,
per_feature=False, difscale=False, logarithmic=False,
oddpatient=True, oddfeat=True, samefeat=True):
"""Test for ComBat with Synthetic data."""
features = np.zeros((n_features, n_patients))
batch = list()
# First batch: Gaussian with loc 0, scale 1
for i in range(0, int(n_patients/2)):
feat_temp = [np.random.normal(loc=0.0, scale=1.0) for i in range(n_features)]
if i == 1 and oddpatient:
feat_temp = [np.random.normal(loc=10.0, scale=1.0) for i in range(n_features)]
elif oddfeat:
feat_temp = [np.random.normal(loc=0.0, scale=1.0) for i in range(n_features - 1)] + [np.random.normal(loc=10000.0, scale=1.0)]
if samefeat:
feat_temp[-1] = 1
features[:, i] = feat_temp
batch.append(1)
# Get directions for features
directions = list()
for i in range(n_features):
direction = random.random()
if direction > 0.5:
directions.append(1.0)
else:
directions.append(-1.0)
# First batch: Gaussian with loc 5, scale 1
for i in range(int(n_patients/2), n_patients):
feat_temp = [np.random.normal(loc=direction*5.0, scale=1.0) for i in range(n_features)]
if oddfeat:
feat_temp = [np.random.normal(loc=5.0, scale=1.0) for i in range(n_features - 1)] + [np.random.normal(loc=10000.0, scale=1.0)]
if difscale:
feat_temp = [f + 1000 for f in feat_temp]
feat_temp = np.multiply(feat_temp, directions)
if samefeat:
feat_temp[-1] = 1
features[:, i] = feat_temp
batch.append(2)
# Create mod var
mod = [[np.random.randint(30, 100) for i in range(n_patients)]]
# Apply ComBat
batch = np.asarray([batch])
mod = np.transpose(np.asarray(mod))
if logarithmic:
minfeat = np.min(features)
features = np.log10(features + np.abs(minfeat) + 1E-100)
data_harmonized = ComBatPython(dat=features, batch=batch, mod=mod, par=par,
eb=eb, per_feature=per_feature)
if logarithmic:
data_harmonized = 10 ** data_harmonized - np.abs(minfeat)
for i in range(n_features):
f = plt.figure()
ax = plt.subplot(2, 1, 1)
ax.scatter(np.ones((int(n_patients/2))), features[i, 0:int(n_patients/2)], color='red')
ax.scatter(np.ones((n_patients - int(n_patients/2))) + 1, features[i, int(n_patients/2):], color='blue')
plt.title('Before Combat')
ax = plt.subplot(2, 1, 2)
ax.scatter(np.ones((int(n_patients/2))), data_harmonized[i, 0:int(n_patients/2)], color='red')
ax.scatter(np.ones((n_patients - int(n_patients/2))) + 1, data_harmonized[i, int(n_patients/2):], color='blue')
plt.title('After Combat')
plt.show()
f.savefig(f'combat_par{par}_eb{eb}_perfeat{per_feature}_feat{i}.png')
# Logarithmic: not useful, as we have negative numbers, and (almost) zeros.
# so combat gives unuseful results.
# Same feature twice with eb and par: nans
def ComBatMatlab(dat, batch, command, mod=None, par=1, per_feature='true'):
"""
Run the ComBat Function Matlab script.
par = 0 is non-parametric.
"""
# Mod: default argument is empty list
if mod is None:
mod = []
# TODO: Add check whether matlab executable is found
# Save the features in a .mat MatLab Compatible format
# NOTE: Should change this_folder to a proper temporary directory
this_folder = os.path.dirname(os.path.realpath(__file__))
tempdir = tempfile.gettempdir()
tempfile_in = os.path.join(tempdir, 'combat_input.mat')
tempfile_out = os.path.join(tempdir, 'combat_output.mat')
ComBatFolder = os.path.join(os.path.dirname(this_folder),
'external',
'ComBatHarmonization',
'Matlab',
'scripts')
dict = {'output': tempfile_out,
'ComBatFolder': ComBatFolder,
'datvar': dat,
'batchvar': batch,
'modvar': mod,
'parvar': par,
'per_feature': per_feature
}
sio.savemat(tempfile_in, dict)
# Make sure there is no tempfile out from the previous run
if os.path.exists(tempfile_out):
os.remove(tempfile_out)
# Run ComBat
currentdir = os.getcwd()
if platform == "linux" or platform == "linux2":
commandseparator = ' ; '
elif platform == "win32":
commandseparator = ' & '
# BIGR Cluster: /cm/shared/apps/matlab/R2015b/bin/matlab
regcommand = ('cd "' + this_folder + '"' + commandseparator +
'"' + command + '" -nodesktop -nosplash -nojvm -r "combatmatlab(' + "'" + str(tempfile_in) + "'" + ')"' +
commandseparator +
'cd "' + currentdir + '"')
print(f'Executing ComBat in Matlab through command: {regcommand}.')
proc = subprocess.Popen(regcommand,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
proc.wait()
stdout_value, stderr_value = proc.communicate()
# BUG: Waiting does not work, just wait for output to arrive, either with
# the actual output or an error message
succes = False
while succes is False:
if os.path.exists(tempfile_out):
try:
mat_dict = sio.loadmat(tempfile_out)
try:
data_harmonized = mat_dict['data_harmonized']
succes = True
except KeyError:
try:
message = mat_dict['message']
raise WORCValueError(f'Error in Matlab ComBat execution: {message}.')
except KeyError:
pass
except (sio.matlab.miobase.MatReadError, ValueError):
pass
# Check if expected output file exists
if not os.path.exists(tempfile_out):
raise WORCValueError(f'Error in Matlab ComBat execution: command: {regcommand}, stdout: {stdout_value}, stderr: {stderr_value}')
# Read the output from ComBat
mat_dict = sio.loadmat(tempfile_out)
data_harmonized = mat_dict['data_harmonized']
data_harmonized = np.transpose(data_harmonized)
# Remove temporary files
os.remove(tempfile_out)
os.remove(tempfile_in)
return data_harmonized
| 40.684909 | 161 | 0.604329 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,937 | 0.242001 |
5f59e320e469d3924b3247fe49f94eea11acee62 | 727 | py | Python | setup.py | mariocesar/pg-worker | d79c6daa8825226c754330c21150e4e416b09005 | [
"MIT"
] | 1 | 2020-06-03T21:21:03.000Z | 2020-06-03T21:21:03.000Z | setup.py | mariocesar/pg-worker | d79c6daa8825226c754330c21150e4e416b09005 | [
"MIT"
] | null | null | null | setup.py | mariocesar/pg-worker | d79c6daa8825226c754330c21150e4e416b09005 | [
"MIT"
] | null | null | null | import os
import sys
from setuptools import setup, find_packages
ROOT = os.path.realpath(os.path.join(os.path.dirname(
sys.modules['__main__'].__file__)))
sys.path.insert(0, os.path.join(ROOT, 'src'))
setup(
name='pgworker',
packages=find_packages('src'),
package_dir={'': 'src'},
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
],
entry_points={
'console_scripts': [
'pgworker = pgworker.runner:main'
]
}
)
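# Illustrative note (not part of the original file): with this src/ layout,
# package_dir={'': 'src'} makes setuptools pick the packages up from src/, so a
# development install is typically done from the project root with
#   pip install -e .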
| 24.233333 | 53 | 0.603851 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 315 | 0.433287 |
5f5a0eafce7a5f076591e84cd9440a10e1d4e795 | 2,040 | py | Python | PyBank/main.py | gentikosumi/python-challenge | e6532bf1033f5272616d4f8a1cf623bbafe1a1c2 | [
"ADSL"
] | null | null | null | PyBank/main.py | gentikosumi/python-challenge | e6532bf1033f5272616d4f8a1cf623bbafe1a1c2 | [
"ADSL"
] | null | null | null | PyBank/main.py | gentikosumi/python-challenge | e6532bf1033f5272616d4f8a1cf623bbafe1a1c2 | [
"ADSL"
] | null | null | null | import os
import csv
path = '/Users/kevinkosumi12345/Genti/python-challenge/PyBank/Resources/budget_data.csv'
budget_csv=os.path.join("../Resources", "budget_data.csv")
csvfile = open(path, newline="")
reader=csv.reader(csvfile, delimiter=",")
header = next(reader)
# print(header)
# the columns we have to convert into lists
# Create 2 empty lists, one for each column
date = []
profloss = []
# print("Financial Anaysis")
# print("-----------------------------------------")
for row in reader:
date.append(row[0])
profloss.append(int(row[1]))
# getting the total of Profit/Losses
total_profloss='Total Profit/Losses: $ ' + str(sum(profloss))
# print(total_profloss)
# getting the number of months in entire period
monthcount = 'Total months: ' + str(len(date))
# print(monthcount)
# before finding the average change in Profit/Losses, first we have to find the total change
Total_change_profloss = 0
for x in range(1, len(profloss)):
Total_change_profloss = Total_change_profloss + (profloss[x] - profloss[x-1])
# finding the average change in Profit/Losses
avg_change_profloss = 'Average change in Profit/Loss: ' + str(round(Total_change_profloss/(len(profloss)-1),2))
# print(avg_change_profloss)
# getting the max value of data in Profit/Losses which is the Greatest Increase of Profit/Losses
maxVal = 'Greatest increase of Profit/Losses: ' + ' on ' + str(date[profloss.index(max(profloss))]) + ' $ ' + str(max(profloss))
# print(maxVal)
# the min Value of date in Profit/Losses which is the Greatest Decrease
minVal = 'Greatest decrease of Profit/Losses: ' + ' on ' + str(date[profloss.index(min(profloss))]) + ' $ ' + str(min(profloss))
# print(minVal)
DataBudget = open('analisis.csv' , 'w')
DataBudget.write('Financial Analysis\n')
DataBudget.write('------------------------\n')
DataBudget.write(monthcount + '\n')
DataBudget.write(total_profloss + '\n')
DataBudget.write(avg_change_profloss + '\n')
DataBudget.write(maxVal + '\n')
DataBudget.write(minVal + '\n')
DataBudget.close() | 30.909091 | 129 | 0.702451 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,058 | 0.518627 |
5f5b2c35892025ff370debbb01a9bff69a798ad0 | 1,516 | py | Python | models/python/hypothalamus/dynamical/old/simple.py | ABRG-Models/MammalBot | 0b153232b94197c7a65156c1c3451ab2b9f725ae | [
"MIT"
] | null | null | null | models/python/hypothalamus/dynamical/old/simple.py | ABRG-Models/MammalBot | 0b153232b94197c7a65156c1c3451ab2b9f725ae | [
"MIT"
] | null | null | null | models/python/hypothalamus/dynamical/old/simple.py | ABRG-Models/MammalBot | 0b153232b94197c7a65156c1c3451ab2b9f725ae | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
T = 30000
# v = 0.02906
# v = 0.617085
v = 0.99
h = 0.01
a = 0.5
b = 0.5
epsilon = 0.05
c = 0.4
eta = lambda rho: np.exp(-(rho)**2/(2*c**2))
nrho = lambda rho, v: -2.0*(rho**3 + (rho-1.0)*v/2.0 - rho)/(rho + 1.0)
nu = lambda rho: (b - eta(rho+1))/a
u = np.zeros(T)
rho = np.zeros(T)
time = np.zeros(T)
# Maps
f = lambda rho, u, v: -rho**3 - (rho + 1.0)*u/2.0 - (rho - 1.0)*v/2.0 + rho
g1 = lambda rho, u, v: epsilon*(b - a*u - eta(rho+1))
# Initial conditions
u[0] = 0.0
rho[0] = -0.0
for i in range(T-1):
rho[i+1] = rho[i] + h*f(rho[i], u[i], v)
u[i+1] = u[i] + h*g1(rho[i], u[i], v)
time[i+1] = time[i] + h
fig, ax = plt.subplots(1, 2)
# X, Y = np.meshgrid(np.arange(-0.6, 0.6, 0.1), np.arange(-0.2, 1.0, .1))
# U = f(X, Y, v)/epsilon #rho
# V = g1(X, Y, v)/epsilon #u
# q = ax[0].quiver(X, Y, U, V, units='x', pivot='tip')#, width=0.022, scale=1 / 0.40)
rhos = np.linspace(-0.99, 1, 100)
ax[0].plot( rhos, nrho(rhos, v), color = [0.8, 0.5, 0.5], linewidth = 3.0)
ax[0].plot( rhos, nu(rhos), color = [0.5, 0.5, 0.8], linewidth = 3.0)
ax[0].plot( rho[0], u[0], 'k.', linewidth = 3.0)
ax[0].plot( rho, u, 'k' )
ax[0].plot( [-1, -1], [-1.5, 1.5], 'k--')
ax[0].set_ylabel('u')
ax[0].set_xlabel(r'$\rho$')
ax[0].text(0.5, nu(0.5)+0.05, r'$u_0$')
ax[0].text(0.95, nrho(0.9, v), r'$\rho_0$')
ax[0].axis([-2, 2, -1.0, 1.5])
ax[1].plot( time, u, label = 'u')
ax[1].plot( time, rho, label = r'$\rho$' )
ax[1].legend()
ax[1].set_xlabel('time')
plt.show() | 28.603774 | 85 | 0.529024 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 329 | 0.217018 |
5f5c0b0acb48624cb76c04ec88d096e81b40a0f1 | 176 | py | Python | test_script.py | SamPurle/DE17_Flask | a6462b85854f7bd72c80ebcc555d50488ef17e67 | [
"MIT"
] | null | null | null | test_script.py | SamPurle/DE17_Flask | a6462b85854f7bd72c80ebcc555d50488ef17e67 | [
"MIT"
] | null | null | null | test_script.py | SamPurle/DE17_Flask | a6462b85854f7bd72c80ebcc555d50488ef17e67 | [
"MIT"
] | null | null | null | import numpy as np
import os
my_array = np.zeros(10)
print(my_array)
os.system('pip freeze > requirements.txt')
my_list = [1,2,3,4,5]
for item in my_list:
print(item)
| 12.571429 | 42 | 0.693182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.176136 |
5f5ebabcae4886b932638d5f3ecd10d1eb595d7b | 6,058 | py | Python | lib/blastin.py | zbwrnz/blastdbm | ee694c01ebb00779623702738a9c958fd496a080 | [
"Unlicense"
] | 1 | 2018-03-22T10:34:20.000Z | 2018-03-22T10:34:20.000Z | lib/blastin.py | arendsee/blastdbm | ee694c01ebb00779623702738a9c958fd496a080 | [
"Unlicense"
] | null | null | null | lib/blastin.py | arendsee/blastdbm | ee694c01ebb00779623702738a9c958fd496a080 | [
"Unlicense"
] | null | null | null | #! /usr/bin/python3
import argparse
import os
import re
import sqlite3 as sql
import sys
import xml.etree.cElementTree as et
import traceback
import lib.initialize as initialize
import lib.sqlite_interface as misc
import lib.meta as meta
# ==================
# EXPORTED FUNCTIONS
# ==================
def parse(parent, *args, **kwargs):
parser = parent.add_parser(
'blast',
help="Read BLAST XML report into SQL database",
parents=args)
parser.add_argument(
'-c', '--collection',
metavar="COL",
help="blast collection")
parser.add_argument(
'-m', '--db_desc',
metavar="DESC",
help="BLAST database description")
parser.add_argument(
'-s', '--small',
help="Reduce database size by not writing alignment sequences",
action=('store_true'), default=False)
parser.add_argument(
'-x', '--max-hits',
metavar="INT",
help='Maximum number of hits to store (default 500)',
type=int,
default=500
)
parser.set_defaults(func=parse_blast_xml)
def parse_blast_xml(args, cur):
if args.input:
for f in args.input:
con = et.iterparse(f, events=('end', 'start'))
_parse_blast_xml(args, cur, con)
else:
con = et.iterparse(sys.stdin, events=('end', 'start'))
_parse_blast_xml(args, cur, con)
def _parse_blast_xml(args, cur, con):
# Initialize tables as necessary
if(not misc.table_exists('blastreport', cur)):
initialize.init_blastreport(cur, verbose=False)
if(not misc.table_exists('blastdatabase', cur)):
initialize.init_blastdatabase(cur, verbose=False)
bdat = Blastdat(cur, args)
for event, elem in con:
if(event == 'start'): continue
if(elem.tag == 'Hsp'):
bdat.add_partial_row()
bdat.clear_hsp()
elif(elem.tag == 'Hit'):
bdat.clear_hit()
elif(elem.tag == 'Iteration'):
if(not bdat.has_hits()):
bdat.add_partial_row()
bdat.clear_iter()
elem.clear()
elif('BlastOutput_db' in elem.tag):
base = os.path.basename(elem.text)
if(not misc.entry_exists('blastdatabase', 'database', base, cur)):
misc.insert({'database': base}, 'blastdatabase', cur)
bdat.add(elem.tag, base)
else:
bdat.add(elem.tag, elem.text)
bdat.write_rows_to_sqldb()
meta.update_dbinfo(cur, verbose=True)
meta.update_mrca(cur, verbose=True)
def _parse_fasta_header(header):
dic = {}
try:
for match in re.finditer('([^|]+)\|([^|]+)', header):
for tag in ('locus', 'gi', 'taxon', 'gb', 'gene'):
if(match.group(1) == tag and match.group(2) != None):
dic['Query_' + tag] = match.group(2)
return(dic)
except:
print("Cannot parse header {}".format(header), file=sys.stderr)
return({})
class Blastdat:
def __init__(self, cur, args):
self.cur = cur
self.args = args
self.dat = {'root':{}, 'iter':{}, 'stat':{}, 'hit':{}, 'hsp':{}}
self.dat['root']['collection'] = args.collection
self.dat['root']['db_desc'] = args.db_desc
self.iter_dicts = []
self.row_by_col = {}
def write_rows_to_sqldb(self):
for col in self.row_by_col.keys():
misc.insertmany(col, self.row_by_col[col], 'BlastReport',
self.cur, replace=True)
def has_hits(self):
try:
            if('No hits found' in self.dat['iter']['Iteration_message']):
return False
except:
pass
return True
def add_partial_row(self):
table = {}
for key in self.dat.keys():
for subkey in self.dat[key].keys():
table[subkey] = self.dat[key][subkey]
self.iter_dicts.append(table)
def _add_rows(self):
if(not self.iter_dicts):
self.add_partial_row()
else:
for d in self.iter_dicts:
if(int(d['Hit_num']) <= self.args.max_hits):
for key, val in self.dat['stat'].items():
d[key] = val
col = tuple(sorted(d.keys()))
row = tuple(map(d.get, col))
try:
self.row_by_col[col].append(row)
except:
self.row_by_col[col] = [row]
self.iter_dicts = []
def clear_iter(self):
'''
Adds all data from current iteration to the database and frees the
iteration and its children hits and hsps from memory
'''
self._add_rows()
self.dat['iter'] = {}
self.dat['stat'] = {}
self.clear_hit()
def clear_hit(self):
'''
Clears the current hit and all children hsps from memory
'''
self.dat['hit'] = {}
self.clear_hsp()
def clear_hsp(self):
'''
        Clears hsp from memory
'''
self.dat['hsp'] = {}
def add(self, tag, text):
'''
Input: One tag and its text (possibly None)
'''
tag = re.sub('-', '_', tag)
if(text is None or text.isspace()): pass
elif('Hsp_' in tag):
if(tag in ('Hsp_qseq', 'Hsp_hseq', 'Hsp_midline') and self.args.small):
pass
else:
self.dat['hsp'][tag] = text
elif('Hit_' in tag):
self.dat['hit'][tag] = text
elif('Iteration_' in tag):
if(tag == 'Iteration_query_def'):
self.dat['iter']['query_seqid'] = re.sub('(\S+).*', '\\1', text)
self.dat['iter'][tag] = text
elif('Statistics_' in tag):
self.dat['stat'][tag] = text
elif('BlastOutput_' in tag or 'Parameters_' in tag):
if('reference' in tag or 'query' in tag):
pass
else:
self.dat['root'][tag] = text
| 31.552083 | 83 | 0.530538 | 3,064 | 0.505777 | 0 | 0 | 0 | 0 | 0 | 0 | 1,322 | 0.218224 |
5f63c4934790515bb6fc74d4d7ecc9a70d977a36 | 646 | py | Python | tests/test_get_image.py | kortizceballos/codeastro-group6 | 9f0ceb8a0fca3e619dbabe97105a3f283e59fa04 | [
"BSD-3-Clause"
] | 1 | 2021-06-25T21:20:42.000Z | 2021-06-25T21:20:42.000Z | tests/test_get_image.py | kortizceballos/codeastro-group6 | 9f0ceb8a0fca3e619dbabe97105a3f283e59fa04 | [
"BSD-3-Clause"
] | null | null | null | tests/test_get_image.py | kortizceballos/codeastro-group6 | 9f0ceb8a0fca3e619dbabe97105a3f283e59fa04 | [
"BSD-3-Clause"
] | null | null | null | from matplotlib.pyplot import get
import pyhips
from pyhips import get_image
def test_get_image():
"""
Tests the get_image() function to make sure no errors are thrown.
"""
assert get_image("Vega", frame="ICRS", survey="DSS", cmap="plasma") == 0
assert get_image("notanid", frame="ICRS", survey="DSS", cmap="plasma") == 1
assert get_image("Vega", frame="notaframe", survey="DSS", cmap="plasma") == 1
assert get_image("Vega", frame="ICRS", survey="notasurvey", cmap="plasma") == 1
assert get_image("Vega", frame="ICRS", survey="DSS", cmap="notacolormap") == 1
if __name__ == "__main__":
test_get_image() | 35.888889 | 83 | 0.662539 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 237 | 0.366873 |
5f65055d81665e397feccfc78dd6d2f299634b64 | 138 | py | Python | cumulus2/template.py | peterkh/cumulus2 | 11352ce469acb0c319ba9cfb8691d80f4ae5048e | [
"Apache-2.0"
] | 1 | 2016-02-12T11:54:07.000Z | 2016-02-12T11:54:07.000Z | cumulus2/template.py | peterkh/cumulus2 | 11352ce469acb0c319ba9cfb8691d80f4ae5048e | [
"Apache-2.0"
] | null | null | null | cumulus2/template.py | peterkh/cumulus2 | 11352ce469acb0c319ba9cfb8691d80f4ae5048e | [
"Apache-2.0"
] | null | null | null | """
Template module for cumulus.
template class for reading a yaml template and creating data_source objects to
retrieve external data.
"""
| 23 | 76 | 0.797101 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 137 | 0.992754 |
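The cumulus template module above ships only its docstring; a minimal sketch of the behaviour it describes might look like the following (the Template class name, the use of PyYAML, and the shape of the data_source entries are all assumptions, not the project's actual implementation):

import yaml

class Template:
    """Read a YAML template and expose the data sources it declares."""

    def __init__(self, path):
        # Load the raw template definition from disk (assumes PyYAML is available)
        with open(path) as handle:
            self.definition = yaml.safe_load(handle)

    def data_sources(self):
        # Yield one dict per declared data source; retrieving the external data is left to callers
        for name, spec in (self.definition.get('data_sources') or {}).items():
            yield {'name': name, **spec}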
5f67096a7114362044846dbb3a2978d1562f88ac | 700 | py | Python | Python-AI-Algorithms/Bubble_sort.py | screadore/Artificial-Intelligence-Sorting-Algorithms | d69f34dbd02556c6a7bbb8e0dee45ab7fdb4b12c | [
"MIT"
] | null | null | null | Python-AI-Algorithms/Bubble_sort.py | screadore/Artificial-Intelligence-Sorting-Algorithms | d69f34dbd02556c6a7bbb8e0dee45ab7fdb4b12c | [
"MIT"
] | null | null | null | Python-AI-Algorithms/Bubble_sort.py | screadore/Artificial-Intelligence-Sorting-Algorithms | d69f34dbd02556c6a7bbb8e0dee45ab7fdb4b12c | [
"MIT"
] | null | null | null | # Bubble sort steps through the list and compares adjacent pairs of elements. The elements are swapped if they are in the wrong order. The pass through the unsorted portion of the list is repeated until the list is sorted. Because Bubble sort repeatedly passes through the unsorted part of the list, it has a worst case complexity of O(n²).
def bubble_sort(arr):
def swap(i, j):
arr[i], arr[j] = arr[j], arr[i]
n = len(arr)
swapped = True
x = -1
while swapped:
swapped = False
x = x + 1
for i in range(1, n - x):
if arr[i - 1] > arr[i]:
swap(i - 1, i)
swapped = True
return arr | 36.842105 | 342 | 0.591429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 343 | 0.489301 |
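A minimal usage sketch for the function above (the sample list is arbitrary and not part of the original file):

# Hypothetical usage of bubble_sort (illustrative only)
if __name__ == '__main__':
    sample = [5, 1, 4, 2, 8]
    print(bubble_sort(sample))  # prints [1, 2, 4, 5, 8]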
5f670af72f12c73cbff679c29371d4269f74b778 | 551 | py | Python | Practice/Python/Strings/the_minion_game.py | nifannn/HackerRank | b05318251226704b1fb949c29aa49493d6ced44b | [
"MIT"
] | 7 | 2019-02-22T10:34:26.000Z | 2021-07-13T01:51:48.000Z | Practice/Python/Strings/the_minion_game.py | nifannn/HackerRank | b05318251226704b1fb949c29aa49493d6ced44b | [
"MIT"
] | null | null | null | Practice/Python/Strings/the_minion_game.py | nifannn/HackerRank | b05318251226704b1fb949c29aa49493d6ced44b | [
"MIT"
] | 7 | 2018-11-09T13:52:34.000Z | 2021-03-18T20:36:22.000Z | def minion_game(string):
# Stuart score
s_idx = [i for i, c in enumerate(string) if c not in 'AEIOU']
s_score = sum([len(string)-i for i in s_idx])
# Kevin score
k_idx = [i for i, c in enumerate(string) if c in 'AEIOU']
k_score = sum([len(string)-i for i in k_idx])
# final result
if k_score > s_score:
print("Kevin {}".format(k_score))
elif k_score < s_score:
print("Stuart {}".format(s_score))
else:
print("Draw")
if __name__ == '__main__':
minion_game(input("Enter a string: "))
| 30.611111 | 65 | 0.604356 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.199637 |
5f6e27388481683369aca2bd805d2c503d7286e8 | 189 | py | Python | deep_learning_zero/ch5/sample.py | kaito0223/shakyou | 8d901b4da24fbf0c708e3eb429a57d194e9857c1 | [
"MIT"
] | null | null | null | deep_learning_zero/ch5/sample.py | kaito0223/shakyou | 8d901b4da24fbf0c708e3eb429a57d194e9857c1 | [
"MIT"
] | null | null | null | deep_learning_zero/ch5/sample.py | kaito0223/shakyou | 8d901b4da24fbf0c708e3eb429a57d194e9857c1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
X = np.random.rand(2) #input
W = np.random.rand(2,3) #weight
B = np.random.rand(3) #bias
print(X)
print(W)
print(B)
Y=np.dot(X,W)+B
print(Y)
| 11.8125 | 31 | 0.613757 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.216931 |
5f71554b9254c1a62eba83f18f61c6f664cfe709 | 2,485 | py | Python | bdd/contact_stepts.py | LukinVV/python_training | 9e6eb57fe9527fd591d563b4219c19e49188c4de | [
"Apache-2.0"
] | null | null | null | bdd/contact_stepts.py | LukinVV/python_training | 9e6eb57fe9527fd591d563b4219c19e49188c4de | [
"Apache-2.0"
] | null | null | null | bdd/contact_stepts.py | LukinVV/python_training | 9e6eb57fe9527fd591d563b4219c19e49188c4de | [
"Apache-2.0"
] | null | null | null | from pytest_bdd import given, when, then
from model.contact import Contact
import random
@given('a contact list')
def contact_list(orm):
return orm.get_contact_list()
@given('a contact with <firstname>, <lastname> and <address>')
def new_contact(firstname, lastname, address):
return Contact(firstname=firstname, lastname=lastname, address=address)
@when('I add the contact to the list')
def add_new_contact(app, new_contact):
app.contact.create_new(new_contact)
@then('the new contact list is equal to the old contact list with the added contact')
def verify_contact_added(orm, contact_list, new_contact):
old_contacts = contact_list
new_contacts = orm.get_contact_list()
old_contacts.append(new_contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
@given('a non-empty contact list')
def non_empty_contact_list(app, orm):
    if len(orm.get_contact_list()) == 0:
        app.contact.create_new(Contact(firstname='some firstname'))
return orm.get_contact_list()
@given('a random contact from the list')
def random_contact(non_empty_contact_list):
return random.choice(non_empty_contact_list)
@when('I delete the contact from the list')
def delete_contact(app, random_contact):
app.contact.del_contact_by_id(random_contact.id)
@then('the new contact list is equal to the old contact list without the contact')
def verify_contact_del(orm, non_empty_contact_list, random_contact):
old_contacts = non_empty_contact_list
new_contacts = orm.get_contact_list()
old_contacts.remove(random_contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
@when('I modify the contact from the list')
def modify_contact(app, new_contact, random_contact):
new_contact.id = random_contact.id
app.contact.mod_contact_by_id(new_contact)
@then('the new contact list is equal to the old contact list with the modified contact')
def verify_contact_mod(orm, non_empty_contact_list, random_contact, new_contact):
old_contacts = non_empty_contact_list
non_empty_contact_list.remove(random_contact)
random_contact.firstname = new_contact.firstname
random_contact.lastname = new_contact.lastname
random_contact.address = new_contact.address
old_contacts.append(new_contact)
new_contacts = orm.get_contact_list()
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max) | 42.118644 | 101 | 0.781087 | 0 | 0 | 0 | 0 | 2,377 | 0.956539 | 0 | 0 | 481 | 0.193561 |
5f72286dd657c066d24e11dfe7993aa6f68aabbc | 769 | py | Python | FigureMaker.py | space-physics/histfeas | caa0100087d8c2b8711c1c3cb60c322379ef5431 | [
"MIT"
] | null | null | null | FigureMaker.py | space-physics/histfeas | caa0100087d8c2b8711c1c3cb60c322379ef5431 | [
"MIT"
] | null | null | null | FigureMaker.py | space-physics/histfeas | caa0100087d8c2b8711c1c3cb60c322379ef5431 | [
"MIT"
] | 1 | 2015-05-22T23:51:58.000Z | 2015-05-22T23:51:58.000Z | #!/usr/bin/env python
"""
Figures generated by HiST program
intended for use with in/ files including:
*_flame.ini
*_impulse.ini
*_trans.ini
Flaming Aurora 2 cameras:
./FigureMaker.py in/2cam_flame.ini
Translating Aurora 2 cameras:
./FigureMaker.py in/2cam_trans.ini
Impulse Aurora (for testing):
./FigureMaker.py in/2cam_impulse.ini
Table of results for 2 and 3 cam:
./FigureMaker.py in/table_flame{2,3}.ini
Real camera data (just dump synchronized frames):
./FigureMaker.py -m realvid in/apr14T085454
-m optim reconstruct only
"""
from histfeas import userinput, hist_figure
from histfeas.loadAnalyze import readresults, findxlsh5
P = userinput()
#%% compute
if not P["load"]:
hist_figure(P)
#%% load
flist, P = findxlsh5(P)
readresults(flist, P)
| 20.783784 | 55 | 0.758127 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 571 | 0.742523 |
5f72433b75556b159f57faa7593653f49eb2cb21 | 3,557 | py | Python | T53/webapp/accounts/models.py | DevelopAppWithMe/Hackathon_5.0 | 6af503a995721c04986931d6a29d8f946ceaa067 | [
"MIT"
] | null | null | null | T53/webapp/accounts/models.py | DevelopAppWithMe/Hackathon_5.0 | 6af503a995721c04986931d6a29d8f946ceaa067 | [
"MIT"
] | null | null | null | T53/webapp/accounts/models.py | DevelopAppWithMe/Hackathon_5.0 | 6af503a995721c04986931d6a29d8f946ceaa067 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
from django.contrib.auth.models import (
AbstractBaseUser,
BaseUserManager,
PermissionsMixin,
)
from django.core.validators import RegexValidator
from django.db import models
class UserManager(BaseUserManager):
def create_user(self, phone_number, password):
if not phone_number:
raise ValueError("Phone number must be set")
if not password:
raise ValueError("Password must be set")
# pass fields as arguments which are REQUIRED_FIELDS to user = self.model()
user = self.model(phone_number=phone_number)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, phone_number, password):
# for super user admin role is fixed to 1
user = self.create_user(phone_number, password=password,)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
phone_regex = RegexValidator(
regex=r"^\+?1?\d{9,15}$",
message="Phone number must be entered in the format: '+919939799264'. Up to 15 digits allowed.",
)
phone_number = models.CharField(
validators=[phone_regex], max_length=15, unique=True, null=False, blank=False
)
is_active = models.BooleanField(default=True, null=False, blank=False)
is_staff = models.BooleanField(default=False, null=False, blank=False)
is_superuser = models.BooleanField(default=False, null=False, blank=False)
created_at = models.DateTimeField(auto_now_add=True)
last_login = models.DateTimeField(auto_now_add=True)
# set USERNAME_FIELD to phone_number
USERNAME_FIELD = "phone_number"
# username AND password by default are included in REQUIRED_FIELDS
REQUIRED_FIELDS = []
objects = UserManager()
def __str__(self):
return self.phone_number
def get_full_name(self):
return self.phone_number
class PhoneOTP(models.Model):
phone_regex = RegexValidator(
regex=r"^\+?1?\d{9,15}$",
message="Phone number must be entered in the format: '+919939799264'. Up to 15 digits allowed.",
)
phone_number = models.CharField(
validators=[phone_regex], max_length=15, null=False, blank=False
)
otp = models.CharField(max_length=9, blank=False, null=False)
timestamp = models.DateTimeField(auto_now_add=True)
validated = models.BooleanField(default=False)
def __str__(self):
return str(self.phone_number) + " otp is " + str(self.otp)
class UserLoginActivity(models.Model):
SUCCESS = "S"
FAILED = "F"
LOGIN_STATUS = ((SUCCESS, "Success"), (FAILED, "Failed"))
phone_regex = RegexValidator(
regex=r"^\+?1?\d{9,15}$",
message="Phone number must be entered in the format: '+919939799264'. Up to 15 digits allowed.",
) # phone_number max length 15 including country code
login_phone_number = models.CharField(
validators=[phone_regex], max_length=15, null=False, blank=False
)
login_ip = models.GenericIPAddressField(null=True, blank=True)
login_datetime = models.DateTimeField(auto_now=True)
status = models.CharField(
max_length=1, choices=LOGIN_STATUS, default=SUCCESS, null=True, blank=True
)
class Meta:
verbose_name = "user_login_activity"
verbose_name_plural = "user_login_activities"
def __str__(self):
return str(self.login_phone_number)
| 32.336364 | 104 | 0.687939 | 3,301 | 0.928029 | 0 | 0 | 0 | 0 | 0 | 0 | 749 | 0.210571 |
5f72dad431a7abe4ecae9aa703b14fc2183ff13a | 2,998 | py | Python | pyv6m/ha/v6m.py | dubnom/pyv6m | d56bf3f3d39b7c2f747b08bc1974dc3dbe6ccff8 | [
"MIT"
] | 1 | 2020-02-16T00:42:17.000Z | 2020-02-16T00:42:17.000Z | pyv6m/ha/v6m.py | dubnom/pyv6m | d56bf3f3d39b7c2f747b08bc1974dc3dbe6ccff8 | [
"MIT"
] | null | null | null | pyv6m/ha/v6m.py | dubnom/pyv6m | d56bf3f3d39b7c2f747b08bc1974dc3dbe6ccff8 | [
"MIT"
] | null | null | null | """Component to control v6m relays and sensors.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/v6m/
"""
import logging
import voluptuous as vol
from homeassistant.const import (
EVENT_HOMEASSISTANT_STOP, CONF_HOST, CONF_PORT, CONF_NAME)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pyv6m==0.0.1']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'v6m'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Optional(CONF_NAME, default=DOMAIN): cv.string,
}),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, base_config):
"""Start V6M controller."""
from pyv6m.pyv6m import V6M
class V6MController(V6M):
"""Interface between HASS and V6M controller."""
def __init__(self, host, port):
"""Host and port of the controller."""
V6M.__init__(self, host, port, self.relay_callback,
self.sensor_callback)
self._relay_subs = {}
self._sensor_subs = {}
def register_relay(self, device):
"""Add a device to subscribe to events."""
self._register(self._relay_subs, device)
def relay_callback(self, num, old_state, new_state):
"""Process relay states."""
self._dispatch(self._relay_subs, num, new_state)
def register_sensor(self, device):
"""Add a device to subscribe to events."""
self._register(self._sensor_subs, device)
def sensor_callback(self, num, old_state, new_state):
"""Process sensor states."""
self._dispatch(self._sensor_subs, num, new_state)
def _register(self, subs, device):
if device.num not in subs:
subs[device.num] = []
subs[device.num].append(device)
def _dispatch(self, subs, num, new_state):
if num in subs:
for sub in subs[num]:
if sub.callback(new_state):
sub.schedule_update_ha_state()
config = base_config.get(DOMAIN)
host = config[CONF_HOST]
port = config[CONF_PORT]
controller = V6MController(host, port)
hass.data[config[CONF_NAME]] = controller
def cleanup(event):
controller.close()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, cleanup)
return True
class V6MDevice():
"""Base class of a V6M device."""
def __init__(self, controller, num, name):
"""Controller, address, and name of the device."""
self._num = num
self._name = name
self._controller = controller
@property
def num(self):
"""Device number."""
return self._num
@property
def name(self):
"""Device name."""
return self._name
@property
def should_poll(self):
"""No need to poll."""
return False
| 28.552381 | 75 | 0.615744 | 1,894 | 0.631755 | 0 | 0 | 252 | 0.084056 | 0 | 0 | 584 | 0.194797 |
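For illustration, a payload that the CONFIG_SCHEMA above would accept might look like this (the host and port values are placeholders, not taken from the source):

# Hypothetical configuration data for the v6m component; values are placeholders
example_config = {
    'v6m': {
        'host': '192.168.1.50',  # controller address (placeholder)
        'port': 4999,            # controller TCP port (placeholder)
        # 'name' is optional and defaults to 'v6m'
    }
}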
5f7622df0a14efca2dcdfe048326621ae11f4cbc | 550 | py | Python | blog/models.py | Happy-Project-Foundation/HappyProject | 86e9fa7633e68c026e0003f8494df0226fa0dfcf | [
"Apache-2.0"
] | 3 | 2021-12-04T15:00:54.000Z | 2021-12-08T16:07:35.000Z | blog/models.py | BirnadinErick/HappyProject | 4993a2d966d9c1458ce0e29e72c3a758a7a4ef54 | [
"Apache-2.0"
] | 3 | 2021-12-15T00:49:01.000Z | 2021-12-16T00:46:14.000Z | blog/models.py | Happy-Project-Foundation/HappyProject | 86e9fa7633e68c026e0003f8494df0226fa0dfcf | [
"Apache-2.0"
] | 3 | 2021-12-04T14:18:15.000Z | 2021-12-05T08:40:13.000Z | import uuid
from django.db import models
from django.db.models.fields import TextField
class Blog(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
title = models.CharField(verbose_name="Title", max_length=150, default="Happy Blog", blank=False)
content = models.TextField(verbose_name="Content:", max_length=500, blank=False, default="Happy Content")
summary = models.TextField(verbose_name="Summary", max_length=300,
blank=True)
def __str__(self):
return self.title
| 32.352941 | 109 | 0.736364 | 454 | 0.825455 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 0.096364 |