Column                                      Type             Min        Max
hexsha                                      stringlengths    40         40
size                                        int64            5          2.06M
ext                                         stringclasses    11 values
lang                                        stringclasses    1 value
max_stars_repo_path                         stringlengths    3          251
max_stars_repo_name                         stringlengths    4          130
max_stars_repo_head_hexsha                  stringlengths    40         78
max_stars_repo_licenses                     sequencelengths  1          10
max_stars_count                             int64            1          191k
max_stars_repo_stars_event_min_datetime     stringlengths    24         24
max_stars_repo_stars_event_max_datetime     stringlengths    24         24
max_issues_repo_path                        stringlengths    3          251
max_issues_repo_name                        stringlengths    4          130
max_issues_repo_head_hexsha                 stringlengths    40         78
max_issues_repo_licenses                    sequencelengths  1          10
max_issues_count                            int64            1          116k
max_issues_repo_issues_event_min_datetime   stringlengths    24         24
max_issues_repo_issues_event_max_datetime   stringlengths    24         24
max_forks_repo_path                         stringlengths    3          251
max_forks_repo_name                         stringlengths    4          130
max_forks_repo_head_hexsha                  stringlengths    40         78
max_forks_repo_licenses                     sequencelengths  1          10
max_forks_count                             int64            1          105k
max_forks_repo_forks_event_min_datetime     stringlengths    24         24
max_forks_repo_forks_event_max_datetime     stringlengths    24         24
content                                     stringlengths    1          1.05M
avg_line_length                             float64          1          1.02M
max_line_length                             int64            3          1.04M
alphanum_fraction                           float64          0          1
8414f299e33cb1d7f5931b3a7e8db59199dffc99
4,165
py
Python
MstarHe2R/components/models.py
IzayoiRin/MstarHe2R
938d83acdfa5ec4464cf9113fef104a6e80ad662
[ "MIT" ]
null
null
null
MstarHe2R/components/models.py
IzayoiRin/MstarHe2R
938d83acdfa5ec4464cf9113fef104a6e80ad662
[ "MIT" ]
2
2021-06-08T21:19:41.000Z
2021-09-08T01:54:27.000Z
MstarHe2R/components/models.py
IzayoiRin/MstarHe2R
938d83acdfa5ec4464cf9113fef104a6e80ad662
[ "MIT" ]
null
null
null
import os

import numpy as np
import pandas as pd
import torch as th

from mstarhe.core.nn.models import PrettyFeedForward
from MstarHe2R.components.dataloader import Mstar2RDataLoader

__IMG_SIZE__ = 128 * 128


def _example():
    Net = MSTARNet
    Net.device = None
    from components.graphs.graph2 import TestL4MSTARANNetGraph
    G = [TestL4MSTARANNetGraph]
    for g, params in G:
        Net.model_graph_class = g
        Net.alpha = params["aph"]
        Net.step = params["stp"]
        net = Net(3, reg=None, dropout=False)
        print(net.graph.__class__.__name__)
        # print(net.get_data_loader(False))
        # print(len(net.test_samples_))
        net.train(params['n'], 'PQ', checkpoint=params['cp'])


if __name__ == '__main__':
    _example()
32.038462
100
0.614646
8418475e8b117a7899349c6df5fd5aeff3d447b2
996
py
Python
4 - observer pattern/api/event_system.py
lucascionis/betterpython
ab8db8c016ff0bccc443443740a26bccb70402f3
[ "MIT" ]
null
null
null
4 - observer pattern/api/event_system.py
lucascionis/betterpython
ab8db8c016ff0bccc443443740a26bccb70402f3
[ "MIT" ]
null
null
null
4 - observer pattern/api/event_system.py
lucascionis/betterpython
ab8db8c016ff0bccc443443740a26bccb70402f3
[ "MIT" ]
null
null
null
from abc import ABC, abstractmethod

'''Comments

In the original solution only functions were used to implement
the event system (observer pattern). In this implementation I
wanted to write classes (to be as nearest as possible to the
pattern (?)). It is surely better to use python first-citizen
functions to create the event handles (basically this is what I
done, I created handle classes to write different implementations
of update method).
'''
25.538462
68
0.696787
84188f6567eb4fd0ad0c89e940fd5e2fe14303c7
3,056
py
Python
predict_yolo3_disconnect.py
RentadroneCL/model-definition
9dab1f1a808a1efc54d64144745277396c145ff7
[ "MIT" ]
2
2020-01-22T19:54:16.000Z
2020-02-07T12:20:17.000Z
predict_yolo3_disconnect.py
RentadroneCL/model-definition
9dab1f1a808a1efc54d64144745277396c145ff7
[ "MIT" ]
4
2020-06-03T00:27:22.000Z
2020-07-15T17:15:23.000Z
predict_yolo3_disconnect.py
RentadroneCL/model-definition
9dab1f1a808a1efc54d64144745277396c145ff7
[ "MIT" ]
1
2020-01-21T22:38:22.000Z
2020-01-21T22:38:22.000Z
#! /usr/bin/env python

import time
import os
import argparse
import json
import cv2
import sys
sys.path += [os.path.abspath('keras-yolo3-master')]

from utils.utils import get_yolo_boxes, makedirs
from utils.bbox import draw_boxes
from tensorflow.keras.models import load_model
from tqdm import tqdm
import numpy as np
from panel_disconnect import disconnect

if __name__ == '__main__':
    argparser = argparse.ArgumentParser(description='Predict with a trained yolo model')

    argparser.add_argument('-c', '--conf', help='path to configuration file')

    argparser.add_argument('-i', '--input', help='path to an image, a directory of images, a video, or webcam')

    argparser.add_argument('-o', '--output', default='output/', help='path to output directory')

    args = argparser.parse_args()
    _main_(args)
32.168421
113
0.629581
8419172381c9e4256607a0db506cd791eeb0f296
11,655
py
Python
tenning/layers/resnet_block.py
guilherme9820/Tenning
c0fe7695ef3dd791ea1083f39d6b312266fb0512
[ "MIT" ]
null
null
null
tenning/layers/resnet_block.py
guilherme9820/Tenning
c0fe7695ef3dd791ea1083f39d6b312266fb0512
[ "MIT" ]
null
null
null
tenning/layers/resnet_block.py
guilherme9820/Tenning
c0fe7695ef3dd791ea1083f39d6b312266fb0512
[ "MIT" ]
null
null
null
import tensorflow.keras.constraints as constraints
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2DTranspose
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import ReLU
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Lambda
from tensorflow.keras.layers import Layer
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Add
from tensorflow_addons.layers import InstanceNormalization
from tensorflow_addons.layers import GroupNormalization
from tenning.generic_utils import get_object_config
from tenning.activations import Swish
import tensorflow as tf
51.8
163
0.620764
841b950a79e8d2aae01b030de733c8d1017b6718
3,649
py
Python
libs/token.py
yareally/twitter-clone-python
1323c3fa4bf66f479a3092c09fb165a323eb1c85
[ "MIT" ]
1
2020-05-22T22:13:48.000Z
2020-05-22T22:13:48.000Z
libs/token.py
yareally/twitter-clone-python
1323c3fa4bf66f479a3092c09fb165a323eb1c85
[ "MIT" ]
null
null
null
libs/token.py
yareally/twitter-clone-python
1323c3fa4bf66f479a3092c09fb165a323eb1c85
[ "MIT" ]
null
null
null
# coding=utf-8
from operator import xor
import os
import scrypt
import time

from libs.rediswrapper import UserHelper

try:
    xrange
except NameError:
    xrange = range
28.960317
102
0.609482
841dd327848fd2568a5c74230c7b659174fee507
2,961
py
Python
saefportal/datastores/util.py
harry-consulting/SAEF1
055d6e492ba76f90e3248b9da2985fdfe0c6b430
[ "BSD-2-Clause" ]
null
null
null
saefportal/datastores/util.py
harry-consulting/SAEF1
055d6e492ba76f90e3248b9da2985fdfe0c6b430
[ "BSD-2-Clause" ]
null
null
null
saefportal/datastores/util.py
harry-consulting/SAEF1
055d6e492ba76f90e3248b9da2985fdfe0c6b430
[ "BSD-2-Clause" ]
1
2020-12-16T15:02:52.000Z
2020-12-16T15:02:52.000Z
import json
from collections import defaultdict

import fastavro
import pandas as pd
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.urls import reverse

from datasets.models import Connection
from users.models import User


def get_supported_file_types():
    """Return a list of the viable file type extensions."""
    return ["csv", "avro", "parquet", "xlsx", "xls", "xlsm", "xlsb"]


def initialize_connection(datastore, connection_name, connection_owner_id, connection_type, request):
    """Create a connection and save the datastore on the connection object for later use."""
    owner = User.objects.get(id=connection_owner_id)
    connection = Connection.objects.create(name=connection_name, owner=owner, type=connection_type)

    connection.datastore = datastore
    connection.save()

    messages.success(request, "Connection was created.")
    return HttpResponseRedirect(reverse("datasets:index"))


def get_query(dataset, query):
    """Go through the potentially None valued given dataset and query and extract the query."""
    if query:
        return query
    elif dataset.query:
        return dataset.query
    else:
        return f"SELECT * FROM {dataset.table}"


def structure_tables_views(table, views):
    """Return a structured dictionary containing the given tables and views."""
    table_dict = defaultdict(list)
    [table_dict[schema].append({"value": f"{schema}.{table}", "display": table}) for (schema, table) in table]

    view_dict = defaultdict(list)
    [view_dict[schema].append({"value": f"{schema}.{view}", "display": view}) for (schema, view) in views]

    return {"Tables": dict(table_dict), "Views": dict(view_dict)}


def convert_to_dataframe(file_type, data):
    """Convert the given bytes data into a dataframe based on the given file type."""
    if file_type == "csv":
        df = pd.read_csv(data, sep=None)
    elif file_type == "avro":
        df = pd.DataFrame.from_records(fastavro.reader(data))
    elif file_type == "parquet":
        df = pd.read_parquet(data)
    else:
        df = pd.read_excel(data)

    return df


def get_viable_blob_datasets(blobs, name_attr):
    """
    Used to get the viable datasets for blob datastores.

    Used for Google Cloud Storage, Azure Blob Storage, Azure Data Lake and Amazon S3 datastores.
    """
    viable_blobs = []
    for blob in blobs:
        if getattr(blob, name_attr).split(".")[-1].lower() in get_supported_file_types():
            viable_blobs.append(blob)

    viable_datasets = defaultdict(list)
    for blob in viable_blobs:
        split_path = getattr(blob, name_attr).split("/")
        parent_folder = split_path[-2] if len(split_path) >= 2 else "root"

        value = json.dumps({"id": getattr(blob, name_attr), "name": split_path[-1].split(".")[0]})
        viable_datasets[parent_folder].append({"value": value, "display": split_path[-1]})

    return {"Files": dict(viable_datasets)}
34.835294
110
0.695373
841fba8a3c7dd4e8b6e7d2a9101dcfe6a12ffb43
637
py
Python
count_div.py
odellus/year_of_code
bfa2b30893bcc12f46e73ac34c63b5b05b27af5f
[ "MIT" ]
1
2017-01-03T02:24:34.000Z
2017-01-03T02:24:34.000Z
count_div.py
odellus/year_of_code
bfa2b30893bcc12f46e73ac34c63b5b05b27af5f
[ "MIT" ]
null
null
null
count_div.py
odellus/year_of_code
bfa2b30893bcc12f46e73ac34c63b5b05b27af5f
[ "MIT" ]
null
null
null
#! /usr/bin/python
21.233333
37
0.400314
842064b9ee1d937a6d9bb100474bd7dafa3c5859
3,766
py
Python
applications/plugins/SofaPython/python/SofaPython/DAGValidation.py
sofa-framework/issofa
94855f488465bc3ed41223cbde987581dfca5389
[ "OML" ]
null
null
null
applications/plugins/SofaPython/python/SofaPython/DAGValidation.py
sofa-framework/issofa
94855f488465bc3ed41223cbde987581dfca5389
[ "OML" ]
null
null
null
applications/plugins/SofaPython/python/SofaPython/DAGValidation.py
sofa-framework/issofa
94855f488465bc3ed41223cbde987581dfca5389
[ "OML" ]
null
null
null
import sys
import Sofa
import Tools
30.128
123
0.521774
842128da3d89d5f7a471cc4a5a88b8952b188592
7,216
py
Python
models/DGIFullPipeline.py
nicolas-racchi/hpc2020-graphML
7f0d8b7c18469e1c793c7097bd10a9e0322e75be
[ "Apache-2.0" ]
null
null
null
models/DGIFullPipeline.py
nicolas-racchi/hpc2020-graphML
7f0d8b7c18469e1c793c7097bd10a9e0322e75be
[ "Apache-2.0" ]
null
null
null
models/DGIFullPipeline.py
nicolas-racchi/hpc2020-graphML
7f0d8b7c18469e1c793c7097bd10a9e0322e75be
[ "Apache-2.0" ]
null
null
null
import time
import os

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

from sklearn.manifold import TSNE
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import f1_score

import stellargraph as sg
from stellargraph.mapper import CorruptedGenerator, HinSAGENodeGenerator
from stellargraph.layer import DeepGraphInfomax, HinSAGE

import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras import Model, optimizers, losses, metrics

'''
Runs the entire pipeline:
- Takes preprocessed data as input
- Outputs predictions on the test_set nodes.
'''
33.877934
106
0.651746
84226726736f353bcbde4bab4581da03be81116f
878
py
Python
Newsfeed/Newsfeed/app.py
akshayseth7/Intership_Snapshot
e262ec4939e2e5c5e2037333b7fa37f7c57d5425
[ "MIT" ]
null
null
null
Newsfeed/Newsfeed/app.py
akshayseth7/Intership_Snapshot
e262ec4939e2e5c5e2037333b7fa37f7c57d5425
[ "MIT" ]
null
null
null
Newsfeed/Newsfeed/app.py
akshayseth7/Intership_Snapshot
e262ec4939e2e5c5e2037333b7fa37f7c57d5425
[ "MIT" ]
null
null
null
from flask import Flask, render_template, request

import google_news

app = Flask(__name__)
outFile = ''

if __name__ == "__main__":
    app.run()
20.904762
50
0.67426
842586bea147f3e4d054e06882c5e5cefb545add
1,222
py
Python
physics_planning_games/mujoban/mujoban_level_test.py
mitchchristow/deepmind-research
49c7ebe6acc48dd276ca09eca6924ba6cb5ec3a3
[ "Apache-2.0" ]
10,110
2019-08-27T20:05:30.000Z
2022-03-31T16:31:56.000Z
physics_planning_games/mujoban/mujoban_level_test.py
ibex-training/deepmind-research
6f8ae40b2626b30f5f80dfc92f5676689eff5599
[ "Apache-2.0" ]
317
2019-11-09T10:19:10.000Z
2022-03-31T00:05:19.000Z
physics_planning_games/mujoban/mujoban_level_test.py
ibex-training/deepmind-research
6f8ae40b2626b30f5f80dfc92f5676689eff5599
[ "Apache-2.0" ]
2,170
2019-08-28T12:53:36.000Z
2022-03-31T13:15:11.000Z
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""Tests for mujoban_level."""

from absl.testing import absltest
from physics_planning_games.mujoban import mujoban_level

_LEVEL = """
#####
#  @####
#  $.  #
###$.# #
#  $.# #
# #$.  #
#    ###
######"""

_GRID_LEVEL = """********
*..P****
*..BG..*
***BG*.*
*..BG*.*
*.*BG..*
*....***
********
"""

if __name__ == '__main__':
  absltest.main()
22.62963
78
0.636661
84272a9b78bd142a71da410927baa64f753039be
1,069
py
Python
TeamX/TeamXapp/migrations/0040_auto_20190712_1351.py
rootfinlay/SageTeamX
cf4cde3360c8cccb8a727ba64d66345805d7a0ed
[ "Unlicense" ]
null
null
null
TeamX/TeamXapp/migrations/0040_auto_20190712_1351.py
rootfinlay/SageTeamX
cf4cde3360c8cccb8a727ba64d66345805d7a0ed
[ "Unlicense" ]
null
null
null
TeamX/TeamXapp/migrations/0040_auto_20190712_1351.py
rootfinlay/SageTeamX
cf4cde3360c8cccb8a727ba64d66345805d7a0ed
[ "Unlicense" ]
null
null
null
# Generated by Django 2.2.3 on 2019-07-12 12:51

from django.db import migrations, models
import django.db.models.deletion
35.633333
168
0.649205
842b2f72a08093681d688bb2f92eb0afb6f06354
742
py
Python
quartic_sdk/core/entities/site.py
divyquartic/QuarticSDK
e3ce6387ed5f27845e0909878c831ae39badd8eb
[ "MIT" ]
1
2021-03-26T12:39:44.000Z
2021-03-26T12:39:44.000Z
quartic_sdk/core/entities/site.py
divyquartic/QuarticSDK
e3ce6387ed5f27845e0909878c831ae39badd8eb
[ "MIT" ]
95
2021-02-18T03:15:38.000Z
2022-03-25T05:39:12.000Z
quartic_sdk/core/entities/site.py
divyquartic/QuarticSDK
e3ce6387ed5f27845e0909878c831ae39badd8eb
[ "MIT" ]
1
2021-09-03T12:46:18.000Z
2021-09-03T12:46:18.000Z
""" The given file contains the class to refer to the Site entity """ from quartic_sdk.core.entities.base import Base import quartic_sdk.utilities.constants as Constants
23.935484
91
0.630728
842c3f72d982dddd1077d864f70783e67cb8182b
525
py
Python
newapp/migrations/0003_auto_20190524_1511.py
HCDigitalScholarship/liason_lair
6035d100e3ea1216af2907a4cccd319a1cc4f8d8
[ "MIT" ]
null
null
null
newapp/migrations/0003_auto_20190524_1511.py
HCDigitalScholarship/liason_lair
6035d100e3ea1216af2907a4cccd319a1cc4f8d8
[ "MIT" ]
null
null
null
newapp/migrations/0003_auto_20190524_1511.py
HCDigitalScholarship/liason_lair
6035d100e3ea1216af2907a4cccd319a1cc4f8d8
[ "MIT" ]
1
2019-08-03T01:30:30.000Z
2019-08-03T01:30:30.000Z
# Generated by Django 2.0.5 on 2019-05-24 15:11

from django.db import migrations, models
21.875
47
0.565714
842f1947d1778a3623e9a7a62865a578b298416e
2,027
py
Python
comment/views/blocker.py
Italo-Carvalho/Comment
86424d02a901b74ccbcaa438fffc38f352535301
[ "MIT" ]
75
2018-09-08T14:29:35.000Z
2022-03-25T16:17:06.000Z
comment/views/blocker.py
p0-oya/Comment
39f6fb6c40314d97391d36fc25112d6420c96991
[ "MIT" ]
165
2018-10-07T21:55:31.000Z
2022-02-27T14:44:32.000Z
comment/views/blocker.py
p0-oya/Comment
39f6fb6c40314d97391d36fc25112d6420c96991
[ "MIT" ]
37
2019-12-01T19:44:23.000Z
2022-02-13T16:46:14.000Z
from django.views import View

from comment.models import BlockedUser, BlockedUserHistory, Comment
from comment.mixins import CanBlockUsersMixin
from comment.responses import UTF8JsonResponse, DABResponseData
from comment.messages import BlockUserError
34.948276
99
0.655649
84377da9e8bef2666e66841f43d9581ba693e418
39,550
py
Python
wicon/glyph.py
Wudan07/wIcon
9189b7029759a22371827426b5342b6dc976f1b2
[ "MIT" ]
null
null
null
wicon/glyph.py
Wudan07/wIcon
9189b7029759a22371827426b5342b6dc976f1b2
[ "MIT" ]
null
null
null
wicon/glyph.py
Wudan07/wIcon
9189b7029759a22371827426b5342b6dc976f1b2
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Brad Newbold (wudan07 [at] gmail.com)
# See LICENSE for details.
# glyph.py
#
"""wIcon library:
glyph provides GlyphObject
"""

##from handy import *
##from common import *

### represents a character in a glyphString


def glyphstr_length(gls):
    """
    Returns length of glyphstr gls
    """
    length = 0
    for gl in gls:
        length += gl.flash
    return length - 2


def glyphstr_monospace(gls, wide=6):
    """
    for each GlyphObject in gls, calls .center(wide)
    """
    for gl in gls:
        gl.center(wide)


def glyphstr_center(gls, width=100):
    """
    given a width of an area (such as column heading width) it will adjust the start point of each glyph in a glyphstr_, centering the string
    """
    length = glyphstr_length(gls)
    glen = len(gls)
    #addlen = (width-length)/(glen))
    print length
    print width - length
    hl = (width-length)/2
    for i in range(0, glen):
        gl = gls[i]
        flash = gl.flash
        gl._flash(flash+hl)


def glyphstr_justify(gls, width=100):
    """
    given a width of an area (such as column heading width) it will adjust the start point of each glyph in a glyphstr_, justifying the string
    """
    length = glyphstr_length(gls)
    glen = len(gls)
    #addlen = (width-length)/(glen))
    print length
    print width - length
    ct = 0
    for i in range(0, width-length):
        if ct >= glen-1:
            ct = 0
        gl = gls[ct]
        flash = gl.flash
        gl._flash(flash+1)
        ct += 1


def glyphstr_bounds_get(string, mono=False):
    """
    Returns 2 len integer array, size and height of string as glyphstr_
    """
    #xk = 0
    #yk = 0
    xz = 0
    #yz = 10
    vals = string.split('\n')
    yz = len(vals) * 10
    for val in vals:
        gs = glyphstr_get(val)
        if mono:
            glyphstr_monospace(gs)
        sz = glyphstr_length(gs)
        if sz > xz:
            xz = sz
    return [xz, yz]


def glyphstr_get(string):
    """
    given a string, Returns glyphs, a list of glyphs
    """
    glyphs = []
    i = 0
    while i < len(string):
        letter = string[i:i+1]
        glyphs.append(GlyphObject(letter))
        i += 1
    return glyphs
26.759134
143
0.596207
8438a4e8ec614cde523653248e7af3039519099a
463
py
Python
mqtt_sender.py
kehtolaulu/iot-ccs811
611ca30ffaec067d730ac95c59b6800fda2cf148
[ "MIT" ]
null
null
null
mqtt_sender.py
kehtolaulu/iot-ccs811
611ca30ffaec067d730ac95c59b6800fda2cf148
[ "MIT" ]
null
null
null
mqtt_sender.py
kehtolaulu/iot-ccs811
611ca30ffaec067d730ac95c59b6800fda2cf148
[ "MIT" ]
null
null
null
import json

from paho.mqtt.client import Client

from subscriber import Subscriber
from datetime import datetime
25.722222
72
0.695464
8439225f8d80c110768afbd91dc3a48cb1f55f67
1,914
py
Python
users/migrations/0004_auto_20201228_1613.py
hhdMrLion/django-crm
9f6f021e0cddc323c88280b733144366a0cb9fa6
[ "Apache-2.0" ]
1
2021-06-18T03:03:43.000Z
2021-06-18T03:03:43.000Z
users/migrations/0004_auto_20201228_1613.py
hhdMrLion/django-crm
9f6f021e0cddc323c88280b733144366a0cb9fa6
[ "Apache-2.0" ]
null
null
null
users/migrations/0004_auto_20201228_1613.py
hhdMrLion/django-crm
9f6f021e0cddc323c88280b733144366a0cb9fa6
[ "Apache-2.0" ]
null
null
null
# Generated by Django 2.2.17 on 2020-12-28 08:13

from django.db import migrations, models
46.682927
106
0.581505
843bb9c05ba6309f2f5fa04bc4ff12d51bd9395e
430
py
Python
pages/homepage.py
eugenexxx/laptop_docker
362ea238296e64fdd5c49ac55185d65b05e718cc
[ "Apache-2.0" ]
null
null
null
pages/homepage.py
eugenexxx/laptop_docker
362ea238296e64fdd5c49ac55185d65b05e718cc
[ "Apache-2.0" ]
null
null
null
pages/homepage.py
eugenexxx/laptop_docker
362ea238296e64fdd5c49ac55185d65b05e718cc
[ "Apache-2.0" ]
null
null
null
from webium import BasePage, Finds, Find
from selenium.webdriver.common.by import By
43
99
0.746512
843c2a9f5e722e97bca056334565acff3143bb58
3,112
py
Python
finetune/TensorFlow/download_model_and_dataset.py
cgouttham/microsoft-hackathon
7e50981e0f165543676504592ad26818db13432f
[ "MIT" ]
340
2019-05-15T06:42:37.000Z
2022-02-23T13:29:34.000Z
finetune/TensorFlow/download_model_and_dataset.py
cgouttham/microsoft-hackathon
7e50981e0f165543676504592ad26818db13432f
[ "MIT" ]
43
2019-05-14T21:26:06.000Z
2022-02-13T02:42:57.000Z
finetune/TensorFlow/download_model_and_dataset.py
cgouttham/microsoft-hackathon
7e50981e0f165543676504592ad26818db13432f
[ "MIT" ]
113
2019-05-23T08:21:48.000Z
2022-03-03T19:18:17.000Z
from __future__ import print_function

import argparse
import sys
import os
import shutil
import zipfile
import urllib

parser = argparse.ArgumentParser()

## Required parameters
parser.add_argument("--bert_model_name",
                    default = None,
                    type = str,
                    required = True,
                    help = "Name of pretrained BERT model. Possible values: "
                           "uncased_L-12_H-768_A-12,uncased_L-24_H-1024_A-16,cased_L-12_H-768_A-12,"
                           "multilingual_L-12_H-768_A-12,chinese_L-12_H-768_A-12")
parser.add_argument("--model_dump_path",
                    default = None,
                    type = str,
                    required = True,
                    help = "Path to the output model.")
parser.add_argument("--glue_data_path",
                    default = None,
                    type = str,
                    required = True,
                    help = "Path to store downloaded GLUE dataset")

args = parser.parse_args()

bert_model_url_map = {
    'uncased_L-12_H-768_A-12': 'https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip',
    'uncased_L-24_H-1024_A-16': 'https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-24_H-1024_A-16.zip',
    'cased_L-12_H-768_A-12': 'https://storage.googleapis.com/bert_models/2018_10_18/cased_L-12_H-768_A-12.zip',
    'multilingual_L-12_H-768_A-12': 'https://storage.googleapis.com/bert_models/2018_11_03/multilingual_L-12_H-768_A-12.zip',
    'chinese_L-12_H-768_A-12': 'https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip'
}

if args.bert_model_name not in bert_model_url_map:
    sys.stderr.write('Unknown BERT model name ' + args.bert_model_name)
    sys.exit(1)

pretrained_model_url = bert_model_url_map.get(args.bert_model_name)

# make local directory for pretrained tensorflow BERT model
tensorflow_model_dir = './tensorflow_model'
if not os.path.exists(tensorflow_model_dir):
    os.makedirs(tensorflow_model_dir)

# download and extract pretrained tensorflow BERT model
download_file_name = 'tensorflow_model.zip'
urllib.request.urlretrieve(pretrained_model_url, filename=download_file_name)
print('Extracting pretrained model...')
with zipfile.ZipFile(download_file_name, 'r') as z:
    z.extractall(tensorflow_model_dir)

# make destination path
if not os.path.exists(args.model_dump_path):
    os.makedirs(args.model_dump_path)

files = ['bert_model.ckpt.meta', 'bert_model.ckpt.index', 'bert_model.ckpt.data-00000-of-00001',
         'bert_config.json', 'vocab.txt']
for file in files:
    shutil.copy(os.path.join(tensorflow_model_dir, args.bert_model_name, file),
                os.path.join(args.model_dump_path, file))

print('Start to download GLUE dataset...\n')
urllib.request.urlretrieve(
    'https://gist.githubusercontent.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e/raw/17b8dd0d724281ed7c3b2aeeda662b92809aadd5/download_glue_data.py',
    filename='download_glue_data.py')
if os.system('python download_glue_data.py --data_dir {0} --tasks all'.format(args.glue_data_path)) != 0:
    sys.exit(1)
43.222222
151
0.70662
843ca99856298b4d971576c36ef2ff0db2f48136
1,386
py
Python
pdata_app/migrations/0035_auto_20180221_1515.py
jonseddon/primavera-dmt
1239044e37f070b925a3d06db68351f285df780c
[ "BSD-3-Clause" ]
null
null
null
pdata_app/migrations/0035_auto_20180221_1515.py
jonseddon/primavera-dmt
1239044e37f070b925a3d06db68351f285df780c
[ "BSD-3-Clause" ]
49
2018-11-14T17:00:03.000Z
2021-12-20T11:04:22.000Z
pdata_app/migrations/0035_auto_20180221_1515.py
jonseddon/primavera-dmt
1239044e37f070b925a3d06db68351f285df780c
[ "BSD-3-Clause" ]
2
2018-07-04T10:58:43.000Z
2018-09-29T14:55:08.000Z
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-02-21 15:15
from __future__ import unicode_literals

from django.db import migrations, models
33.804878
101
0.622655
843d9417ba37601232cb640d55f1d03f38cd7f76
3,226
py
Python
python/examples/imagenet/image_reader.py
gongweibao/Serving
d234a1421e8b964c5fa3e9901f57f24aa49e3a91
[ "Apache-2.0" ]
null
null
null
python/examples/imagenet/image_reader.py
gongweibao/Serving
d234a1421e8b964c5fa3e9901f57f24aa49e3a91
[ "Apache-2.0" ]
null
null
null
python/examples/imagenet/image_reader.py
gongweibao/Serving
d234a1421e8b964c5fa3e9901f57f24aa49e3a91
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cv2
import numpy as np
32.26
74
0.591135
843f97dd8ec994e4357ed02f96f7842db3d9a402
5,867
py
Python
cloudflare-deploy.py
antonini/certbot-hooks
61e200b7a038952f2f559953f47be62e1f992e39
[ "Apache-2.0" ]
null
null
null
cloudflare-deploy.py
antonini/certbot-hooks
61e200b7a038952f2f559953f47be62e1f992e39
[ "Apache-2.0" ]
null
null
null
cloudflare-deploy.py
antonini/certbot-hooks
61e200b7a038952f2f559953f47be62e1f992e39
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python

import logging
import sys
import CloudFlare
import os
import re
from os import path
from certbot.plugins import dns_common

__author__ = "Endrigo Antonini"
__copyright__ = "Copyright 2020, Endrigo Antonini"
__license__ = "Apache License 2.0"
__version__ = "1.0"
__maintainer__ = "Endrigo Antonini"
__email__ = "eantonini@eidoscode.com"
__status__ = "Production"


logger = logging.getLogger(__name__)

DEFAULT_CERT_FOLDER = "/etc/letsencrypt/live"
CERTBOT_CONF_DIR = "/etc/letsencrypt/renewal"
PROPERTIES = {}


def read_file(filename):
    """
    Read a file from disk and return all the content

    :param str filename: File name of the file that is going to read.
    :raises Exception: if the file doesn't exists
    """
    if not path.isfile(filename):
        raise Exception("File {} doesn't exists!".format(filename))
    with open(filename) as f:
        return f.read()


if __name__ == '__main__':
    main()
32.236264
110
0.670701
84416b0aa44ff310962bcf2724c753d72fba9519
476
py
Python
main/schemas/location_lat.py
ohioh/ohioh_Framework_Cluster_3_Flask
69e50b9d697b5e8818305328335d26314b625732
[ "Apache-2.0" ]
1
2020-08-11T18:37:36.000Z
2020-08-11T18:37:36.000Z
main/schemas/location_lat.py
ohioh/ohioh_Framework_Cluster_3_Flask
69e50b9d697b5e8818305328335d26314b625732
[ "Apache-2.0" ]
null
null
null
main/schemas/location_lat.py
ohioh/ohioh_Framework_Cluster_3_Flask
69e50b9d697b5e8818305328335d26314b625732
[ "Apache-2.0" ]
null
null
null
from datetime import datetime

from marshmallow import Schema, EXCLUDE
import marshmallow.fields as ms_fields
23.8
63
0.737395
84418df14873be48f72ce565d6b9bb740aefa623
411
py
Python
Python/34-match.py
strawman2511/Learning
21ee7bdad376060503fdc0a739fed2d7bd40f9b9
[ "MIT" ]
1
2022-03-16T23:25:54.000Z
2022-03-16T23:25:54.000Z
Python/34-match.py
strawman2511/Learning
21ee7bdad376060503fdc0a739fed2d7bd40f9b9
[ "MIT" ]
null
null
null
Python/34-match.py
strawman2511/Learning
21ee7bdad376060503fdc0a739fed2d7bd40f9b9
[ "MIT" ]
null
null
null
# Till now only Python 3.10 can run match statement

x = 1
y = 2
point = (x, y)

check_point(point)
20.55
51
0.452555
8441be7fed412cc2b0c06a54eaceebee4908fef7
272
py
Python
incremental/settings.py
Nana0606/IUAD
c52439eb5bbbef6bd50533b5d9e142e18091d85e
[ "BSD-2-Clause" ]
1
2021-07-05T02:20:32.000Z
2021-07-05T02:20:32.000Z
incremental/settings.py
Nana0606/IUAD
c52439eb5bbbef6bd50533b5d9e142e18091d85e
[ "BSD-2-Clause" ]
null
null
null
incremental/settings.py
Nana0606/IUAD
c52439eb5bbbef6bd50533b5d9e142e18091d85e
[ "BSD-2-Clause" ]
1
2021-08-22T08:45:18.000Z
2021-08-22T08:45:18.000Z
# python3
# -*- coding: utf-8 -*-
# @Author  : lina
# @Time    : 2018/4/22 21:17
"""
code function: define all parameters.
"""

matched_file_name = "../data/gcn_res.txt"
wordvec_path = '../data/word2vec.model'
incremental_path = "../data/incremental_res.txt"
20.923077
49
0.628676
84425e6e37d98a459d555c6b47a64806ebbb0769
246
py
Python
app/newsletter/views.py
valeriansaliou/waaave-web
8a0cde773563865a905af38f5a0b723a43b17341
[ "RSA-MD" ]
1
2020-04-06T10:04:43.000Z
2020-04-06T10:04:43.000Z
app/newsletter/views.py
valeriansaliou/waaave-web
8a0cde773563865a905af38f5a0b723a43b17341
[ "RSA-MD" ]
null
null
null
app/newsletter/views.py
valeriansaliou/waaave-web
8a0cde773563865a905af38f5a0b723a43b17341
[ "RSA-MD" ]
null
null
null
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse


def root(request):
    """
    Newsletter > Root
    """
    return render(request, 'newsletter/newsletter_root.jade')
24.6
61
0.756098
84464ba3de7de8074ab4f3a72392eb3da290f401
16,826
py
Python
transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/mobilenet_block.py
wwhio/awesome-DeepLearning
2cc92edcf0c22bdfc670c537cc819c8fadf33fac
[ "Apache-2.0" ]
1,150
2021-06-01T03:44:21.000Z
2022-03-31T13:43:42.000Z
transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/mobilenet_block.py
wwhio/awesome-DeepLearning
2cc92edcf0c22bdfc670c537cc819c8fadf33fac
[ "Apache-2.0" ]
358
2021-06-01T03:58:47.000Z
2022-03-28T02:55:00.000Z
transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/mobilenet_block.py
wwhio/awesome-DeepLearning
2cc92edcf0c22bdfc670c537cc819c8fadf33fac
[ "Apache-2.0" ]
502
2021-05-31T12:52:14.000Z
2022-03-31T02:51:41.000Z
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr

from .search_space_base import SearchSpaceBase
from .base_layer import conv_bn_layer
from .search_space_registry import SEARCHSPACE
from .utils import compute_downsample_num, check_points, get_random_tokens

__all__ = ["MobileNetV1BlockSpace", "MobileNetV2BlockSpace"]
39.130233
178
0.530013
8446c1c3c431374432b1f4f4b191c7dc6650169d
1,580
py
Python
src/cltk/phonology/gmh/phonology.py
yelircaasi/cltk
1583aa24682543a1f33434a21918f039ca27d60c
[ "MIT" ]
757
2015-11-20T00:58:52.000Z
2022-03-31T06:34:24.000Z
src/cltk/phonology/gmh/phonology.py
yelircaasi/cltk
1583aa24682543a1f33434a21918f039ca27d60c
[ "MIT" ]
950
2015-11-17T05:38:29.000Z
2022-03-14T16:09:34.000Z
src/cltk/phonology/gmh/phonology.py
yelircaasi/cltk
1583aa24682543a1f33434a21918f039ca27d60c
[ "MIT" ]
482
2015-11-22T18:13:02.000Z
2022-03-20T21:22:02.000Z
"""Middle High German phonology tools """ from typing import List from cltk.phonology.gmh.transcription import Transcriber from cltk.phonology.syllabify import Syllabifier __author__ = ["Clment Besnier <clem@clementbesnier.fr>"]
26.333333
103
0.666456
844826018788435b356bf6f9c896357ffb15fd09
11,680
py
Python
baiduspider/core/parser.py
samzhangjy/GSSpider
344d9c9053a5d5bf08692e0c817d30763dbd8ab7
[ "MIT" ]
31
2020-07-17T08:26:37.000Z
2021-08-24T02:28:50.000Z
baiduspider/core/parser.py
samzhangjy/GSSpider
344d9c9053a5d5bf08692e0c817d30763dbd8ab7
[ "MIT" ]
6
2020-07-14T17:13:17.000Z
2020-09-12T06:02:01.000Z
baiduspider/core/parser.py
samzhangjy/GSSpider
344d9c9053a5d5bf08692e0c817d30763dbd8ab7
[ "MIT" ]
12
2020-07-27T08:38:26.000Z
2021-07-28T16:05:58.000Z
import json
from html import unescape

from bs4 import BeautifulSoup

from baiduspider.core._spider import BaseSpider
from baiduspider.errors import ParseError
36.72956
162
0.447603
8449b868c5c55bebc3c70da12ca1d458ad2a711a
2,142
py
Python
virtual/lib/python3.6/site-packages/requests_unixsocket/adapters.py
marknesh/pitches
0a480d9bc2beafaefa0121393b1502cc05edab89
[ "MIT" ]
null
null
null
virtual/lib/python3.6/site-packages/requests_unixsocket/adapters.py
marknesh/pitches
0a480d9bc2beafaefa0121393b1502cc05edab89
[ "MIT" ]
10
2020-03-08T21:13:29.000Z
2021-04-08T19:41:14.000Z
flask/lib/python3.6/site-packages/requests_unixsocket/adapters.py
JOFLIX/grapevines
34576e01184570d79cc140b42ffb71d322132da6
[ "MIT", "Unlicense" ]
1
2020-11-04T06:48:34.000Z
2020-11-04T06:48:34.000Z
import socket

from requests.adapters import HTTPAdapter
from requests.compat import urlparse, unquote

try:
    from requests.packages.urllib3.connection import HTTPConnection
    from requests.packages.urllib3.connectionpool import HTTPConnectionPool
except ImportError:
    from urllib3.connection import HTTPConnection
    from urllib3.connectionpool import HTTPConnectionPool


# The following was adapted from some code from docker-py
# https://github.com/docker/docker-py/blob/master/docker/unixconn/unixconn.py
35.114754
77
0.710551
844a39e610cb54a65514ca7f805b41f45b503518
3,021
py
Python
jarvis/resume/tests/test_utils.py
Anubhav722/blahblah
160698e06a02e671ac40de3113cd37d642e72e96
[ "MIT" ]
1
2019-01-03T06:10:04.000Z
2019-01-03T06:10:04.000Z
jarvis/resume/tests/test_utils.py
Anubhav722/blahblah
160698e06a02e671ac40de3113cd37d642e72e96
[ "MIT" ]
1
2021-03-31T19:11:52.000Z
2021-03-31T19:11:52.000Z
jarvis/resume/tests/test_utils.py
Anubhav722/blahblah
160698e06a02e671ac40de3113cd37d642e72e96
[ "MIT" ]
null
null
null
from django.test import TestCase
from jarvis.resume.utils.extractor import get_text
from jarvis.resume.utils.parser_helper import get_urls, get_url_response, url_categories, get_github_username, get_stackoverflow_userid, get_stackoverflow_username, get_name, get_id_from_linkedin_url, get_email
from unidecode import unidecode

path_to_test_data = 'resume/tests/test_data/1.pdf'

urls = ['https://github.com/imnithin',
        'http://imnithin.github.io',
        'https://gist.github.com/imnithin',
        'http://stackoverflow.com/users/2231236/nithin',
        'https://www.linkedin.com/in/imnithink']

categories = {'blog': ['http://imnithin.github.io'],
              'coding': [],
              'contributions': ['https://github.com/imnithin', 'https://gist.github.com/imnithin'],
              'forums': ['http://stackoverflow.com/users/2231236/nithin'],
              'others': [],
              'social': ['https://www.linkedin.com/in/imnithink']}

url_response = [{'name': 'https://github.com/imnithin', 'type': 'contributions'},
                {'name': 'https://gist.github.com/imnithin', 'type': 'contributions'},
                {'name': 'https://www.linkedin.com/in/imnithink', 'type': 'social'},
                {'name': 'http://imnithin.github.io', 'type': 'blog'},
                {'name': 'http://stackoverflow.com/users/2231236/nithin', 'type': 'forums'}]
43.782609
210
0.676266
844aff8b757e567eab04101d17c08cb3e245797f
8,032
py
Python
profiles_weak.py
andreuvall/HybridPlaylistContinuation
6e31e50050c61a2c3ae55183e18b665fd54c7250
[ "BSD-2-Clause" ]
8
2017-06-04T11:42:49.000Z
2021-10-19T12:16:01.000Z
profiles_weak.py
andreuvall/HybridPlaylistContinuation
6e31e50050c61a2c3ae55183e18b665fd54c7250
[ "BSD-2-Clause" ]
null
null
null
profiles_weak.py
andreuvall/HybridPlaylistContinuation
6e31e50050c61a2c3ae55183e18b665fd54c7250
[ "BSD-2-Clause" ]
5
2017-08-27T17:02:14.000Z
2020-06-09T01:21:09.000Z
from __future__ import print_function
from __future__ import division

from sklearn.utils import check_random_state
from sklearn import preprocessing as prep

from utils.data import load_data, show_data_splits, shape_data
from utils.evaluation import evaluate
from utils.profiles import select_model, show_design, train, fit, compute_scores

import theano
import lasagne as lg
import numpy as np
import argparse
import os

'''
Hybrid music playlist continuation based on a song-to-playlist classifier.

We learn a classifier that takes song features as inputs and predicts the
playlists songs belong to. Once it is learned, such classifier can be used
to populate a matrix of song-playlist scores describing how well a song
and a playlist fit together. Thus, a playlist can be extended by selecting
the songs with highest score.

This approach is "hybrid" in the usual sense in the recommender systems
literature, i.e., it combines content (given by the song features) and
cf information (given by playlists examples).

As it is, this approach only works on the so-called weak generalization
setting. That is, the model is trained on the same playlists that will
be extended.
'''

if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='Hybrid music playlist continuation based on a song-to-playlist classifier.')
    parser.add_argument('--model', type=str, help='path to the model specification file', metavar='')
    parser.add_argument('--dataset', type=str, help='path to the playlists dataset directory', metavar='')
    parser.add_argument('--msd', type=str, help='path to the MSD directory', metavar='')
    parser.add_argument('--train', action='store_true', help='train the song-to-playist classifier with monitoring')
    parser.add_argument('--fit', action='store_true', help='fit the song-to-playlist classifier')
    parser.add_argument('--test', action='store_true', help='evaluate the playlist continuations')
    parser.add_argument('--ci', action='store_true', help='compute confidence intervals if True')
    parser.add_argument('--song_occ', type=int, help='test on songs observed song_occ times during training', nargs='+', metavar='')
    parser.add_argument('--metrics_file', type=str, help='file name to save metrics', metavar='')
    parser.add_argument('--seed', type=int, help='set random behavior', metavar='')
    args = parser.parse_args()

    # set random behavior
    rng = check_random_state(args.seed)
    lg.random.set_rng(rng)

    # set model configuration
    model = select_model(args.model)

    # prepare output directory
    data_name = os.path.basename(os.path.normpath(args.dataset))
    out_dir = os.path.join('params', 'profiles', model.name + '_' + data_name + '_weak')
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    # load data: playlists, splits, features and artist info
    data = load_data(args.dataset, args.msd, model)
    playlists_coo, split_weak, _, features, song2artist = data

    # playlists_coo are the playlists stored in coordinate format
    playlists_idx, songs_idx, _, idx2song = playlists_coo

    # each playlist is split into a "query" of ~80% of the songs (train_idx +
    # valid_idx) and a "continuation" of ~20% of the songs (test_idx)
    train_idx, valid_idx, test_idx = split_weak

    # define splits for this experiment
    # train model on the training queries
    # validate model on the validation queries
    # fit the model on the full queries
    # extend all the playlists, using all queries and continuations
    train_idx = train_idx
    valid_idx = valid_idx
    fit_idx = np.hstack((train_idx, valid_idx))
    query_idx = fit_idx
    cont_idx = test_idx

    # provide data information
    show_data_splits(playlists_idx, songs_idx, idx2song, song2artist,
                     train_idx, valid_idx, fit_idx, query_idx, cont_idx)

    # provide model information
    print('\nNetwork:')
    show_design(model)

    if args.train:
        #
        # train the hybrid model while validating on withheld playlists
        #

        # prepare input song features and playlist targets at training
        X_train, Y_train = shape_data(
            playlists_idx, songs_idx, idx2song, features,
            mode='train', subset=train_idx
        )

        # prepare input song features and playlist targets at validation
        X_valid, Y_valid = shape_data(
            playlists_idx, songs_idx, idx2song, features,
            mode='test', subset=valid_idx
        )

        # preprocess input features if required
        # use the training song features to standardize the validation data
        if model.standardize:
            scaler = prep.RobustScaler()
            X_train = scaler.fit_transform(X_train)
            X_valid = scaler.transform(X_valid)

        if model.normalize:
            X_train = prep.normalize(X_train, norm=model.normalize)
            X_valid = prep.normalize(X_valid, norm=model.normalize)

        # train the classifier
        train(
            model=model,
            train_input=X_train.astype(theano.config.floatX),
            train_target=Y_train.astype(np.int8),
            valid_input=X_valid.astype(theano.config.floatX),
            valid_target=Y_valid.astype(np.int8),
            out_dir=out_dir,
            random_state=rng
        )

    if args.fit:
        #
        # fit the hybrid model
        #

        # prepare input song features and playlist targets at training
        X_fit, Y_fit = shape_data(
            playlists_idx, songs_idx, idx2song, features,
            mode='train', subset=fit_idx
        )

        # preprocess input features if required
        if model.standardize:
            X_fit = prep.robust_scale(X_fit)

        if model.normalize:
            X_fit = prep.normalize(X_fit, norm=model.normalize)

        # fit the classifier
        fit(
            model=model,
            fit_input=X_fit.astype(theano.config.floatX),
            fit_target=Y_fit.astype(np.int8),
            out_dir=out_dir,
            random_state=rng
        )

    if args.test:
        #
        # extend the playlists in the query split and evaluate the
        # continuations by comparing them to actual withheld continuations
        #

        # prepare input song features and playlist targets at test
        X_cont, Y_cont = shape_data(
            playlists_idx, songs_idx, idx2song, features,
            mode='test', subset=cont_idx
        )

        # preprocess input features if required
        # use the training song features to standardize the test data
        if model.standardize:
            X_fit, _ = shape_data(
                playlists_idx, songs_idx, idx2song, features,
                mode='train', subset=fit_idx
            )
            scaler = prep.RobustScaler()
            scaler.fit(X_fit)
            X_cont = scaler.transform(X_cont)

        if model.normalize:
            X_cont = prep.normalize(X_cont, norm=model.normalize)

        # songs in the "query" playlists need to be masked to make sure that
        # they are not recommended as continuations
        _, Y_query = shape_data(
            playlists_idx, songs_idx, idx2song, features,
            mode='test', subset=query_idx
        )

        # get number of song occurrences when fitting for cold-start analysis
        # Y_fit = Y_query
        train_occ = np.asarray(Y_query.sum(axis=1)).flatten()

        # compute the song-playlist scores
        cont_output = compute_scores(
            model=model,
            params_dir=out_dir,
            cont_input=X_cont.astype(theano.config.floatX),
            cont_target=Y_cont.astype(np.int8)
        )

        # evaluate the continuations
        evaluate(
            scores=[cont_output.T],
            targets=[Y_cont.T.tocsr()],
            queries=[Y_query.T.tocsr()],
            train_occ=[train_occ],
            k_list=[10, 30, 100],
            ci=args.ci,
            song_occ=args.song_occ,
            metrics_file=args.metrics_file
        )
37.886792
132
0.662475
844bd667c2563dc8f5e9e83fc9eaf8e0c1857eb6
560
py
Python
news/admin.py
trojsten/news
aa1dfb4ee31a7f810dcd484eecafd49659292d76
[ "BSD-3-Clause" ]
null
null
null
news/admin.py
trojsten/news
aa1dfb4ee31a7f810dcd484eecafd49659292d76
[ "BSD-3-Clause" ]
6
2016-07-10T00:22:02.000Z
2021-12-23T22:43:41.000Z
news/admin.py
trojsten/news
aa1dfb4ee31a7f810dcd484eecafd49659292d76
[ "BSD-3-Clause" ]
2
2019-04-30T20:20:38.000Z
2021-02-16T18:41:01.000Z
from django.contrib import admin
from django.db import models
from easy_select2.widgets import Select2Multiple

from news.models import Entry


admin.site.register(Entry, EntryAdmin)
25.454545
61
0.682143
844c48d7274f542cdb76ae374555eb9e43a3cc30
21,999
py
Python
deliverable1/analyzer/clientGUI.py
tonellotto/pira-project
13f1f40fd3339d60067c09396822af8f3c83239c
[ "MIT" ]
null
null
null
deliverable1/analyzer/clientGUI.py
tonellotto/pira-project
13f1f40fd3339d60067c09396822af8f3c83239c
[ "MIT" ]
null
null
null
deliverable1/analyzer/clientGUI.py
tonellotto/pira-project
13f1f40fd3339d60067c09396822af8f3c83239c
[ "MIT" ]
null
null
null
import analyzer_client as analyzer

from tkinter import *
from tkinter import filedialog
from tkinter import messagebox
from tkinter import ttk
import json
import os
from pathlib import Path

IP_ADDRESS = "localhost"
PORT = "8061"

ENGINE_CURR_OPTIONS = {}

ANALYZE_CURR_OPTIONS = {'language':'en',
                        'entities': None,
                        'correlation_id': None,
                        'score_threshold': "0.1",
                        'return_decision_process': "0"
                        }

DENY_LIST = {'supported_entities': [],
             'valuesList': [],
             'length': 0
             }

REGEX_LIST = {'entities': [],
              'names_pattern': [],
              'patterns': [],
              'scores': [],
              'context_words': [],
              'length': 0
              }

root = Tk()
app = Frames(root)
root.mainloop()
50.456422
208
0.589027
844d85dc62ed6dfb3a4f73a387bf2a08be758a8e
53
py
Python
05-functions_and_modules/mods_1.py
palmieric/Tecnologie_Web-Introduzione_a_Python
b10ce49a947b239ca2af1938248f7191937b2f89
[ "CC0-1.0" ]
3
2021-05-17T14:48:42.000Z
2021-05-24T10:12:06.000Z
05-functions_and_modules/mods_1.py
palmieric/Tecnologie_Web-Introduzione_a_Python
b10ce49a947b239ca2af1938248f7191937b2f89
[ "CC0-1.0" ]
null
null
null
05-functions_and_modules/mods_1.py
palmieric/Tecnologie_Web-Introduzione_a_Python
b10ce49a947b239ca2af1938248f7191937b2f89
[ "CC0-1.0" ]
2
2021-05-17T13:52:15.000Z
2021-05-24T10:44:54.000Z
# mods 1

import random

print(random.randint(1,10))
10.6
27
0.716981
844ee290c97366006e042d8ac5ba0899c883ac56
1,903
py
Python
kge/core/component.py
Fredkiss3/kge
389d5ab21ecb6dc1a25dd9f98245ba5938a5d253
[ "CC0-1.0" ]
4
2020-03-17T02:15:10.000Z
2021-06-29T13:34:40.000Z
kge/core/component.py
Fredkiss3/kge
389d5ab21ecb6dc1a25dd9f98245ba5938a5d253
[ "CC0-1.0" ]
4
2020-05-23T05:47:30.000Z
2022-01-13T02:15:35.000Z
kge/core/component.py
Fredkiss3/kge
389d5ab21ecb6dc1a25dd9f98245ba5938a5d253
[ "CC0-1.0" ]
null
null
null
from typing import Callable

import kge
from kge.core import events
from kge.core.eventlib import EventMixin
from kge.core.events import Event

Component = BaseComponent
33.982143
104
0.603783
84508cc0743106693c25a4c91852516182d10958
11,162
py
Python
generate_population_dataset.py
p-enel/stable-and-dynamic-value
3f78e24f5bef9b12b8cc43d075d2e66b8a603325
[ "CC0-1.0" ]
1
2020-07-29T09:18:00.000Z
2020-07-29T09:18:00.000Z
generate_population_dataset.py
p-enel/stable-and-dynamic-value
3f78e24f5bef9b12b8cc43d075d2e66b8a603325
[ "CC0-1.0" ]
null
null
null
generate_population_dataset.py
p-enel/stable-and-dynamic-value
3f78e24f5bef9b12b8cc43d075d2e66b8a603325
[ "CC0-1.0" ]
3
2020-07-27T03:12:19.000Z
2021-11-02T20:03:00.000Z
from pathlib import Path
import numpy as np
import pickle as pk
from itertools import chain, product
from collections import OrderedDict

from structure import Struct

MONKEYS = ['M', 'N']
REGIONS = ['OFC', 'ACC']
TASKVARS = ['value', 'type']
SUBSPACES = [True, False]
EVT_WINS = OrderedDict((('cues ON', (-500, 1500)),
                        ('response cue', (-500, 500)),
                        ('rwd', (-400, 400))))


def pp_from_filename(filename):
    '''Get the preprocessing parameters from a unit data set filename

    Arguments:
        filename - str or Path: name or full path of unit data set file
    '''
    fnamestr = filename if isinstance(filename, str) else filename.name
    params = [paramstr.split('.') for paramstr in fnamestr.split('_')[2:]]
    preproc_params = {'align': params[0][1],
                      'binsize': int(params[1][1]),
                      'smooth': params[2][1],
                      'smoothsize': int(params[3][1]),
                      'step': int(params[4][1])}
    return preproc_params


def get_dataset_fname(dataseed, pp):
    '''Generate the file name of a population data set given data seed and
    preprocessing parameters

    Arguments:
        dataseed - int: the seed of the data set that will be included in
            the file name
        pp - dict: the pre-processing parameters of the data set'''
    fname = "population_dataset_align.{align}_binsize.{binsize}_smooth.{smooth}"
    fname += "_smoothsize.{smoothsize}_step.{step}_seed.%d.pk" % dataseed
    fname = fname.format(**pp)
    return fname


def generate_dataset(dataseed, unit_folder, unit_file, save_folder=None):
    '''Generate a pseudo-population by combining data from monkeys and sessions

    Arguments:
        dataseed - int: the seed for pseudo-random selection of the trials to
            be part of the data set
        unit_file - str: the path to the file containing the unit data set
        save_folder - str or Path: optional, a folder to save the generated
            data set. After being saved once, if the same folder is specified,
            it will be loaded instead of being generated.

    Returns:
        X - Structure: A structure that contains the pseudo-population firing
            rate data. The structure contains 3 levels:
            - monkey: which can take values 'M' or 'N' for individual monkey
              data, or 'both' for the data of both monkeys combined
            - region: which can take value 'OFC' or 'ACC'
            - task variable: which can take value 'value' or 'type' for data
              sets targeted to decoding these variables
            The elements of the structure are numpy arrays of the shape:
            trials x bins x neurons
            Example: X['N', 'ACC', 'value'] contains a matrix of the
            pseudo-population firing rate of monkey N for region ACC meant
            to decode value
        y - Structure: A structure of numpy vectors with the same map as 'X'
            that contains the ground truth of the related variable for each
            trial.
            Example: y['N', 'ACC', 'value'] contains the value of each trials
            of monkey N for ACC population.
        delaymask - numpy vector of booleans: A boolean mask for the time bin
            dimension to select time bins that are part of the delay activity
        bins - numpy vector of ints: The time of each bin of the firing rate
            data in the structure X, with events ordered this way:
            'cues ON' -> 'response cue' -> 'rwd'
    '''
    events = list(EVT_WINS.keys())
    pp = pp_from_filename(unit_file)

    if save_folder is not None:
        dataset_fname = get_dataset_fname(dataseed, pp)
        dataset_fullpath = Path(save_folder)/dataset_fname
        if dataset_fullpath.exists():
            print("Data set already generated, loading...")
            with open(dataset_fullpath, 'rb') as f:
                X, y, delaymask, bins = pk.load(f)
            return X, y, delaymask, bins

    with open(Path(unit_folder)/unit_file, 'rb') as f:
        data = pk.load(f)

    evtxs = data['M']['OFC'][0]['bins']

    #### Format the data for decoding #################################
    keymap = [MONKEYS, REGIONS, TASKVARS]
    act = Struct.new_empty(keymap)
    minntrials = Struct.new_empty(keymap)

    for monkey, region in product(MONKEYS, REGIONS):
        act[monkey, region, 'value'] = [[] for _ in range(4)]
        act[monkey, region, 'type'] = [[], []]
        minntrials[monkey, region, 'value'] = [[] for _ in range(4)]
        minntrials[monkey, region, 'type'] = [[], []]

        datamr = data[monkey][region]

        ## Select bins that are within the window of interest for each event
        ## then concatenate the activity of the different events in a single tensor
        catepochs = []
        for sessdata in datamr:
            if sessdata['fr'] is not None:
                cattmp = []
                for evt in events:
                    included_bins = (evtxs[evt] >= EVT_WINS[evt][0]) & (evtxs[evt] <= EVT_WINS[evt][1])
                    cattmp.append(sessdata['fr'][evt][included_bins])
                catepochs.append(np.concatenate(cattmp))
            else:
                catepochs.append(None)

        ## Separate trials by value and type
        for sessfr, sessdata in zip(catepochs, datamr):
            if sessfr is not None:
                if sessdata['fr'] is not None:
                    sessvars = sessdata['vars']
                    for val in range(1, 5):
                        trialbool = (sessvars.value == val)
                        act[monkey, region, 'value'][val-1].append(sessfr[:, :, trialbool])
                    for itype, type_ in enumerate(['juice', 'bar']):
                        trialbool = (sessvars.type == type_)
                        act[monkey, region, 'type'][itype].append(sessfr[:, :, trialbool])

        ## Get the minimum number of trials across all sessions for each value/type
        minntrials[monkey, region, 'value'] = [np.nanmin([sessfr.shape[2] for sessfr in valdata])
                                               for valdata in act[monkey, region, 'value']]
        minntrials[monkey, region, 'type'] = [np.nanmin([sessfr.shape[2] for sessfr in typedata])
                                              for typedata in act[monkey, region, 'type']]

    ## Get the minimum number of trials for pooled data across monkeys
    minntrials.move_level_(0, 2)
    mintogether = minntrials.apply(lambda x: [min(valmin) for valmin in zip(*x.values())], depth=2)
    mintogether = Struct.from_nested_dict({'both': mintogether.ndict}, n_layers=3)
    minntrials.move_level_(2, 0)
    minntrials = minntrials.combine(mintogether)

    # extra trials are discarded after trials are shuffled
    np.random.seed(dataseed)
    catactboth = Struct.empty_like(act, values=list)
    # taskvar, monkey, region = next(product(TASKVARS, MONKEYS, REGIONS))
    for taskvar, monkey, region in product(TASKVARS, MONKEYS, REGIONS):
        keymap = [monkey, region, taskvar]
        minns = minntrials['both', region, taskvar]
        # minn, acttmp = next(zip(minns, act[keymap]))
        for minn, acttmp in zip(minns, act[keymap]):
            tocat = []
            for sessdata in acttmp:
                ntrials = sessdata.shape[2]
                trialind = np.arange(ntrials)
                np.random.shuffle(trialind)
                tmp = sessdata[:, :, trialind]
                tocat.append(tmp[:, :, :minn])
            catactboth[keymap].append(np.concatenate(tocat, 1))

    catact = Struct.empty_like(act, values=list)
    for taskvar, monkey, region in product(TASKVARS, MONKEYS, REGIONS):
        keymap = [monkey, region, taskvar]
        minns = minntrials[keymap]
        for minn, acttmp in zip(minns, act[keymap]):
            tocat = []
            for sessdata in acttmp:
                ntrials = sessdata.shape[2]
                trialind = np.arange(ntrials)
                np.random.shuffle(trialind)
                tmp = sessdata[:, :, trialind]
                tocat.append(tmp[:, :, :minn])
            catact[keymap].append(np.concatenate(tocat, 1))

    catactboth.move_level_(0, 2)

    def cat_monkeys(x):
        '''x: {monkey}[4 (values)] np.array<nbins*nneurons*ntrials>'''
        return [np.concatenate([x['M'][ival], x['N'][ival]], axis=1)
                for ival in range(len(x['M']))]

    catactboth.apply_agg_(cat_monkeys, depth=2)
    catactboth = Struct.from_nested_dict({'both': catactboth.ndict}, n_layers=3)
    catact = catact.combine(catactboth)

    #### Moving data from arrays to a list ####
    actvallist = catact.apply(get_actvallist)
    X, y = actvallist.apply(lambda x: x[0]), actvallist.apply(lambda x: x[1])
    X.apply_(np.stack)
    y.apply_(np.array)
    del(catact, act)

    #### Defining a boolean mask to get only the bins between cue ON and rwd
    ########################################################################
    cuesON_bins_mask = (evtxs['cues ON'] >= EVT_WINS['cues ON'][0]) & (evtxs['cues ON'] <= EVT_WINS['cues ON'][1])
    cuesON_bins = evtxs['cues ON'][cuesON_bins_mask]
    resp_bins_mask = (evtxs['response cue'] >= EVT_WINS['response cue'][0]) &\
                     (evtxs['response cue'] <= EVT_WINS['response cue'][1])
    resp_bins = evtxs['response cue'][resp_bins_mask]
    rwd_bins_mask = (evtxs['rwd'] >= EVT_WINS['rwd'][0]) & (evtxs['rwd'] <= EVT_WINS['rwd'][1])
    rwd_bins = evtxs['rwd'][rwd_bins_mask]
    delaymask = np.concatenate((cuesON_bins >= 0,
                                np.ones(resp_bins.shape, dtype=bool),
                                rwd_bins <= 0))

    bins = {}
    for evt, (start, end) in EVT_WINS.items():
        xs = evtxs[evt]
        bins[evt] = xs[(xs >= start) & (xs <= end)]

    if save_folder is not None:
        with open(dataset_fullpath, 'wb') as f:
            pk.dump((X, y, delaymask, bins), f)
        print(f'data set created and saved in {unit_folder}')

    return X, y, delaymask, bins


# The following is an example. Replace the right hand side of the first three
# statements to get a specific data set
if __name__ == '__main__':
    # Data seeds used to generate the pseudo population data for decoding are
    # listed below:
    # dataseeds = [634564236, 9453241, 70010207, 43661999, 60410205]
    dataseed = 634564236
    # The following folder path must contain the unit data set file specified
    # below
    unit_folder = Path("/home/john/datasets")
    # The following statement specifies which unit data set (with which
    # preprocessing parameters) is to be used to generate the population data
    # set
    unit_file = "unit_dataset_align.center_binsize.100_smooth.gaussian_smoothsize.100_step.25.pk"
    # The last argument of the function allows you to save the data set in a
    # specified folder, or to load an already generated population data set if
    # it already exists in this folder. In this example the population data set
    # is saved in the same folder as the unit data set.
    X, y, delaymask, bins = generate_dataset(dataseed, unit_folder, unit_file,
                                             save_folder=unit_folder)
43.601563
114
0.614854
8450d07e5cec286e40f858637377c3e87f1ab9e5
634
py
Python
setup.py
joepatmckenna/ohmlr
2f3e63243758b995596f37897814634fc432f337
[ "MIT" ]
null
null
null
setup.py
joepatmckenna/ohmlr
2f3e63243758b995596f37897814634fc432f337
[ "MIT" ]
null
null
null
setup.py
joepatmckenna/ohmlr
2f3e63243758b995596f37897814634fc432f337
[ "MIT" ]
null
null
null
import setuptools

with open('README.rst', 'r') as f:
    readme = f.read()
with open('version', 'r') as f:
    version = f.read()

if __name__ == '__main__':
    setuptools.setup(
        name='ohmlr',
        version=version,
        description='One-hot multinomial logisitc regression',
        long_description=readme,
        author='Joseph P. McKenna',
        author_email='joepatmckenna@gmail.com',
        url='http://joepatmckenna.github.io/ohmlr',
        download_url='https://pypi.org/project/ohmlr',
        packages=['ohmlr'],
        license='MIT',
        keywords=['inference', 'statistics', 'machine learning'])
27.565217
65
0.615142
8450ee0e08874b8a26468c905f5abfbc7260c448
1,301
py
Python
commands/climber/holdcimbersposition.py
1757WestwoodRobotics/2022-RapidReact
b6d9cf203fd35e93dc5d26ba2d6889e2a9edb137
[ "MIT" ]
1
2022-01-21T22:00:24.000Z
2022-01-21T22:00:24.000Z
commands/climber/holdcimbersposition.py
1757WestwoodRobotics/2022-RapidReact
b6d9cf203fd35e93dc5d26ba2d6889e2a9edb137
[ "MIT" ]
40
2022-01-18T21:20:54.000Z
2022-03-31T20:56:44.000Z
commands/climber/holdcimbersposition.py
1757WestwoodRobotics/2022-RapidReact
b6d9cf203fd35e93dc5d26ba2d6889e2a9edb137
[ "MIT" ]
1
2022-01-28T02:46:38.000Z
2022-01-28T02:46:38.000Z
from commands2 import CommandBase, ParallelCommandGroup

from subsystems.climbers.leftclimbersubsystem import LeftClimber
from subsystems.climbers.rightclimbersubsystem import RightClimber
35.162162
77
0.730976
84513ebf1e835ed2f032b1060dd720580d3e05a2
668
py
Python
practicer_flask/app.py
DominikPott/practicer-flask
c8e523095bdd5912dadb7357d16a4e76229a04da
[ "MIT" ]
null
null
null
practicer_flask/app.py
DominikPott/practicer-flask
c8e523095bdd5912dadb7357d16a4e76229a04da
[ "MIT" ]
null
null
null
practicer_flask/app.py
DominikPott/practicer-flask
c8e523095bdd5912dadb7357d16a4e76229a04da
[ "MIT" ]
null
null
null
import os

from flask import Flask

import practicer_flask.auth
import practicer_flask.exercises
import practicer_flask.dashboard
import practicer_flask.topic
import practicer_flask.model_viewer

app = create_app()

if __name__ == "__main__":
    app.run(debug=os.environ.get("DEV", False))
23.857143
77
0.784431
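The module above calls create_app(), whose definition is not part of this excerpt. A plausible minimal app factory for it, assuming each imported practicer_flask module exposes a Flask blueprint named bp (that attribute name is a guess, not confirmed by the source):

from flask import Flask

import practicer_flask.auth
import practicer_flask.dashboard


def create_app():
    # Hypothetical factory: register the blueprints the module imports.
    app = Flask(__name__)
    app.register_blueprint(practicer_flask.auth.bp)
    app.register_blueprint(practicer_flask.dashboard.bp)
    return app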
84533ec2f7f2ad9597755a4499563c795ed9f246
737
py
Python
algo/visualizations/temporalchart.py
alexeyev/visartm
d19e193b3c084d7f355a45b966c8bb2ebb6fa366
[ "BSD-3-Clause" ]
1
2020-10-01T10:11:21.000Z
2020-10-01T10:11:21.000Z
algo/visualizations/temporalchart.py
alexeyev/visartm
d19e193b3c084d7f355a45b966c8bb2ebb6fa366
[ "BSD-3-Clause" ]
null
null
null
algo/visualizations/temporalchart.py
alexeyev/visartm
d19e193b3c084d7f355a45b966c8bb2ebb6fa366
[ "BSD-3-Clause" ]
null
null
null
from models.models import Topic, TopicInTopic
import json
32.043478
76
0.639077
84580bc22605d3bb58c5f232f6e1f847342e88fa
3,596
py
Python
submissions-api/app/main/model/submissions_manifest.py
sanger-tol/tol-submissions
8dbbfaa98b1dfa09a09cb54cf1b2eb9d1dca5331
[ "MIT" ]
null
null
null
submissions-api/app/main/model/submissions_manifest.py
sanger-tol/tol-submissions
8dbbfaa98b1dfa09a09cb54cf1b2eb9d1dca5331
[ "MIT" ]
null
null
null
submissions-api/app/main/model/submissions_manifest.py
sanger-tol/tol-submissions
8dbbfaa98b1dfa09a09cb54cf1b2eb9d1dca5331
[ "MIT" ]
null
null
null
# SPDX-FileCopyrightText: 2021 Genome Research Ltd.
#
# SPDX-License-Identifier: MIT

from .base import Base, db
44.95
88
0.614294
8458ddef5330c4ed60d249ea5883464e063cf5ba
6,411
py
Python
eden/integration/hg/histedit_test.py
jmswen/eden
5e0b051703fa946cc77fc43004435ae6b20599a1
[ "BSD-3-Clause" ]
null
null
null
eden/integration/hg/histedit_test.py
jmswen/eden
5e0b051703fa946cc77fc43004435ae6b20599a1
[ "BSD-3-Clause" ]
null
null
null
eden/integration/hg/histedit_test.py
jmswen/eden
5e0b051703fa946cc77fc43004435ae6b20599a1
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python3
#
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.

import os

from eden.integration.lib import hgrepo

from .lib.hg_extension_test_base import EdenHgTestCase, hg_test
from .lib.histedit_command import HisteditCommand
36.220339
86
0.608641
8459ea4275ad26f2fdfb1430948999a41ff39caf
408
py
Python
dailypy/__init__.py
HuangJiaLian/dailypy
b838a4f6743fca8ccc5c4fa73142d0f7095dbbc3
[ "CC0-1.0" ]
null
null
null
dailypy/__init__.py
HuangJiaLian/dailypy
b838a4f6743fca8ccc5c4fa73142d0f7095dbbc3
[ "CC0-1.0" ]
1
2020-08-19T13:42:52.000Z
2020-08-19T14:32:31.000Z
dailypy/__init__.py
HuangJiaLian/dailypy
b838a4f6743fca8ccc5c4fa73142d0f7095dbbc3
[ "CC0-1.0" ]
null
null
null
import numpy as np
import os

# Data manipulate
20.4
53
0.568627
845c29a7df8a071ea4d00366b934a8a0a5899a8f
2,832
py
Python
vbb_backend/session/migrations/0002_auto_20210215_1509.py
patrickb42/backend-vbb-portal
88362bc5b4d5cab95aa67e12694f98371604b65a
[ "MIT" ]
3
2021-04-14T02:59:09.000Z
2021-06-08T00:17:27.000Z
vbb_backend/session/migrations/0002_auto_20210215_1509.py
patrickb42/backend-vbb-portal
88362bc5b4d5cab95aa67e12694f98371604b65a
[ "MIT" ]
81
2020-12-08T00:11:52.000Z
2021-08-09T18:13:32.000Z
vbb_backend/session/migrations/0002_auto_20210215_1509.py
patrickb42/backend-vbb-portal
88362bc5b4d5cab95aa67e12694f98371604b65a
[ "MIT" ]
5
2021-01-12T04:50:26.000Z
2021-06-04T02:00:03.000Z
# Generated by Django 3.0.10 on 2021-02-15 15:09

from django.db import migrations, models
import django.db.models.deletion
import uuid
37.76
153
0.598517
845d03992ff2924ffdc957b51de3c6b486a7c0ea
880
py
Python
src/las_util/serializers.py
dcslagel/las-util-django
cea8437813969b3b22182de6b9553b9e6694c548
[ "BSD-3-Clause" ]
3
2020-05-15T05:40:43.000Z
2021-11-09T06:19:41.000Z
src/las_util/serializers.py
dcslagel/las-util-django
cea8437813969b3b22182de6b9553b9e6694c548
[ "BSD-3-Clause" ]
26
2020-05-20T13:03:02.000Z
2021-09-23T19:36:39.000Z
src/las_util/serializers.py
dcslagel/las-util-django
cea8437813969b3b22182de6b9553b9e6694c548
[ "BSD-3-Clause" ]
1
2021-11-09T01:40:05.000Z
2021-11-09T01:40:05.000Z
""" File-Name: [app]/serializers.py File-Desc: Rest API serializers for las_util App-Name: las_util Project-Name: Las-Util-Django Copyright: Copyright (c) 2019, DC Slagel License-Identifier: BSD-3-Clause """ from rest_framework import serializers from las_util.models import SectionInfo # TODO: replace view.api_upload with to use this # class UploadSerializer(serializer.ModelSerializer): # """Link ModelSerializer to the Upload model""" # class Meta: # model = Upload # fields = ['filename',]
29.333333
55
0.7125
845d9d3e1de64db798d6f4d7e46d76bf4c2959c6
3,965
py
Python
UI/python/runtext.py
maxxscholten/nyc-train-sign
7da32c413270f3bf4629969bcf16f7def4ddb372
[ "MIT" ]
8
2020-02-19T21:17:04.000Z
2022-01-04T03:52:56.000Z
UI/python/runtext.py
maxxscholten/nyc-train-sign
7da32c413270f3bf4629969bcf16f7def4ddb372
[ "MIT" ]
1
2021-09-20T02:13:41.000Z
2021-09-21T07:01:14.000Z
UI/python/runtext.py
maxxscholten/nyc-train-sign
7da32c413270f3bf4629969bcf16f7def4ddb372
[ "MIT" ]
4
2021-03-11T17:11:40.000Z
2021-11-10T01:20:33.000Z
#!/usr/bin/env python
# Display a runtext with double-buffering.
from samplebase import SampleBase
from rgbmatrix import graphics
import time
import requests
import transitfeed
import datetime
import arrow
import schedule

today = datetime.date.today()
starttime = time.time()
schedule = transitfeed.Schedule()  # NB: this rebinding shadows the `schedule` module imported above
url = "http://localhost:5000/by-id/077e"

font = graphics.Font()
font.LoadFont("../fonts/tom-thumb.bdf")
textColor = graphics.Color(0, 110, 0)
circleColor = graphics.Color(110, 0, 0)
circleNumberColor = graphics.Color(0, 0, 0)

# Main function
if __name__ == "__main__":
    run_text = RunText()
    if (not run_text.process()):
        run_text.print_help()
37.056075
121
0.640858
8460d12d7847a84d2047777b7d49fa013dbd6421
92
py
Python
back/apps/base_user/apps.py
pylvin/nuxt-drf-template
e3f32cf98a2bdf393f88fbda2afedf2c0bf99dc7
[ "MIT" ]
2
2021-11-16T03:47:06.000Z
2021-12-21T20:19:30.000Z
back/apps/base_user/apps.py
pylvin/nuxt-drf-template
e3f32cf98a2bdf393f88fbda2afedf2c0bf99dc7
[ "MIT" ]
null
null
null
back/apps/base_user/apps.py
pylvin/nuxt-drf-template
e3f32cf98a2bdf393f88fbda2afedf2c0bf99dc7
[ "MIT" ]
2
2021-12-21T20:19:39.000Z
2022-01-03T11:27:04.000Z
from django.apps import AppConfig
15.333333
33
0.76087
8462591fa4b3c8c3275d239bf45765f52bee1b94
1,188
py
Python
model/board_generator.py
myrmarachne/minesweeper
777170b7a31f1feed0bdf7aca31aaa9916c9b915
[ "AFL-1.1" ]
null
null
null
model/board_generator.py
myrmarachne/minesweeper
777170b7a31f1feed0bdf7aca31aaa9916c9b915
[ "AFL-1.1" ]
null
null
null
model/board_generator.py
myrmarachne/minesweeper
777170b7a31f1feed0bdf7aca31aaa9916c9b915
[ "AFL-1.1" ]
null
null
null
from random import sample

from tile import Tile
from utils import neighbours
38.322581
115
0.616162
8463673ccc7d5d8251d46b1bed4eb08caa70dd68
1,054
py
Python
src/the_tale/the_tale/game/pvp/objects.py
al-arz/the-tale
542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5
[ "BSD-3-Clause" ]
85
2017-11-21T12:22:02.000Z
2022-03-27T23:07:17.000Z
src/the_tale/the_tale/game/pvp/objects.py
al-arz/the-tale
542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5
[ "BSD-3-Clause" ]
545
2017-11-04T14:15:04.000Z
2022-03-27T14:19:27.000Z
src/the_tale/the_tale/game/pvp/objects.py
al-arz/the-tale
542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5
[ "BSD-3-Clause" ]
45
2017-11-11T12:36:30.000Z
2022-02-25T06:10:44.000Z
import smart_imports

smart_imports.all()
31.939394
85
0.66129
8463c9fd49aa8b29fdde20c5d6a8fdfd2fb75f46
300
py
Python
src/gencoef/test.py
bwasti/sleef
4d260ae7f5d0e76a3c5424149deb838373e1894b
[ "BSL-1.0" ]
null
null
null
src/gencoef/test.py
bwasti/sleef
4d260ae7f5d0e76a3c5424149deb838373e1894b
[ "BSL-1.0" ]
null
null
null
src/gencoef/test.py
bwasti/sleef
4d260ae7f5d0e76a3c5424149deb838373e1894b
[ "BSL-1.0" ]
null
null
null
import numpy as np
import math

p = np.poly1d([
    +0.1429511242e-53,
    +0.1561712123e-44,
    -0.2259472298e-35,
    -0.2669710222e-26,
    +0.9784247973e-18,
    +0.1655572013e-8,
    +0.3991098106e+0,
])

for i in range(1000):
    k = float(i) / 100
    print(sigmoid(k), p(k))
15.789474
31
0.656667
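sigmoid() is compared against the polynomial above but is not defined in this excerpt. A minimal definition consistent with the comparison loop (an assumption, since the original helper is not shown):

import math

def sigmoid(x):
    # Standard logistic function; the polynomial is presumably an
    # approximation of a sigmoid-like curve on the sampled range [0, 10).
    return 1.0 / (1.0 + math.exp(-x))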
846536aeea05536d64f4f59f9d2196f85d857b4d
19,035
py
Python
forever/Database.py
dss285/4ever
bd6f70f92d76d43342da401562f2c504adaf3867
[ "MIT" ]
null
null
null
forever/Database.py
dss285/4ever
bd6f70f92d76d43342da401562f2c504adaf3867
[ "MIT" ]
null
null
null
forever/Database.py
dss285/4ever
bd6f70f92d76d43342da401562f2c504adaf3867
[ "MIT" ]
null
null
null
import psycopg2
import psycopg2.extras
import discord

from models.BotMention import BotMention
from models.UpdatedMessage import UpdatedMessage
from forever.Steam import Steam_API, Dota_Match, Dota_Match_Player
from forever.Utilities import run_in_executor, log
from forever.Warframe import CetusMessage, FissureMessage, SortieMessage, NightwaveMessage, InvasionMessage, SolSystem
from forever.Newswire import NewswireMessage
from models.Server import Server
from forever.Arknights import Formula, Item, Stage
from forever.GFL import Doll, Fairy
38.222892
180
0.640294
8465f309612202475ac3cb61d22a9dcf1509182e
822
py
Python
Week06/q_cifar10_cnn.py
HowardNTUST/HackNTU_Data_2017
ad8e753a16719b6f9396d88b313a5757f5ed4794
[ "MIT" ]
null
null
null
Week06/q_cifar10_cnn.py
HowardNTUST/HackNTU_Data_2017
ad8e753a16719b6f9396d88b313a5757f5ed4794
[ "MIT" ]
null
null
null
Week06/q_cifar10_cnn.py
HowardNTUST/HackNTU_Data_2017
ad8e753a16719b6f9396d88b313a5757f5ed4794
[ "MIT" ]
1
2019-02-24T17:41:45.000Z
2019-02-24T17:41:45.000Z
import keras
from keras.models import Sequential  # used below
from keras.layers import Dense, Activation, Conv2D, MaxPool2D, Reshape

model = Sequential()
model.add(Reshape((3, 32, 32), input_shape=(3*32*32,)))
model.add(Conv2D(filters=32, kernel_size=(3,3), padding='same', activation="relu", data_format='channels_first'))
model.add(MaxPool2D())
model.add(Conv2D(filters=64, kernel_size=(3,3), padding='same', activation="relu", data_format='channels_first'))
model.add(MaxPool2D())
model.add(Reshape((-1,)))
model.add(Dense(units=1024, activation="relu"))
model.add(Dense(units=10, activation="softmax"))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(train_X, train_Y, validation_split=0.02, batch_size=128, epochs=30)
rtn = model.evaluate(test_X, test_Y)
print("\ntest accuracy=", rtn[1])
48.352941
113
0.723844
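train_X/train_Y and test_X/test_Y are never defined in the snippet above. One way to produce them, assuming the standard Keras CIFAR-10 loader; the reshape matches the model's flat, channels-first input:

import numpy as np
from keras.datasets import cifar10
from keras.utils import to_categorical

(train_imgs, train_lbls), (test_imgs, test_lbls) = cifar10.load_data()

def prep(imgs):
    # channels_last (N, 32, 32, 3) -> channels_first (N, 3, 32, 32) -> flat (N, 3072)
    x = imgs.transpose(0, 3, 1, 2).reshape(len(imgs), -1)
    return x.astype(np.float32) / 255.0

train_X, test_X = prep(train_imgs), prep(test_imgs)
train_Y, test_Y = to_categorical(train_lbls, 10), to_categorical(test_lbls, 10)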
8465fe705e2203a309cb2f80aab7f362306bc341
1,111
py
Python
testesDuranteAulas/aula019.py
Igor3550/Exercicios-de-python
e0f6e043df4f0770ac15968485fbb19698b4ac6b
[ "MIT" ]
null
null
null
testesDuranteAulas/aula019.py
Igor3550/Exercicios-de-python
e0f6e043df4f0770ac15968485fbb19698b4ac6b
[ "MIT" ]
null
null
null
testesDuranteAulas/aula019.py
Igor3550/Exercicios-de-python
e0f6e043df4f0770ac15968485fbb19698b4ac6b
[ "MIT" ]
null
null
null
# Dictionaries
pessoas = {'nome': 'Igor', 'sexo': 'M', 'idade': 20}
print(f'O {pessoas["nome"]} tem {pessoas["idade"]} anos.')
print(pessoas.keys())    # the dictionary's keys
print(pessoas.values())  # the values of the keys
print(pessoas.items())   # show the dictionary's items
print()
for k in pessoas.keys():
    print(k)
for v in pessoas.values():
    print(v)
for k, v in pessoas.items():
    print(k, v)
print()
for k, v in pessoas.items():
    print(f'{k} = {v}')
print()
del pessoas['sexo']   # delete a key
pessoas['peso'] = 72  # add a new key
for k, v in pessoas.items():
    print(f'{k} = {v}')
print()
# Dictionary inside a list
brasil = []
estado1 = {'uf': 'Rio de Janeiro', 'sigla': 'RJ'}
estado2 = {'uf': 'São Paulo', 'sigla': 'SP'}
brasil.append(estado1)
brasil.append(estado2)
print(brasil[0]['uf'])
print()
brasil = list()
estado = dict()
for c in range(0, 3):
    estado['uf'] = str(input('Unidade federativa: '))
    estado['sigla'] = str(input('Sigla: '))
    brasil.append(estado.copy())  # copy of a dictionary
for e in brasil:
    for k, v in e.items():
        print(f'{k} = {v}')
26.452381
58
0.629163
84664082e1511f1729add08f835b69444a8edf67
9,697
py
Python
polyanalyst6api/api.py
Megaputer/polyanalyst6api-python
c6626a8a5f8f926b1f32285e18457ed70dfba73a
[ "MIT" ]
2
2021-01-30T19:04:12.000Z
2021-06-18T09:41:15.000Z
polyanalyst6api/api.py
Megaputer/polyanalyst6api-python
c6626a8a5f8f926b1f32285e18457ed70dfba73a
[ "MIT" ]
null
null
null
polyanalyst6api/api.py
Megaputer/polyanalyst6api-python
c6626a8a5f8f926b1f32285e18457ed70dfba73a
[ "MIT" ]
1
2021-04-19T09:57:14.000Z
2021-04-19T09:57:14.000Z
""" polyanalyst6api.api ~~~~~~~~~~~~~~~~~~~ This module contains functionality for access to PolyAnalyst API. """ import configparser import contextlib import pathlib import warnings from typing import Any, Dict, List, Tuple, Union, Optional from urllib.parse import urljoin, urlparse import requests import urllib3 from . import __version__ from .drive import Drive from .project import Parameters, Project from .exceptions import APIException, ClientException, _WrapperNotFound __all__ = ['API'] urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) warnings.simplefilter( 'always', UserWarning ) # without this set_parameters will show warnings only once NodeTypes = [ "CSV Exporter/", "DataSource/CSV", "DataSource/EXCEL", "DataSource/FILES", "DataSource/INET", "DataSource/ODBC", "DataSource/RSS", "DataSource/XML", "Dataset/Biased", "Dataset/ExtractTerms", "Dataset/Python", "Dataset/R", "Dataset/ReplaceTerms", "ODBC Exporter/", "PA6TaxonomyResult/TaxonomyResult", "SRLRuleSet/Filter Rows", "SRLRuleSet/SRL Rule", "TmlEntityExtractor/FEX", "Sentiment Analysis", "TmlLinkTerms/", ]
35.00722
113
0.608642
ffbcc28e993823f93d8f7e3809b6abd49a5cc187
1,998
py
Python
froide/publicbody/admin.py
rufuspollock/froide
8ef4dbdd54a74f8c986d59e90348dfdbd85c5da4
[ "MIT" ]
null
null
null
froide/publicbody/admin.py
rufuspollock/froide
8ef4dbdd54a74f8c986d59e90348dfdbd85c5da4
[ "MIT" ]
null
null
null
froide/publicbody/admin.py
rufuspollock/froide
8ef4dbdd54a74f8c986d59e90348dfdbd85c5da4
[ "MIT" ]
null
null
null
from django.contrib import admin
from django.http import HttpResponse
from django.utils.translation import ugettext as _

from froide.publicbody.models import (PublicBody, FoiLaw,
        PublicBodyTopic, Jurisdiction)

admin.site.register(PublicBody, PublicBodyAdmin)
admin.site.register(FoiLaw, FoiLawAdmin)
admin.site.register(Jurisdiction, JurisdictionAdmin)
admin.site.register(PublicBodyTopic, PublicBodyTopicAdmin)
34.448276
87
0.699199
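PublicBodyAdmin, FoiLawAdmin, JurisdictionAdmin and PublicBodyTopicAdmin are registered above but their definitions are not included in this excerpt. Minimal placeholders that would make the registrations importable; the real classes presumably customize list displays, search fields, and similar options:

from django.contrib import admin

class PublicBodyAdmin(admin.ModelAdmin):
    pass

class FoiLawAdmin(admin.ModelAdmin):
    pass

class JurisdictionAdmin(admin.ModelAdmin):
    pass

class PublicBodyTopicAdmin(admin.ModelAdmin):
    pass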
ffbf148e7df59ebdd237d38695723231b7824b44
462
py
Python
src/abc/106/106_b.py
ryuichi1208/atcoder_stack
19ec81fb9a3edb44be422b79e98b23e8ff17ef60
[ "MIT" ]
null
null
null
src/abc/106/106_b.py
ryuichi1208/atcoder_stack
19ec81fb9a3edb44be422b79e98b23e8ff17ef60
[ "MIT" ]
null
null
null
src/abc/106/106_b.py
ryuichi1208/atcoder_stack
19ec81fb9a3edb44be422b79e98b23e8ff17ef60
[ "MIT" ]
null
null
null
n = int(input())
L = []
ans = 0
for i in range(1, n+1):
    if (i % 2 == 0):
        continue
    else:
        for j in range(1, n+1):
            if (i % j == 0):
                L.append(j)
        if (len(L) == 8):
            ans += 1
        L.clear()
print(ans)

print(divisor(15))
14.4375
30
0.452381
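divisor() is called on the last line of the snippet but never defined in this excerpt. A plausible helper, assuming it counts a number's divisors, which matches the brute-force loop above:

def divisor(n):
    # Count the divisors of n by trial division.
    return sum(1 for j in range(1, n + 1) if n % j == 0)

print(divisor(15))  # 4, since 15 has divisors 1, 3, 5, 15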
ffbfef0bf96a36ae6a5da2b1bf0bdc5756f1e309
3,931
py
Python
poc.py
evinr/basis-scraper
cb20574aadc469c6d1527ddb5a3cc69df531cbfd
[ "MIT" ]
null
null
null
poc.py
evinr/basis-scraper
cb20574aadc469c6d1527ddb5a3cc69df531cbfd
[ "MIT" ]
null
null
null
poc.py
evinr/basis-scraper
cb20574aadc469c6d1527ddb5a3cc69df531cbfd
[ "MIT" ]
null
null
null
import serial

#TODO: define the gathering of all of the possible data sets being extracted

#Biometrics
# Heart Rate
# STEPS
# CALORIES
# SKIN TEMP
# PERSPIRATION

#Activity
# Walking
# Running
# Biking

#Sleep
# REM
# Mind Refresh
# Light
# Deep
# Body Refresh
# Interruptions
# Toss & Turn
27.110345
135
0.675401
ffc0fbef26aaac232d9390250ce89e31f64b7ad8
31
py
Python
multinet/db/models/__init__.py
multinet-app/multinet-girder
f34c87849d92c7fe2f8589760f97bebbe04bd4af
[ "Apache-2.0" ]
3
2019-10-22T15:21:10.000Z
2020-02-13T17:40:07.000Z
multinet/db/models/__init__.py
multinet-app/multinet
f34c87849d92c7fe2f8589760f97bebbe04bd4af
[ "Apache-2.0" ]
183
2019-08-01T14:27:00.000Z
2020-03-04T17:47:49.000Z
multinet/db/models/__init__.py
multinet-app/multinet-girder
f34c87849d92c7fe2f8589760f97bebbe04bd4af
[ "Apache-2.0" ]
2
2020-08-20T11:57:17.000Z
2020-11-10T22:54:19.000Z
"""ORM models for multinet."""
15.5
30
0.645161
ffc1536722c6684539bdbe4eaba7de45c07a8edb
6,296
py
Python
dataPipelines/gc_crawler/nato_stanag/models.py
ekmixon/gamechanger-crawlers
60a0cf20338fb3dc134eec117bccd519cede9288
[ "MIT" ]
null
null
null
dataPipelines/gc_crawler/nato_stanag/models.py
ekmixon/gamechanger-crawlers
60a0cf20338fb3dc134eec117bccd519cede9288
[ "MIT" ]
4
2021-07-27T21:44:51.000Z
2022-03-04T01:38:48.000Z
dataPipelines/gc_crawler/nato_stanag/models.py
ekmixon/gamechanger-crawlers
60a0cf20338fb3dc134eec117bccd519cede9288
[ "MIT" ]
null
null
null
import bs4
import os
import re
from typing import Iterable
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait  # for implicit and explicit waits
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.common.by import By

from dataPipelines.gc_crawler.requestors import MapBasedPseudoRequestor
from dataPipelines.gc_crawler.exec_model import Crawler, Parser, Pager
from dataPipelines.gc_crawler.data_model import Document, DownloadableItem
from dataPipelines.gc_crawler.utils import abs_url, close_driver_windows_and_quit

from . import SOURCE_SAMPLE_DIR, BASE_SOURCE_URL
38.625767
127
0.550191
ffc168320dcc3879d9935e0c48e2582d2d304fa1
3,938
py
Python
app/signals.py
MakuZo/bloggy
550e5285728b285e0d5243670d6aa0f40c414777
[ "MIT" ]
7
2018-11-12T20:52:53.000Z
2021-12-17T23:04:41.000Z
app/signals.py
MakuZo/bloggy
550e5285728b285e0d5243670d6aa0f40c414777
[ "MIT" ]
2
2019-12-24T08:53:51.000Z
2019-12-26T19:26:51.000Z
app/signals.py
MakuZo/bloggy
550e5285728b285e0d5243670d6aa0f40c414777
[ "MIT" ]
8
2018-12-28T12:31:51.000Z
2020-01-25T09:07:52.000Z
import re

from django.db.models.signals import m2m_changed, post_save, pre_delete
from django.dispatch import receiver
from django.urls import reverse

from .models import Entry, Notification, User
39.777778
127
0.561199
ffc1d0bbd0644054a0b22502249482b17c06c941
2,532
py
Python
tests/utils/test_commons.py
jajomi/flow
c984be6f7de1a34192601c129dbc19f2ce45f135
[ "Apache-2.0" ]
null
null
null
tests/utils/test_commons.py
jajomi/flow
c984be6f7de1a34192601c129dbc19f2ce45f135
[ "Apache-2.0" ]
6
2021-03-05T16:39:42.000Z
2021-06-11T01:04:57.000Z
tests/utils/test_commons.py
jajomi/flow
c984be6f7de1a34192601c129dbc19f2ce45f135
[ "Apache-2.0" ]
null
null
null
from unittest.mock import mock_open
from unittest.mock import patch

import flow.utils.commons as commons

commit_example = [
    "223342f Adding ability to specify artifactory user [#134082057]",
    "4326d00 Adding slack channel option for errors [#130798449]",
    "09c1983 Merge pull request #25 from ci-cd/revert-18-github-version-fix",
    "445fd02 Revert \"GitHub version fix\""
]

commit_example_nested_brackets = [
    "223342f Adding ability to specify artifactory user [#134082057, [bubba]]",
    "4326d00 Adding slack channel option for errors [#130798449]",
    "09c1983 Merge pull request #25 from ci-cd/revert-18-github-version-fix",
    "445fd02 Revert \"GitHub version fix\""
]

commit_example_multiple_per_brackets = [
    "223342f Adding ability to specify artifactory user [#134082057,#134082058]",
    "4326d00 Adding slack channel option for errors [#130798449,123456]",
    "09c1983 Merge pull request #25 from ci-cd/revert-18-github-version-fix",
    "445fd02 Revert \"GitHub version fix\""
]

commit_example_dedup = [
    "223342f Adding ability to specify artifactory user [#134082057,#134082057]",
    "4326d00 Adding slack channel option for errors [#134082057,134082057]",
    "09c1983 Merge pull request #25 from ci-cd/revert-18-github-version-fix",
    "445fd02 Revert \"GitHub version fix\""
]
36.171429
100
0.781991
ffc234c8fa1382a81cd3f2b1ea5e202da915c840
28,482
py
Python
swagger_client/models/client_configuration.py
chbndrhnns/finapi-client
259beda8b05e912c49d2dc4c3ed71205134e5d8a
[ "MIT" ]
2
2019-04-15T05:58:21.000Z
2021-11-15T18:26:37.000Z
swagger_client/models/client_configuration.py
chbndrhnns/finapi-client
259beda8b05e912c49d2dc4c3ed71205134e5d8a
[ "MIT" ]
1
2021-06-18T09:46:25.000Z
2021-06-18T20:12:41.000Z
swagger_client/models/client_configuration.py
chbndrhnns/finapi-client
259beda8b05e912c49d2dc4c3ed71205134e5d8a
[ "MIT" ]
2
2019-07-08T13:41:09.000Z
2020-12-07T12:10:04.000Z
# coding: utf-8

"""
    finAPI RESTful Services

    finAPI RESTful Services  # noqa: E501

    OpenAPI spec version: v1.42.1

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

import pprint
import re  # noqa: F401

import six


def to_str(self):
    """Returns the string representation of the model"""
    return pprint.pformat(self.to_dict())

def __repr__(self):
    """For `print` and `pprint`"""
    return self.to_str()

def __eq__(self, other):
    """Returns true if both objects are equal"""
    if not isinstance(other, ClientConfiguration):
        return False

    return self.__dict__ == other.__dict__

def __ne__(self, other):
    """Returns true if both objects are not equal"""
    return not self == other
71.027431
1,696
0.743066
ffc35164c1764ae381a92d8e3682d0250a4793ea
912
py
Python
utils/jwt_custom_decorator.py
w0rm1995/face-comparison-backend
9e231aabcf129e887e25a8ffdb5ae9617fee3e00
[ "MIT" ]
null
null
null
utils/jwt_custom_decorator.py
w0rm1995/face-comparison-backend
9e231aabcf129e887e25a8ffdb5ae9617fee3e00
[ "MIT" ]
3
2021-06-08T22:05:30.000Z
2022-01-13T03:04:03.000Z
utils/jwt_custom_decorator.py
w0rm1995/face-comparison-backend
9e231aabcf129e887e25a8ffdb5ae9617fee3e00
[ "MIT" ]
null
null
null
from functools import wraps

from flask_jwt_extended import verify_jwt_in_request, get_jwt_claims, exceptions
from jwt import exceptions as jwt_exception

from utils.custom_response import bad_request
35.076923
80
0.638158
ffc40ad7630c4587dcf4487c052a523769c15b4a
1,254
py
Python
packages/M2Crypto-0.21.1/demo/smime/unsmime.py
RaphaelPrevost/Back2Shops
5f2d369e82fe2a7b9b3a6c55782319b23d142dfd
[ "CECILL-B" ]
null
null
null
packages/M2Crypto-0.21.1/demo/smime/unsmime.py
RaphaelPrevost/Back2Shops
5f2d369e82fe2a7b9b3a6c55782319b23d142dfd
[ "CECILL-B" ]
6
2021-03-31T19:21:50.000Z
2022-01-13T01:46:09.000Z
packages/M2Crypto-0.21.1/demo/smime/unsmime.py
RaphaelPrevost/Back2Shops
5f2d369e82fe2a7b9b3a6c55782319b23d142dfd
[ "CECILL-B" ]
null
null
null
#!/usr/bin/env python

"""S/MIME demo.

Copyright (c) 2000 Ng Pheng Siong. All rights reserved."""

from M2Crypto import BIO, Rand, SMIME, X509
import sys

if __name__ == '__main__':
    Rand.load_file('../randpool.dat', -1)
    decrypt_verify(BIO.File(sys.stdin), 'client.pem', 'client2.pem', 'ca.pem')
    Rand.save_file('../randpool.dat')
24.588235
77
0.6563
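decrypt_verify() itself is not included in this excerpt. A sketch of what it plausibly does with the M2Crypto API (decrypt the S/MIME payload with the recipient's key, then verify the enclosed signature against the signer certificate and CA); the argument roles are inferred from the call above, not confirmed by the source:

from M2Crypto import BIO, SMIME, X509

def decrypt_verify(p7file, recip_key, signer_cert, ca_cert):
    s = SMIME.SMIME()

    # Decrypt with the recipient's key/cert (assumed to both live in recip_key).
    s.load_key(recip_key)
    p7, data = SMIME.smime_load_pkcs7_bio(p7file)
    out = s.decrypt(p7)

    # Verify the signature found inside the decrypted blob.
    sk = X509.X509_Stack()
    sk.push(X509.load_cert(signer_cert))
    s.set_x509_stack(sk)
    st = X509.X509_Store()
    st.load_info(ca_cert)
    s.set_x509_store(st)
    p7, data = SMIME.smime_load_pkcs7_bio(BIO.MemoryBuffer(out))
    print(s.verify(p7, data))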
ffc4351a518b97d5c4916014accd51d41d76de87
14,867
py
Python
skybright/skybright.py
ehneilsen/skybright
b0e2d7e6e25131393ee76ce334ce1df1521e3659
[ "MIT" ]
1
2019-09-24T21:06:45.000Z
2019-09-24T21:06:45.000Z
skybright/skybright.py
ehneilsen/skybright
b0e2d7e6e25131393ee76ce334ce1df1521e3659
[ "MIT" ]
null
null
null
skybright/skybright.py
ehneilsen/skybright
b0e2d7e6e25131393ee76ce334ce1df1521e3659
[ "MIT" ]
1
2019-09-24T21:14:35.000Z
2019-09-24T21:14:35.000Z
#!/usr/bin/env python
"""A model for the sky brightness
"""

from functools import partial
from math import pi, cos, acos, sin, sqrt, log10
from datetime import datetime, tzinfo, timedelta
from time import strptime
from calendar import timegm
from copy import deepcopy
from sys import argv
from collections import namedtuple, OrderedDict
from argparse import ArgumentParser

try:
    from ConfigParser import ConfigParser
except:
    from configparser import ConfigParser

import numexpr
from numexpr import NumExpr
import warnings
from warnings import warn
import numpy as np

try:
    from palpy import rdplan as rdplan_not_vectorized
    from palpy import gmst as gmst_not_vectorized
    from palpy import dmoon
    from palpy import evp
except ImportError:
    from pyslalib.slalib import sla_rdplan as rdplan_not_vectorized
    from pyslalib.slalib import sla_gmst as gmst_not_vectorized
    from pyslalib.slalib import sla_dmoon as dmoon
    from pyslalib.slalib import sla_evp as evp

palpy_body = {'sun': 0,
              'moon': 3}

MAG0 = 23.9

# warnings.simplefilter("always")

rdplan = np.vectorize(rdplan_not_vectorized)

## Works and is trivially faster, but less flexible w.r.t. data types
#
# ang_sep = NumExpr("2*arcsin(sqrt(cos(decl1)*cos(decl2)*(sin(((ra1-ra2)/2))**2) + (sin((decl1-decl2)/2))**2))",
#                   (('ra1', np.float64), ('decl1', np.float64), ('ra2', np.float64), ('decl2', np.float64)))


def elongation_not_vectorized(mjd):
    "Calculate the elongation of the moon in radians"
    pv = dmoon(mjd)
    moon_distance = (sum([x**2 for x in pv[:3]]))**0.5

    dvb, dpb, dvh, dph = evp(mjd, -1)
    sun_distance = (sum([x**2 for x in dph[:3]]))**0.5

    a = np.degrees(np.arccos(
        (-pv[0]*dph[0] - pv[1]*dph[1] - pv[2]*dph[2]) /
        (moon_distance*sun_distance)))

    return a

elongation = np.vectorize(elongation_not_vectorized)


def calc_moon_brightness(mjd, moon_elongation=None):
    """The brightness of the moon (relative to full)

    The value here matches about what I expect from the value in
    Astrophysical Quantities corresponding to the elongation calculated by
    http://ssd.jpl.nasa.gov/horizons.cgi

    >>> mjd = 51778.47
    >>> print "%3.2f" % moon_brightness(mjd)
    0.10
    """
    if moon_elongation is None:
        moon_elongation = elongation(mjd)

    alpha = 180.0 - moon_elongation
    # Allen's _Astrophysical Quantities_, 3rd ed., p. 144
    return 10**(-0.4*(0.026*abs(alpha) + 4E-9*(alpha**4)))

#
# Included for backward compatibility with previous implementation
#


def skymag(m_inf, m_zen, h, g, mie_m, rayl_m, ra, decl, mjd, k, latitude, longitude,
           offset=0.0, sun_dm=-14.0, twi1=-2.52333, twi2=0.01111):
    config = ConfigParser()

    sect = "Observatory Position"
    config.add_section(sect)
    config.set(sect, 'longitude', longitude)
    config.set(sect, 'latitude', latitude)

    sect = "sky"
    config.add_section(sect)
    config.set(sect, 'filters', 'x')
    config.set(sect, 'k', k)
    config.set(sect, 'm_inf', m_inf)
    config.set(sect, 'm_zen', m_zen)
    config.set(sect, 'h', h)
    config.set(sect, 'rayl_m', rayl_m)
    config.set(sect, 'g', g)
    config.set(sect, 'mie_m', mie_m)
    config.set(sect, 'sun_dm', sun_dm)
    config.set(sect, 'twi1', twi1)
    config.set(sect, 'twi2', twi2)

    calc_sky = MoonSkyModel(config)
    sky = calc_sky(mjd, ra, decl, 'x')

    return sky


if __name__ == '__main__':
    parser = ArgumentParser('Estimate the sky brightness')
    parser.add_argument("-m", "--mjd", type=float,
                        help="Modified Julian Date (float) (UTC)")
    parser.add_argument("-r", "--ra", type=float,
                        help="the RA (decimal degrees)")
    parser.add_argument("-d", "--dec", type=float,
                        help="the declination (decimal degrees)")
    parser.add_argument("-f", "--filter", help="the filter")
    parser.add_argument("-c", "--config", help="the configuration file")
    args = parser.parse_args()

    model_config = ConfigParser()
    model_config.read(args.config)

    longitude = model_config.getfloat("Observatory Position", "longitude")
    latitude = model_config.getfloat("Observatory Position", "latitude")

    lst = gmst(args.mjd) + np.radians(longitude)
    print("GMST: %f" % np.degrees(gmst(args.mjd)))
    print("LST: %f" % np.degrees(lst))

    sun_ra, sun_decl, diam = rdplan(args.mjd, 0, np.radians(longitude), np.radians(latitude))
    sun_ha = lst - sun_ra
    sun_zd = np.degrees(calc_zd(np.radians(latitude), sun_ha, sun_decl))
    print("Sun zenith distance: %f" % sun_zd)

    moon_ra, moon_decl, diam = rdplan(args.mjd, 3, longitude, latitude)
    moon_ha = lst - moon_ra
    moon_zd = np.degrees(calc_zd(np.radians(latitude), moon_ha, moon_decl))
    print("Moon zenith distance: %f" % moon_zd)
    print("Elongation of the moon: %f" % elongation(args.mjd))
    print("Moon brightness: %f" % calc_moon_brightness(args.mjd))

    sep = ang_sep(moon_ra, moon_decl, np.radians(args.ra), np.radians(args.dec))
    print("Pointing angle with moon: %f" % sep)

    ha = lst - np.radians(args.ra)
    print("Hour angle: %f" % np.degrees(ha))
    z = calc_zd(np.radians(latitude), ha, np.radians(args.dec))
    print("Pointing zenith distance: %f" % np.degrees(z))
    print("Airmass: %f" % calc_airmass(np.cos(z)))

    sky_model = MoonSkyModel(model_config)
    print("Sky brightness at pointing: %f" % sky_model(args.mjd, args.ra, args.dec, args.filter))
37.1675
163
0.609807
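The phase law inside calc_moon_brightness is compact enough to sanity-check by hand. A tiny standalone check of the Allen formula, independent of the palpy/pyslalib ephemeris code:

def phase_brightness(alpha):
    # Allen's phase law: relative brightness of the moon at phase angle
    # alpha in degrees; alpha = 0 is full moon.
    return 10**(-0.4*(0.026*abs(alpha) + 4e-9*alpha**4))

print(phase_brightness(0))    # 1.0 at full moon
print(phase_brightness(90))   # ~0.09, i.e. a quarter moon is roughly 11x fainter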
ffc57756064cdbfdff55d925646e8ab713a50ba6
1,675
py
Python
timeseries/test.py
zoobree/MachineLearning
67fd35e67469d9f03afd5c090f2ca23f514bebfd
[ "Apache-2.0" ]
null
null
null
timeseries/test.py
zoobree/MachineLearning
67fd35e67469d9f03afd5c090f2ca23f514bebfd
[ "Apache-2.0" ]
1
2018-04-07T05:24:40.000Z
2018-04-07T05:24:40.000Z
timeseries/test.py
joybree/MachineLearning
69a381efa35436a6d211005c320576db966eea11
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
import unittest

import arima
import os
import pandas as pd


if __name__ == "__main__":
    unittest.main()
25.769231
99
0.587463
ffc7043d4112113fd11d3bba2367bfc4002daece
8,004
py
Python
pynetstation_send_tags/pynetstation_send_tags.py
mattmoo/Pynetstation-Plug-In
aba2d312e5543cc5c2100793805acfeff075c59c
[ "MIT" ]
null
null
null
pynetstation_send_tags/pynetstation_send_tags.py
mattmoo/Pynetstation-Plug-In
aba2d312e5543cc5c2100793805acfeff075c59c
[ "MIT" ]
null
null
null
pynetstation_send_tags/pynetstation_send_tags.py
mattmoo/Pynetstation-Plug-In
aba2d312e5543cc5c2100793805acfeff075c59c
[ "MIT" ]
null
null
null
# -*- coding:utf-8 -*-

"""
This file is part of OpenSesame.

OpenSesame is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

OpenSesame is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with OpenSesame.  If not, see <http://www.gnu.org/licenses/>.
"""

from libopensesame.item import item
from libqtopensesame.items.qtautoplugin import qtautoplugin
from openexp.canvas import canvas

blankText = u'Enter Variable Name Here'
blankID = u'****'
35.415929
115
0.594328
ffc7fe1be16dc65b683b9d6a05ef9740a31e195b
42,563
py
Python
ion/simulators/SBE37_SMP_simulator.py
ooici/coi-services
43246f46a82e597345507afd7dfc7373cb346afa
[ "BSD-2-Clause" ]
3
2016-09-20T09:50:06.000Z
2018-08-10T01:41:38.000Z
ion/simulators/SBE37_SMP_simulator.py
ooici/coi-services
43246f46a82e597345507afd7dfc7373cb346afa
[ "BSD-2-Clause" ]
null
null
null
ion/simulators/SBE37_SMP_simulator.py
ooici/coi-services
43246f46a82e597345507afd7dfc7373cb346afa
[ "BSD-2-Clause" ]
2
2016-03-16T22:25:49.000Z
2016-11-26T14:54:21.000Z
#!/usr/bin/env python

__author__ = 'Roger Unwin'

import socket
import time
from time import gmtime, strftime
import datetime
import string
import sys
import random
import asyncore
import thread
import getopt
import select
import os

### default values defined below (b/c class is not yet defined)
#default_port = 4001             # TCP port to run on.
#default_message_rate = 5        # 5 sec between messages when streaming
#default_sim=SBE37_random

########### BASE class here handles SBE37 behaviors
########### see below for subclasses that provide different data values


def usage():
    print "SBE37-SMP Simulator:\n"
    print "This program simulates a SBE37-SMP sensor deployed by \nbeing connected to a MOXA NPort 5410 Serial Device Server."
    print "Available options are:"
    print "  -h, --help : Displays this message"
    print "  -p, --port= : Sets the port to listen on (>1024, default = %s)." % default_port


def get_opts():
    opts, args = getopt.getopt(sys.argv[1:], "c:p:h", ["class=", "port=", "rate="])
    out = {'rate': default_message_rate, 'port': default_port, 'simulator': SBE37_random}
    for o, a in opts:
        if o in ("-c", "--class"):
            out['simulator'] = getattr(sys.modules[__name__], a)
        if o in ("-r", "--rate"):
            out['message_rate'] = int(a)
        elif o in ("-p", "--port"):
            out['port'] = int(a)
        else:
            print 'unknown option: ' + o
    return out


def main():
    try:
        args = get_opts()
    except Exception as e:
        print 'Exception: %s' % e
        usage()
        sys.exit()
    print 'using args: %r' % args
    SBE37_server(sim_class=args['simulator'], host='', port=args['port'], rate=args['rate'])
    try:
        asyncore.loop()
    except:
        sys.exit()  # Be silent when ^c pressed


################################################################################################
##
##  THESE CLASSES generate different sample values for the simulator
#
# return tuple of: temperature, conductivity, pressure, salinity, sound velocity

import math

# vary as sine wave over time
# narrower, valid range to help ensure density can be calculated
#> Valid ranges for conductivity are 0-7 S/m. Typical values we've seen off the Oregon coast are ~35 mS/cm, which converts to ~3.5 S/m.
#>
#> Valid ranges for temperature are -2-40 deg_C. Typical values we've seen off the Oregon coast are between 5 and 20 deg_C. 12 deg_C would be absolutely reasonable.
#>
#> Valid ranges for pressure are 0-7000 dbar. Really, just choose a depth.
#>
#> I would recommend the simulator produce at C of 3.5 S/m, a T of 12 deg_C and a depth of 10 dbar. Apply sine wave functions with some small fraction of random white noise and let it rip.
#>
################################################################################################

default_port = 4001             # TCP port to run on.
default_message_rate = 5        # 5 sec between messages when streaming
default_sim = SBE37_random


if __name__ == '__main__':
    main()
44.94509
405
0.453398
ffc857a75ba7aa5ef44304f6675fe0e78e0727a5
976
py
Python
experiments/centralisation/centralisation.py
MichaelAllen1966/2105_london_acute_stroke_unit
56b710c58b5b6bdf5c03e3fb9ec65c53cd5336ff
[ "MIT" ]
null
null
null
experiments/centralisation/centralisation.py
MichaelAllen1966/2105_london_acute_stroke_unit
56b710c58b5b6bdf5c03e3fb9ec65c53cd5336ff
[ "MIT" ]
null
null
null
experiments/centralisation/centralisation.py
MichaelAllen1966/2105_london_acute_stroke_unit
56b710c58b5b6bdf5c03e3fb9ec65c53cd5336ff
[ "MIT" ]
null
null
null
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

data = pd.read_csv('results.csv')

labels = [1, 2, 3, 4]
width = 0.75
x = np.arange(len(labels))  # the label locations

fig = plt.figure(figsize=(9, 6))

# Number people waiting
ax1 = fig.add_subplot(121)
y1 = data['av_waiting'].values.flatten()
waiting = ax1.bar(x, y1, width, color='b')
ax1.set_ylabel('Average number of patients waiting for ASU bed')
ax1.set_xlabel('ASUs per region')
ax1.set_title('Average number of patients waiting\nfor ASU bed')
ax1.set_xticks(x)
ax1.set_xticklabels(labels)

ax2 = fig.add_subplot(122)
y2 = data['av_waiting_days'].values.flatten()
days = ax2.bar(x, y2, width, color='r')
ax2.set_ylabel('Average waiting time (days)')
ax2.set_xlabel('ASUs per region')
ax2.set_title('Average waiting time\n(days, for patients who have to wait)')
ax2.set_xticks(x)
ax2.set_xticklabels(labels)

plt.tight_layout(pad=2)
plt.savefig('centralisation.png', dpi=300)
plt.show()
21.217391
76
0.731557
ffc9b886976a36f8168389759472ba04ff485037
10,360
py
Python
Case_Study_1.py
Amritha29/Stout_DDA_FULL_STACK_21.github.io
89be2324468dfba2ba9afb378881c6e9e460696b
[ "CC-BY-4.0" ]
null
null
null
Case_Study_1.py
Amritha29/Stout_DDA_FULL_STACK_21.github.io
89be2324468dfba2ba9afb378881c6e9e460696b
[ "CC-BY-4.0" ]
null
null
null
Case_Study_1.py
Amritha29/Stout_DDA_FULL_STACK_21.github.io
89be2324468dfba2ba9afb378881c6e9e460696b
[ "CC-BY-4.0" ]
null
null
null
# -*- coding: utf-8 -*-
"""
Spyder Editor

Amritha Subburayan code for STOUT DDA FULL STACK CASE STUDIES
"""

import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn import preprocessing
from sklearn.metrics import r2_score
import sklearn.metrics as sm

data = pd.read_csv(r'//Users//amrithasubburayan//Downloads//loans_full_schema.csv')

data.info()
data.describe()

#Checking missing values
data.isna().sum()

#removing emp_title, state , num_accounts_120d_past_due , num_accounts_30d_past_due, tax_liens, public_record_bankrupt,
# paid_late_fees , total_collection_amount_ever , current_accounts_delinq , num_historical_failed_to_pay
# num_collections_last_12m, delinq_2y
# check corr and remove this num_mort_accounts

#storing data to other temp
data2 = data

# DATA DESCRIPTION AND ISSUES :
#There are two issues in this dataset :
#1) Missing values 2) Multi-collinearity

#Missing values can be found in the following rows:
#1) emp_title 2) emp_length 3) annual_income_joint 4) verification_income_joint
#5) debt_to_income_joint 6) months_since_last_delinq 7) months_since_90d_late
#8) months_since_last_credit_inquiry 9) num_accounts_120d_past_due

#Multicollinearity can be found between these columns :
#1) installment and loan amount - 0.94 2) balance and loan amount - 0.93
#3) annual income joint and total credit limit - 0.54
#4) Inquiries last 12 m and months since last credit inq - 0.51
#5) total credit lines and open credit lines - 0.76
#6) num satisfactory acc and total credit lines - 0.75
#7) total credit lines and num total cc accounts - 0.77
#8) total credit lines and num open cc accounts - 0.62

#Visualizations

plt.figure(figsize=(40,35))
sns.heatmap(data2.corr(), annot = True, cmap = "RdYlGn")
plt.show()

data2['loan_purpose'].value_counts().plot(kind='bar',color=['gray','red','blue','green','purple','yellow','black']).set_title('Loan Purpose')

data2.groupby('homeownership').verified_income.value_counts().unstack(0).plot.bar()

data2.groupby('homeownership').application_type.value_counts().unstack(0).plot(kind="pie",subplots=True, shadow = True,startangle=90,figsize=(15,10),autopct='%1.1f%%')

plt.scatter(data2['installment'],data2['loan_amount'])

d = data2.groupby('emp_length')
s = []
for key, item in d:
    if (key != 7.0):
        s.append(d.get_group(key)['interest_rate'].mean())

dict1 = {"emp_length": [0,1,2,3,4,5,6,8,9,10], "int_rate": s}
plt.plot(dict1['emp_length'], s)

df = data2['application_type']
data2.groupby('application_type').loan_purpose.value_counts()
data2.groupby('application_type').loan_purpose.value_counts().unstack(0).plot(kind="pie",subplots=True, shadow = True,startangle=90,figsize=(25,20),autopct='%1.1f%%')

#Replacing missing rows
d = data2.groupby('application_type').loan_purpose.value_counts()

#data2["verification_income_joint"] = data2['verification_income_joint'].fillna('Not Verified')
for i in range(0, len(data2["verification_income_joint"])):
    if pd.isna(data2['verification_income_joint'][i]):
        data2['verification_income_joint'][i] = data2['verified_income'][i]

data2["debt_to_income"] = data2['debt_to_income'].fillna(0)

#combining annual income with joint annual income
for i in range(0, len(data2["annual_income_joint"])):
    if pd.isna(data2['annual_income_joint'][i]):
        data2['annual_income_joint'][i] = data2['annual_income'][i]

#combining debt income with joint debt income
for i in range(0, len(data2["debt_to_income_joint"])):
    if pd.isna(data2['debt_to_income_joint'][i]):
        data2['debt_to_income_joint'][i] = data2['debt_to_income'][i]

## Replacing NaN with mean values
data2["months_since_last_credit_inquiry"] = data2['months_since_last_credit_inquiry'].fillna(np.mean(data2["months_since_last_credit_inquiry"]))
data2["emp_length"] = data2['emp_length'].fillna(np.mean(data2["emp_length"]))

#Removing unwanted columns because they contain many zero/missing values which will not impact building a model
data2.drop("emp_title", axis = 1, inplace=True)
data2.drop("state", axis = 1, inplace=True)
data2.drop("num_accounts_120d_past_due", axis = 1, inplace=True)
data2.drop("num_accounts_30d_past_due", axis = 1, inplace=True)
data2.drop("tax_liens", axis = 1, inplace=True)
data2.drop("public_record_bankrupt", axis = 1, inplace=True)
data2.drop("paid_late_fees", axis = 1, inplace=True)
data2.drop("total_collection_amount_ever", axis = 1, inplace=True)
data2.drop("current_accounts_delinq", axis = 1, inplace=True)
data2.drop("num_historical_failed_to_pay", axis = 1, inplace=True)
data2.drop("num_collections_last_12m", axis = 1, inplace=True)
data2.drop("delinq_2y", axis = 1, inplace=True)
data2.drop("verified_income", axis = 1, inplace=True)
data2.drop("annual_income", axis = 1, inplace=True)
data2.drop("debt_to_income", axis = 1, inplace=True)
data2.drop("months_since_90d_late", axis = 1, inplace=True)
data2.drop("months_since_last_delinq", axis = 1, inplace=True)
data2.drop("issue_month", axis = 1, inplace=True)
data2.drop("initial_listing_status", axis = 1, inplace=True)
data2.drop("disbursement_method", axis = 1, inplace=True)
data2.drop("grade", axis = 1, inplace=True)

#removing columns based on correlation
data2.drop("total_credit_limit", axis = 1, inplace=True)
data2.drop("current_installment_accounts", axis = 1, inplace=True)
data2.drop("accounts_opened_24m", axis = 1, inplace=True)
data2.drop("open_credit_lines", axis = 1, inplace=True)
data2.drop("loan_amount", axis = 1, inplace=True)
data2.drop("balance", axis = 1, inplace=True)
data2.drop("paid_principal", axis = 1, inplace=True)
data2.drop("num_satisfactory_accounts", axis = 1, inplace=True)
data2.drop("total_credit_lines", axis = 1, inplace=True)
data2.drop("num_active_debit_accounts", axis = 1, inplace=True)
data2.drop("num_open_cc_accounts", axis = 1, inplace=True)
data2.drop("installment", axis = 1, inplace=True)
data2.drop("num_total_cc_accounts", axis = 1, inplace=True)

#Removing Outliers based on its Quartile and Max Value
data5 = data2

sns.boxplot(data5['paid_interest'])

data5 = data5.loc[data5["inquiries_last_12m"] < 15]
data5 = data5.loc[data5["total_credit_utilized"] < 400000]
data5 = data5.loc[data5["months_since_last_credit_inquiry"] < 20]
data5 = data5.loc[data5["total_debit_limit"] < 220000]
data5 = data5.loc[data5["num_cc_carrying_balance"] < 20]
data5 = data5.loc[data5["num_mort_accounts"] < 10]
data5 = data5.loc[data5["paid_total"] < 35000]
data5 = data5.loc[data5["paid_interest"] < 3000]

# Encoding Categorical Data using LabelEncoder
le = preprocessing.LabelEncoder()
data5['sub_grade'] = le.fit_transform(data5['sub_grade'].values)
data5['verification_income_joint'] = le.fit_transform(data5['verification_income_joint'].values)
data5['loan_status'] = le.fit_transform(data5['loan_status'].values)
data5['loan_purpose'] = le.fit_transform(data5['loan_purpose'].values)
data5['application_type'] = le.fit_transform(data5['application_type'].values)
data5['homeownership'] = le.fit_transform(data5['homeownership'].values)

data5 = data5.reindex(columns=['emp_length', 'homeownership', 'annual_income_joint',
                               'verification_income_joint', 'debt_to_income_joint',
                               'earliest_credit_line', 'inquiries_last_12m',
                               'total_credit_utilized', 'months_since_last_credit_inquiry',
                               'total_debit_limit', 'num_cc_carrying_balance',
                               'num_mort_accounts', 'account_never_delinq_percent',
                               'loan_purpose', 'application_type', 'term', 'sub_grade',
                               'loan_status', 'paid_total', 'paid_interest', 'interest_rate'])

X = data5.iloc[:, :-1].values
y = data5.iloc[:, -1].values
y = y.reshape(len(y), 1)

#Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
sc_y = StandardScaler()
X = sc_X.fit_transform(X)
y = sc_y.fit_transform(y)

#Train Test Split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)

#Modelling the Data

#Support Vector Regression
from sklearn.svm import SVR
regressor_SVM = SVR(kernel = 'rbf')
regressor_SVM.fit(X_train, y_train)

#For Training Data
SVR_train_pred = regressor_SVM.predict(X_train)
score2 = r2_score(y_train, SVR_train_pred)
score2

print("Mean absolute error =", round(sm.mean_absolute_error(y_train, SVR_train_pred), 2))
print("Mean squared error =", round(sm.mean_squared_error(y_train, SVR_train_pred), 2))
print("Median absolute error =", round(sm.median_absolute_error(y_train, SVR_train_pred), 2))
print("Explain variance score =", round(sm.explained_variance_score(y_train, SVR_train_pred), 2))

#For Testing data
SVR_test_pred = regressor_SVM.predict(X_test)
score3 = r2_score(y_test, SVR_test_pred)
score3

print("Mean absolute error =", round(sm.mean_absolute_error(y_test, SVR_test_pred), 2))
print("Mean squared error =", round(sm.mean_squared_error(y_test, SVR_test_pred), 2))
print("Median absolute error =", round(sm.median_absolute_error(y_test, SVR_test_pred), 2))
print("Explain variance score =", round(sm.explained_variance_score(y_test, SVR_test_pred), 2))

#Random Forest Model
from sklearn.ensemble import RandomForestRegressor
regressor1 = RandomForestRegressor(n_estimators = 10, random_state = 0)
regressor1.fit(X_train, y_train)

#For Training Data
random_train_pred = regressor1.predict(X_train)
score1 = r2_score(y_train, random_train_pred)
score1

print("Mean absolute error =", round(sm.mean_absolute_error(y_train, random_train_pred), 2))
print("Mean squared error =", round(sm.mean_squared_error(y_train, random_train_pred), 2))
print("Median absolute error =", round(sm.median_absolute_error(y_train, random_train_pred), 2))
print("Explain variance score =", round(sm.explained_variance_score(y_train, random_train_pred), 2))

#For Testing Data
random_test_pred = regressor1.predict(X_test)
score = r2_score(y_test, random_test_pred)
score

print("Mean absolute error =", round(sm.mean_absolute_error(y_test, random_test_pred), 2))
print("Mean squared error =", round(sm.mean_squared_error(y_test, random_test_pred), 2))
print("Median absolute error =", round(sm.median_absolute_error(y_test, random_test_pred), 2))
print("Explain variance score =", round(sm.explained_variance_score(y_test, random_test_pred), 2))
32.888889
167
0.754826
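One caveat worth noting about the script above: because y is standard-scaled before the split, all printed errors are in scaled units, not interest-rate points. A short follow-up sketch, reusing sc_y, y_test and SVR_test_pred from the script, that undoes the target scaling before reporting an error:

# Hypothetical follow-up: invert the target scaling so MAE is in rate points.
pred_rate = sc_y.inverse_transform(SVR_test_pred.reshape(-1, 1))
true_rate = sc_y.inverse_transform(y_test)
print(abs(pred_rate - true_rate).mean())  # MAE in interest-rate points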
ffcbaba69ba29dbe70293f1d332c038a6aaf91b9
1,373
py
Python
datastore/__init__.py
Swixx/py-datastore
dfa1f9dcc3cc3beac3c3e79d085cb6e89da97a1c
[ "MIT" ]
6
2019-08-04T04:11:36.000Z
2020-02-20T17:10:26.000Z
datastore/__init__.py
Swixx/py-datastore
dfa1f9dcc3cc3beac3c3e79d085cb6e89da97a1c
[ "MIT" ]
23
2019-09-17T11:35:06.000Z
2020-04-07T16:18:15.000Z
datastore/__init__.py
Swixx/py-datastore
dfa1f9dcc3cc3beac3c3e79d085cb6e89da97a1c
[ "MIT" ]
6
2019-08-04T02:02:25.000Z
2020-03-01T15:43:41.000Z
""" Datastore is a generic layer of abstraction for data store and database access. It is a **simple** API with the aim to enable application development in a datastore-agnostic way, allowing datastores to be swapped seamlessly without changing application code. Thus, one can leverage different datastores with different strengths without committing the application to one datastore throughout its lifetime. """ __version__ = "0.3.6" __author__ = "Juan Batiz-Benet, Alexander Schlarb" __email__ = "juan@benet.ai, alexander@ninetailed.ninja" __all__ = ( "Key", "Namespace", "BinaryNullDatastore", "BinaryDictDatastore", "ObjectNullDatastore", "ObjectDictDatastore", "Query", "Cursor", "SerializerAdapter", "abc", "typing", "util" ) # import core.key from .core.key import Key from .core.key import Namespace # import core.binarystore, core.objectstore from .core.binarystore import NullDatastore as BinaryNullDatastore from .core.binarystore import DictDatastore as BinaryDictDatastore from .core.objectstore import NullDatastore as ObjectNullDatastore from .core.objectstore import DictDatastore as ObjectDictDatastore # import core.query from .core.query import Query from .core.query import Cursor # import core.serialize from .core.serialize import SerializerAdapter ### Exposed submodules ### from . import abc from . import typing from . import util
29.212766
79
0.79024
ffce3c914809fe508a87b5cc18e2cdab125e42d4
402
py
Python
public_goods_str_nonoise/tests.py
bocchan/costly
ba52f82e36e28012a63a78805963bdf384679955
[ "BSD-3-Clause" ]
null
null
null
public_goods_str_nonoise/tests.py
bocchan/costly
ba52f82e36e28012a63a78805963bdf384679955
[ "BSD-3-Clause" ]
null
null
null
public_goods_str_nonoise/tests.py
bocchan/costly
ba52f82e36e28012a63a78805963bdf384679955
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*-
from __future__ import division

import random

from otree.common import Currency as c, currency_range

from . import views
from ._builtin import Bot
from .models import Constants
18.272727
54
0.691542
ffd1926ccd96f4b70e990d54bad23c4b64c849e9
2,531
py
Python
cloudianapi/tools/statistics.py
romerojunior/cloudian-api
f17b45653a0e3e27a78d0d6bdc094ec6ab521550
[ "Apache-2.0" ]
11
2017-11-01T17:48:10.000Z
2020-08-25T04:29:17.000Z
cloudianapi/tools/statistics.py
romerojunior/cloudian-api
f17b45653a0e3e27a78d0d6bdc094ec6ab521550
[ "Apache-2.0" ]
5
2017-11-10T12:46:44.000Z
2019-09-18T07:18:19.000Z
cloudianapi/tools/statistics.py
romerojunior/cloudian-api
f17b45653a0e3e27a78d0d6bdc094ec6ab521550
[ "Apache-2.0" ]
7
2018-01-26T20:08:37.000Z
2021-05-26T14:32:06.000Z
#!/usr/bin/env python
# -*- coding:utf8 -*-

# Copyright 2017, Schuberg Philis BV
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

# Romero Galiza Jr. - rgaliza@schubergphilis.com

""" This is not part of the Admin API, but it incorporates additional
tooling to support statistical analysis of monitored data within a
cluster, data center or node """


def get_hs_used_kb(node):
    """ Receives a node monitor JSON string and returns a list containing
    the used disk space in KB for each hyperstore disk.

    :param node: an iterable object
    :type node: dict
    :rtype: list
    """
    if 'disksInfo' not in node:
        raise TypeError('Unsupported input.')

    # filter function to select only HyperStore disks:
    f = (lambda n: True if 'HS' in n['storageUse'] else False)

    hs_disks = filter(
        f, (d for d in node['disksInfo']['disks'])
    )

    return [abs(int(disk['diskUsedKb'])) for disk in hs_disks]


def disk_avg_abs_deviation(node):
    """ Returns the average absolute deviation for a given set of disks of
    a given node based entirely on used capacity (expressed in KB).

    Particularly useful if you want to visualize the average difference
    between all disks in a given node. The closer the result is to zero
    the better (less deviation = balanced usage).

    :param node: an iterable object
    :type node: dict
    :rtype: int
    """
    try:
        disk_usage = get_hs_used_kb(node)
    except TypeError:
        return 0

    mean = (sum(disk_usage) / len(disk_usage))

    deviation = [abs(kb_used - mean) for kb_used in disk_usage]

    return sum(deviation)/len(deviation)
34.202703
79
0.66772
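A quick usage sketch for the two helpers above, with a hand-built node dict in the shape the code expects (two HyperStore disks and one non-HS disk; the field values are illustrative only):

node = {'disksInfo': {'disks': [
    {'storageUse': 'HS', 'diskUsedKb': '1000'},
    {'storageUse': 'HS', 'diskUsedKb': '3000'},
    {'storageUse': 'OS', 'diskUsedKb': '500'},   # ignored: not a HyperStore disk
]}}

print(get_hs_used_kb(node))            # [1000, 3000]
print(disk_avg_abs_deviation(node))    # mean |x - 2000| = 1000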
ffd4de322115b22ae4e36e0be2d07a40743376b4
1,340
py
Python
users/models.py
connorgannaway/dockmate
040d44cac896aabc1488f3ed9d59b417e20719d8
[ "MIT" ]
null
null
null
users/models.py
connorgannaway/dockmate
040d44cac896aabc1488f3ed9d59b417e20719d8
[ "MIT" ]
null
null
null
users/models.py
connorgannaway/dockmate
040d44cac896aabc1488f3ed9d59b417e20719d8
[ "MIT" ]
null
null
null
from os import name
from django.db import models
from django.contrib.auth.models import User
from PIL import Image

#Model classes are table objects in a database.
#Each variable is a column with its datatype.
#The __str__ method defines the name of an object (row) in a database table.

#The Profile model is meant to be used as an extension to the User model,
#so that users can have a profile picture and be connected to a company.

"""
    def save(self, *args, **kwargs):
        super().save(*args, **kwargs)

        image = Image.open(self.picture.path)
        if image.width > 300 or image.height > 300:
            image.thumbnail((300, 300))
            image.save(self.picture.path)
"""
37.222222
92
0.709701
ffd544a103259a41233ed3e0af2e2d453a43568d
1,446
py
Python
E_ledproject.py
randomstring/raspberrypi
fe226ce33f116480bfea8f258fdffa1fd96e379c
[ "MIT" ]
null
null
null
E_ledproject.py
randomstring/raspberrypi
fe226ce33f116480bfea8f258fdffa1fd96e379c
[ "MIT" ]
null
null
null
E_ledproject.py
randomstring/raspberrypi
fe226ce33f116480bfea8f258fdffa1fd96e379c
[ "MIT" ]
null
null
null
#!/usr/bin/env python

import RPi.GPIO as GPIO

GPIO.setwarnings(False)

led_color_gpio = {
    'yellow': 0,
    'orange': 2,
    'red': 3,
    'green': 4,
    'blue': 5,
    'white': 6
}

buttons_gpio = {
    'red': 28,
    'blue': 29,
}

gpio_to_bcm = {
    0: 17,
    1: 18,
    2: 27,
    3: 22,
    4: 23,
    5: 24,
    6: 25,
    21: 5,
    22: 6,
    23: 13,
    24: 19,
    25: 26,
    26: 12,
    27: 16,
    28: 20,
    29: 21,
}

GPIO.setmode(GPIO.BCM)

for gpio in led_color_gpio.values():
    bcm_pin = gpio_to_bcm[gpio]
    GPIO.setup(bcm_pin, GPIO.OUT)
    GPIO.output(bcm_pin, True)

print("Type 'quit' to quit")

while True:
    user_input = raw_input("Enter Color and on/off: ")
    tokens = user_input.split()
    if len(tokens) < 1:
        continue
    color = tokens[0]
    if color == "quit":
        break
    onoff = 1
    if len(tokens) > 1:
        onoff = tokens[1]
        if onoff == "on":
            onoff = 1
        elif onoff == "off":
            onoff = 0
        else:
            onoff = int(onoff)
    led_color(color, onoff)

for gpio in led_color_gpio.values():
    bcm_pin = gpio_to_bcm[gpio]
    GPIO.output(bcm_pin, True)
18.075
54
0.538728
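led_color() is called from the input loop above but its definition is not part of this excerpt. A minimal stub consistent with the lookup tables (look up the color's GPIO number, map it to a BCM pin, drive the pin; the LEDs appear to be active-low, since setup drives every pin True at start, presumably off):

def led_color(color, onoff):
    # Hypothetical implementation inferred from the tables above.
    if color not in led_color_gpio:
        print("unknown color: " + color)
        return
    bcm_pin = gpio_to_bcm[led_color_gpio[color]]
    GPIO.output(bcm_pin, not onoff)  # active-low assumption: False lights the LED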
ffd73066eb937a59b32d4daec9ba6f8807fa09da
5,551
py
Python
utils/StartMOOS.py
ianfixes/MOOS-python-utils
1c34f3b8cde4fdcee48a8ee128a3c160eb17d722
[ "WTFPL" ]
3
2015-07-09T17:51:20.000Z
2016-04-14T23:06:04.000Z
utils/StartMOOS.py
ifreecarve/MOOS-python-utils
1c34f3b8cde4fdcee48a8ee128a3c160eb17d722
[ "WTFPL" ]
null
null
null
utils/StartMOOS.py
ifreecarve/MOOS-python-utils
1c34f3b8cde4fdcee48a8ee128a3c160eb17d722
[ "WTFPL" ]
3
2015-03-31T04:18:21.000Z
2016-10-22T04:55:16.000Z
#!/usr/bin/env python

###########################################################################
#
#    Written in 2009 by Ian Katz <ijk5@mit.edu>
#    Terms: WTFPL (http://sam.zoy.org/wtfpl/)
#           See COPYING and WARRANTY files included in this distribution
#
###########################################################################

# this program launches MOOS processes and verifies that they're up.
#  this sequential launch method is gentler to low-horsepower CPUs.
#
# It takes 2 command line arguments:
#  1. the MOOS config file to be used
#  2. OPTIONALLY the working directory that all apps should launch from

import os
import sys
import time

#MAKE ANY CHANGES HERE

if __name__ == "__main__":

    if len(sys.argv) < 2:
        print "Usage: " + sys.argv[0] + "<MOOS config file name> [working directory]"
        exit(1)

    #The app name, and -- optionally -- its ID string
    moosProcList = desired_MOOS_procs()

    moosConfigFile = sys.argv[1]

    if len(sys.argv) == 3:
        #we want to run all processes in this directory
        os.chdir(sys.argv[2])

    print "Starting MOOSDB...",
    start_MOOS_process_in_new_screen("MOOSDB", moosConfigFile)

    #see if we can use pyMOOS to intelligently launch processes
    try:
        import pyMOOS
        pi = pyMOOS.PI  # force an error
    except:
        #fall back on basic implementation
        print "Done"
        print "\nNo pyMOOS detected... falling back on timed launch sequence\n"
        start_all_MOOSProcesses(moosProcList, moosConfigFile, 5.0)
        exit(0)

    #wait for connect
    myComms = pyMOOS.CMOOSCommClient()
    if myComms.Run("localhost", 9000, "StartMOOS.py[" + os.uname()[1] + "]"):
        print "Done!"
        print "\n\nStarting MOOS processes the SCHMANCY way!\n"
    else:
        print "Failed to connect to local MOOSDB."
        print "You may want to 'killall screen' and try again."
        exit(1)

    print "Connecting to MOOSDB...",
    while not myComms.IsConnected():
        tick()
    print "Done!"

    #start each process and wait for it to connect
    start_MOOS_processes_sequentially(moosProcList, moosConfigFile, myComms)

    print "\nAll MOOS processes successfully launched!"
26.816425
85
0.572149
ffd92d23d660d2a840a6dec51a3209da982b029c
1,172
py
Python
word_vectorizer/tests/unittest/model_downloading/test_gensimModelDownloader.py
RodSernaPerez/WordVectorizer
097b2ccfc284b39ad43f56047ee25e393b7525ec
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
word_vectorizer/tests/unittest/model_downloading/test_gensimModelDownloader.py
RodSernaPerez/WordVectorizer
097b2ccfc284b39ad43f56047ee25e393b7525ec
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
word_vectorizer/tests/unittest/model_downloading/test_gensimModelDownloader.py
RodSernaPerez/WordVectorizer
097b2ccfc284b39ad43f56047ee25e393b7525ec
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
from unittest import TestCase
from unittest.mock import patch

from word_vectorizer.constants import Constants
from word_vectorizer.model_downloading.gensim_model_downloader import \
    GensimModelDownloader
41.857143
77
0.728669
ffd92f6660bddf66dfe789ef939a022a436eddba
26,840
py
Python
results/generate_result.py
riscv-android-src/platform-test-mlts-benchmark
fc22878823896b81eb8b7e63e952a13f9675edcb
[ "Apache-2.0" ]
null
null
null
results/generate_result.py
riscv-android-src/platform-test-mlts-benchmark
fc22878823896b81eb8b7e63e952a13f9675edcb
[ "Apache-2.0" ]
null
null
null
results/generate_result.py
riscv-android-src/platform-test-mlts-benchmark
fc22878823896b81eb8b7e63e952a13f9675edcb
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python3
#
# Copyright 2018, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""MLTS benchmark result generator.

Reads a CSV produced by MLTS benchmark and generates an HTML page with a
results summary.

Usage:
  generate_result [csv input file] [html output file]
"""

import argparse
import collections
import csv
import os
import re
import math

LatencyResult = collections.namedtuple(
    'LatencyResult',
    ['iterations', 'total_time_sec', 'time_freq_start_sec',
     'time_freq_step_sec', 'time_freq_sec'])

COMPILATION_TYPES = ['compile_without_cache', 'save_to_cache',
                     'prepare_from_cache']
BASELINE_COMPILATION_TYPE = COMPILATION_TYPES[0]
CompilationResult = collections.namedtuple(
    'CompilationResult', ['cache_size_bytes'] + COMPILATION_TYPES)

BenchmarkResult = collections.namedtuple(
    'BenchmarkResult',
    ['name', 'backend_type', 'inference_latency', 'max_single_error',
     'testset_size', 'evaluator_keys', 'evaluator_values',
     'validation_errors', 'compilation_results'])

ResultsWithBaseline = collections.namedtuple(
    'ResultsWithBaseline', ['baseline', 'other'])

BASELINE_BACKEND = 'TFLite_CPU'
KNOWN_GROUPS = [
    (re.compile('mobilenet_v1.*quant.*'), 'MobileNet v1 Quantized'),
    (re.compile('mobilenet_v1.*'), 'MobileNet v1 Float'),
    (re.compile('mobilenet_v2.*quant.*'), 'MobileNet v2 Quantized'),
    (re.compile('mobilenet_v2.*'), 'MobileNet v2 Float'),
    (re.compile('mobilenet_v3.*uint8.*'), 'MobileNet v3 Quantized'),
    (re.compile('mobilenet_v3.*'), 'MobileNet v3 Float'),
    (re.compile('tts.*'), 'LSTM Text-to-speech'),
    (re.compile('asr.*'), 'LSTM Automatic Speech Recognition'),
]


def parse_csv_input(input_filename):
  """Parse input CSV file, returns: (benchmarkInfo, list of BenchmarkResult)."""
  with open(input_filename, 'r') as csvfile:
    parser = BenchmarkResultParser(csvfile)

    # First line contains device info
    benchmark_info = parser.next()

    results = []
    while parser.next():
      results.append(parser.read_benchmark_result())

    return (benchmark_info, results)


def group_results(results):
  """Group list of results by their name/backend, returns list of lists."""
  # Group by name
  groupings = collections.defaultdict(list)
  for result in results:
    groupings[result.name].append(result)

  # Find baseline for each group, make ResultsWithBaseline for each name
  groupings_baseline = {}
  for name, results in groupings.items():
    baseline = next(filter(lambda x: x.backend_type == BASELINE_BACKEND,
                           results))
    other = sorted(filter(lambda x: x is not baseline, results),
                   key=lambda x: x.backend_type)
    groupings_baseline[name] = ResultsWithBaseline(
        baseline=baseline,
        other=other)

  # Merge ResultsWithBaseline for known groups
  known_groupings_baseline = collections.defaultdict(list)
  for name, results_with_bl in sorted(groupings_baseline.items()):
    group_name = name
    for known_group in KNOWN_GROUPS:
      if known_group[0].match(results_with_bl.baseline.name):
        group_name = known_group[1]
        break
    known_groupings_baseline[group_name].append(results_with_bl)

  # Turn into a list sorted by name
  groupings_list = []
  for name, results_wbl in sorted(known_groupings_baseline.items()):
    groupings_list.append((name, results_wbl))
  return groupings_list


def get_frequency_graph_min_max(latencies):
  """Get min and max times of latencies frequency."""
  mins = []
  maxs = []
  for latency in latencies:
    mins.append(latency.time_freq_start_sec)
    to_add = len(latency.time_freq_sec) * latency.time_freq_step_sec
    maxs.append(latency.time_freq_start_sec + to_add)
  return min(mins), max(maxs)


def get_frequency_graph(time_freq_start_sec, time_freq_step_sec,
                        time_freq_sec, start_sec, end_sec):
  """Generate input x/y data for latency frequency graph."""
  left_to_pad = (int((time_freq_start_sec - start_sec) / time_freq_step_sec)
                 if time_freq_step_sec != 0
                 else math.inf)
  end_time = time_freq_start_sec + len(time_freq_sec) * time_freq_step_sec
  right_to_pad = (int((end_sec - end_time) / time_freq_step_sec)
                  if time_freq_step_sec != 0
                  else math.inf)

  # After padding more than 64 values, graphs start to look messy,
  # bail out in that case.
  if (left_to_pad + right_to_pad) < 64:
    left_pad = (['{:.2f}ms'.format(
        (start_sec + x * time_freq_step_sec) * 1000.0)
                 for x in range(left_to_pad)], [0] * left_to_pad)
    right_pad = (['{:.2f}ms'.format(
        (end_time + x * time_freq_step_sec) * 1000.0)
                  for x in range(right_to_pad)], [0] * right_to_pad)
  else:
    left_pad = [[], []]
    right_pad = [[], []]

  data = (['{:.2f}ms'.format(
      (time_freq_start_sec + x * time_freq_step_sec) * 1000.0)
           for x in range(len(time_freq_sec))], time_freq_sec)

  return (left_pad[0] + data[0] + right_pad[0],
          left_pad[1] + data[1] + right_pad[1])


def is_topk_evaluator(evaluator_keys):
  """Are these evaluator keys from TopK evaluator?"""
  return (len(evaluator_keys) == 5 and
          evaluator_keys[0] == 'top_1' and
          evaluator_keys[1] == 'top_2' and
          evaluator_keys[2] == 'top_3' and
          evaluator_keys[3] == 'top_4' and
          evaluator_keys[4] == 'top_5')


def is_melceplogf0_evaluator(evaluator_keys):
  """Are these evaluator keys from MelCepLogF0 evaluator?"""
  return (len(evaluator_keys) == 2 and
          evaluator_keys[0] == 'max_mel_cep_distortion' and
          evaluator_keys[1] == 'max_log_f0_error')


def is_phone_error_rate_evaluator(evaluator_keys):
  """Are these evaluator keys from PhoneErrorRate evaluator?"""
  return (len(evaluator_keys) == 1 and
          evaluator_keys[0] == 'max_phone_error_rate')


def generate_accuracy_headers(result):
  """Accuracy-related headers for result table."""
  if is_topk_evaluator(result.evaluator_keys):
    return ACCURACY_HEADERS_TOPK_TEMPLATE
  elif is_melceplogf0_evaluator(result.evaluator_keys):
    return ACCURACY_HEADERS_MELCEPLOGF0_TEMPLATE
  elif is_phone_error_rate_evaluator(result.evaluator_keys):
    return ACCURACY_HEADERS_PHONE_ERROR_RATE_TEMPLATE
  else:
    return ACCURACY_HEADERS_BASIC_TEMPLATE
  raise ScoreException('Unknown accuracy headers for: ' + str(result))


def generate_accuracy_values(baseline, result):
  """Accuracy-related data for result table."""
  if is_topk_evaluator(result.evaluator_keys):
    val = [float(x) * 100.0 for x in result.evaluator_values]
    if result is baseline:
      topk = [TOPK_BASELINE_TEMPLATE.format(val=x) for x in val]
      return ACCURACY_VALUES_TOPK_TEMPLATE.format(
          top1=topk[0], top2=topk[1], top3=topk[2], top4=topk[3],
          top5=topk[4]
      )
    else:
      base = [float(x) * 100.0 for x in baseline.evaluator_values]
      diff = [a - b for a, b in zip(val, base)]
      topk = [TOPK_DIFF_TEMPLATE.format(
          val=v, diff=d, span=get_diff_span(d, 1.0, positive_is_better=True))
              for v, d in zip(val, diff)]
      return ACCURACY_VALUES_TOPK_TEMPLATE.format(
          top1=topk[0], top2=topk[1], top3=topk[2], top4=topk[3],
          top5=topk[4]
      )
  elif is_melceplogf0_evaluator(result.evaluator_keys):
    val = [float(x) for x in
           result.evaluator_values + [result.max_single_error]]
    if result is baseline:
      return ACCURACY_VALUES_MELCEPLOGF0_TEMPLATE.format(
          max_log_f0=MELCEPLOGF0_BASELINE_TEMPLATE.format(
              val=val[0]),
          max_mel_cep_distortion=MELCEPLOGF0_BASELINE_TEMPLATE.format(
              val=val[1]),
          max_single_error=MELCEPLOGF0_BASELINE_TEMPLATE.format(
              val=val[2]),
      )
    else:
      base = [float(x) for x in
              baseline.evaluator_values + [baseline.max_single_error]]
      diff = [a - b for a, b in zip(val, base)]
      v = [MELCEPLOGF0_DIFF_TEMPLATE.format(
          val=v, diff=d, span=get_diff_span(d, 1.0, positive_is_better=False))
           for v, d in zip(val, diff)]
      return ACCURACY_VALUES_MELCEPLOGF0_TEMPLATE.format(
          max_log_f0=v[0],
          max_mel_cep_distortion=v[1],
          max_single_error=v[2],
      )
  elif is_phone_error_rate_evaluator(result.evaluator_keys):
    val = [float(x) for x in
           result.evaluator_values + [result.max_single_error]]
    if result is baseline:
      return ACCURACY_VALUES_PHONE_ERROR_RATE_TEMPLATE.format(
          max_phone_error_rate=PHONE_ERROR_RATE_BASELINE_TEMPLATE.format(
              val=val[0]),
          max_single_error=PHONE_ERROR_RATE_BASELINE_TEMPLATE.format(
              val=val[1]),
      )
    else:
      base = [float(x) for x in
              baseline.evaluator_values + [baseline.max_single_error]]
      diff = [a - b for a, b in zip(val, base)]
      v = [PHONE_ERROR_RATE_DIFF_TEMPLATE.format(
          val=v, diff=d, span=get_diff_span(d, 1.0, positive_is_better=False))
           for v, d in zip(val, diff)]
      return ACCURACY_VALUES_PHONE_ERROR_RATE_TEMPLATE.format(
          max_phone_error_rate=v[0],
          max_single_error=v[1],
      )
  else:
    return ACCURACY_VALUES_BASIC_TEMPLATE.format(
        max_single_error=result.max_single_error,
    )
  raise ScoreException('Unknown accuracy values for: ' + str(result))


def generate_avg_ms(baseline, latency):
  """Generate average latency value."""
  if latency is None:
    latency = baseline
  result_avg_ms = (latency.total_time_sec / latency.iterations)*1000.0
  if latency is baseline:
    return LATENCY_BASELINE_TEMPLATE.format(val=result_avg_ms)

  baseline_avg_ms = (baseline.total_time_sec / baseline.iterations)*1000.0
  diff = (result_avg_ms/baseline_avg_ms - 1.0) * 100.0
  diff_val = result_avg_ms - baseline_avg_ms
  return LATENCY_DIFF_TEMPLATE.format(
      val=result_avg_ms,
      diff=diff,
      diff_val=diff_val,
      span=get_diff_span(diff, same_delta=1.0, positive_is_better=False))


def generate_latency_graph_entry(tag, latency, tmin, tmax):
  """Generate a single latency graph."""
  return LATENCY_GRAPH_ENTRY_TEMPLATE.format(
      tag=tag,
      i=id(latency),
      freq_data=get_frequency_graph(latency.time_freq_start_sec,
                                    latency.time_freq_step_sec,
                                    latency.time_freq_sec,
                                    tmin, tmax))


def generate_latency_graphs_group(tags, latencies):
  """Generate a group of latency graphs with the same tmin and tmax."""
  tmin, tmax = get_frequency_graph_min_max(latencies)
  return ''.join(
      generate_latency_graph_entry(tag, latency, tmin, tmax)
      for tag, latency in zip(tags, latencies))


def generate_inference_latency_graph_entry(results_with_bl):
  """Generate a group of latency graphs for inference latencies."""
  results = [results_with_bl.baseline] + results_with_bl.other
  tags = [result.backend_type for result in results]
  latencies = [result.inference_latency for result in results]
  return generate_latency_graphs_group(tags, latencies)


def generate_compilation_latency_graph_entry(results_with_bl):
  """Generate a group of latency graphs for compilation latencies."""
  tags = [
      result.backend_type + ', ' + snake_case_to_title(type)
      for result in results_with_bl.other
      for type in COMPILATION_TYPES
      if getattr(result.compilation_results, type)
  ]
  latencies = [
      getattr(result.compilation_results, type)
      for result in results_with_bl.other
      for type in COMPILATION_TYPES
      if getattr(result.compilation_results, type)
  ]
  return generate_latency_graphs_group(tags, latencies)


def generate_validation_errors(entries_group):
  """Generate validation errors table."""
  errors = []
  for result_and_bl in entries_group:
    for result in [result_and_bl.baseline] + result_and_bl.other:
      for error in result.validation_errors:
        errors.append((result.name, result.backend_type, error))

  if errors:
    return VALIDATION_ERRORS_TEMPLATE.format(
        results=''.join(
            VALIDATION_ERRORS_ENTRY_TEMPLATE.format(
                name=name,
                backend=backend,
                error=error) for name, backend, error in errors))
  return ''


def generate_result(benchmark_info, data):
  """Turn list of results into HTML."""
  return MAIN_TEMPLATE.format(
      jsdeps=getchartjs_source(),
      device_info=DEVICE_INFO_TEMPLATE.format(
          benchmark_time=benchmark_info[0],
          device_info=benchmark_info[1],
      ),
      results_list=''.join((
          RESULT_GROUP_TEMPLATE.format(
              group_name=entries_name,
              accuracy_headers=generate_accuracy_headers(
                  entries_group[0].baseline),
              results=''.join(
                  RESULT_ENTRY_WITH_BASELINE_TEMPLATE.format(
                      baseline=generate_result_entry(
                          result_and_bl.baseline, None),
                      other=''.join(
                          generate_result_entry(
                              result_and_bl.baseline, x)
                          for x in result_and_bl.other)
                  ) for result_and_bl in entries_group),
              validation_errors=generate_validation_errors(entries_group),
              latency_graphs=LATENCY_GRAPHS_TEMPLATE.format(
                  results=''.join(
                      LATENCY_GRAPH_ENTRY_GROUP_TEMPLATE.format(
                          name=result_and_bl.baseline.name,
                          results=generate_inference_latency_graph_entry(
                              result_and_bl)
                      ) for result_and_bl in entries_group)
              ),
              compilation_results=''.join(
                  COMPILATION_RESULT_ENTRIES_TEMPLATE.format(
                      entries=''.join(
                          generate_compilation_result_entry(x)
                          for x in result_and_bl.other)
                  ) for result_and_bl in entries_group),
              compilation_latency_graphs=LATENCY_GRAPHS_TEMPLATE.format(
                  results=''.join(
                      LATENCY_GRAPH_ENTRY_GROUP_TEMPLATE.format(
                          name=result_and_bl.baseline.name,
                          results=generate_compilation_latency_graph_entry(
                              result_and_bl)
                      ) for result_and_bl in entries_group)
              ),
          ) for entries_name, entries_group in group_results(data))
      ))


# -----------------
# Templates below

MAIN_TEMPLATE = """<!doctype html>
<html lang='en-US'>
<head>
<meta http-equiv='Content-Type' content='text/html; charset=utf-8'>
<script src='https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js'></script>
<script>{jsdeps}</script>
<title>MLTS results</title>
<style>
.results {{ border-collapse: collapse; width: 100%; }}
.results td, .results th {{ border: 1px solid #ddd; padding: 6px; }}
.results tbody.values {{ border-bottom: 8px solid #333; }}
span.better {{ color: #070; }}
span.worse {{ color: #700; }}
span.same {{ color: #000; }}
.results tr:nth-child(even) {{background-color: #eee;}}
.results tr:hover {{background-color: #ddd;}}
.results th {{
  padding: 10px;
  font-weight: bold;
  text-align: left;
  background-color: #333;
  color: white;
}}
.results tr.failed {{ background-color: #ffc4ca; }}
.group {{ padding-top: 25px; }}
.group_name {{ padding-left: 10px; font-size: 140%; font-weight: bold; }}
.section_name {{ padding: 10px; font-size: 120%; font-weight: bold; }}
.latency_results {{ padding: 10px; border: 1px solid #ddd; overflow: hidden; }}
.latency_with_baseline {{ padding: 10px; border: 1px solid #ddd; overflow: hidden; }}
</style>
</head>
<body>
{device_info}
{results_list}
</body>
</html>"""

DEVICE_INFO_TEMPLATE = """<div id='device_info'>
Benchmark for {device_info}, started at {benchmark_time}
</div>"""

RESULT_GROUP_TEMPLATE = """<div class="group">
<div class="group_name">{group_name}</div>
<div class="section_name">Inference results</div>
<table class="results">
 <tr>
   <th>Name</th>
   <th>Backend</th>
   <th>Iterations</th>
   <th>Test set size</th>
   <th>Average latency ms</th>
   {accuracy_headers}
 </tr>
 {results}
</table>
{validation_errors}
{latency_graphs}
<div class="section_name">Compilation results</div>
<table class="results">
 <tr>
   <th rowspan="2">Name</th>
   <th rowspan="2">Backend</th>
   <th colspan="2">Compile Without Cache</th>
   <th colspan="2">Save To Cache</th>
   <th colspan="2">Prepare From Cache</th>
   <th rowspan="2">Cache size bytes</th>
 </tr>
 <tr>
   <th>Iterations</th>
   <th>Average latency ms</th>
   <th>Iterations</th>
   <th>Average latency ms</th>
   <th>Iterations</th>
   <th>Average latency ms</th>
 </tr>
 {compilation_results}
</table>
{compilation_latency_graphs}
</div>"""

VALIDATION_ERRORS_TEMPLATE = """
<table class="results">
 <tr>
   <th>Name</th>
   <th>Backend</th>
   <th>Error</th>
 </tr>
 {results}
</table>"""

VALIDATION_ERRORS_ENTRY_TEMPLATE = """
 <tr class="failed">
   <td>{name}</td>
   <td>{backend}</td>
   <td>{error}</td>
 </tr>
"""

LATENCY_GRAPHS_TEMPLATE = """
<div class="latency_results">
{results}
</div>
<div style="clear: left;"></div>
"""

LATENCY_GRAPH_ENTRY_GROUP_TEMPLATE = """
<div class="latency_with_baseline" style="float: left;">
<b>{name}</b>
{results}
</div>
"""

LATENCY_GRAPH_ENTRY_TEMPLATE = """
<div class="latency_result" style='width: 350px;'>
{tag}
<canvas id='latency_chart{i}' class='latency_chart'></canvas>
<script>
$(function() {{
  var freqData = {{
    labels: {freq_data[0]},
    datasets: [{{
      data: {freq_data[1]},
      backgroundColor: 'rgba(255, 99, 132, 0.6)',
      borderColor: 'rgba(255, 0, 0, 0.6)',
      borderWidth: 1,
    }}]
  }};
  var ctx = $('#latency_chart{i}')[0].getContext('2d');
  window.latency_chart{i} = new Chart(ctx, {{
    type: 'bar',
    data: freqData,
    options: {{
      responsive: true,
      title: {{
        display: false,
        text: 'Latency frequency'
      }},
      legend: {{
        display: false
      }},
      scales: {{
        xAxes: [ {{
          barPercentage: 1.0,
          categoryPercentage: 0.9,
        }}],
        yAxes: [{{
          scaleLabel: {{
            display: false,
            labelString: 'Iterations Count'
          }}
        }}]
      }}
    }}
  }});
}});
</script>
</div>
"""

RESULT_ENTRY_WITH_BASELINE_TEMPLATE = """
 <tbody class="values">
{baseline}
{other}
 </tbody>
"""

RESULT_ENTRY_TEMPLATE = """
 <tr class={row_class}>
   <td>{name}</td>
   <td>{backend}</td>
   <td>{iterations:d}</td>
   <td>{testset_size:d}</td>
   <td>{avg_ms}</td>
   {accuracy_values}
 </tr>"""

COMPILATION_RESULT_ENTRIES_TEMPLATE = """
 <tbody class="values">
{entries}
 </tbody>
"""

COMPILATION_RESULT_ENTRY_TEMPLATE = """
 <tr class={row_class}>
   <td>{name}</td>
   <td>{backend}</td>
   <td>{compile_without_cache_iterations}</td>
   <td>{compile_without_cache_avg_ms}</td>
   <td>{save_to_cache_iterations}</td>
   <td>{save_to_cache_avg_ms}</td>
   <td>{prepare_from_cache_iterations}</td>
   <td>{prepare_from_cache_avg_ms}</td>
   <td>{cache_size}</td>
 </tr>"""

LATENCY_BASELINE_TEMPLATE = """{val:.2f}ms"""
LATENCY_DIFF_TEMPLATE = """{val:.2f}ms <span class='{span}'>
({diff_val:.2f}ms, {diff:.1f}%)</span>"""

ACCURACY_HEADERS_TOPK_TEMPLATE = """
<th>Top 1</th>
<th>Top 2</th>
<th>Top 3</th>
<th>Top 4</th>
<th>Top 5</th>
"""

ACCURACY_VALUES_TOPK_TEMPLATE = """
<td>{top1}</td>
<td>{top2}</td>
<td>{top3}</td>
<td>{top4}</td>
<td>{top5}</td>
"""

TOPK_BASELINE_TEMPLATE = """{val:.3f}%"""
TOPK_DIFF_TEMPLATE = """{val:.3f}% <span class='{span}'>({diff:.1f}%)</span>"""

ACCURACY_HEADERS_MELCEPLOGF0_TEMPLATE = """
<th>Max log(F0) error</th>
<th>Max Mel Cep distortion</th>
<th>Max scalar error</th>
"""

ACCURACY_VALUES_MELCEPLOGF0_TEMPLATE = """
<td>{max_log_f0}</td>
<td>{max_mel_cep_distortion}</td>
<td>{max_single_error}</td>
"""

MELCEPLOGF0_BASELINE_TEMPLATE = """{val:.2E}"""
MELCEPLOGF0_DIFF_TEMPLATE = \
    """{val:.2E} <span class='{span}'>({diff:.1f}%)</span>"""

ACCURACY_HEADERS_PHONE_ERROR_RATE_TEMPLATE = """
<th>Max phone error rate</th>
<th>Max scalar error</th>
"""

ACCURACY_VALUES_PHONE_ERROR_RATE_TEMPLATE = """
<td>{max_phone_error_rate}</td>
<td>{max_single_error}</td>
"""

PHONE_ERROR_RATE_BASELINE_TEMPLATE = """{val:.3f}"""
PHONE_ERROR_RATE_DIFF_TEMPLATE = \
    """{val:.3f} <span class='{span}'>({diff:.1f}%)</span>"""

ACCURACY_HEADERS_BASIC_TEMPLATE = """
<th>Max single scalar error</th>
"""

ACCURACY_VALUES_BASIC_TEMPLATE = """
<td>{max_single_error:.2f}</td>
"""

CHART_JS_FILE = 'Chart.bundle.min.js'

if __name__ == '__main__':
  main()
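# Hedged sketch (main() is referenced above but its definition is missing
# from this excerpt): the entry point presumably parses the input CSV and
# writes the rendered HTML, roughly along these lines. Argument names below
# are illustrative assumptions, not the original code.
#
#   def main():
#     parser = argparse.ArgumentParser(description='Generate MLTS result page')
#     parser.add_argument('input_csv')
#     parser.add_argument('output_html')
#     args = parser.parse_args()
#     benchmark_info, results = parse_csv_input(args.input_csv)
#     with open(args.output_html, 'w') as out:
#       out.write(generate_result(benchmark_info, results))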
31.613663
99
0.656222
ffda91245aed33f9125784b3f0d5a73c6224af00
6,975
py
Python
ampel/ztf/dev/DevSkyPortalClient.py
AmpelProject/Ampel-ZTF
7f9736a7be3aa526571004716160cae2a800e410
[ "BSD-3-Clause" ]
1
2021-03-11T15:39:28.000Z
2021-03-11T15:39:28.000Z
ampel/ztf/dev/DevSkyPortalClient.py
AmpelProject/Ampel-ZTF
7f9736a7be3aa526571004716160cae2a800e410
[ "BSD-3-Clause" ]
18
2021-08-02T17:11:25.000Z
2022-01-11T16:20:04.000Z
ampel/ztf/dev/DevSkyPortalClient.py
AmpelProject/Ampel-ZTF
7f9736a7be3aa526571004716160cae2a800e410
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File:                Ampel-ZTF/ampel/ztf/dev/DevSkyPortalClient.py
# Author:              Jakob van Santen <jakob.van.santen@desy.de>
# Date:                16.09.2020
# Last Modified Date:  16.09.2020
# Last Modified By:    Jakob van Santen <jakob.van.santen@desy.de>

import gzip
import io
from collections import defaultdict
from datetime import datetime
from typing import Any
from collections.abc import Sequence, Generator

import numpy as np
import requests
from ampel.protocol.AmpelAlertProtocol import AmpelAlertProtocol
from astropy.io import fits
from astropy.time import Time
from matplotlib.colors import Normalize
from matplotlib.figure import Figure


def render_thumbnail(cutout_data: bytes) -> bytes:
    """
    Render gzipped FITS as PNG
    """
    with gzip.open(io.BytesIO(cutout_data), "rb") as f:
        with fits.open(f) as hdu:
            header = hdu[0].header
            img = np.flipud(hdu[0].data)
    mask = np.isfinite(img)

    fig = Figure(figsize=(1, 1))
    ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
    ax.set_axis_off()
    ax.imshow(
        img,
        # clip pixel values below the median
        norm=Normalize(*np.percentile(img[mask], [0.5, 99.5])),
        aspect="auto",
        origin="lower",
    )

    with io.BytesIO() as buf:
        fig.savefig(buf, dpi=img.shape[0])
        return buf.getvalue()
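# Usage sketch (an illustration, not part of the original module): render a
# gzipped FITS cutout from disk into a PNG thumbnail. File names below are
# hypothetical.
if __name__ == "__main__":
    with open("candid1234_cutoutScience.fits.gz", "rb") as f:
        png_bytes = render_thumbnail(f.read())
    with open("thumbnail.png", "wb") as f:
        f.write(png_bytes)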
36.139896
151
0.531326
ffddb9df1f192b673556f7659d2310d13ba94e89
3,806
py
Python
tools/test_detection_features_converter.py
jialinwu17/caption_vqa
9bbbb580d031a20ba4f18ef14fcd3599b62a482a
[ "MIT" ]
139
2018-03-21T09:39:39.000Z
2021-07-07T14:19:26.000Z
tools/test_detection_features_converter.py
VincentYing/Attention-on-Attention-for-VQA
cbc767541667e9bb32760ac7cd2e822eff232ff5
[ "MIT" ]
4
2018-05-25T05:15:20.000Z
2018-10-11T00:52:14.000Z
tools/test_detection_features_converter.py
VincentYing/Attention-on-Attention-for-VQA
cbc767541667e9bb32760ac7cd2e822eff232ff5
[ "MIT" ]
23
2018-03-22T10:12:35.000Z
2021-02-20T06:18:00.000Z
""" Reads in a tsv file with pre-trained bottom up attention features and stores it in HDF5 format. Also store {image_id: feature_idx} as a pickle file. Hierarchy of HDF5 file: { 'image_features': num_images x num_boxes x 2048 array of features 'image_bb': num_images x num_boxes x 4 array of bounding boxes } """ from __future__ import print_function import os import sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import base64 import csv import h5py import cPickle import numpy as np import utils csv.field_size_limit(sys.maxsize) FIELDNAMES = ['image_id', 'image_w', 'image_h', 'num_boxes', 'boxes', 'features'] infile = 'data/test2015_36/test2015_resnet101_faster_rcnn_genome_36.tsv' test_data_file = 'data/test36.hdf5' test_indices_file = 'data/test36_imgid2idx.pkl' test_ids_file = 'data/test_ids.pkl' feature_length = 2048 num_fixed_boxes = 36 if __name__ == '__main__': h_test = h5py.File(test_data_file, "w") if os.path.exists(test_ids_file): test_imgids = cPickle.load(open(test_ids_file)) else: test_imgids = utils.load_imageid('data/test2015') cPickle.dump(test_imgids, open(test_ids_file, 'wb')) test_indices = {} test_img_features = h_test.create_dataset( 'image_features', (len(test_imgids), num_fixed_boxes, feature_length), 'f') test_img_bb = h_test.create_dataset( 'image_bb', (len(test_imgids), num_fixed_boxes, 4), 'f') test_spatial_img_features = h_test.create_dataset( 'spatial_features', (len(test_imgids), num_fixed_boxes, 6), 'f') test_counter = 0 print("reading tsv...") with open(infile, "r+b") as tsv_in_file: reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames=FIELDNAMES) for item in reader: item['num_boxes'] = int(item['num_boxes']) image_id = int(item['image_id']) image_w = float(item['image_w']) image_h = float(item['image_h']) bboxes = np.frombuffer( base64.decodestring(item['boxes']), dtype=np.float32).reshape((item['num_boxes'], -1)) box_width = bboxes[:, 2] - bboxes[:, 0] box_height = bboxes[:, 3] - bboxes[:, 1] scaled_width = box_width / image_w scaled_height = box_height / image_h scaled_x = bboxes[:, 0] / image_w scaled_y = bboxes[:, 1] / image_h box_width = box_width[..., np.newaxis] box_height = box_height[..., np.newaxis] scaled_width = scaled_width[..., np.newaxis] scaled_height = scaled_height[..., np.newaxis] scaled_x = scaled_x[..., np.newaxis] scaled_y = scaled_y[..., np.newaxis] spatial_features = np.concatenate( (scaled_x, scaled_y, scaled_x + scaled_width, scaled_y + scaled_height, scaled_width, scaled_height), axis=1) if image_id in test_imgids: test_imgids.remove(image_id) test_indices[image_id] = test_counter test_img_bb[test_counter, :, :] = bboxes test_img_features[test_counter, :, :] = np.frombuffer( base64.decodestring(item['features']), dtype=np.float32).reshape((item['num_boxes'], -1)) test_spatial_img_features[test_counter, :, :] = spatial_features test_counter += 1 else: assert False, 'Unknown image id: %d' % image_id if len(test_imgids) != 0: print('Warning: test_image_ids is not empty') cPickle.dump(test_indices, open(test_indices_file, 'wb')) h_test.close() print("done!")
34.6
83
0.618497
ffde4731dad77ca75123679807fabb3875a76017
176
py
Python
src/registration/urls.py
jtrussell/swindle
914f9ddc7b155cf895fc233b9f3f0c1804bf23e3
[ "MIT" ]
1
2021-04-07T20:14:43.000Z
2021-04-07T20:14:43.000Z
src/registration/urls.py
jtrussell/swindle
914f9ddc7b155cf895fc233b9f3f0c1804bf23e3
[ "MIT" ]
null
null
null
src/registration/urls.py
jtrussell/swindle
914f9ddc7b155cf895fc233b9f3f0c1804bf23e3
[ "MIT" ]
null
null
null
from . import views
from django.urls import path

urlpatterns = [
    path('', views.profile, name='profile'),
    path('sign-up', views.sign_up, name='show_sign_up_form')
]
17.6
60
0.681818
ffde4e382f893654ea15768c8c27165eee09e3a4
3,720
py
Python
src/Control/Sign.py
hieuhdh/Multi-tasking-program
2f064a554f647247c84979b7a27f0797d1e1b5af
[ "MIT" ]
null
null
null
src/Control/Sign.py
hieuhdh/Multi-tasking-program
2f064a554f647247c84979b7a27f0797d1e1b5af
[ "MIT" ]
null
null
null
src/Control/Sign.py
hieuhdh/Multi-tasking-program
2f064a554f647247c84979b7a27f0797d1e1b5af
[ "MIT" ]
null
null
null
from tkinter.font import BOLD
from PIL import ImageTk
from tkinter import *
from PIL import Image
from tkinter import messagebox
from Tools.log_db import *
53.913043
265
0.596774
ffdf3cdd0117fb616bc6eff58d4c3d502c8bf807
6,301
py
Python
aydin/it/classic_denoisers/bilateral.py
AhmetCanSolak/aydin
e8bc81ee88c96e0f34986df30a63c96468a45f70
[ "BSD-3-Clause" ]
78
2021-11-08T16:11:23.000Z
2022-03-27T17:51:04.000Z
aydin/it/classic_denoisers/bilateral.py
AhmetCanSolak/aydin
e8bc81ee88c96e0f34986df30a63c96468a45f70
[ "BSD-3-Clause" ]
19
2021-11-08T17:15:40.000Z
2022-03-30T17:46:55.000Z
aydin/it/classic_denoisers/bilateral.py
AhmetCanSolak/aydin
e8bc81ee88c96e0f34986df30a63c96468a45f70
[ "BSD-3-Clause" ]
7
2021-11-09T17:42:32.000Z
2022-03-09T00:37:57.000Z
from functools import partial
from typing import Optional, List, Tuple

import numpy
from numpy.typing import ArrayLike
from skimage.restoration import denoise_bilateral as skimage_denoise_bilateral

from aydin.it.classic_denoisers import _defaults
from aydin.util.crop.rep_crop import representative_crop
from aydin.util.denoise_nd.denoise_nd import extend_nd
from aydin.util.j_invariance.j_invariance import calibrate_denoiser


def calibrate_denoise_bilateral(
    image: ArrayLike,
    bins: int = 10000,
    crop_size_in_voxels: Optional[int] = _defaults.default_crop_size_normal.value,
    optimiser: str = _defaults.default_optimiser.value,
    max_num_evaluations: int = _defaults.default_max_evals_normal.value,
    blind_spots: Optional[List[Tuple[int]]] = _defaults.default_blind_spots.value,
    jinv_interpolation_mode: str = _defaults.default_jinv_interpolation_mode.value,
    display_images: bool = False,
    display_crop: bool = False,
    **other_fixed_parameters,
):
    """
    Calibrates the bilateral denoiser for the given image and returns the
    optimal parameters obtained using the N2S loss.

    Note: it seems that the bilateral filter of scikit-image is broken!

    Parameters
    ----------
    image: ArrayLike
        Image to calibrate denoiser for.

    bins: int
        Number of discrete values for Gaussian weights of color filtering.
        A larger value results in improved accuracy.
        (advanced)

    crop_size_in_voxels: int or None for default
        Number of voxels for crop used to calibrate denoiser.
        Increase this number by factors of two if denoising quality is
        unsatisfactory -- this can be important for very noisy images.
        Values to try are: 65000, 128000, 256000, 320000.
        We do not recommend values higher than 512000.

    optimiser: str
        Optimiser to use for finding the best denoising parameters.
        Can be: 'smart' (default), or 'fast' for a mix of SHGO followed
        by L-BFGS-B.
        (advanced)

    max_num_evaluations: int
        Maximum number of evaluations for finding the optimal parameters.
        Increase this number by factors of two if denoising quality is
        unsatisfactory.

    blind_spots: bool
        List of voxel coordinates (relative to receptive field center) to
        be included in the blind-spot. For example, you can give a list of
        3 tuples: [(0,0,0), (0,1,0), (0,-1,0)] to extend the blind spot to
        cover voxels of relative coordinates: (0,0,0), (0,1,0), and (0,-1,0)
        (advanced) (hidden)

    jinv_interpolation_mode: str
        J-invariance interpolation mode for masking. Can be: 'median' or
        'gaussian'.
        (advanced)

    display_images: bool
        When True the denoised images encountered during optimisation are
        shown
        (advanced) (hidden)

    display_crop: bool
        Displays crop, for debugging purposes...
        (advanced) (hidden)

    other_fixed_parameters: dict
        Any other fixed parameters

    Returns
    -------
    Denoising function, dictionary containing optimal parameters,
    and free memory needed in bytes for computation.
    """

    # Convert image to float if needed:
    image = image.astype(dtype=numpy.float32, copy=False)

    # obtain representative crop, to speed things up...
    crop = representative_crop(
        image, crop_size=crop_size_in_voxels, display_crop=display_crop
    )

    # Parameters to test when calibrating the denoising algorithm
    parameter_ranges = {'sigma_spatial': (0.01, 1), 'sigma_color': (0.01, 1)}

    # Combine fixed parameters:
    other_fixed_parameters = other_fixed_parameters | {'bins': bins}

    # Partial function:
    _denoise_bilateral = partial(denoise_bilateral, **other_fixed_parameters)

    # Calibrate denoiser
    best_parameters = (
        calibrate_denoiser(
            crop,
            _denoise_bilateral,
            mode=optimiser,
            denoise_parameters=parameter_ranges,
            interpolation_mode=jinv_interpolation_mode,
            max_num_evaluations=max_num_evaluations,
            blind_spots=blind_spots,
            display_images=display_images,
        )
        | other_fixed_parameters
    )

    # Memory needed:
    memory_needed = 2 * image.nbytes

    return denoise_bilateral, best_parameters, memory_needed


def denoise_bilateral(
    image: ArrayLike,
    sigma_color: Optional[float] = None,
    sigma_spatial: float = 1,
    bins: int = 10000,
    **kwargs,
):
    """
    Denoises the given image using a
    <a href="https://en.wikipedia.org/wiki/Bilateral_filter">bilateral
    filter</a>. The bilateral filter is an edge-preserving smoothing filter
    that can be used for image denoising. Each pixel value is replaced by a
    weighted average of intensity values from nearby pixels. The weighting is
    inversely related to the pixel distance in space but also in the pixels
    value differences.

    Parameters
    ----------
    image : ArrayLike
        Image to denoise

    sigma_color : float
        Standard deviation for grayvalue/color distance (radiometric
        similarity). A larger value results in averaging of pixels with
        larger radiometric differences. Note, that the image will be
        converted using the `img_as_float` function and thus the standard
        deviation is in respect to the range ``[0, 1]``. If the value is
        ``None`` the standard deviation of the ``image`` will be used.

    sigma_spatial : float
        Standard deviation for range distance. A larger value results in
        averaging of pixels with larger spatial differences.

    bins : int
        Number of discrete values for Gaussian weights of color filtering.
        A larger value results in improved accuracy.

    kwargs: dict
        Other parameters

    Returns
    -------
    Denoised image
    """

    # Convert image to float if needed:
    image = image.astype(dtype=numpy.float32, copy=False)

    _skimage_denoise_bilateral = extend_nd(available_dims=[2])(
        skimage_denoise_bilateral
    )

    return _skimage_denoise_bilateral(
        image,
        sigma_color=sigma_color,
        sigma_spatial=sigma_spatial,
        bins=bins,
        mode='reflect',
        **kwargs,
    )
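# Usage sketch (an assumption, not part of this module): calibrate on a
# small synthetic noisy image, then denoise with the returned parameters.
if __name__ == '__main__':
    rng = numpy.random.default_rng(0)
    noisy = numpy.clip(rng.normal(0.5, 0.1, (64, 64)), 0, 1).astype(numpy.float32)
    denoise, best_parameters, _ = calibrate_denoise_bilateral(noisy)
    denoised = denoise(noisy, **best_parameters)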
32.989529
83
0.690525
ffe13b312ebb3748c1aadfdca895d3557dc9d9a9
1,889
py
Python
pymon/pymon.py
crest42/PyMon
96494cc37f906e6a07388af29b04c559ec72f116
[ "MIT" ]
null
null
null
pymon/pymon.py
crest42/PyMon
96494cc37f906e6a07388af29b04c559ec72f116
[ "MIT" ]
null
null
null
pymon/pymon.py
crest42/PyMon
96494cc37f906e6a07388af29b04c559ec72f116
[ "MIT" ]
null
null
null
import logging
import time

from .exceptions import HostEntryNotValid
from .check import CheckFactory
from .alert import AlertFactory
from .host import Host
from .logging import logger
28.19403
75
0.564849
ffe516953bedc8e02aa7624b4a14d347ba8dad15
52,397
py
Python
ambulance/tests/test_calls.py
aschrist/WebServerAndClient
3aa0af2c444acac88a1b51b4cfd4bb8d0c36e640
[ "BSD-3-Clause" ]
null
null
null
ambulance/tests/test_calls.py
aschrist/WebServerAndClient
3aa0af2c444acac88a1b51b4cfd4bb8d0c36e640
[ "BSD-3-Clause" ]
null
null
null
ambulance/tests/test_calls.py
aschrist/WebServerAndClient
3aa0af2c444acac88a1b51b4cfd4bb8d0c36e640
[ "BSD-3-Clause" ]
null
null
null
import logging
import time

from django.test import Client
from django.conf import settings
from django.urls import reverse
from django.db import IntegrityError
from django.utils import timezone

from rest_framework.parsers import JSONParser
from rest_framework import serializers
from io import BytesIO
import json

from ambulance.models import Call, Patient, AmbulanceCall, CallStatus, CallPriority, \
    AmbulanceUpdate, AmbulanceStatus, Waypoint, Location, LocationType, WaypointStatus, AmbulanceCallStatus
from ambulance.serializers import CallSerializer, AmbulanceCallSerializer, PatientSerializer, \
    AmbulanceUpdateSerializer, WaypointSerializer, LocationSerializer

from emstrack.tests.util import date2iso, point2str

from login.tests.setup_data import TestSetup

logger = logging.getLogger(__name__)
37.533668
121
0.55444
ffe63e2dda8d22501b711fdd07b98a6cfff2ea5a
2,484
py
Python
bot/PythonProject/Commands.py
RamaDev09/CrateBot
34b9f50b88da42cc1c449466402897340ec142df
[ "MIT" ]
null
null
null
bot/PythonProject/Commands.py
RamaDev09/CrateBot
34b9f50b88da42cc1c449466402897340ec142df
[ "MIT" ]
null
null
null
bot/PythonProject/Commands.py
RamaDev09/CrateBot
34b9f50b88da42cc1c449466402897340ec142df
[ "MIT" ]
null
null
null
import os

from bot.TextInput import TextInput
from bot.prompt import color_msg
45.163636
118
0.517311
ffe68f15e3bf96bdad0cec4870fd34ce0d8fbf6c
223
py
Python
src/methods/Addition.py
svanschooten/Flow
c7c158f986f7b108a255cbaa67ec7fff3518b637
[ "MIT" ]
null
null
null
src/methods/Addition.py
svanschooten/Flow
c7c158f986f7b108a255cbaa67ec7fff3518b637
[ "MIT" ]
null
null
null
src/methods/Addition.py
svanschooten/Flow
c7c158f986f7b108a255cbaa67ec7fff3518b637
[ "MIT" ]
null
null
null
from methods.AbstactMethod import AbstractMethod
20.272727
48
0.591928
ffe7a09ec4555bf2573c09777fdb5c2946647fc9
3,914
py
Python
submissions_comments.py
jbell1991/reddit-scraping
73d88501ed0205e78000b9c30780a33186154fda
[ "MIT" ]
null
null
null
submissions_comments.py
jbell1991/reddit-scraping
73d88501ed0205e78000b9c30780a33186154fda
[ "MIT" ]
null
null
null
submissions_comments.py
jbell1991/reddit-scraping
73d88501ed0205e78000b9c30780a33186154fda
[ "MIT" ]
null
null
null
# imports
from decouple import config
import pandas as pd
import praw
import psycopg2
import schedule
from sqlalchemy import create_engine
import time

# automate script to run at the same time everyday
schedule.every().day.at("09:07").do(job)

while True:
    schedule.run_pending()
    time.sleep(1)
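# The job() referenced above is missing from this excerpt. A hypothetical
# placeholder consistent with the imports (praw for scraping, sqlalchemy +
# pandas for storage) might look like this; credentials, subreddit, and
# table name are illustrative assumptions:
def job():
    reddit = praw.Reddit(client_id=config('CLIENT_ID'),
                         client_secret=config('CLIENT_SECRET'),
                         user_agent=config('USER_AGENT'))
    posts = [[s.id, s.title] for s in reddit.subreddit('python').new(limit=10)]
    engine = create_engine(config('DATABASE_URL'))
    pd.DataFrame(posts, columns=['id', 'title']).to_sql(
        'submissions', engine, if_exists='append', index=False)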
32.890756
93
0.667092
ffe7fe43c53e89a050ea85e42fd101e3306b2423
9,139
py
Python
vision_proc/proc_frame.py
SMS-Raiders/First2016
a08eb1fa195bd869f8e7de7761d791e3fcf23d22
[ "BSD-3-Clause" ]
1
2016-03-08T14:39:52.000Z
2016-03-08T14:39:52.000Z
vision_proc/proc_frame.py
SMS-Raiders/First2016
a08eb1fa195bd869f8e7de7761d791e3fcf23d22
[ "BSD-3-Clause" ]
null
null
null
vision_proc/proc_frame.py
SMS-Raiders/First2016
a08eb1fa195bd869f8e7de7761d791e3fcf23d22
[ "BSD-3-Clause" ]
null
null
null
#!/bin/python
#Frame processing and distance estimation for
#goal

#-------------------------------------------------------------------------------
#                                   IMPORTS
#-------------------------------------------------------------------------------
import cv2
import math
import numpy
import sys

#-------------------------------------------------------------------------------
#                                  VARIABLES
#-------------------------------------------------------------------------------
def cvClr( R, G, B ):
    """
    Color array macro
    """
    return( numpy.array( [R,G,B], numpy.uint8 ) )

#=====================================================================
# Approx. The green color range
#=====================================================================
MASK_LOW  = cvClr( 0,   0,   245 )
MASK_HIGH = cvClr( 255, 70,  255 )

#=====================================================================
# Approximate Areas for the goal (Pixels)
#=====================================================================
#MIN_AREA = 250
MIN_AREA = 1600
#MAX_AREA = 4000
MAX_AREA = 5000

#=================================================================
# Numbers Determined from experiment apart from 0 and 20
# Straight on to Goal
# width and height and area are in pixel area
# THIS IS THE COUNTOUR AREA NOT THE CONVEX HULL AREA!
#=================================================================
goal_lkup = [
    { 'dist ft' : 0,  'width' : 200, 'height' : 90, 'area' : 9000, 'ratio w_h' : 1.80 }, #0ft not tested needs to be large
    { 'dist ft' : 7,  'width' : 151, 'height' : 88, 'area' : 4828, 'ratio w_h' : 1.71 },
    { 'dist ft' : 8,  'width' : 141, 'height' : 85, 'area' : 4700, 'ratio w_h' : 1.65 },
    { 'dist ft' : 9,  'width' : 132, 'height' : 81, 'area' : 4300, 'ratio w_h' : 1.62 },
    { 'dist ft' : 10, 'width' : 123, 'height' : 78, 'area' : 3860, 'ratio w_h' : 1.57 },
    { 'dist ft' : 11, 'width' : 114, 'height' : 75, 'area' : 3420, 'ratio w_h' : 1.52 },
    { 'dist ft' : 12, 'width' : 108, 'height' : 73, 'area' : 3120, 'ratio w_h' : 1.47 },
    { 'dist ft' : 13, 'width' : 102, 'height' : 70, 'area' : 2770, 'ratio w_h' : 1.45 },
    { 'dist ft' : 14, 'width' : 96,  'height' : 68, 'area' : 2357, 'ratio w_h' : 1.41 },
    { 'dist ft' : 20, 'width' : 60,  'height' : 35, 'area' : 1000, 'ratio w_h' : 1.30 } ] #20 ft not tested needs to be small

#-------------------------------------------------------------------------------
#                                   CLASSES
#-------------------------------------------------------------------------------
class Point:
    """
    Reconstructed container (the original definition is missing from this
    snippet): a 2-D pixel coordinate used by find_center().
    """
    def __init__(self):
        self.x = 0
        self.y = 0

#-------------------------------------------------------------------------------
#                                  PROCEDURES
#-------------------------------------------------------------------------------
def find_squares( contours, debug=False ):
    """
    Find square shaped objects
    """
    #=================================================================
    # The Minimum and Maximum ratios for width vs height for the goal
    # based on experimental results; goal is approx 1.5:1
    #=================================================================
    MIN_RATIO = 1.3
    MAX_RATIO = 1.8
    ret = []
    for shape in contours:
        x, y, w, h = cv2.boundingRect( shape )
        w_h_ratio = float( w ) / float( h )
        if debug:
            print "Area", (w * h)
            print "Width ", w
            print "Height", h
        if MIN_RATIO < w_h_ratio and w_h_ratio < MAX_RATIO:
            ret.append( shape )
    return( ret )


def filter_area( contours, debug=False ):
    """
    Filter out contours based on area
    """
    ret = []
    for x in contours:
        area = cv2.contourArea( x )
        if area > MIN_AREA and area < MAX_AREA:
            if debug:
                print "Area", area
            ret.append( x )
    return( ret )


def find_center( contours ):
    """
    Find the center of a contour based on moments
    """
    ret = []
    for x in contours:
        M = cv2.moments( x )
        pt = Point()
        pt.x = int( M['m10']/M['m00'] )
        pt.y = int( M['m01']/M['m00'] )
        ret.append( pt )
    return( ret );


def convex_hull_area( contours, debug=False ):
    """
    Find the Area of convex Hulls
    """
    ret_areas = []
    ret_hulls = []
    for c in contours:
        hull = cv2.convexHull( c )
        area = cv2.contourArea( hull )
        ret_areas.append( area )
        ret_hulls.append( hull )
        if( debug ):
            print( "Hull area: {0}".format( area ) )
    return ( ret_areas, ret_hulls )


def angle_from_point( x, img_width=640, fov_angle=44 ):
    """
    Calculate the angle from a point
    """
    return( -( ( img_width / 2 ) - x ) * fov_angle )


def lin_scale( val, x1, y1, x2, y2 ):
    """
    Linearly scale Val to y1 and y2 from x1 and x2 range
    x1 and y1 are low values
    """
    x_range = (x2 - x1)
    new_val = 0
    if x_range is 0:
        new_val = y1
    else:
        y_range = ( y2 - y1 )
        new_val = ( ( ( val - x1 ) * y_range ) / x_range ) + y1
    return new_val


def dist_from_goal( area ):
    """
    Calculates the distance to the Goal based on area, x, y
    Args:
        area: the area in pixels of the target
    Returns:
        Feet from goal
    """
    dist = 99
    prev = goal_lkup[ 0 ]
    for cur in goal_lkup:
        #=============================================================
        # If the area is less than the currently selected area, but
        # greater than the previous area, then the distance is some-
        # where in between. Then do linear interpolation
        #=============================================================
        if area > cur[ 'area' ] and area < prev[ 'area' ]:
            dist = lin_scale( area, cur[ 'area' ], cur[ 'dist ft' ], prev[ 'area' ], prev[ 'dist ft' ] )
            return dist
        prev = cur
    return dist


def proc_frame( frame, debug=False ):
    """
    Process a frame
    """
    #=================================================================
    # Convert to HSV so we can mask more easily
    #=================================================================
    hsv_frame = cv2.cvtColor( frame, cv2.COLOR_BGR2HSV )

    #=================================================================
    # Apply the color mask defined at the top of file
    #=================================================================
    if( debug ):
        hlo = cv2.getTrackbarPos( "H low", "Mask" )
        hhi = cv2.getTrackbarPos( "H hi",  "Mask" )
        slo = cv2.getTrackbarPos( "S low", "Mask" )
        shi = cv2.getTrackbarPos( "S hi",  "Mask" )
        vlo = cv2.getTrackbarPos( "V low", "Mask" )
        vhi = cv2.getTrackbarPos( "V hi",  "Mask" )
        lo = numpy.array( [ hlo, slo, vlo ], numpy.uint8 )
        hi = numpy.array( [ hhi, shi, vhi ], numpy.uint8 )
        color_mask = cv2.inRange( hsv_frame, lo, hi )
    else:
        color_mask = cv2.inRange( hsv_frame, MASK_LOW, MASK_HIGH )

    #=================================================================
    # Apply our color mask
    #=================================================================
    masked_frame = cv2.bitwise_and( hsv_frame, hsv_frame, mask = color_mask )

    #=================================================================
    # Contours stuff
    # First convert to Gray and find the contours
    #=================================================================
    bw_frame = cv2.cvtColor( masked_frame, cv2.COLOR_BGR2GRAY )
    contours, hierarchy = cv2.findContours( bw_frame,
                                            cv2.RETR_TREE,
                                            cv2.CHAIN_APPROX_SIMPLE )

    #=================================================================
    # Filter the contours based on area, convex hull area etc...
    #=================================================================
    draw = filter_area( contours )
    hull_areas, hulls = convex_hull_area( draw )
    squares = find_squares( hulls )
    centers = find_center( squares )

    #=================================================================
    # If debug mode, show the result of the line finding in a GUI
    #=================================================================
    if( debug ):
        #contours
        cv2.drawContours( frame, draw,    -1, ( 0,   255, 0 ), 3 )
        cv2.drawContours( frame, squares, -1, ( 255, 255, 0 ), 3 )
        for i in centers:
            cv2.circle( frame, ( i.x, i.y ), 3, ( 0, 255, 255 ), )
            #print "X = {0} Y = {1}".format( i.x, i.y )
        cv2.imshow( "Goal", frame )
        #cv2.imshow( "Mask", masked_frame )

    return dist_from_goal( squares ), angle_from_point( centers[0].x, len( frame[0] ) )
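# Worked example (illustrative, not part of the original file): lin_scale
# does plain linear interpolation. For a contour area of 4500 px,
# dist_from_goal lands between the 9 ft row (area 4300) and the 8 ft row
# (area 4700), so:
#   lin_scale(4500, 4300, 9, 4700, 8)
#     = ((4500 - 4300) * (8 - 9)) / (4700 - 4300) + 9 = 8.5 feet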
37.454918
135
0.402889
ffeabfb85c362b4fd5f28c9b1e056f66d191fed5
100
py
Python
9.py
sarika228/React-Projects
24c342f71f839c257150f4b5e096c127b51d525c
[ "MIT" ]
null
null
null
9.py
sarika228/React-Projects
24c342f71f839c257150f4b5e096c127b51d525c
[ "MIT" ]
null
null
null
9.py
sarika228/React-Projects
24c342f71f839c257150f4b5e096c127b51d525c
[ "MIT" ]
null
null
null
i=1
while i<=4:
    j=16
    while j>=i:
        print(i,end="")
        j=j-1
    print()
    i=i+1
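# Expected output (illustrative): row i prints the digit i exactly
# (17 - i) times:
#   1111111111111111
#   222222222222222
#   33333333333333
#   4444444444444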
12.5
23
0.39
ffeb87db7651191ea5cf19f49a0c7c9aa356f87d
8,539
py
Python
site-packages/playhouse/sqliteq.py
lego-cloud/MDMPy
dc676a5d2245a14b9b98a2ac2dba64ff0bf61800
[ "Python-2.0", "OLDAP-2.7" ]
674
2015-11-06T04:22:47.000Z
2022-02-26T17:31:43.000Z
site-packages/playhouse/sqliteq.py
lego-cloud/MDMPy
dc676a5d2245a14b9b98a2ac2dba64ff0bf61800
[ "Python-2.0", "OLDAP-2.7" ]
713
2015-11-06T10:48:58.000Z
2018-11-27T16:32:18.000Z
site-packages/playhouse/sqliteq.py
lego-cloud/MDMPy
dc676a5d2245a14b9b98a2ac2dba64ff0bf61800
[ "Python-2.0", "OLDAP-2.7" ]
106
2015-12-07T11:21:06.000Z
2022-03-11T10:58:41.000Z
import logging
import weakref
from threading import Event
from threading import Thread
try:
    from Queue import Queue
except ImportError:
    from queue import Queue

try:
    import gevent
    from gevent import Greenlet as GThread
    from gevent.event import Event as GEvent
    from gevent.queue import Queue as GQueue
except ImportError:
    GThread = GQueue = GEvent = None

from playhouse.sqlite_ext import SqliteExtDatabase

logger = logging.getLogger('peewee.sqliteq')


def fetchall(self):
    return list(self)  # Iterating implies waiting until populated.


def fetchone(self):
    self._wait()
    try:
        return next(self)
    except StopIteration:
        return None


THREADLOCAL_ERROR_MESSAGE = ('threadlocals cannot be set to True when using '
                             'the Sqlite thread / queue database. All queries '
                             'are serialized through a single connection, so '
                             'allowing multiple threads to connect defeats '
                             'the purpose of this database.')
WAL_MODE_ERROR_MESSAGE = ('SQLite must be configured to use the WAL journal '
                          'mode when using this feature. WAL mode allows '
                          'one or more readers to continue reading while '
                          'another connection writes to the database.')
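# Usage sketch (an assumption, not part of this excerpt): the module's
# SqliteQueueDatabase class serializes all writes through one connection,
# which is why WAL mode and a single connection are required above.
#
#   from playhouse.sqliteq import SqliteQueueDatabase
#   db = SqliteQueueDatabase('app.db', autostart=True, results_timeout=5.0)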
32.222642
79
0.610025
ffed6941b3c99947e3e5d93c80fbd2e963b7ad51
9,056
py
Python
Common/Db.py
StrawberryTeam/pi_robot
c1b8ce2ad49c64173673df0eb59e0941624556e7
[ "MIT" ]
2
2018-08-30T14:38:53.000Z
2019-12-12T09:33:42.000Z
Common/Db.py
StrawberryTeam/pi_robot
c1b8ce2ad49c64173673df0eb59e0941624556e7
[ "MIT" ]
1
2018-12-10T05:15:48.000Z
2018-12-10T05:15:48.000Z
Common/Db.py
StrawberryTeam/pi_robot
c1b8ce2ad49c64173673df0eb59e0941624556e7
[ "MIT" ]
2
2019-06-28T06:05:17.000Z
2019-10-28T08:34:50.000Z
#!/usr/bin/python3

from Common.Straw import Straw

import pymongo
from pymongo import MongoClient
from bson.objectid import ObjectId
import os


if __name__ == "__main__":
    db()
35.100775
169
0.597946
ffed95a551ec4c75f989589df7d781a9f4387728
1,251
py
Python
baya/tests/test_templatetags.py
kreneskyp/baya
5cf04b6873927124b4a3f24c113c08699dd61315
[ "MIT" ]
4
2016-05-24T13:57:37.000Z
2020-02-27T05:22:56.000Z
baya/tests/test_templatetags.py
kreneskyp/baya
5cf04b6873927124b4a3f24c113c08699dd61315
[ "MIT" ]
29
2016-02-05T01:31:51.000Z
2022-02-23T18:50:58.000Z
baya/tests/test_templatetags.py
hrichards/baya
f319cef5e95cd6a166265d51ae0ea236b6f65be3
[ "MIT" ]
6
2016-05-20T22:22:45.000Z
2019-09-03T17:57:59.000Z
from django.template import Context
from django.template import Template

from .test_base import LDAPGroupAuthTestBase

from django.contrib.auth.models import AnonymousUser
29.785714
69
0.608313
fff185192df2e58db961f6b323cfb8259a7a9f46
2,611
py
Python
egg/zoo/sum_game/architectures.py
CorentinKervadec/EGG
5ccd49c4a493514b1194699954d41940f5e2a5c6
[ "MIT" ]
null
null
null
egg/zoo/sum_game/architectures.py
CorentinKervadec/EGG
5ccd49c4a493514b1194699954d41940f5e2a5c6
[ "MIT" ]
null
null
null
egg/zoo/sum_game/architectures.py
CorentinKervadec/EGG
5ccd49c4a493514b1194699954d41940f5e2a5c6
[ "MIT" ]
null
null
null
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn as nn
from torch.nn import functional as F

# In EGG, the game designer must implement the core functionality of the
# Sender and Receiver agents. These are then embedded in wrappers that are
# used to train them to play Gumbel-Softmax- or Reinforce-optimized games.
# The core Sender must take the input and produce a hidden representation
# that is then used by the wrapper to initialize the RNN or other module
# that will generate the message. The core Receiver expects a hidden
# representation generated by the message-processing wrapper, plus possibly
# other game-specific input, and it must generate the game-specific output.

# The RecoReceiver class implements the core Receiver agent for the
# reconstruction game. This is simply a linear layer that takes as input
# the vector generated by the message-decoding RNN in the wrapper (x in the
# forward method) and produces an output of n_features dimensionality, to
# be interpreted as a one-hot representation of the reconstructed
# attribute-value vector

# The Sender class implements the core Sender agent common to both games:
# it gets the input target vector and produces a hidden layer that will
# initialize the message producing RNN
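# A minimal sketch of the RecoReceiver described in the comment above (an
# illustration consistent with that description, not necessarily the
# verbatim implementation):
class RecoReceiver(nn.Module):
    def __init__(self, n_features, n_hidden):
        super(RecoReceiver, self).__init__()
        # single linear read-out over the message-decoding RNN state
        self.output = nn.Linear(n_hidden, n_features)

    def forward(self, x, _input=None, _aux_input=None):
        # x is the hidden vector produced by the message-processing wrapper
        return self.output(x)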
44.254237
133
0.711222
fff18656fd42956b8ef43e1d1fc5a06b2aa15f66
2,757
py
Python
utils/random_training_splits.py
suvarnak/GenerativeFSLCovid
0bdeb4ed444c5c9d59697c71d0733fc3a100944c
[ "MIT" ]
null
null
null
utils/random_training_splits.py
suvarnak/GenerativeFSLCovid
0bdeb4ed444c5c9d59697c71d0733fc3a100944c
[ "MIT" ]
null
null
null
utils/random_training_splits.py
suvarnak/GenerativeFSLCovid
0bdeb4ed444c5c9d59697c71d0733fc3a100944c
[ "MIT" ]
null
null
null
import os
import shutil
import random


if __name__ == '__main__':
    main()
39.385714
105
0.673921
fff197ae68beb5dbb26583494df00c1fc7732948
1,285
py
Python
tools/gen_bbox_ac.py
vincentzhang/faster-rcnn-fcn
7118d715a430f0ec2697e5f7a9a39c9752b466da
[ "BSD-2-Clause" ]
7
2019-07-19T21:30:26.000Z
2021-06-17T03:57:22.000Z
tools/gen_bbox_ac.py
vincentzhang/faster-rcnn-fcn
7118d715a430f0ec2697e5f7a9a39c9752b466da
[ "BSD-2-Clause" ]
null
null
null
tools/gen_bbox_ac.py
vincentzhang/faster-rcnn-fcn
7118d715a430f0ec2697e5f7a9a39c9752b466da
[ "BSD-2-Clause" ]
1
2021-06-17T03:57:23.000Z
2021-06-17T03:57:23.000Z
# generated bbox ground truth from pixel-wise segmentation
# it currently only generates one bbox
from __future__ import print_function

import numpy as np
import h5py
import os
import pdb

mask_path = '../data/acce'
f = h5py.File(os.path.join(mask_path, "resized_label_ac_2d.h5"), 'r')
bbox_path = '../data/acce/bbox'
if not os.path.exists(bbox_path):
    os.mkdir(bbox_path)

# dim: shape (256, 367, 342), slices, height, width
count = 0
for k in f.keys():
    #pdb.set_trace()
    count += 1
    print("processing {}-th vol".format(count))
    data = f[k][...] # convert to numpy
    k = k.rsplit('_',1)[0] # strip the '_label' from the vol name
    with open( os.path.join(bbox_path, k)+'_bbox.txt', 'w') as bbox_file:
        # iterate through each slice
        for idx in range(data.shape[0]):
            mask = data[idx, :, :] # get the mask
            i,j = np.where(mask) # find positive mask
            if not i.size: # no positive mask
                print("{}_{},{}".format(k, idx, 0), file=bbox_file)
            else:
                h_min,w_min = np.min(zip(i,j), axis=0)
                h_max,w_max = np.max(zip(i,j), axis=0)
                print("{}_{},{},{},{},{},{}".format(k, idx, 1, w_min, h_min, w_max, h_max), file=bbox_file)
f.close()
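# Equivalent bounding-box computation in plain numpy (illustrative; avoids
# relying on Python-2 zip() returning a list):
#   h_min, h_max = i.min(), i.max()
#   w_min, w_max = j.min(), j.max()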
34.72973
83
0.585214
fff2144edf1dc7c96f337289635ef5af44b23625
8,510
py
Python
testscript/imputation_algorithms.py
zshufan/Tattle-Tale
f9d93051efb523f1bda0cead023c2f001e18cc85
[ "BSD-3-Clause" ]
null
null
null
testscript/imputation_algorithms.py
zshufan/Tattle-Tale
f9d93051efb523f1bda0cead023c2f001e18cc85
[ "BSD-3-Clause" ]
null
null
null
testscript/imputation_algorithms.py
zshufan/Tattle-Tale
f9d93051efb523f1bda0cead023c2f001e18cc85
[ "BSD-3-Clause" ]
null
null
null
# some codes refer to Holoclean evaluation function
# https://github.com/HoloClean/holoclean
import pandas as pd
import numpy as np
import logging
import random
import argparse

parser = argparse.ArgumentParser(description='Predict on many examples')
parser.add_argument("--dataset", type=str, help="dataset path")
parser.add_argument("--ground_truth", type=str, help="ground truth path")
parser.add_argument("--ground_truth_2", type=str, help="ground truth path")
args = parser.parse_args()

NULL_REPR = '_nan_'
exclude_attr = ['_tid_', 'FName', 'LName']


if __name__ == "__main__":
    # load dataset
    adv = DataCleaningAsAdv(args.dataset)
    f = open("baseline_cleaning_report_1", "a")
    print(args.dataset, file=f)
    # evaluate
    adv.evaluate(gt_fpath=args.ground_truth, tid_col='tid', attr_col='attribute', val_col='correct_val', file=f)
    if args.ground_truth_2 is not None:
        adv.evaluate(gt_fpath=args.ground_truth_2, tid_col='tid', attr_col='attribute', val_col='correct_val', file=f)
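# Example invocation (hypothetical file paths, for illustration only):
#   python imputation_algorithms.py --dataset repaired.csv \
#       --ground_truth clean.csv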
42.338308
169
0.602115
fff27be5ec642c73c3bac5ec2ecda165d7fe17c5
145
py
Python
Hackerrank_python/15.numpy/10.Min and Max.py
manish1822510059/Hackerrank
7c6e4553f033f067e04dc6c756ef90cb43f3c4a8
[ "MIT" ]
39
2020-09-27T05:32:05.000Z
2022-01-08T18:04:05.000Z
Hackerrank_python/15.numpy/10.Min and Max.py
manish1822510059/Hackerrank
7c6e4553f033f067e04dc6c756ef90cb43f3c4a8
[ "MIT" ]
5
2020-10-02T13:33:00.000Z
2021-03-01T14:06:08.000Z
Hackerrank_python/15.numpy/10.Min and Max.py
manish1822510059/Hackerrank
7c6e4553f033f067e04dc6c756ef90cb43f3c4a8
[ "MIT" ]
6
2020-10-03T04:04:55.000Z
2021-10-18T04:07:53.000Z
import numpy as arr

n,m=map(int,input().split())
ar=([list(map(int,input().split()))for _ in range(n)])
arr1=arr.min(ar,axis=1)
print(max(arr1))
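# Worked example (the HackerRank sample, for illustration): given the input
#   4 2
#   2 5
#   3 7
#   1 3
#   4 0
# the row-wise minima are [2, 3, 1, 0], so the program prints 3.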
24.166667
54
0.668966