Dataset schema (one row per source file):
repo_name | __id__ | blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_url | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_fork | gha_event_created_at | gha_created_at | gha_updated_at | gha_pushed_at | gha_size | gha_stargazers_count | gha_forks_count | gha_open_issues_count | gha_language | gha_archived | gha_disabled | content | src_encoding | language | is_vendor | is_generated | length_bytes | extension | num_repo_files | filename | num_lang_files | alphanum_fraction | alpha_fraction | hex_fraction | num_lines | avg_line_length | max_line_length

# ===== repo: aneeshpanoli/Algorithms | path: /smallest_missing_pos_num.py | license: none | Python (UTF-8) =====
__author__ = "Aneesh panoli"
'''
find the smallest missing integer from an unsorted array
CONSTRAINTS:
- smallest positive integer MISSING
- unsorted array
EXAMPLE
- a = [-1, 2, 0, 3, 4] -> 1
- a = [1, 2, 3, 4] -> 5
- a = [1, 2, 5, 6]
- a = [-1]
CLARIFYING QUESTIONS
- are there duplicate numbers?
- can there be more than one missing number?
- how large can the numbers be?
- how large can the array be?
INSIGHTS:
- ignore anything less than zero
- use a pointer to save the index of the last seen value
PoA:
EDGE cases:
[-1, 2, 3, 4, 5]
loop through from 1 and up
a) sort array -> O(n log n) + n
'''
from typing import List
def smallest_missing_positive_number1(a: List[int]) -> int:
a = sorted(a)
if a[-1] <= 0:
return -1
    for i in range(1, a[-1] + 2):  # +2 so an array with no gap, e.g. [1, 2, 3, 4], returns a[-1] + 1
if i not in a:
return i
def smallest_missing_positive_number(a: List[int]) -> int:
if not a:
return -1
smallest = 1
while True:
if smallest not in a:
return smallest
smallest += 1
print(smallest_missing_positive_number([1, 2, 3]))

# ===== repo: laetitia-lac/RandomMeals | path: /src/main.py | license: MIT | Python (UTF-8) =====
import json
import random
import fpdf
from utils.logger import logger
def return_content_json_file(path_file):
with open(path_file, 'rb') as json_file:
return json.load(json_file)
def write_json_file(path_file, content_json):
with open(path_file, 'w') as json_file:
json.dump(content_json, json_file)
def write_subpart_pdf(pdf_file, title_subpart, checkbox_content_subpart):
# title subpart
pdf_file.set_font('DejaVu', size=16)
pdf_file.cell(40, 10, title_subpart)
pdf_file.ln()
# checkbox subpart
pdf_file.set_font('DejaVu', size=12)
for checkbox_line in checkbox_content_subpart:
pdf_file.write(5, checkbox_line)
pdf_file.ln()
return pdf_file
def write_pdf_file(path_file, chosen_recipes_names, shopping_list):
pdf_file = fpdf.FPDF(format='A4')
pdf_file.add_page()
pdf_file.add_font('DejaVu', '', 'src/pdf_utils/DejaVuSans.ttf', uni=True)
write_subpart_pdf(pdf_file, 'LISTE DE RECETTES DE LA SEMAINE', chosen_recipes_names)
pdf_file.ln()
write_subpart_pdf(pdf_file, 'LISTE DE COURSES POUR LA SEMAINE', shopping_list)
pdf_file.output(path_file)
if __name__ == '__main__':
nb_of_recipes_to_choose = int(input('Indicate the number of meals to generate for this week: '))
logger.debug('Beginning of program')
list_recipes = return_content_json_file('data/recipes.json')
# get recipes of last week
chosen_recipes_names_last_week = set(return_content_json_file('data/chosen_recipes.json'))
logger.debug('Recipes of last week: {}'.format(chosen_recipes_names_last_week))
# choose the recipes for the week
# 1. take recipes which were not selected last week
all_recipes_names = {recipe['name_recipe'] for recipe in list_recipes}
unused_recipes_names = all_recipes_names - chosen_recipes_names_last_week
logger.debug('Unused recipes: {}'.format(unused_recipes_names))
chosen_recipes_names = random.sample(unused_recipes_names,
k=min(nb_of_recipes_to_choose, len(unused_recipes_names)))
# 2. if needed, take chosen recipes from last week
nb_missing_recipes = nb_of_recipes_to_choose - len(chosen_recipes_names)
if nb_missing_recipes > 0:
chosen_recipes_names.extend(
random.sample(chosen_recipes_names_last_week,
k=min(nb_missing_recipes, len(chosen_recipes_names_last_week))))
logger.debug('Recipes of the week: {}'.format(chosen_recipes_names))
# output the chosen recipes of the week as json
write_json_file('data/chosen_recipes.json', chosen_recipes_names)
# get chosen recipes from the name
chosen_recipes = [recipe for recipe in list_recipes if recipe['name_recipe'] in chosen_recipes_names]
logger.debug('Recipes of the week (with list of ingredients): {}'.format(chosen_recipes))
# generate the shopping list
shopping_list = dict()
for recipe in chosen_recipes:
for ingredient_recipe in recipe['ingredients']:
ingredient_name = ingredient_recipe['name'].lower()
ingredient_quantity = ingredient_recipe['quantity']
ingredient_unit = ingredient_recipe['unit'].lower() if ingredient_recipe.get('unit') else None
logger.debug('Ingredient "{}" will be added to the shopping list (for recipe "{}")'.format(ingredient_name,
recipe[
'name_recipe']))
# add ingredient to shopping list
ingredient_dict = shopping_list.get(ingredient_name, dict())
ingredient_dict[ingredient_unit] = ingredient_dict.get(ingredient_unit, 0) + ingredient_quantity
shopping_list[ingredient_name] = ingredient_dict
logger.debug('Shopping list: {}'.format(shopping_list))
# generate the output for user
# 1. format chosen recipes names
formatted_chosen_recipes_names = ['☐ {}'.format(recipe) for recipe in chosen_recipes_names]
# 2. format shopping list
formatted_shopping_list = list()
name_ingredients_sorted = sorted(shopping_list.keys())
for name_ingredient in name_ingredients_sorted:
for unit, quantity in shopping_list[name_ingredient].items():
formatted_shopping_list.append('☐ {} ({}{})'.format(name_ingredient, quantity, unit) if unit is not None
else '☐ {} ({})'.format(name_ingredient, quantity))
logger.debug('Write output pdf')
write_pdf_file('data/shopping_list.pdf', formatted_chosen_recipes_names, formatted_shopping_list)
    logger.debug('End of program')

# ===== repo: SayanNL1996/decorator_practice | path: /decorators(ex-8).py | license: none | Python (UTF-8) =====
class Descriptor:
def __init__(self):
self.__fuel_cap = 0
def __get__(self, instance, owner):
return self.__fuel_cap
def __set__(self, instance, value):
if isinstance(value, int):
print(value)
else:
raise TypeError("Fuel Capacity can only be an integer")
if value < 0:
raise ValueError("Fuel Capacity can never be less than zero")
self.__fuel_cap = value
def __delete__(self, instance):
del self.__fuel_cap
class Car:
fuel_cap = Descriptor()
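    # fuel_cap is a class-level descriptor: every read/write of car.fuel_cap goes through
    # Descriptor.__get__/__set__, so the int/negative checks run on each assignment
    # (the value itself is stored on the single shared Descriptor instance).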
def __init__(self, make, model, fuel_cap):
self.make = make
self.model = model
self.fuel_cap = fuel_cap
def __str__(self):
return "{0} model {1} with a fuel capacity of {2} ltr.".format(self.make, self.model, self.fuel_cap)
car2 = Car("BMW", "X7", 40)
print(car2)

# ===== repo: MohamedAl-Hussein/FIFA | path: /Docker/dags/archives/initdb_proxies.py | license: none | Python (UTF-8) =====
# from datetime import datetime, timedelta
#
#
# default_args = {
# 'owner': 'airflow',
# 'depends_on_past': False,
# 'start_date': datetime.today(),
# 'email_on_failure': False,
# 'email_on_retry': False,
# 'retries': 1,
# 'retry_delay': timedelta(minutes=2)
# }
#
# with DAG(
# dag_id='initdb_proxies',
# description="Import proxies into database.",
# default_args=default_args,
# schedule_interval='@once'
# ) as dag:
#
# initproxydb_task = BashOperator(
# task_id='proxy_initdb',
# bash_command=\
# "cd /FIFA/fifa_data/ && python3 -m proxies.proxy_update ",
# )

# ===== repo: flavone/DesignAndTestFramework | path: /app.py | license: none | Python (UTF-8) =====
from flask import Flask
from apis._api import api
from ENV import *
from apis.case_design_api import *
from pylab import *
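# use the SimHei font so matplotlib can render CJK (Chinese) labels in plots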
mpl.rcParams['font.sans-serif'] = ['SimHei']
app = Flask(__name__)
app.register_blueprint(api, url_prefix='/api')
app.config['JSON_AS_ASCII'] = False
if __name__ == '__main__':
app.run(host=HOST, port=PORT, debug=True)

# ===== repo: DEMIDOB/Dassi | path: /change_baa.py | license: none | Python (UTF-8) =====
import requests
import os
to = input("New backendaddr: ")
pw = input("Your password: ")
current_site = "https://demidob.000webhostapp.com/" # current_site = http://demidob.ml/
response = requests.post(current_site + "dassi/backendaddr/change.php", data={'pw':pw, 'to':to})
if response.status_code == 200:
print(str(response.content)[2:len(str(response.content))-1])
os.system("pause") | UTF-8 | Python | false | false | 391 | py | 31 | change_baa.py | 29 | 0.703325 | 0.682864 | 0 | 11 | 34.636364 | 96 |
minimalgeek/SQM-Model | 12,051,678,240,808 | f4918edab091211e34ac91cc5802e82b0293b2d1 | dbe5968be87bf4600b69144468f669684993e9ce | /hu/farago/eum2/dto/Player.py | e1bfc5914cd035af2944e4ed4962ed15a30aed16 | [] | no_license | https://github.com/minimalgeek/SQM-Model | a1710f135cd72744a46912987ba662ca3d70d819 | 073d1b94edfdd5c430a4e7ed533ff233eee1ed1e | refs/heads/master | "2021-01-21T03:41:03.445575" | "2016-08-03T14:47:30" | "2016-08-03T14:47:30" | 49,877,490 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Created on 19 May 2016
@author: Balázs
'''
import collections
class Player():
'''
Player of EUM
'''
previousPosition = None
name = None
capability = None
position = None
salience = None
def __init__(self, name, capability, position, salience):
self.name = name
self.capability = float(capability)
self.position = float(position)
self.previousPosition = float(position)
self.salience = float(salience)
self.risk = 1
self.sumOfVotes = 0
self.probabilityOfSuccess = collections.OrderedDict()
self.probabilityOfStatusQuo = collections.OrderedDict()
self.expectedUtilityI = collections.OrderedDict()
self.expectedUtilityJ = collections.OrderedDict()
self.offers = collections.OrderedDict()
        self.bestOffer = None
def power(self):
return self.capability*self.salience
def addToSum(self, value):
self.sumOfVotes += value
def updatePosition(self, value):
self.previousPosition = self.position
self.position = value
def __str__(self):
return ','.join([self.name, str(self.capability), str(round(self.position, 3)), str(self.salience)])
def __repr__(self):
        return self.__str__()

# ===== repo: ngocyen3006/learn-python | path: /firecode.io/Level_03/array_partition.py | license: none | Python (UTF-8) =====
# Array Partition
def find_partitions(input_list):
res = []
n = input_list[0]
i = 1
while i < len(input_list) and input_list[i] == input_list[i - 1] + 1:
i += 1
try:
if n == input_list[i - 1]:
res.append(n)
else:
res.append(str(n) + "-" + str(input_list[i - 1]))
return res + find_partitions(input_list[i:])
except IndexError:
return res
if __name__ == '__main__':
a = [1, 2, 3, 6, 7, 8, 10, 11]
r_a = ["1-3", "6-8", "10-11"]
print(find_partitions(a) == r_a)
print("-" * 25)
a = [1, 2, 3, 4, 7, 8, 9]
r_a = ["1-4", "7-9"]
print(find_partitions(a) == r_a)
print("-" * 25)
a = [1, 3, 7, 8, 9]
r_a = [1, 3, "7-9"]
print(find_partitions(a) == r_a)
print("-" * 25)

# ===== repo: elvinMark/emmv | path: /Caja/src/tmp/caja.py | license: none | Python (UTF-8) =====
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './ui/caja.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(502, 146)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(20, 10, 287, 80))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.dateEdit = QtWidgets.QDateEdit(self.verticalLayoutWidget)
self.dateEdit.setEnabled(True)
self.dateEdit.setReadOnly(True)
self.dateEdit.setCalendarPopup(True)
self.dateEdit.setObjectName("dateEdit")
self.horizontalLayout.addWidget(self.dateEdit)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.pushButton = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushButton.setObjectName("pushButton")
self.horizontalLayout_2.addWidget(self.pushButton)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem)
self.verticalLayout.addLayout(self.horizontalLayout_2)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 502, 22))
self.menubar.setObjectName("menubar")
self.menuIngresos = QtWidgets.QMenu(self.menubar)
self.menuIngresos.setObjectName("menuIngresos")
self.menuEgresos = QtWidgets.QMenu(self.menubar)
self.menuEgresos.setObjectName("menuEgresos")
self.menuConsulta = QtWidgets.QMenu(self.menubar)
self.menuConsulta.setObjectName("menuConsulta")
self.menuReporte = QtWidgets.QMenu(self.menubar)
self.menuReporte.setObjectName("menuReporte")
self.menuAdministrador = QtWidgets.QMenu(self.menubar)
self.menuAdministrador.setObjectName("menuAdministrador")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionEfectivo = QtWidgets.QAction(MainWindow)
self.actionEfectivo.setObjectName("actionEfectivo")
self.actionCheque = QtWidgets.QAction(MainWindow)
self.actionCheque.setObjectName("actionCheque")
self.actionVoucher = QtWidgets.QAction(MainWindow)
self.actionVoucher.setObjectName("actionVoucher")
self.actionGastos = QtWidgets.QAction(MainWindow)
self.actionGastos.setObjectName("actionGastos")
self.actionCrear_Codigo = QtWidgets.QAction(MainWindow)
self.actionCrear_Codigo.setObjectName("actionCrear_Codigo")
self.actionLista_de_Codigos = QtWidgets.QAction(MainWindow)
self.actionLista_de_Codigos.setObjectName("actionLista_de_Codigos")
self.actionIngresos = QtWidgets.QAction(MainWindow)
self.actionIngresos.setObjectName("actionIngresos")
self.actionEgresos = QtWidgets.QAction(MainWindow)
self.actionEgresos.setObjectName("actionEgresos")
self.actionReporte_de_Caja = QtWidgets.QAction(MainWindow)
self.actionReporte_de_Caja.setObjectName("actionReporte_de_Caja")
self.menuIngresos.addAction(self.actionEfectivo)
self.menuIngresos.addAction(self.actionCheque)
self.menuIngresos.addAction(self.actionVoucher)
self.menuEgresos.addAction(self.actionGastos)
self.menuConsulta.addAction(self.actionIngresos)
self.menuConsulta.addAction(self.actionEgresos)
self.menuReporte.addAction(self.actionReporte_de_Caja)
self.menuAdministrador.addAction(self.actionCrear_Codigo)
self.menuAdministrador.addAction(self.actionLista_de_Codigos)
self.menubar.addAction(self.menuAdministrador.menuAction())
self.menubar.addAction(self.menuIngresos.menuAction())
self.menubar.addAction(self.menuEgresos.menuAction())
self.menubar.addAction(self.menuConsulta.menuAction())
self.menubar.addAction(self.menuReporte.menuAction())
self.retranslateUi(MainWindow)
self.pushButton.clicked.connect(MainWindow.enable_date_edit)
self.actionCrear_Codigo.triggered.connect(MainWindow.window_create_code)
self.actionCheque.triggered.connect(MainWindow.window_input_check)
self.actionEfectivo.triggered.connect(MainWindow.window_input_cash)
self.actionVoucher.triggered.connect(MainWindow.window_input_ticket)
self.actionLista_de_Codigos.triggered.connect(MainWindow.window_list_code)
self.actionGastos.triggered.connect(MainWindow.window_output_expenses)
self.actionIngresos.triggered.connect(MainWindow.window_consult_inputs)
self.actionEgresos.triggered.connect(MainWindow.window_consult_outputs)
self.actionReporte_de_Caja.triggered.connect(MainWindow.window_report_all)
self.dateEdit.dateChanged['QDate'].connect(MainWindow.change_system_date)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Caja - EMMV"))
self.label.setText(_translate("MainWindow", "Fecha:"))
self.pushButton.setText(_translate("MainWindow", "Cambiar Fecha"))
self.menuIngresos.setTitle(_translate("MainWindow", "Ingresos"))
self.menuEgresos.setTitle(_translate("MainWindow", "Egresos"))
self.menuConsulta.setTitle(_translate("MainWindow", "Consulta"))
self.menuReporte.setTitle(_translate("MainWindow", "Reporte"))
self.menuAdministrador.setTitle(_translate("MainWindow", "Administrador"))
self.actionEfectivo.setText(_translate("MainWindow", "Efectivo"))
self.actionCheque.setText(_translate("MainWindow", "Cheque"))
self.actionVoucher.setText(_translate("MainWindow", "Voucher"))
self.actionGastos.setText(_translate("MainWindow", "Gastos"))
self.actionCrear_Codigo.setText(_translate("MainWindow", "Crear Codigo"))
self.actionLista_de_Codigos.setText(_translate("MainWindow", "Lista de Codigos"))
self.actionIngresos.setText(_translate("MainWindow", "Ingresos"))
self.actionEgresos.setText(_translate("MainWindow", "Egresos"))
self.actionReporte_de_Caja.setText(_translate("MainWindow", "Reporte de Caja"))

# ===== repo: zziri/study | path: /ps/BOJ/16194 카드 구매하기 2.py | license: none | Python (UTF-8) =====
def solution(n, p):
dp = [987654321]*(n+1)
dp[0] = 0
for i in range(1, n+1):
for j in range(1, i+1):
dp[i] = min(dp[i], dp[i-j] + p[j])
return dp[n]
def main():
# input
n = int(input())
p = [0]
for item in list(map(int, input().split())):
p.append(item)
# solve
answer = solution(n, p)
# print
print(answer)
if __name__ == "__main__":
main()

# ===== repo: yigitozgumus/IACV_Project | path: /models/cvae_denoiser.py | license: MIT | Python (UTF-8) =====
from base.base_model import BaseModel
import tensorflow as tf
from utils.utils import get_getter
import numpy as np
class CVAEDenoiser(BaseModel):
def __init__(self, config):
super(CVAEDenoiser, self).__init__(config)
self.build_model()
self.init_saver()
def build_model(self):
# Placeholders
self.is_training_ae = tf.placeholder(tf.bool)
# self.is_training_den = tf.placeholder(tf.bool)
self.image_input = tf.placeholder(
tf.float32, shape=[None] + self.config.trainer.image_dims, name="x"
)
self.ground_truth = tf.placeholder(
tf.float32, shape=[None] + self.config.trainer.image_dims, name="gt"
)
self.noise_tensor = tf.placeholder(
tf.float32, shape=[None] + self.config.trainer.image_dims, name="noise"
)
self.init_kernel = tf.random_normal_initializer(mean=0.0, stddev=0.02)
self.batch_size = tf.placeholder(tf.int32)
## Architecture
# Encoder Decoder Part first
self.logger.info("Building Training Graph")
with tf.variable_scope("CVAE_Denoiser"):
with tf.variable_scope("CVAE"):
self.mean, self.logvar = self.encoder(self.image_input)
self.z_reparam = self.reparameterize(self.mean, self.logvar, self.batch_size)
self.rec_image = self.decoder(self.z_reparam, apply_sigmoid=True)
with tf.variable_scope("Denoiser"):
self.denoised, self.mask, self.mask_shallow = self.denoiser(self.rec_image + self.noise_tensor)
# Loss Function
with tf.name_scope("Loss_Function"):
with tf.name_scope("CVAE"):
self.reconstruction_loss = -tf.reduce_sum(
self.image_input * tf.log(1e-10 + self.rec_image)
+ (1 - self.image_input) * tf.log(1e-10 + (1 - self.rec_image)),
1,
)
self.latent_loss = -0.5 * tf.reduce_sum(
1 + self.logvar - tf.square(self.mean) - tf.exp(self.logvar), 1
)
self.cvae_loss = tf.reduce_mean(self.reconstruction_loss + self.latent_loss)
with tf.name_scope("Denoiser"):
delta_den = self.denoised - self.image_input
delta_den = tf.layers.Flatten()(delta_den)
self.den_loss = tf.reduce_mean(
tf.norm(
delta_den, ord=self.config.trainer.den_norm_degree, axis=1, keepdims=False
)
)
# Optimizer
with tf.name_scope("Optimizer"):
self.optimizer = tf.train.AdamOptimizer(
self.config.trainer.l_rate,
beta1=self.config.trainer.optimizer_adam_beta1,
beta2=self.config.trainer.optimizer_adam_beta2,
)
# Collect All Variables
all_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
self.cvae_vars = [v for v in all_variables if v.name.startswith("CVAE_Denoiser/CVAE")]
self.denoiser_vars = [
v for v in all_variables if v.name.startswith("CVAE_Denoiser/Denoiser")
]
self.cvae_update_ops = tf.get_collection(
tf.GraphKeys.UPDATE_OPS, scope="CVAE_Denoiser/CVAE"
)
self.den_update_ops = tf.get_collection(
tf.GraphKeys.UPDATE_OPS, scope="CVAE_Denoiser/Denoiser"
)
with tf.control_dependencies(self.cvae_update_ops):
self.cvae_op = self.optimizer.minimize(
self.cvae_loss, var_list=self.cvae_vars, global_step=self.global_step_tensor
)
with tf.control_dependencies(self.den_update_ops):
self.den_op = self.optimizer.minimize(self.den_loss, var_list=self.denoiser_vars)
# Exponential Moving Average for Estimation
self.cvae_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)
maintain_averages_op_cvae = self.cvae_ema.apply(self.cvae_vars)
self.den_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)
maintain_averages_op_den = self.den_ema.apply(self.denoiser_vars)
with tf.control_dependencies([self.cvae_op]):
self.train_cvae_op = tf.group(maintain_averages_op_cvae)
with tf.control_dependencies([self.den_op]):
self.train_den_op = tf.group(maintain_averages_op_den)
self.logger.info("Building Testing Graph...")
with tf.variable_scope("CVAE_Denoiser"):
with tf.variable_scope("CVAE"):
self.mean_ema, self.logvar_ema = self.encoder(
self.image_input, getter=get_getter(self.cvae_ema)
)
self.z_reparam_ema = self.reparameterize(
self.mean_ema, self.logvar_ema, self.batch_size
)
self.rec_image_ema = self.decoder(
self.z_reparam_ema, getter=get_getter(self.cvae_ema), apply_sigmoid=True
)
with tf.variable_scope("Denoiser"):
self.denoised_ema, self.mask_ema, self.mask_shallow_ema = self.denoiser(
self.rec_image_ema, getter=get_getter(self.den_ema)
)
self.mean_den_ema, self.logvar_den_ema = self.encoder(
self.denoised_ema, getter=get_getter(self.cvae_ema)
)
self.z_den_ema = self.reparameterize(
self.mean_den_ema, self.logvar_den_ema, self.batch_size
)
self.residual = self.image_input - self.mask_ema
with tf.name_scope("Testing"):
with tf.variable_scope("Reconstruction_Loss"):
# |x - D(E(x)) |2
delta = self.rec_image_ema - self.image_input
delta = tf.layers.Flatten()(delta)
self.rec_score = tf.norm(delta, ord=2, axis=1, keepdims=False)
with tf.variable_scope("Denoising_Loss"):
delta_den = self.denoised_ema - self.rec_image_ema
delta_den = tf.layers.Flatten()(delta_den)
self.den_score = tf.norm(delta_den, ord=2, axis=1, keepdims=False)
with tf.variable_scope("Pipeline_Loss_1"):
delta_pipe = self.denoised_ema - self.image_input
delta_pipe = tf.layers.Flatten()(delta_pipe)
self.pipe_score = tf.norm(delta_pipe, ord=1, axis=1, keepdims=False)
with tf.variable_scope("Pipeline_Loss_2"):
delta_pipe = self.denoised_ema - self.image_input
delta_pipe = tf.layers.Flatten()(delta_pipe)
self.pipe_score_2 = tf.norm(delta_pipe, ord=2, axis=1, keepdims=False)
with tf.variable_scope("Combination_Loss"):
delta_comb = self.z_reparam_ema - self.z_den_ema
delta_comb = tf.layers.Flatten()(delta_comb)
comb_score = tf.norm(delta_comb, ord=2, axis=1, keepdims=False)
self.noise_score = comb_score
self.comb_score = 10 * comb_score + self.pipe_score
with tf.variable_scope("Mask_1"):
delta_mask = (self.rec_image_ema - self.mask_ema)
delta_mask = tf.layers.Flatten()(delta_mask)
self.mask_score_1 = tf.norm(delta_mask, ord=1,axis=1,keepdims=False)
with tf.variable_scope("Mask_2"):
delta_mask_2 = (self.image_input - self.mask_ema)
delta_mask_2 = tf.layers.Flatten()(delta_mask_2)
self.mask_score_2 = tf.norm(delta_mask_2, ord=2,axis=1,keepdims=False)
with tf.variable_scope("Mask_1_s"):
delta_mask = (self.rec_image_ema - self.mask_shallow_ema)
delta_mask = tf.layers.Flatten()(delta_mask)
self.mask_score_1_s = tf.norm(delta_mask, ord=1,axis=1,keepdims=False)
with tf.variable_scope("Mask_2_s"):
delta_mask_2 = (self.image_input - self.mask_shallow_ema)
delta_mask_2 = tf.layers.Flatten()(delta_mask_2)
self.mask_score_2_s = tf.norm(delta_mask_2, ord=2,axis=1,keepdims=False)
# Summary
with tf.name_scope("Summary"):
with tf.name_scope("cvae_loss"):
tf.summary.scalar("loss_auto", self.cvae_loss, ["loss_cvae"])
with tf.name_scope("denoiser_loss"):
tf.summary.scalar("loss_den", self.den_loss, ["loss_den"])
with tf.name_scope("Image"):
tf.summary.image("Input_Image", self.image_input, 1, ["image"])
tf.summary.image("rec_image", self.rec_image, 1, ["image"])
tf.summary.image("Input_Image", self.image_input, 1, ["image_2"])
tf.summary.image("rec_image", self.rec_image, 1, ["image_2"])
tf.summary.image("Denoised_Image", self.denoised, 1, ["image_2"])
tf.summary.image("mask", self.mask, 1, ["image_2"])
tf.summary.image("mask", self.mask_ema, 1, ["image_3"])
tf.summary.image("mask_shallow", self.mask_shallow_ema, 1, ["image_3"])
tf.summary.image("Output_Image", self.denoised_ema, 1, ["image_3"])
tf.summary.image("Rec_Image", self.rec_image_ema, 1, ["image_3"])
tf.summary.image("Input_Image", self.image_input, 1, ["image_3"])
tf.summary.image("Residual", self.residual,1,["image_3"])
tf.summary.image("Ground_Truth", self.ground_truth,1,["image_3"])
self.summary_op_cvae = tf.summary.merge_all("image")
self.summary_op_den = tf.summary.merge_all("image_2")
self.summary_op_test = tf.summary.merge_all("image_3")
self.summary_op_loss_cvae = tf.summary.merge_all("loss_cvae")
self.summary_op_loss_den = tf.summary.merge_all("loss_den")
# self.summary_all_cvae = tf.summary.merge([self.summary_op_cvae, self.summary_op_loss_cvae])
# self.summary_all_den = tf.summary.merge([self.summary_op_den, self.summary_op_loss_den])
# self.summary_all = tf.summary.merge([self.summary_op_im, self.summary_op_loss])
def encoder(self, image_input, getter=None):
# This generator will take the image from the input dataset, and first it will
# it will create a latent representation of that image then with the decoder part,
# it will reconstruct the image.
with tf.variable_scope("Inference", custom_getter=getter, reuse=tf.AUTO_REUSE):
x_e = tf.reshape(
image_input,
[-1, self.config.data_loader.image_size, self.config.data_loader.image_size, 1],
)
net_name = "Layer_1"
with tf.variable_scope(net_name):
x_e = tf.layers.Conv2D(
filters=128,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="conv",
)(x_e)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
# 14 x 14 x 64
net_name = "Layer_2"
with tf.variable_scope(net_name):
x_e = tf.layers.Conv2D(
filters=256,
kernel_size=5,
padding="same",
strides=(2, 2),
kernel_initializer=self.init_kernel,
name="conv",
)(x_e)
# x_e = tf.layers.batch_normalization(
# x_e,
# momentum=self.config.trainer.batch_momentum,
# epsilon=self.config.trainer.batch_epsilon,
# training=self.is_training_ae,
# )
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
# 7 x 7 x 128
net_name = "Layer_3"
with tf.variable_scope(net_name):
x_e = tf.layers.Conv2D(
filters=512,
kernel_size=5,
padding="same",
strides=(2, 2),
kernel_initializer=self.init_kernel,
name="conv",
)(x_e)
# x_e = tf.layers.batch_normalization(
# x_e,
# momentum=self.config.trainer.batch_momentum,
# epsilon=self.config.trainer.batch_epsilon,
# training=self.is_training_ae,
# )
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
# 4 x 4 x 256
x_e = tf.layers.Flatten()(x_e)
net_name = "Layer_4"
with tf.variable_scope(net_name):
x_e = tf.layers.Dense(
units=self.config.trainer.noise_dim + self.config.trainer.noise_dim,
kernel_initializer=self.init_kernel,
name="fc",
)(x_e)
mean, logvar = tf.split(x_e, num_or_size_splits=2, axis=1)
return mean, logvar
def decoder(self, noise_input, getter=None, apply_sigmoid=False):
with tf.variable_scope("Generative", custom_getter=getter, reuse=tf.AUTO_REUSE):
net = tf.reshape(noise_input, [-1, 1, 1, self.config.trainer.noise_dim])
net_name = "layer_1"
with tf.variable_scope(net_name):
net = tf.layers.Conv2DTranspose(
filters=512,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="tconv1",
)(net)
net = tf.nn.relu(features=net, name="tconv1/relu")
net_name = "layer_2"
with tf.variable_scope(net_name):
net = tf.layers.Conv2DTranspose(
filters=256,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="tconv2",
)(net)
net = tf.nn.relu(features=net, name="tconv2/relu")
net_name = "layer_3"
with tf.variable_scope(net_name):
net = tf.layers.Conv2DTranspose(
filters=128,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="tconv3",
)(net)
net = tf.nn.relu(features=net, name="tconv3/relu")
net_name = "layer_4"
with tf.variable_scope(net_name):
net = tf.layers.Conv2DTranspose(
filters=64,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="tconv4",
)(net)
net = tf.nn.relu(features=net, name="tconv3/relu")
net_name = "layer_5"
with tf.variable_scope(net_name):
net = tf.layers.Conv2DTranspose(
filters=1,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="tconv5",
)(net)
if apply_sigmoid:
net = tf.sigmoid(net)
return net
def denoiser(self, image_input, getter=None):
# Full Model Scope
with tf.variable_scope("Denoiser", reuse=tf.AUTO_REUSE, custom_getter=getter):
# First Convolution + ReLU layer
net = tf.layers.Conv2D(
filters=63,
kernel_size=3,
strides=1,
kernel_initializer=self.init_kernel,
padding="same",
)(image_input)
net = tf.nn.relu(features=net)
# 1 Convolution of the image for the bottom layer
net_input = tf.layers.Conv2D(
filters=1,
kernel_size=3,
strides=1,
kernel_initializer=self.init_kernel,
padding="same",
)(image_input)
net_layer_1 = tf.layers.Conv2D(
filters=1,
kernel_size=3,
strides=1,
kernel_initializer=self.init_kernel,
padding="same",
)(net)
# First convolution from the image second one from the first top layer convolution
mask = net_input + net_layer_1
for i in range(4):
# Top layer chained convolutions
net = tf.layers.Conv2D(
filters=63,
kernel_size=3,
strides=1,
kernel_initializer=self.init_kernel,
padding="same",
)(net)
net = tf.nn.relu(features=net)
# Bottom layer single convolutions
net_1 = tf.layers.Conv2D(
filters=1,
kernel_size=3,
strides=1,
kernel_initializer=self.init_kernel,
padding="same",
)(net)
mask += net_1
mask_shallow = mask
for i in range(5):
# Top layer chained convolutions
net = tf.layers.Conv2D(
filters=63,
kernel_size=3,
strides=1,
kernel_initializer=self.init_kernel,
padding="same",
)(net)
net = tf.nn.relu(features=net)
# Bottom layer single convolutions
net_1 = tf.layers.Conv2D(
filters=1,
kernel_size=3,
strides=1,
kernel_initializer=self.init_kernel,
padding="same",
)(net)
mask += net_1
output = image_input + mask
return output, mask, mask_shallow
def kl_loss(self, avg, log_var):
with tf.name_scope("KLLoss"):
return tf.reduce_mean(
-0.5 * tf.reduce_sum(1.0 + log_var - tf.square(avg) - tf.exp(log_var), axis=-1)
)
def reparameterize(self, mean, logvar, batch_size):
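        # reparameterization trick: z = mean + exp(logvar / 2) * eps with eps ~ N(0, I)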
eps = tf.random_normal(shape=[batch_size, self.config.trainer.noise_dim])
return eps * tf.exp(logvar * 0.5) + mean
def log_normal_pdf(self, sample, mean, logvar, raxis=1):
log2pi = tf.log(2.0 * np.pi)
return tf.reduce_sum(
-0.5 * ((sample - mean) ** 2.0 * tf.exp(-logvar) + logvar + log2pi), axis=raxis
)
def init_saver(self):
# here you initialize the tensorflow saver that will be used in saving the checkpoints.
self.saver = tf.train.Saver(max_to_keep=self.config.log.max_to_keep)

# ===== repo: mrijken/poetry | path: /poetry/utils/_compat.py | license: MIT | Python (UTF-8) =====
import sys
try:
from functools32 import lru_cache
except ImportError:
from functools import lru_cache
try:
from glob2 import glob
except ImportError:
from glob import glob
try:
import zipfile as zipp
from importlib import metadata
except ImportError:
import importlib_metadata as metadata
import zipp
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
try:
from os import cpu_count
except ImportError: # Python 2
from multiprocessing import cpu_count
try: # Python 2
long = long
unicode = unicode
basestring = basestring
except NameError: # Python 3
long = int
unicode = str
basestring = str
PY2 = sys.version_info[0] == 2
PY34 = sys.version_info >= (3, 4)
PY35 = sys.version_info >= (3, 5)
PY36 = sys.version_info >= (3, 6)
WINDOWS = sys.platform == "win32"
try:
from shlex import quote
except ImportError:
# PY2
from pipes import quote # noqa
if PY34:
from importlib.machinery import EXTENSION_SUFFIXES
else:
from imp import get_suffixes
EXTENSION_SUFFIXES = [suffix[0] for suffix in get_suffixes()]
if PY35:
from pathlib import Path
else:
from pathlib2 import Path
if not PY36:
from collections import OrderedDict
else:
OrderedDict = dict
if PY35:
import subprocess as subprocess
from subprocess import CalledProcessError
else:
import subprocess32 as subprocess
from subprocess32 import CalledProcessError
if PY34:
# subprocess32 pass the calls directly to subprocess
# on Python 3.3+ but Python 3.4 does not provide run()
# so we backport it
import signal
from subprocess import PIPE
from subprocess import Popen
from subprocess import SubprocessError
from subprocess import TimeoutExpired
class CalledProcessError(SubprocessError):
"""Raised when run() is called with check=True and the process
returns a non-zero exit status.
Attributes:
cmd, returncode, stdout, stderr, output
"""
def __init__(self, returncode, cmd, output=None, stderr=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
self.stderr = stderr
def __str__(self):
if self.returncode and self.returncode < 0:
try:
return "Command '%s' died with %r." % (
self.cmd,
signal.Signals(-self.returncode),
)
except ValueError:
return "Command '%s' died with unknown signal %d." % (
self.cmd,
-self.returncode,
)
else:
return "Command '%s' returned non-zero exit status %d." % (
self.cmd,
self.returncode,
)
@property
def stdout(self):
"""Alias for output attribute, to match stderr"""
return self.output
@stdout.setter
def stdout(self, value):
# There's no obvious reason to set this, but allow it anyway so
# .stdout is a transparent alias for .output
self.output = value
class CompletedProcess(object):
"""A process that has finished running.
This is returned by run().
Attributes:
args: The list or str args passed to run().
returncode: The exit code of the process, negative for signals.
stdout: The standard output (None if not captured).
stderr: The standard error (None if not captured).
"""
def __init__(self, args, returncode, stdout=None, stderr=None):
self.args = args
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
def __repr__(self):
args = [
"args={!r}".format(self.args),
"returncode={!r}".format(self.returncode),
]
if self.stdout is not None:
args.append("stdout={!r}".format(self.stdout))
if self.stderr is not None:
args.append("stderr={!r}".format(self.stderr))
return "{}({})".format(type(self).__name__, ", ".join(args))
def check_returncode(self):
"""Raise CalledProcessError if the exit code is non-zero."""
if self.returncode:
raise CalledProcessError(
self.returncode, self.args, self.stdout, self.stderr
)
def run(*popenargs, **kwargs):
"""Run command with arguments and return a CompletedProcess instance.
The returned instance will have attributes args, returncode, stdout and
stderr. By default, stdout and stderr are not captured, and those attributes
will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them.
If check is True and the exit code was non-zero, it raises a
CalledProcessError. The CalledProcessError object will have the return code
in the returncode attribute, and output & stderr attributes if those streams
were captured.
If timeout is given, and the process takes too long, a TimeoutExpired
exception will be raised.
There is an optional argument "input", allowing you to
pass a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument, as
it will be used internally.
The other arguments are the same as for the Popen constructor.
If universal_newlines=True is passed, the "input" argument must be a
string and stdout/stderr in the returned object will be strings rather than
bytes.
"""
input = kwargs.pop("input", None)
timeout = kwargs.pop("timeout", None)
check = kwargs.pop("check", False)
if input is not None:
if "stdin" in kwargs:
raise ValueError("stdin and input arguments may not both be used.")
kwargs["stdin"] = PIPE
process = Popen(*popenargs, **kwargs)
try:
process.__enter__() # No-Op really... illustrate "with in 2.4"
try:
stdout, stderr = process.communicate(input, timeout=timeout)
except TimeoutExpired:
process.kill()
stdout, stderr = process.communicate()
raise TimeoutExpired(
process.args, timeout, output=stdout, stderr=stderr
)
except:
process.kill()
process.wait()
raise
retcode = process.poll()
if check and retcode:
raise CalledProcessError(
retcode, process.args, output=stdout, stderr=stderr
)
finally:
# None because our context manager __exit__ does not use them.
process.__exit__(None, None, None)
return CompletedProcess(process.args, retcode, stdout, stderr)
subprocess.run = run
subprocess.CalledProcessError = CalledProcessError
def decode(string, encodings=None):
if not PY2 and not isinstance(string, bytes):
return string
if PY2 and isinstance(string, unicode):
return string
encodings = encodings or ["utf-8", "latin1", "ascii"]
for encoding in encodings:
try:
return string.decode(encoding)
except (UnicodeEncodeError, UnicodeDecodeError):
pass
return string.decode(encodings[0], errors="ignore")
def encode(string, encodings=None):
if not PY2 and isinstance(string, bytes):
return string
if PY2 and isinstance(string, str):
return string
encodings = encodings or ["utf-8", "latin1", "ascii"]
for encoding in encodings:
try:
return string.encode(encoding)
except (UnicodeEncodeError, UnicodeDecodeError):
pass
return string.encode(encodings[0], errors="ignore")
def to_str(string):
if isinstance(string, str) or not isinstance(string, (unicode, bytes)):
return string
if PY2:
method = "encode"
else:
method = "decode"
encodings = ["utf-8", "latin1", "ascii"]
for encoding in encodings:
try:
return getattr(string, method)(encoding)
except (UnicodeEncodeError, UnicodeDecodeError):
pass
return getattr(string, method)(encodings[0], errors="ignore")
def list_to_shell_command(cmd):
return " ".join(
'"{}"'.format(token) if " " in token and token[0] not in {"'", '"'} else token
for token in cmd
)

# ===== repo: leezqcst/rs | path: /rs/CBR/test.py | license: none | Python (UTF-8) =====
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
sys.path.append('./..')
from docs import Documents
from CBR import CBR
def show_news_create_time():
global x
local_news = x.get_all_info()
for line in local_news:
print line.get_create_time()
def test_encoding():
s='2014年03月08日12:31'
print s.split('年')
'''
for i in range(10):
print 'parse: ',x.get_all_info()[i].get_userid()
print 'parse: ',x.get_all_info()[i].get_newsid()
print 'parse: ',x.get_all_info()[i].get_title()
print 'parse: ',x.get_all_info()[i].get_tags()
'''
def test_cbr(target_reader_list = []):
input_target_len = len(target_reader_list)
#use training data to build user vector
testing = Documents('user_click_data.txt',True)
test_news = testing.get_AllNews()
user_read_dict = dict()
target_reader = []
#you can specify the target_reader number here!!!!
target_num = 5
if input_target_len != 0:
target_num = input_target_len
get = 0
while len(target_reader)<(target_num):
utemp = test_news[get].get_userid()
get += 1
if not utemp in target_reader:
target_reader.append(utemp)
if input_target_len != 0:
target_reader = target_reader_list
training = Documents('user_click_data.txt',True)
train_news = training.get_AllNews()
cbr = CBR(train_news)
cbr.build_user_vector(target_reader)
cbr.transform_user_vector()
for i in test_news:
uid = i.get_userid()
nid = i.get_newsid()
if uid in target_reader:
pass
else:
#print uid
target_reader.append(uid)
if uid in user_read_dict:
user_read_dict[uid].append(nid)
else:
user_read_dict[uid] = []
user_read_dict[uid].append(nid)
    #if you want to specify the target_reader_list, you can uncomment the following line
#target_reader = target_reader_list
#print target_reader
for i in range(target_num):
recommend = cbr.get_recommendation_list(target_reader[i],test_news)
#print 'recommend: ',recommend
#print 'real: ',user_read_dict[target_reader[i]]
temp = []
for item in recommend:
temp.append(item[0])
count_read = 0
count = 0
for t in user_read_dict[target_reader[i]]:
count_read += 1
if t in temp:
count += 1
print 'reader ',target_reader[i],' read:',len(user_read_dict[target_reader[i]]),' accept: ',count
def test_sort():
show_news_create_time()
global x
local_news = x.get_all_info()
news = Documents.sort_news_by_time(local_news);
print '____________________________________________________'
for i in news:
print i.get_create_time()#do not new.get here
test_cbr([5218791,52550])

# ===== repo: kazusa4418/PythonTraining | path: /set-in.py | license: none | Python (UTF-8) =====
# check whether "ベンチ" (bench) is in box3
box3 = {"ハンマー", "釘", "ベンチ"}
flag = "ベンチ" in box3
print(flag)

# ===== repo: adamkittel/src | path: /VMWsolutions/at2-vclient-032/cft/stress_netbounce_sequential.py | license: none | Python =====
"""
This action will perform a sequential netbounce stress test
When run as a script, the following options/env variables apply:
--mvip The managementVIP of the cluster
SFMVIP env var
--user The cluster admin username
SFUSER env var
--pass The cluster admin password
SFPASS env var
--emailTo List of addresses to send email to
--clientIPs The IP addresses of the clients
--clientUser The username for the client
SFclientUser env var
--clientPass The password for the client
--waitTime The time to wait between nodes
--addSSH Boolean to add SSH keys to nodes
--iteration how many times to loop over the nodes, 0=forever
"""
import sys
import time
from optparse import OptionParser
import lib.libsf as libsf
from lib.libsf import mylog
import logging
import lib.sfdefaults as sfdefaults
from lib.action_base import ActionBase
import node_network_up_down
import get_cluster_master
import push_ssh_keys_to_node
import wait_for_no_faults
import check_client_health
import check_cluster_health
import count_available_drives
import add_available_drives  # used below when spare drives are found after a bounce
import start_gc
import wait_for_gc
import get_active_nodes
import send_email
import clusterbscheck
class StressNetbounceSequentialAction(ActionBase):
class Events:
"""
Events that this action defines
"""
FAILURE = "FAILURE"
EMAIL_SENT = "EMAIL_SENT"
NODES_NOT_FOUND = "NODES_NOT_FOUND"
CLIENT_NOT_HEALTHY = "CLIENT_NOT_HEALTHY"
CLIENT_HEALTHY = "CLIENT_HEALTHY"
CLUSTER_NOT_HEALTHY = "CLUSTER_NOT_HEALTHY"
CLUSTER_HEALTHY = "CLUSTER_HEALTHY"
FAULTS_FOUND = "FAULTS_FOUND"
FAULTS_NOT_FOUND = "FAULTS_NOT_FOUND"
ALL_NODES_FOUND = "ALL_NODES_FOUND"
BEFORE_START_GC = "BEFORE_START_GC"
GC_FINISHED = "GC_FINISHED"
DRIVES_ADDED = "DRIVES_ADDED"
DRIVES_NOT_ADDED = "DRIVES_NOT_ADDED"
MASTER_NODE_NOT_FOUND = "MASTER_NODE_NOT_FOUND"
PUSHED_SSH_KEYS = "PUSHED_SSH_KEYS"
INTERFACE_DOWN = "INTERFACE_DOWN"
INTERFACE_UP = "INTERFACE_UP"
def __init__(self):
super(self.__class__,self).__init__(self.__class__.Events)
def fail(self, message, emailTo):
mylog.error(message)
send_email.Execute(emailTo=emailTo, emailSubject="Failed Stress Netbounce Sequential", emailBody=message)
def ValidateArgs(self, args):
libsf.ValidateArgs({"mvip" : libsf.IsValidIpv4Address,
"username" : None,
"password" : None,
"waitTime" : libsf.IsInteger,
"iteration" : libsf.IsInteger
},
args)
def Execute(self, mvip=sfdefaults.mvip, username=sfdefaults.username, password=sfdefaults.password, clientIPs=None, clientUser=sfdefaults.client_user, clientPass=sfdefaults.client_pass, waitTime=300, emailTo=None, addSSH=False, bsCheck=True, debug=False, iteration=1):
self.ValidateArgs(locals())
if debug:
mylog.console.setLevel(logging.DEBUG)
#if the client ips is empty skip the check on the clients
check_client = True
if not clientIPs:
mylog.info("Skipping health check on clients")
check_client = False
#get list of the nodes in the cluster
mylog.step("Getting Active Nodes")
node_list = get_active_nodes.Get(mvip=mvip, username=username, password=password)
if(node_list == False):
message = "Failied getting active nodes on " + mvip
self.fail(message,emailTo)
self._RaiseEvent(self.Events.NODES_NOT_FOUND)
return False
#if the addSSH key is true then try and add the SSH keys to the nodes
if(addSSH == True):
if push_ssh_keys_to_node.Execute(node_ips=node_list):
mylog.info("Pushed SSH Keys to Nodes")
self._RaiseEvent(self.Events.PUSHED_SSH_KEYS)
else:
message = "Failed pushing SSH keys to Node"
self.fail(message, emailTo)
return False
else:
mylog.info("Not pushing SSH Keys to Nodes")
#iteration_count only used to display the current iteration
iteration_count = 1
loop_forever = False
if(iteration == 0):
loop_forever = True
mylog.warning("Looping Forever")
#record the time of the start of the test
start_time = time.time()
while True:
mylog.banner("Iteration Count: " + str(iteration_count))
#loop over each node
for node in node_list:
mylog.step("Current Node is: " + str(node))
#log the master node
master_node = get_cluster_master.Get(mvip=mvip, username=username, password=password)
if(master_node == False):
message = "Failed to get the master node on " + mvip
mylog.error(message)
self._RaiseEvent(self.Events.MASTER_NODE_NOT_FOUND)
else:
mylog.info("Master Node: " + str(master_node[0]))
#take down the network interface on each node
mylog.step("Bouncing the 10G interface")
if(node_network_up_down.Execute(nodeIP=node, username=username, password=password, interface="10g", action="down") == True):
#mylog.info("Node: " + str(node) + " has been rebooted")
#self._RaiseEvent(self.Events.NODE_REBOOTED)
self._RaiseEvent(self.Events.INTERFACE_DOWN)
else:
message = "The Bond10G interface was not taken down on: " + node
self.fail(message, emailTo)
#self._RaiseEvent(self.Events.REBOOT_NODE_FAIL)
return False
#wait 1 minute
mylog.step("Waiting 60 seconds")
time.sleep(60)
#try to bring the network interface back uo
if(node_network_up_down.Execute(nodeIP=node, username=username, password=password, interface="10g", action="up") == True):
self._RaiseEvent(self.Events.INTERFACE_UP)
else:
message = "The Bond10G interface was not brought back up on: " + node
self.fail(message, emailTo)
return False
#wait another minute
mylog.step("Waiting 60 seconds")
time.sleep(60)
mylog.step("Wait for faults to clear")
#wait for faults to clear
if(wait_for_no_faults.Execute(mvip) == True):
#mylog.info("No faults found on " + mvip)
self._RaiseEvent(self.Events.FAULTS_NOT_FOUND)
else:
message = "Faults found on " + mvip
self.fail(message, emailTo)
self._RaiseEvent(self.Events.FAULTS_FOUND)
return False
#make sure the cluster is healthy
mylog.step("Check cluster health")
if(check_cluster_health.Execute(mvip, since=start_time) == True):
mylog.info("Cluster " + mvip + " is Healthy")
self._RaiseEvent(self.Events.CLUSTER_HEALTHY)
else:
message = "Cluster " + mvip + " failed health check"
self.fail(message, emailTo)
self._RaiseEvent(self.Events.CLUSTER_NOT_HEALTHY)
return False
if bsCheck:
mylog.step("Performing a Cluster BS Check")
if clusterbscheck.Execute(mvip=mvip, username=username, password=password) == False:
message = mvip + ": FAILED Cluster BS Check"
self.fail(message, emailTo)
return False
#Check the health of the clients
if(check_client == True):
mylog.step("Check client health")
if(check_client_health.Execute(client_ips=clientIPs, client_user=clientUser, client_pass=clientPass) == True):
mylog.info("Client is Healthy")
self._RaiseEvent(self.Events.CLIENT_HEALTHY)
else:
message = "Failed client health check"
self.fail(message, emailTo)
self._RaiseEvent(self.Events.CLIENT_NOT_HEALTHY)
return False
#check to see if there are available drives because the node took too long to reboot
mylog.step("Looking for available drives")
available_drives = count_available_drives.Get(mvip=mvip, username=username, password=password)
if available_drives == -1:
mylog.error("Unable to get a count of available drives")
if available_drives > 0:
#notify the user about this but continue the test
send_email.Execute(emailTo=emailTo, emailSubject=mvip + ": There are available drives to add to the cluster: ")
                    #add the drives back to the cluster and wait for sync
if(add_available_drives.Execute(mvip=mvip, username=username, password=password) == True):
mylog.info("Available drives were added to the cluster")
else:
message = "Available drives were not added to the cluster"
self.fail(message, emailTo)
return False
#check the health of the clients
if(check_client == True):
mylog.step("Check client health")
if(check_client_health.Execute(client_ips=clientIPs, client_user=clientUser, client_pass=clientPass) == True):
mylog.info("Client is Healthy")
self._RaiseEvent(self.Events.CLIENT_HEALTHY)
else:
message = "Failed client health check"
self.fail(message, emailTo)
self._RaiseEvent(self.Events.CLIENT_NOT_HEALTHY)
return False
#wait before going to the next node
if(waitTime > 0):
mylog.step("Waiting for " + str(waitTime) + " seconds")
time.sleep(waitTime)
#end node for loop
#start gc to keep the cluster from filling up, once per iteration
mylog.step("Garbage Collection")
self._RaiseEvent(self.Events.BEFORE_START_GC)
if(start_gc.Execute(mvip=mvip) == True):
pass
else:
message = "GC not started"
self.fail(message, emailTo)
return False
#wait for gc to finish
if(wait_for_gc.Execute(mvip=mvip) == True):
mylog.info("GC finished")
self._RaiseEvent(self.Events.GC_FINISHED)
else:
message = "GC failed to finish"
self.fail(message, emailTo)
self._RaiseEvent(self.Events.FAILURE)
return False
iteration_count += 1
#if iteration is not set to 0 from the start we will decrement iteration
if(loop_forever == False):
iteration -= 1
if(iteration <= 0):
break
end_time = time.time()
delta_time = libsf.SecondsToElapsedStr(end_time - start_time)
#calc stats
iteration_count -= 1
num_of_nodes = len(node_list)
time_per_iteration = (end_time - start_time) / iteration_count
time_per_node = time_per_iteration / num_of_nodes
time_per_iteration = libsf.SecondsToElapsedStr(time_per_iteration)
time_per_node = libsf.SecondsToElapsedStr(time_per_node)
emailBody = ("Number of Nodes:------ " + str(num_of_nodes) +
"\nIteration Count:------ " + str(iteration_count) +
"\nTime Per Iteration:--- " + time_per_iteration +
"\nTime Per Node:-------- " + time_per_node +
"\nTotal Time:----------- " + delta_time)
send_email.Execute(emailTo=emailTo, emailSubject=mvip + ": Finished Stress Netbounce Sequential in " + delta_time, emailBody=emailBody)
mylog.info("\tNumber of Nodes: " + str(num_of_nodes))
mylog.info("\tIteration Count: " + str(iteration_count))
mylog.info("\tTime Per Iteration: " + time_per_iteration)
mylog.info("\tTime Per Node: " + time_per_node)
mylog.info("\tTotal Time: " + delta_time)
mylog.passed("The Stress Netbounce Sequential Test has passed")
return True
# Instantate the class and add its attributes to the module
# This allows it to be executed simply as module_name.Execute
libsf.PopulateActionModule(sys.modules[__name__])
if __name__ == '__main__':
mylog.debug("Starting " + str(sys.argv))
parser = OptionParser(option_class=libsf.ListOption, description=libsf.GetFirstLine(sys.modules[__name__].__doc__))
parser.add_option("-m", "--mvip", type="string", dest="mvip", default=sfdefaults.mvip, help="the management IP of the cluster")
parser.add_option("-u", "--user", type="string", dest="username", default=sfdefaults.username, help="the admin account for the cluster")
parser.add_option("-p", "--pass", type="string", dest="password", default=sfdefaults.password, help="the admin password for the cluster")
parser.add_option("-c", "--client_ips", action="list", dest="clientIPs", default=None, help="the IP addresses of the clients")
parser.add_option("--wait_time", type="int", dest="waitTime", default=300, help="wait time after each node reboot")
parser.add_option("--add_ssh", action="store_false", dest="addSSH", default=False, help="Add the SSH key to each node")
parser.add_option("--email_to", action="list", dest="emailTo", default=None, help="the list of email addresses to send to")
parser.add_option("--client_user", type="string", dest="clientUser", default=sfdefaults.client_user, help="the username for the clients [%default]")
parser.add_option("--client_pass", type="string", dest="clientPass", default=sfdefaults.client_pass, help="the password for the clients [%default]")
parser.add_option("--debug", action="store_true", dest="debug", default=False, help="display more verbose messages")
parser.add_option("--bs_check", action="store_true", dest="bs_check", default=False, help="Do a cluster BS check")
parser.add_option("--iteration", type="int", dest="iteration", default=1, help="how many times to loop over the nodes, 0=forever")
(options, extra_args) = parser.parse_args()
try:
timer = libsf.ScriptTimer()
if Execute(options.mvip, options.username, options.password, options.clientIPs, options.clientUser, options.clientPass, options.waitTime, options.emailTo, options.addSSH, options.bs_check, options.debug, options.iteration):
sys.exit(0)
else:
sys.exit(1)
except libsf.SfArgumentError as e:
mylog.error("Invalid arguments - \n" + str(e))
sys.exit(1)
except SystemExit:
raise
except KeyboardInterrupt:
mylog.warning("Aborted by user")
Abort()
sys.exit(1)
except:
mylog.exception("Unhandled exception")
sys.exit(1)
| UTF-8 | Python | false | false | 15,928 | py | 489 | stress_netbounce_sequential.py | 296 | 0.57716 | 0.574272 | 0 | 363 | 42.878788 | 272 |
Diego0103/PracticaIf | 2,576,980,391,848 | 7abc1d3c891b81c0093e718f2e11acb3a35a20e8 | 124f395f3b82b6738d9b3906b6d9f7292a7a4a3c | /assignments/PruebasBasicas/src/exercise.py | d16ef71089bcc92334aac4695963dc0f08b81d67 | [] | no_license | https://github.com/Diego0103/PracticaIf | bb1c542289b6b4b78e9c1663c598b18f21313d32 | d4a1016ff98e1b7f7e8117230cbd3c0fddc9de7b | refs/heads/main | "2023-07-17T03:47:25.019751" | "2021-08-30T04:21:05" | "2021-08-30T04:21:05" | 399,971,151 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def main():
    # write your code below this line
    edad = int(input("What is your age? "))
    if edad >= 18:
        print("You are of legal age")
    else:
        print("You are a minor")
if __name__=='__main__':
main()
| UTF-8 | Python | false | false | 233 | py | 5 | exercise.py | 5 | 0.536797 | 0.528139 | 0 | 11 | 20 | 42 |
YINGHUOQRIN/shadowsocks-munager-1 | 11,665,131,223,992 | a77d746e5577358abaaf491dd1708aa7b4471aeb | f58e62569840d1351e0d9611ded9c8f164801aff | /Munager/SpeedTestManager/__init__.py | d5002087fedb64a63344fc7a6d0a2a2e184f6a5f | [] | no_license | https://github.com/YINGHUOQRIN/shadowsocks-munager-1 | 9b6f43d08fa2431826bb7ff05bbf8c3dc302fe90 | d6e664bc041c41d5cd8dcad4a59265f3a6a8d84a | refs/heads/muapi | "2020-04-16T09:40:06.819732" | "2019-01-07T03:35:16" | "2019-01-07T03:35:16" | 165,472,800 | 1 | 2 | null | true | "2019-01-13T06:18:47" | "2019-01-13T06:18:46" | "2019-01-13T06:18:42" | "2019-01-07T09:17:10" | 13,547 | 0 | 0 | 0 | null | false | null | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import logging
import time
import sys
import os
from .speedtest import Speedtest
def speedtest_thread():
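    """Benchmark the best China Telecom, Unicom and Mobile speedtest.net
    servers and return a single-element list of ping/upload/download strings
    for each carrier."""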
logging.info("Speedtest starting...You can't stop right now!")
CTid = 0
speedtest_ct = Speedtest()
speedtest_ct.get_servers()
servers_list = []
for _, servers in sorted(speedtest_ct.servers.items()):
for server in servers:
if server['country'].find(
'China') != -1 and server['sponsor'].find('Telecom') != -1:
servers_list.append(server)
speedtest_ct.get_best_server(servers_list)
results_ct = speedtest_ct.results
CTPing = str(results_ct.server['latency']) + ' ms'
speedtest_ct.download()
CTDLSpeed = str(
round(
(results_ct.download / 1000 / 1000),
2)) + " Mbit/s"
speedtest_ct.upload()
CTUpSpeed = str(
round(
(results_ct.upload / 1000 / 1000),
2)) + " Mbit/s"
CUid = 0
speedtest_cu = Speedtest()
speedtest_cu.get_servers()
servers_list = []
for _, servers in sorted(speedtest_cu.servers.items()):
for server in servers:
if server['country'].find(
'China') != -1 and server['sponsor'].find('Unicom') != -1:
servers_list.append(server)
speedtest_cu.get_best_server(servers_list)
results_cu = speedtest_cu.results
CUPing = str(results_cu.server['latency']) + ' ms'
speedtest_cu.download()
CUDLSpeed = str(
round(
(results_cu.download / 1000 / 1000),
2)) + " Mbit/s"
speedtest_cu.upload()
CUUpSpeed = str(
round(
(results_cu.upload / 1000 / 1000),
2)) + " Mbit/s"
CMid = 0
speedtest_cm = Speedtest()
speedtest_cm.get_servers()
servers_list = []
for _, servers in sorted(speedtest_cm.servers.items()):
for server in servers:
if server['country'].find(
'China') != -1 and server['sponsor'].find('Mobile') != -1:
servers_list.append(server)
speedtest_cm.get_best_server(servers_list)
results_cm = speedtest_cm.results
CMPing = str(results_cm.server['latency']) + ' ms'
speedtest_cm.download()
CMDLSpeed = str(
round(
(results_cm.download / 1000 / 1000),
2)) + " Mbit/s"
speedtest_cm.upload()
CMUpSpeed = str(
round(
(results_cm.upload / 1000 / 1000),
2)) + " Mbit/s"
result = [{'telecomping': CTPing,
'telecomeupload': CTUpSpeed,
'telecomedownload': CTDLSpeed,
'unicomping': CUPing,
'unicomupload': CUUpSpeed,
'unicomdownload': CUDLSpeed,
'cmccping': CMPing,
'cmccupload': CMUpSpeed,
'cmccdownload': CMDLSpeed}]
return result
if __name__ == "__main__":
print(speedtest_thread())
| UTF-8 | Python | false | false | 3,173 | py | 4 | __init__.py | 2 | 0.514025 | 0.493854 | 0 | 96 | 32.052083 | 79 |
davidbernick/djangotest | 7,679,401,553,158 | ad95583e14ab2ba1b50cc4aacef9d1f84e60b9db | f6e829f75f7b293ca4e6a5b5253d8c34685bcd11 | /setup.py | 7c13592c6d05e2cec51f7561c4a77ccfaaf9cfe6 | [] | no_license | https://github.com/davidbernick/djangotest | 15fce3f56095ab6b071670f65f44790b57247136 | 1b192afafe61cc97814e095666a3c1044bbc1e57 | refs/heads/master | "2021-01-17T04:51:03.570874" | "2014-06-23T14:25:47" | "2014-06-23T14:25:47" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
from setuptools import setup
setup(
name='DjangoTest',
version='1.0',
description='DjangoTest',
author='David Bernick',
author_email='dbernick@gmail.com',
install_requires=['django',
'django-guardian',
'boto',
'django-social-auth',
'django-bootstrap3',
'djangorestframework',
'markdown',
'django-filter',
'south'
],
) | UTF-8 | Python | false | false | 563 | py | 10 | setup.py | 8 | 0.442274 | 0.436945 | 0 | 21 | 25.857143 | 44 |
lucassilva-dev/codigo_Python | 19,542,101,218,818 | ebdf14078ded6f66fa4eea11aa3422ab5fd5f6bb | 4491270a0e3754a959cd7582db51bb08583e00f3 | /ex028.py | 8ba1f67140f12aa45d62ea1ca93a3c1eb2c16f56 | [] | no_license | https://github.com/lucassilva-dev/codigo_Python | b6f1b24c08336fe121000b24204fd1cf2b728a8b | 68fc9901cc8550085e1dee893d30634ac05b9c1d | refs/heads/master | "2023-04-02T02:17:03.768906" | "2021-04-07T00:41:29" | "2021-04-07T00:41:29" | 355,367,151 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
sorte = [0, 1, 2, 3, 4, 5]
resultado = random.choice(sorte)
usuario = int(input('I am thinking of a number from 0 to 5. What is it? '))
if usuario == resultado:
    print('You got it right, congratulations!')
else:
    print('You got it wrong! The correct number is {}!'.format(resultado))
| UTF-8 | Python | false | false | 282 | py | 99 | ex028.py | 98 | 0.672727 | 0.643636 | 0 | 8 | 33.375 | 68 |
ahmadmoaaz2/Services | 2,413,771,647,591 | d34190aa107b2eccf2e2a36cbb3a97b001a333c1 | 171f2f676911283f250c7c9c71f85983cde452e5 | /StorageService/food_and_water_readings.py | 80b3423b19faa9ed4b1fc69b647a0ef9b04d805d | [] | no_license | https://github.com/ahmadmoaaz2/Services | 68824ecf763a8c6b59d475712fb2ff06f7f3e8f3 | 5b05c9ff1bcdfeda3ee118ea007b7b999e739b55 | refs/heads/main | "2023-05-02T00:21:42.380305" | "2021-05-25T19:06:32" | "2021-05-25T19:06:32" | 306,156,435 | 0 | 0 | null | false | "2020-10-21T22:07:28" | "2020-10-21T21:52:58" | "2020-10-21T22:04:54" | "2020-10-21T22:07:28" | 0 | 0 | 0 | 0 | null | false | false | from sqlalchemy import Column, Integer, String
from base import Base
from datetime import datetime
class FoodAndWaterReadings(Base):
""" Food and Water Readings Class """
__tablename__ = "food_and_water_readings"
id = Column(Integer, primary_key=True)
name = Column(String(250), nullable=False)
var1_name = Column(String(250))
var1_value = Column(String(250))
var2_name = Column(String(250))
var2_value = Column(String(250))
var3_name = Column(String(250))
var3_value = Column(String(250))
var4_name = Column(String(250))
var4_value = Column(String(250))
var5_name = Column(String(250))
var5_value = Column(String(250))
date_created = Column(String(100), nullable=False)
def __init__(self, name, var1_name, var1_value, var2_name, var2_value, var3_name, var3_value, var4_name, var4_value, var5_name, var5_value):
""" Initializes a blood pressure reading """
self.name = name
self.var1_name = var1_name
self.var1_value = var1_value
self.var2_name = var2_name
self.var2_value = var2_value
self.var3_name = var3_name
self.var3_value = var3_value
self.var4_name = var4_name
self.var4_value = var4_value
self.var5_name = var5_name
self.var5_value = var5_value
self.date_created = datetime.now()
def to_dict(self):
""" Dictionary Representation of a blood pressure reading """
return {
'id': self.id,
'name': self.name,
'var1_name': self.var1_name,
'var1_value': self.var1_value,
'var2_name': self.var2_name,
'var2_value': self.var2_value,
'var3_name': self.var3_name,
'var3_value': self.var3_value,
'var4_name': self.var4_name,
'var4_value': self.var4_value,
'var5_name': self.var5_name,
'var5_value': self.var5_value,
'date_created': self.date_created
}
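# Minimal usage sketch (illustrative only; the values are made up and a
# configured SQLAlchemy session named `session` is assumed, which this module
# does not provide):
#
#   reading = FoodAndWaterReadings('Rex', 'food_g', '250', 'water_ml', '500',
#                                  None, None, None, None, None, None)
#   session.add(reading)
#   session.commit()
#   print(reading.to_dict())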
| UTF-8 | Python | false | false | 2,007 | py | 29 | food_and_water_readings.py | 17 | 0.599402 | 0.55157 | 0 | 56 | 34.839286 | 144 |
ekagarwala/codingChallenges | 8,220,567,442,002 | 49b408ca31f983a3545f40e36daee6ad0e0f8432 | 1df43e0bb58a2349f7196f455e69da7969e19e8e | /equality/equality.py | b341984396c94748427f395ff0de1191f97919b4 | [] | no_license | https://github.com/ekagarwala/codingChallenges | c4dfd4d60866fb90422d0f9a4b37db55a76eba23 | d94d069e8d6e3dbb3daa7cbe3a78c0f19977bb1f | refs/heads/master | "2021-01-12T13:23:40.398202" | "2016-09-24T02:40:35" | "2016-09-24T02:40:35" | 68,974,612 | 0 | 0 | null | false | "2016-09-24T02:40:35" | "2016-09-23T00:52:51" | "2016-09-23T01:42:32" | "2016-09-24T02:40:35" | 4 | 0 | 0 | 1 | Python | null | null | import os
import re
os.chdir("e:\\repos\\codingChallenges\\data")
message = ''
possibilities = []
with open('../data/equality.txt', 'r') as inFile:
inText = inFile.readlines()
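# The pattern matches a lowercase letter guarded by exactly three capitals on
# each side; the character 4 positions past the match start is that guarded
# letter, which gets appended to the message.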
pat = re.compile("[a-z][A-Z]{3}[a-z][A-Z]{3}[a-z, \n]")
for line in inText:
match = re.search(pat, line)
if match is not None:
message += line[match.span()[0] + 4]
possibilities.append(line[match.span()[0]:match.span()[1]])
print message
| UTF-8 | Python | false | false | 447 | py | 3 | equality.py | 3 | 0.615213 | 0.60179 | 0 | 19 | 22.526316 | 67 |
thiago-ximenes/curso-python | 13,804,024,917,302 | 2fbf23c530922b0fd0cf4838dd4cd9a75f0e3890 | 15172fb42c923513108b09a32d7f6dc242d420b3 | /pythonexercicios/ex090.py | 230dd251a2caea795c2bc4c7434722a9e9fabed3 | [] | no_license | https://github.com/thiago-ximenes/curso-python | 05fa3501972e878c4d0e8ffb07af5e86efdd6859 | f0a1b98c42c9ff7329344a15460b20b577345b80 | refs/heads/master | "2023-06-21T03:26:37.992969" | "2021-07-27T21:27:04" | "2021-07-27T21:27:04" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | aluno = {}
aluno['name'] = str(input('Name: ')).strip().title()
aluno['average'] = float(input(f'Average grade for {aluno["name"]}: '))
if aluno['average'] >= 7:
    aluno['status'] = 'Approved'
elif aluno['average'] < 5:
    aluno['status'] = 'Failed'
else:
    aluno['status'] = 'Make-up exam'
print('-=' * 20)
print()
for k, v in aluno.items():
print(f'- {k}: {v}')
print()
print('-=' * 20)
| UTF-8 | Python | false | false | 403 | py | 138 | ex090.py | 137 | 0.565217 | 0.549872 | 0 | 15 | 25.066667 | 60 |
cyrus723/ml_python_micheal_bowles | 15,616,501,105,250 | d9b1b58543cbf52cd338fa5d9fddb8ef2fe094ac | 1151021d25349882ee81b4164fe38033d72db2f3 | /chapter2/code/listing2-5.py | 4ca067d0d0a137fea825373b1378bdc33833e79f | [] | no_license | https://github.com/cyrus723/ml_python_micheal_bowles | cd105e9c91fc0153a9b5a95138566a0142a89658 | 162df5676089788d6cd2e99a4bf865b79876df2f | refs/heads/master | "2022-03-29T23:10:44.432082" | "2018-08-10T13:27:57" | "2018-08-10T13:27:57" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import matplotlib.pyplot as plt
import os
import pandas as pd
from pandas import DataFrame
datafile = os.path.join(os.getcwd(),
'../../datasets/sonar.all-data')
rocks_v_mines = pd.read_csv(datafile, header=None, prefix='V')
# Print head/tail of data frame
print(rocks_v_mines.head())
print(rocks_v_mines.tail())
summary = rocks_v_mines.describe()
print(summary) | UTF-8 | Python | false | false | 389 | py | 54 | listing2-5.py | 46 | 0.691517 | 0.691517 | 0 | 16 | 23.375 | 62 |
ksteph/cs61a-lab-reports | 3,590,592,699,790 | c0dcebb26885e4bde5e2d8b21420476209df8406 | 86e494f01e9af426fd4bda0167492f78009ec8db | /no_spark/lab_report.py | 6d54b858bd85ce2464ae6870bbb20a0e250ce4b7 | [] | no_license | https://github.com/ksteph/cs61a-lab-reports | 35ad3e719bac5e01c48868888b696565a37af322 | 7fc42823cdf344e79f7b72bcd54929054000834c | refs/heads/master | "2020-04-06T04:28:59.520568" | "2015-09-25T01:01:36" | "2015-09-25T01:01:36" | 41,571,165 | 1 | 0 | null | false | "2015-09-08T21:18:51" | "2015-08-28T22:03:25" | "2015-09-04T20:27:07" | "2015-09-08T21:18:51" | 196 | 0 | 0 | 0 | Python | null | null | import sys
import numpy as np
import datetime
import os
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import data_reader
import toolkit
def usage_information():
info = 'Lab report generator for CS61a\n'
info += 'Usage: lab_report.py <data_file_directory> <lab_name>'
return info
if len(sys.argv) < 3:
    print usage_information()
    sys.exit(1)
print 'Generate report for {}'.format(sys.argv[2])
os.chdir('../report')
if not os.path.exists('{}_report'.format(sys.argv[2])):
os.makedirs('{}_report'.format(sys.argv[2]))
os.chdir('./{}_report'.format(sys.argv[2]))
################
### Get data ###
################
data = data_reader.SetupData(sys.argv[1], sys.argv[2])
data_set = data.data_set
##################################
### Initialize latex formatter ###
##################################
formatter = toolkit.LatexFormatter(os.getcwd()+'/../template/', 'report.tex')
formatter.set_param('name_map', data.name_map)
formatter.set_param('LAB_NAME', sys.argv[2].upper())
#########################
### Basic information ###
#########################
NUM_STUDENT = data_set.get_element_count('student')
formatter.set_param('NUM_STUDENT', NUM_STUDENT)
NUM_QUESTION = data_set.get_element_count('specifier')
formatter.set_param('NUM_QUESTION', NUM_QUESTION)
NUM_SUBMISSION = data_set.count()
formatter.set_param('NUM_SUBMISSION', NUM_SUBMISSION)
NUM_DISTINCT_ANSWER = data_set.get_element_count('answer')
formatter.set_param('NUM_DISTINCT_ANSWER', NUM_DISTINCT_ANSWER)
NUM_WRONG_ANSWER = data_set.filter_by(lambda x: not x['result']).count()
formatter.set_param('NUM_WRONG_ANSWER', NUM_WRONG_ANSWER)
formatter.set_param('RATIO', '{:.2f}'.format(NUM_WRONG_ANSWER*100/float(NUM_SUBMISSION)))
######################################
### Number of sessions per student ###
######################################
session_counter = []
tem_data = data_set.sort_by(lambda x: x['a_time']).group_by(lambda x: x['student'])
for item in tem_data:
# WARNING: This may throw an exception!
session_cnt = toolkit.get_session_info(item[1])
if session_cnt == 0:
continue
while session_cnt > len(session_counter):
session_counter.append(0)
session_counter[session_cnt-1] += 1
# Print it out
fig, ax = toolkit.prepare_plot(gridWidth=0)
x = np.arange(len(session_counter))
width = 0.5
rects = plt.bar(x, session_counter, width, color='gray', alpha=0.7, edgecolor="gray")
plt.ylabel('Number of students')
plt.xticks(x + width/2., ["{:,} session(s)".format(item+1) for item in range(len(session_counter))])
toolkit.autolabel(rects, ax)
plt.savefig('session_cnt.png')
########################################
### Number of submissions per prompt ###
########################################
submission_counter = [0] * NUM_QUESTION
for prompt in range(NUM_QUESTION):
submission_counter[prompt] = data_set.filter_by(lambda x: x['specifier']==prompt).count()
# Print it out
width = 0.5
fig, ax = toolkit.prepare_plot(gridWidth=0)
x = np.arange(NUM_QUESTION)
rects = plt.bar(x, submission_counter, width, color='gray', alpha=0.7, edgecolor="gray")
plt.ylabel('Number of submissions')
plt.xticks(x + width/2., [item for item in range(NUM_QUESTION)])
plt.xlabel('Prompt ID')
toolkit.autolabel(rects, ax, 2)
plt.savefig('submission_per_prompt.png')
#################################################
### Number of unique wrong answers per prompt ###
#################################################
unique_answer_counter = [0] * NUM_QUESTION
for prompt in range(NUM_QUESTION):
unique_answer_counter[prompt] = data_set.filter_by(lambda x: x['specifier']==prompt and not x['result']).get_element_count('answer')
# Print it out
width = 0.5
fig, ax = toolkit.prepare_plot(gridWidth=0)
x = np.arange(NUM_QUESTION)
rects = plt.bar(x, unique_answer_counter, width, color='gray', alpha=0.7, edgecolor="gray")
plt.ylabel('Number of unique answers')
plt.xticks(x + width/2., [item for item in range(NUM_QUESTION)])
plt.xlabel('Prompt ID')
toolkit.autolabel(rects, ax, 2)
plt.savefig('unique_answer_per_prompt.png')
#####################################
### Number of attempts per prompt ###
#####################################
attempt_counter = [[0 for i in xrange(10)] for j in xrange(NUM_QUESTION)]
for prompt in range(NUM_QUESTION):
tem_data = data_set.filter_by(lambda x: x['specifier']==prompt)
tem_data = tem_data.group_by(lambda x: x['student']).map(lambda x: len(x[1]))
for item in tem_data:
attempt_counter[prompt][np.min([item-1, 9])] += 1
attempt_counter = np.array(attempt_counter).T
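# After the transpose: rows are attempt-count buckets (1 up to 10+ attempts),
# columns are prompt IDs; this feeds the stacked bar chart below.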
# print result
ind = np.arange(NUM_QUESTION)
width = 0.5
fig, ax = toolkit.prepare_plot(gridWidth=0)
tem_bottom = [0]*NUM_QUESTION
color = plt.cm.Blues(np.linspace(0, 1, 10))
p = []
for i in range(10):
bar = plt.bar(ind, attempt_counter[i], width, color=color[i], bottom=tem_bottom, edgecolor=color[i])
p.append(bar)
for j in range(NUM_QUESTION):
tem_bottom[j] += attempt_counter[i][j]
plt.ylabel('Number of students')
plt.xticks(ind+width/2., [x for x in range(NUM_QUESTION)])
plt.xlabel('Prompt ID')
plt.gcf().subplots_adjust(right=0.8)
plt.legend(p, (x+1 for x in range(10)), loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('attempt_cnt.png')
formatter.set_param('attempt_counter', np.array(attempt_counter).T)
########################
### Time information ###
########################
selected_data = data_set.sort_by(lambda x: x['a_time'])
start_time = selected_data[0]['a_time']
end_time = selected_data[-1]['a_time']
tem_lmt = start_time
buckets = []
while tem_lmt < end_time:
tem_lmt += datetime.timedelta(0, 3600)
buckets.append(tem_lmt)
time_info_counter = [[0 for j in range(len(buckets)+1)] for i in range(NUM_QUESTION)]
for prompt in range(NUM_QUESTION):
tem_data = data_set.filter_by(lambda x: x['specifier']==prompt)
for item in tem_data.map(lambda x: x['a_time']):
for index in range(len(buckets)):
if item < buckets[index]:
break
        # submissions past the last hourly boundary go into the overflow slot
        if index == len(buckets) - 1 and item >= buckets[index]:
index += 1
time_info_counter[prompt][index] += 1
# Print it out
plot_data = [np.array([0]*(len(buckets)+1))]
for item in time_info_counter:
plot_data.append(plot_data[-1]+np.array(item))
x_ax = np.arange(len(buckets)+1)
fig, ax = toolkit.prepare_plot(gridWidth=0.5, figsize=(10.5, 8))
c_patches = []
for i in range(NUM_QUESTION):
plt.fill_between(x_ax, plot_data[i+1], plot_data[i], facecolor=plt.cm.rainbow_r(0.1*i), label='Prompt {}'.format(i))
c_patches.append(mpatches.Patch(color=plt.cm.rainbow_r(0.1*i), label=data.name_map[i]))
# plt.legend(handles=c_patches)
plt.legend()
plt.xticks(x_ax[1::2], [(time-datetime.timedelta(0, 4200)).strftime('%m-%d %H:%M') for time in buckets[0::2]], rotation='40', ha='right')
plt.xlabel("Timestamp (mm-dd hh:mm)")
plt.ylabel("Number of submissions")
plt.savefig('time_info.png')
#########################################################
### Most common wrong answers and first wrong answers ###
#########################################################
wrong_answers_counter = [[] for i in range(NUM_QUESTION)]
first_answers_counter = [[] for i in range(NUM_QUESTION)]
tem_data_set = data_set.filter_by(lambda x: not x['result'])
for prompt in range(NUM_QUESTION):
selected_data = tem_data_set.filter_by(lambda x: x['specifier']==prompt)
total_num = selected_data.count()
selected_data = selected_data.group_by(lambda x: x['answer']).map(lambda x: (x[0], len(x[1]), len(x[1])/float(total_num)))
selected_data = selected_data.sort_by(lambda x: -x[1])
if selected_data.count() > 10:
wrong_answers_counter[prompt] = selected_data[:10]
else:
wrong_answers_counter[prompt] = selected_data
selected_data = tem_data_set.filter_by(lambda x: x['specifier']==prompt).sort_by(lambda x: x['a_time']).group_by(lambda x: x['student']).map(lambda x: x[1][0])
total_num = selected_data.count()
selected_data = selected_data.group_by(lambda x: x['answer']).map(lambda x: (x[0], len(x[1]), len(x[1])/float(total_num)))
selected_data = selected_data.sort_by(lambda x: -x[1])
if selected_data.count() > 10:
first_answers_counter[prompt] = selected_data[:10]
else:
first_answers_counter[prompt] = selected_data
formatter.set_param('wrong_answers_counter', wrong_answers_counter)
formatter.set_param('first_answers_counter', first_answers_counter)
#################################
### Response time information ###
#################################
RESPONSE_BUCKETS = [60, 120, 300, 600]
response_time_counter = [[0 for i in range(len(RESPONSE_BUCKETS)+1)] for j in range(NUM_QUESTION)]
for prompt in range(NUM_QUESTION):
selected_data = data_set.filter_by(lambda x: x['specifier']==prompt)
selected_data = selected_data.sort_by(lambda x: x['a_time']).group_by(lambda x: x['student'])
selected_data = selected_data.map(lambda x: toolkit.get_time_information(x[1]).total_seconds())
for item in selected_data:
for index in range(len(RESPONSE_BUCKETS)):
if item < RESPONSE_BUCKETS[index]:
break
if index == 3 and item > RESPONSE_BUCKETS[3]:
index = 4
response_time_counter[prompt][index] += 1
ind = np.arange(len(RESPONSE_BUCKETS)+1)
width = 1
fig, ax = toolkit.prepare_plot(gridWidth=0)
rects = plt.bar(ind, response_time_counter[prompt], width, color='gray', edgecolor='gray')
plt.ylabel('#Students')
plt.xlabel('Time point (min)')
plt.xticks([loc+width for loc in ind], [t/60 for t in RESPONSE_BUCKETS])
toolkit.autolabel(rects, ax)
plt.savefig('response_time_{}'.format(prompt))
#########################
### Render the report ###
#########################
formatter.render('{}_report.tex'.format(sys.argv[2]))
| UTF-8 | Python | false | false | 9,812 | py | 9 | lab_report.py | 4 | 0.627395 | 0.612311 | 0 | 236 | 40.576271 | 163 |
elishevasheinin/DNA-command-prompt | 8,186,207,710,052 | fe6e4b0a6e0c7484baf88190eb41a602153a0cf0 | ee8da0c94006f8cbf96716bdbb15c7d7ef07573c | /Analysis/__init__.py | 20c28462fbfe498cfc3fdefb15ddfafb21860f8e | [] | no_license | https://github.com/elishevasheinin/DNA-command-prompt | 6d2a0e7ebc139a0bb5cab444c3e53421dc6bde3a | ed336e2dc409e99f1ae825da039e2cb495a7a7ec | refs/heads/master | "2023-06-17T18:41:40.301627" | "2021-07-18T13:14:16" | "2021-07-18T13:14:16" | 387,178,072 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .count import Count
from .find import Find
from .find_all import FindAll
from .len import Len
| UTF-8 | Python | false | false | 101 | py | 36 | __init__.py | 34 | 0.772277 | 0.772277 | 0 | 4 | 23.75 | 29 |
kkraoj/wildfire-risk-treehacks | 19,155,554,152,486 | 85d7eef52218a2d0c4b38990448d55ed6b90146f | d0419a9e083073b052faa78a17566f5e4e77f343 | /save_map_as_tif_p27.py | 62131c0b7464ea6d2a3a19e66c5fb7f00105618f | [] | no_license | https://github.com/kkraoj/wildfire-risk-treehacks | e9641ea7aedeb3294bcf59486849aff6ccde1e3e | a99916d7454535ad9243b9f78e378914fa5b68d3 | refs/heads/master | "2021-01-05T05:26:27.308794" | "2020-03-16T02:55:05" | "2020-03-16T02:55:05" | 240,896,391 | 4 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 13 07:14:21 2019
@author: kkrao
"""
import numpy as np
import pandas as pd
from osgeo import gdal
from osgeo import gdal_array
from osgeo import osr
import matplotlib.pylab as plt
from datetime import datetime
for MoY in range(7, 8):
date = '2018-%02d-01'%(MoY)
print('[INFO] Making lfmc tif for %s at %s'%(date,datetime.now().strftime("%H:%M:%S")))
#fname = 'map/fmc_map_%s'%date
fname = r'D:\Krishna\projects\vwc_from_radar\data\map\dynamic_maps\fmc_map_%s'%date
# latlon = pd.read_csv(fname+'.csv', index_col = 0)
df = pd.read_pickle(fname)
# mask = pd.read_csv(r'D:\Krishna\projects\vwc_from_radar\data\map\mask_classified_2018_07_01.csv', index_col = 0)
# df = pd.merge(latlon,mask, on=['latitude','longitude'])
# df.loc[df['mask']==1,'pred_fmc'] = -9999
df['lat_index'] = df.latitude.rank(method = 'dense', ascending = False).astype(int)-1
df['lon_index'] = df.longitude.rank(method = 'dense', ascending = True).astype(int)-1
u_lons = np.sort(df.longitude.unique())
u_lats = np.sort(df.latitude.unique())[::-1]
xx, yy = np.meshgrid(u_lons,u_lats)
zz = xx.copy()
zz[:] = -9999
zz[df.lat_index.values,df.lon_index.values] = df.pred_fmc.values
array = zz.astype(int)
fig, ax = plt.subplots(figsize = (2,2))
ax.imshow(array,vmin = 50, vmax = 200)
ax.set_title(date)
plt.show()
lat = yy
lon = xx
    # For each pixel I know its latitude and longitude.
# As you'll see below you only really need the coordinates of
# one corner, and the resolution of the file.
xmin,ymin,xmax,ymax = [lon.min(),lat.min(),lon.max(),lat.max()]
nrows,ncols = np.shape(array)
xres = (xmax-xmin)/float(ncols)
yres = (ymax-ymin)/float(nrows)
geotransform=(xmin,xres,0,ymax,0, -yres)
# That's (top left x, w-e pixel resolution, rotation (0 if North is up),
# top left y, rotation (0 if North is up), n-s pixel resolution)
# I don't know why rotation is in twice???
output_raster = gdal.GetDriverByName('GTiff').Create(r'D:\Krishna\projects\vwc_from_radar\data\map\dynamic_maps\lfmc_map_%s.tif'%date,ncols, nrows, 1 ,gdal.GDT_Float32) # Open the file
output_raster.SetGeoTransform(geotransform) # Specify its coordinates
srs = osr.SpatialReference() # Establish its coordinate encoding
srs.ImportFromEPSG(4326) # This one specifies WGS84 lat long.
# Anyone know how to specify the
# IAU2000:49900 Mars encoding?
output_raster.SetProjection( srs.ExportToWkt() ) # Exports the coordinate system
# to the file
output_raster.GetRasterBand(1).WriteArray(array) # Writes my array to the raster
output_raster.FlushCache()
output_raster = None
| UTF-8 | Python | false | false | 2,994 | py | 9 | save_map_as_tif_p27.py | 8 | 0.608884 | 0.583834 | 0 | 73 | 40.013699 | 189 |
kennedyCzar/AlphabetSoup-Using-Django | 11,330,123,769,060 | 0f765c0e055a23b6951d9a09ff734b4e59361867 | a8a879c5c2df2ca20c184ca9fb0c305a1d036110 | /SCRIPT/BST_SecondlargeVal.py | c4a885feff24de17f7d3840039edbed9732d6efd | [
"MIT"
] | permissive | https://github.com/kennedyCzar/AlphabetSoup-Using-Django | f749ba065f5c3fbed734d505a4169ba72c273980 | 4503cbfe9d0c9f9d8bfa1ae03c2da76537781e52 | refs/heads/master | "2020-03-26T11:49:20.681476" | "2019-09-04T13:37:33" | "2019-09-04T13:37:33" | 144,861,469 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 20 18:44:33 2019
@author: kenneth
"""
'''
Given the root to a binary search tree,
find the second largest node in the tree.
'''
def largestNode(rootNode):
currentNode = rootNode
while currentNode:
if not currentNode.right:
return currentNode.value
else:
currentNode = currentNode.right
def secondLargetNode(rootNode):
    # A second largest exists only if the tree has at least two nodes
    if rootNode is None or (rootNode.left is None and rootNode.right is None):
        return
    currentNode = rootNode
    while currentNode:
        # The largest node has a left subtree but no right child:
        # the second largest is then the largest node in that left subtree
        if currentNode.left and not currentNode.right:
            return largestNode(currentNode.left)
        # currentNode is the parent of the right-most (largest) node
        if currentNode.right and not currentNode.right.left and not currentNode.right.right:
            return currentNode.value
        currentNode = currentNode.right
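if __name__ == '__main__':
    # Minimal usage sketch: the Node class below is an assumption, since the
    # original snippet does not define a tree node type.
    class Node(object):
        def __init__(self, value, left=None, right=None):
            self.value = value
            self.left = left
            self.right = right
    root = Node(5, Node(3), Node(8, Node(7), Node(10)))
    print(largestNode(root))       # expected: 10
    print(secondLargetNode(root))  # expected: 8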
easync/ao-api-samples | 16,801,912,088,167 | ba03ef5ccc16768534d23fa234886c4751afe55f | a15b7fa8f91badc1a25a3267e9ac112f050d321a | /python/place_an_order.py | 9969a40caf4d2221e420d4ad51e7e7e593023fad | [] | no_license | https://github.com/easync/ao-api-samples | 15c15bb79481390549918c5dbd073bacb13aaea7 | 096a5e5eb971e0af59bd6345d4146b84ba6de660 | refs/heads/master | "2021-09-13T08:39:07.351591" | "2018-04-27T07:09:00" | "2018-04-27T07:09:00" | 117,698,736 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
import json
true = True
false = False
API_TOKEN = '00000000-0000-0000-0000-000000000000'; #SEE https://app.easync.io/api for details
RETAILER = 'amazon';
PRODUCT_ID = 'B01K7C0CSA'; # Amazon US
# PRODUCT_ID = 'B01BRC1ZYE'; # Amazon DE
# PRODUCT_ID = 'B00V6C5Z0Q'; # Amazon UK
# PRODUCT_ID = 'B010S9M3L6'; # Amazon ES
# PRODUCT_ID = 'B019HC54WU'; # Amazon FR
# PRODUCT_ID = 'B019HC54WU'; # Amazon CA
# PRODUCT_ID = 'B007PNGRPC'; # Amazon IT
MAX_PRICE = 15000;
RETAILER_CREDENTIALS = {
"email": 'user@gmail.com',
"password": '123456'
}
BILLING_ADDRESS = {
"first_name": "John",
"last_name": "Smith",
"address_line1": "14 Bus St.",
"address_line2": "",
"zip_code": "123456",
"city": "Atlanta",
"state": "GA",
"country": "US",
"phone_number": "1234567890"
}
SHIPPING_ADDRESS = {
"first_name": "Eric",
"last_name": "Walter",
"address_line1": "18 Ellie St.",
"address_line2": "",
"zip_code": "070065",
"city": "Sacramento",
"state": "CA",
"country": "US",
"phone_number": "1234567890"
};
PAYMENT_METHOD = {
"expiration_month": 9,
"expiration_year": 9999,
"name_on_card": "Jack Sparrow",
"number": "0000000000000000",
"security_code": "555",
"use_gift": true
};
ORDER = {
"retailer": RETAILER,
"retailer_credentials": RETAILER_CREDENTIALS,
"products": [
{
"product_id": PRODUCT_ID,
"quantity": 1,
"seller_selection_criteria": {
"condition_in": [
"New"
],
"handling_days_max": 5,
"max_item_price": MAX_PRICE,
"prime": true
}
}
],
"shipping_address": SHIPPING_ADDRESS,
"shipping_method": "free",
"billing_address": BILLING_ADDRESS,
"payment_method": PAYMENT_METHOD,
"is_gift": true,
"gift_message": "Thank you so much!",
"max_price": MAX_PRICE,
}
print requests.post("https://core.easync.io/api/v1/orders", auth=(API_TOKEN, ''), json=ORDER).content; | UTF-8 | Python | false | false | 1,970 | py | 8 | place_an_order.py | 4 | 0.593401 | 0.521827 | 0 | 86 | 21.918605 | 102 |
grzesiu/hackerrank | 1,382,979,486,877 | 5e58b4bb74defd51f95ef601f48bf3577515fa7a | 757274035404da163ff110f9da964094abe38657 | /algorithms/strings/special-palindrome-again.py | 02aaee752593d1768d777a1ed60cee5ab3fe55ae | [] | no_license | https://github.com/grzesiu/hackerrank | 4ecc8c1df85372bfbd68e30caa3c56e130764e75 | a1a88a0cae50a0045e7b90c4582d02f9172509ef | refs/heads/master | "2020-04-06T17:47:08.062466" | "2019-11-07T20:10:20" | "2019-11-07T20:10:20" | 75,977,143 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def get_counts(n, s):
counts = [[s[0], 1]]
for i in range(1, n):
if counts[-1][0] == s[i]:
counts[-1][1] += 1
else:
counts.append([s[i], 1])
return counts
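# A "special" substring either consists of one repeated character (counted by
# count_same) or is an odd-length run with exactly one different character in
# the middle (counted by count_different_middle).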
def count_same(counts):
sums = [0]
for i in range(1, max(list(zip(*counts))[1]) + 1):
sums.append(i + sums[-1])
acc = 0
for _, count in counts:
acc += sums[count]
return acc
def count_different_middle(counts):
acc = 0
for i in range(1, len(counts) - 1):
if counts[i][1] == 1 and counts[i - 1][0] == counts[i + 1][0]:
acc += min(counts[i - 1][1], counts[i + 1][1])
return acc
if __name__ == '__main__':
n = int(input())
s = input()
counts = get_counts(n, s)
total = count_same(counts) + count_different_middle(counts)
print(total)
| UTF-8 | Python | false | false | 828 | py | 104 | special-palindrome-again.py | 71 | 0.502415 | 0.468599 | 0 | 31 | 25.709677 | 70 |
brennv/graphql-pynamodb | 13,752,485,295,654 | 2ccaa4e4202e493ab54ef49f1c1a7e07720146f3 | 0b4d5ddb14925ed9e0d051be89410936d7e0ef73 | /graphene_pynamodb/relationships.py | cca280dcf7aced67fe49bab7c6c8d9ee643bd127 | [] | no_license | https://github.com/brennv/graphql-pynamodb | 7d356d828bf1cf68573232d8cf2ab97990885e74 | c9a2827aaa84950ee15a51a5cade3570634fa6fa | refs/heads/master | "2021-01-11T04:35:36.393720" | "2016-10-17T10:44:04" | "2016-10-17T10:44:04" | 71,157,597 | 1 | 0 | null | true | "2016-10-17T16:16:57" | "2016-10-17T16:16:56" | "2016-10-14T23:19:29" | "2016-10-17T10:44:09" | 65 | 0 | 0 | 0 | null | null | null | from pynamodb.attributes import Attribute, NumberAttribute
from pynamodb.constants import STRING, STRING_SET
from pynamodb.models import Model
from six import string_types
from wrapt import ObjectProxy
class RelationshipResult(ObjectProxy):
_key = None
_key_name = ''
_model = None
def __init__(self, key_name, key, obj):
super(RelationshipResult, self).__init__(obj)
self._key = key
self._key_name = key_name
self._model = obj
def __getattr__(self, name):
# If we are being to lookup '__wrapped__' then the
# '__init__()' method cannot have been called.
if name == '__wrapped__':
raise ValueError('wrapper has not been initialised')
if name.startswith('_'):
return getattr(self.__wrapped__, name)
if name == self._key_name:
return self._key
if isinstance(self.__wrapped__, type) and issubclass(self.__wrapped__, Model):
self.__wrapped__ = self.__wrapped__.get(self._key)
return getattr(self.__wrapped__, name)
def __eq__(self, other):
# Shallow compare by id for relationship purposes
if isinstance(self.__model__, type) and issubclass(self.__model__, Model):
return isinstance(other, self.__model__) and self.__key__ == getattr(other, self.__key_name__)
else:
return (self.__model__.__class__ == other.__class__) and self.__key__ == getattr(other, self.__key_name__)
def __ne__(self, other):
return self.__model__ != other
class Relationship(Attribute):
_models = None
@classmethod
def sub_classes(cls, klass):
return klass.__subclasses__() + [g for s in klass.__subclasses__() for g in Relationship.sub_classes(s)]
@classmethod
def get_model(cls, model_name):
# Resolve a model name into a model class by looking in all Model subclasses
if not Relationship._models:
Relationship._models = Relationship.sub_classes(Model)
return next((model for model in Relationship._models if model.__name__ == model_name), None)
def __init__(self, model, hash_key="id", lazy=True, **args):
if not isinstance(model, string_types) and not issubclass(model, Model):
raise TypeError("Expected PynamoDB Model argument, got: %s " % model.__class__.__name__)
Attribute.__init__(self, **args)
self._model = model
self.hash_key_name = hash_key
self._lazy = lazy
@property
def model(self):
if isinstance(self._model, string_types):
self._model = Relationship.get_model(self._model)
return self._model
class OneToOne(Relationship):
attr_type = STRING
def serialize(self, model):
return str(getattr(model, self.hash_key_name))
def deserialize(self, hash_key):
if isinstance(getattr(self.model, self.hash_key_name), NumberAttribute):
hash_key = int(hash_key)
try:
if self._lazy:
return RelationshipResult(self.hash_key_name, hash_key, self.model)
else:
return self.model.get(hash_key)
except self.model.DoesNotExist:
return None
class OneToMany(Relationship):
attr_type = STRING_SET
def serialize(self, models):
return [str(getattr(model, self.hash_key_name)) for model in models]
def deserialize(self, hash_keys):
if isinstance(getattr(self.model, self.hash_key_name), NumberAttribute):
hash_keys = map(int, hash_keys)
try:
if self._lazy:
return [RelationshipResult(self.hash_key_name, hash_key, self.model) for hash_key in hash_keys]
else:
return self.model.get_batch(hash_keys)
except self.model.DoesNotExist:
return None
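# Minimal usage sketch (illustrative only; the Department/Employee models and
# attribute names are assumptions, not part of this module):
#
#   from pynamodb.attributes import UnicodeAttribute
#
#   class Department(Model):
#       id = UnicodeAttribute(hash_key=True)
#       employees = OneToMany('Employee', hash_key='id')
#
#   class Employee(Model):
#       id = UnicodeAttribute(hash_key=True)
#       department = OneToOne(Department, hash_key='id')
#
# Only the related hash keys are serialized; with lazy=True (the default) the
# related model is loaded on first attribute access via RelationshipResult.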
| UTF-8 | Python | false | false | 3,848 | py | 8 | relationships.py | 5 | 0.607848 | 0.607848 | 0 | 109 | 34.302752 | 118 |
kebitmatf/LearnTF | 13,065,290,532,266 | 81c2ac7f8ce1bbc3bcefaf95fc9b3e54c290fb35 | b77dc74e17f9f8e6cdb49ad329b8b17e6c4334f9 | /art_test.py | 69a9ff61826b71c03bd74ccb10c3f3a5b95f07c7 | [] | no_license | https://github.com/kebitmatf/LearnTF | e58bdccc07e4b049268381d0215ad68875551e7d | d400da5e0bb9e215521b6722b38f64824293392a | refs/heads/master | "2021-01-01T15:27:36.887491" | "2017-07-26T13:02:31" | "2017-07-26T13:02:31" | 97,622,738 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 21 13:47:11 2017
@author: SEA
"""
import os
import tensorflow as tf
import scipy.io as io
import scipy.misc as misc
import matplotlib.pyplot as plt
import numpy as np
import time
path_vggmodel = 'imagenet-vgg-verydeep-19.mat'
path_style = 'images/guernica.jpg'
path_content = 'images/hongkong.jpg'
path_out = 'test_output'
imag_height = 600
imag_width = 800
color_channels = 3
mean_value = np.array([123.68, 116.779, 103.939]).reshape((1,1,1,3))
alpha = 100
beta = 5
noise_ratio = 0.6
def imag_load (path):
imag = misc.imread(path)
if imag.shape[0:2] != (imag_height, imag_width):
imag = misc.imresize(imag, (imag_height, imag_width), 'bilinear')
# plt.imshow(imag)
# plt.show
imag = imag.reshape((1,)+imag.shape) - mean_value
return imag
def imag_save(path, name, imag):
file_name = path+'/'+name
imag = imag + mean_value
    imag = imag[0]  # drop the leading batch dimension -> (height, width, channels)
imag = np.clip(imag, 0, 255)
# plt.imshow(imag)
# plt.show
misc.imsave(file_name, imag)
def imag_noise(imag, noise_ratio):
noise = np.random.uniform(-20, 20, (1, imag_height, imag_width, color_channels))
imag = noise * noise_ratio + imag * (1-noise_ratio)
imag = np.clip(imag, 0, 255)
# plt.imshow(imag[0])
# plt.show
return imag
# Get params in VGG and modify the model maxpool -> avgpool
def vgg_model(path):
"""
0 is conv1_1 (3, 3, 3, 64)
1 is relu
2 is conv1_2 (3, 3, 64, 64)
3 is relu
4 is maxpool
5 is conv2_1 (3, 3, 64, 128)
6 is relu
7 is conv2_2 (3, 3, 128, 128)
8 is relu
9 is maxpool
10 is conv3_1 (3, 3, 128, 256)
11 is relu
12 is conv3_2 (3, 3, 256, 256)
13 is relu
14 is conv3_3 (3, 3, 256, 256)
15 is relu
16 is conv3_4 (3, 3, 256, 256)
17 is relu
18 is maxpool
19 is conv4_1 (3, 3, 256, 512)
20 is relu
21 is conv4_2 (3, 3, 512, 512)
22 is relu
23 is conv4_3 (3, 3, 512, 512)
24 is relu
25 is conv4_4 (3, 3, 512, 512)
26 is relu
27 is maxpool
28 is conv5_1 (3, 3, 512, 512)
29 is relu
30 is conv5_2 (3, 3, 512, 512)
31 is relu
32 is conv5_3 (3, 3, 512, 512)
33 is relu
34 is conv5_4 (3, 3, 512, 512)
35 is relu
36 is maxpool
37 is fullyconnected (7, 7, 512, 4096)
38 is relu
39 is fullyconnected (1, 1, 4096, 4096)
40 is relu
41 is fullyconnected (1, 1, 4096, 1000)
42 is softmax
"""
vgg = io.loadmat(path)
vgg_layers = vgg['layers']
def _weight(layer, expected_layer_name):
W = vgg_layers[0][layer][0][0][0][0][0]
b = vgg_layers[0][layer][0][0][0][0][1]
layer_name = vgg_layers[0][layer][0][0][-2]
assert layer_name == expected_layer_name
return W, b
def _relu(layer):
return tf.nn.relu(layer)
def _conv2d(prev_layer, layer, layer_name):
W, b = _weight(layer, layer_name)
b = b.reshape(b.size)
W = tf.constant(W)
b = tf.constant(b)
layer = tf.nn.conv2d(input = prev_layer, filter = W, strides = [1,1,1,1], padding= 'SAME') + b
return layer
def _avgpool(layer):
        # stride 2 halves the spatial dimensions, matching VGG's pooling layers
        return tf.nn.avg_pool(layer, ksize = [1,2,2,1], strides = [1,2,2,1], padding = 'SAME')
# Build graph model
graph ={}
graph['input'] = tf.Variable(np.zeros((1, imag_height, imag_width, color_channels), dtype = 'float32'))
graph['conv1_1'] = _conv2d(graph['input'], 0, 'conv1_1')
graph['relu1_1'] = _relu(graph['conv1_1'])
graph['conv1_2'] = _conv2d(graph['relu1_1'], 2, 'conv1_2')
graph['relu1_2'] = _relu(graph['conv1_2'])
graph['avgpool1'] = _avgpool(graph['relu1_2'])
graph['conv2_1'] = _conv2d(graph['avgpool1'], 5, 'conv2_1')
graph['relu2_1'] = _relu(graph['conv2_1'])
graph['conv2_2'] = _conv2d(graph['relu2_1'], 7, 'conv2_2')
graph['relu2_2'] = _relu(graph['conv2_2'])
graph['avgpool2'] = _avgpool(graph['relu2_2'])
graph['conv3_1'] = _conv2d(graph['avgpool2'], 10, 'conv3_1')
graph['relu3_1'] = _relu(graph['conv3_1'])
graph['conv3_2'] = _conv2d(graph['relu3_1'], 12, 'conv3_2')
graph['relu3_2'] = _relu(graph['conv3_2'])
graph['conv3_3'] = _conv2d(graph['relu3_2'], 14, 'conv3_3')
graph['relu3_3'] = _relu(graph['conv3_3'])
graph['conv3_4'] = _conv2d(graph['relu3_3'], 16, 'conv3_4')
graph['relu3_4'] = _relu(graph['conv3_4'])
graph['avgpool3'] = _avgpool(graph['relu3_4'])
graph['conv4_1'] = _conv2d(graph['avgpool3'], 19, 'conv4_1')
graph['relu4_1'] = _relu(graph['conv4_1'])
graph['conv4_2'] = _conv2d(graph['relu4_1'], 21, 'conv4_2')
graph['relu4_2'] = _relu(graph['conv4_2'])
graph['conv4_3'] = _conv2d(graph['relu4_2'], 23, 'conv4_3')
graph['relu4_3'] = _relu(graph['conv4_3'])
graph['conv4_4'] = _conv2d(graph['relu4_3'], 25, 'conv4_4')
graph['relu4_4'] = _relu(graph['conv4_4'])
graph['avgpool4'] = _avgpool(graph['relu4_4'])
graph['conv5_1'] = _conv2d(graph['avgpool4'], 28, 'conv5_1')
graph['relu5_1'] = _relu(graph['conv5_1'])
graph['conv5_2'] = _conv2d(graph['relu5_1'], 30, 'conv5_2')
graph['relu5_2'] = _relu(graph['conv5_2'])
graph['conv5_3'] = _conv2d(graph['relu5_2'], 32, 'conv5_3')
graph['relu5_3'] = _relu(graph['conv5_3'])
graph['conv5_4'] = _conv2d(graph['relu5_3'], 34, 'conv5_4')
graph['relu5_4'] = _relu(graph['conv5_4'])
graph['avgpool5'] = _avgpool(graph['relu5_4'])
return graph
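# Loss terms (in the spirit of Gatys et al.): the content loss compares the
# conv4_2 activations of the generated image against the content image, and
# the style loss compares Gram matrices of conv1_1..conv5_1 activations
# against the style image, with per-layer weights.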
def content_loss_func(sess, model):
p = sess.run(model['conv4_2'])
x = model['conv4_2']
N = p.shape[3] # No. filters
M = p.shape[1] * p.shape[2] # No. feature map = filter size
    loss = 1. / (4 * N * M) * tf.reduce_mean(tf.pow(x - p, 2))
return loss
def style_loss_func(sess, model):
def gram_matrix (F, N, M):
        # Flatten the feature maps to an (M, N) matrix so that F^T * F
        # gives the N x N Gram matrix of filter correlations
        F = tf.reshape(F, (M, N))
G = tf.matmul(tf.transpose(F), F)
return G
def _style_loss(a, x):
N = a.shape[3]
M = a.shape[1] * a.shape[2]
A = gram_matrix(a, N, M)
G = gram_matrix(x, N, M)
        loss = 1. / (4 * N**2 * M**2) * tf.reduce_mean(tf.pow(G - A, 2))
return loss
layer_filters = [
('conv1_1', 0.5),
('conv2_1', 1.0),
('conv3_1', 2.0),
('conv4_1', 3.0),
('conv5_1', 4.0)
]
E = [_style_loss(sess.run(model[layer_name]), model[layer_name]) for layer_name, _ in layer_filters]
W = [w for _, w in layer_filters]
L = sum(W[l] * E[l] for l in range(len(layer_filters)))
return L
with tf.device('/cpu:0'):
# Start calculation
imag_content = imag_load(path_content)
imag_style = imag_load(path_style)
imag_input = imag_noise(imag_content, noise_ratio)
graph = vgg_model(path_vggmodel)
    # tf.InteractiveSession is the same as tf.Session, but it installs itself
    # as the default session, so ops can be evaluated without passing a session.
sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer()) # Initialize all variables
# Content loss
sess.run(graph['input'].assign(imag_content))
content_loss = content_loss_func(sess, graph)
# Style loss
sess.run(graph['input'].assign(imag_style))
style_loss = style_loss_func(sess, graph)
# Total loss
loss_total = alpha * content_loss + beta * style_loss
optimizer = tf.train.AdamOptimizer(learning_rate = 2.0).minimize(loss_total)
start_time = time.time()
if __name__ == '__main__':
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(graph['input'].assign(imag_input))
if not os.path.exists(path_out):
os.mkdir(path_out)
iteration = 2
for i in range(0, iteration):
sess.run(optimizer)
print(i)
if i % 2 == 0:
imag_mixed = sess.run(graph['input'])
file_name = 'train {}.jpg'.format(i)
imag_save(path_out, file_name, imag_mixed )
end_time = time.time()
print("Time usage: {0:>6.2}".format(end_time-start_time))
| UTF-8 | Python | false | false | 8,825 | py | 8 | art_test.py | 7 | 0.526232 | 0.455864 | 0 | 262 | 31.381679 | 112 |
matbur/advent-of-code | 8,134,668,061,173 | 89ee762e13bfab81bae28ead8a3b04885237af25 | b2cb4a420350f2bf4e0ef8b8c84ba099f52213b5 | /2017/03/part1.py | 03fe6e847e930454f515c0532edb27042c451bea | [
"MIT"
] | permissive | https://github.com/matbur/advent-of-code | 67fbbf55d1fec636994fdf7b98fd9fb491a93999 | f4f1fbcd3d733c057b0097c163a0d648a45cc6cc | refs/heads/master | "2021-05-01T15:58:48.375122" | "2020-12-12T15:23:09" | "2020-12-12T15:23:09" | 121,041,774 | 0 | 0 | MIT | false | "2021-04-20T21:16:28" | "2018-02-10T18:22:35" | "2020-12-12T15:23:29" | "2021-04-20T21:16:27" | 88 | 0 | 0 | 1 | Python | false | false | #!/usr/bin/env python3
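# AoC 2017 day 3 part 1 (spiral memory): foo(N) walks the square spiral and
# returns the (x, y) offset of square N from the centre; foo2(N) is the
# Manhattan distance, i.e. the number of steps needed to carry the data back.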
def foo(N):
d = [0, 0, 0, 0]
s = 1
for i in range(2, N + 1):
n = i // 2
d_i = (i - 2) % 4
sn = s + n
if sn == N:
d[d_i] += n
break
elif sn < N:
d[d_i] += n
s += n
else:
d[d_i] += N - s
break
return d[0] - d[2], d[1] - d[3]
def foo2(n):
x, y = foo(n)
return abs(x) + abs(y)
if __name__ == "__main__":
print("min")
for i in (1, 12, 23, 1024):
print(i, foo2(i))
print("full", foo2(265149))
| UTF-8 | Python | false | false | 581 | py | 27 | part1.py | 26 | 0.347676 | 0.290878 | 0 | 33 | 16.606061 | 35 |
akostyuk/flask-dbmigrate | 9,105,330,693,493 | 809cc3015f932dd75c51aa0fa8abf2a3f406a167 | 1c7a886a068f433d1fe9c0b76c6f77976098d5e1 | /tests.py | 3a62f2b9323be40396da4e1ae1e4dcf4efcd3251 | [
"Apache-2.0"
] | permissive | https://github.com/akostyuk/flask-dbmigrate | e29e651497d225cedcfb094111ed5e4631fc4213 | e7461a4e066420f78161316d49eb4099955505b0 | refs/heads/master | "2016-09-06T13:45:35.369523" | "2013-03-07T20:42:23" | "2013-03-07T20:42:23" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import re
import sys
import unittest
import logging
from shutil import rmtree
from StringIO import StringIO
from flask import Flask
from flask.ext.script import Command, Manager
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.engine.reflection import Inspector
from flask_dbmigrate import DBMigrate, ImproperlyConfigured
from flask_dbmigrate import manager as dbmanager
def rel(path):
return os.path.join(os.path.abspath(os.path.dirname(__file__)), path)
def make_test_model(db):
class Test(db.Model):
__tablename__ = 'test'
id = db.Column('test_id', db.Integer, primary_key=True)
column1 = db.Column(db.String(60))
def __init__(self, column1):
self.column1 = column1
return Test
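# The decorators below set up the migration repository and upgrade the schema
# before the wrapped test runs, then drop the database afterwards;
# with_database_changes also redefines the Test model with an extra column to
# simulate a pending schema change.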
def with_database(test_method):
def wrapper(self):
self.dbmigrate.init()
self.dbmigrate._upgrade()
test_method(self)
self.dbmigrate._drop()
return wrapper
def with_database_changes(test_method):
def wrapper(self):
self.dbmigrate.init()
self.dbmigrate._upgrade()
self.app.db = SQLAlchemy(self.app)
class Test(self.app.db.Model):
__tablename__ = 'test'
id = self.app.db.Column('test_id', self.app.db.Integer,
primary_key=True)
column1 = self.app.db.Column(self.app.db.String(60))
column2 = self.app.db.Column(self.app.db.String(60))
def __init__(self, column1):
self.column1 = column1
test_method(self)
self.dbmigrate._drop()
return wrapper
class TestConfig(object):
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + rel('test.sqlite3')
# SQLALCHEMY_DATABASE_URI = 'sqlite://'
SQLALCHEMY_MIGRATE_REPO = rel('migrations')
class TestCommand(Command):
def run(self):
print('test ok')
class DBMigrateInitTestCase(unittest.TestCase):
def test_dbmigrate_init_no_app(self):
# DBMigrate always required app
self.assertRaises(TypeError, DBMigrate)
def test_dbmigrate_init_app_no_config(self):
app = Flask(__name__)
self.assertRaises(ImproperlyConfigured, DBMigrate, app=app)
def test_dbmigrate_init_app_config(self):
app = Flask(__name__)
app.config.from_object(TestConfig)
DBMigrate(app)
class DBMigrateSubManagerTestCase(unittest.TestCase):
def setUp(self):
self.app = Flask(__name__)
self.output = StringIO()
sys.stdout = self.output
def tearDown(self):
self.output.close()
def test_add_dbmigrate_submanager(self):
dbmigrate_manager = Manager()
manager = Manager(self.app)
manager.add_command('dbmigrate', dbmigrate_manager)
assert isinstance(manager._commands['dbmigrate'], Manager)
self.assertEquals(dbmigrate_manager.parent, manager)
self.assertEquals(dbmigrate_manager.get_options(),
manager.get_options())
def test_run_dbmigrate_test(self):
manager = Manager(self.app)
manager.add_command('dbmigrate', dbmanager)
sys.argv = ['manage.py', 'dbmigrate', 'test']
try:
manager.run()
except SystemExit, e:
self.assertEquals(e.code, 0)
assert 'test ok' in self.output.getvalue()
class DBMigrateCommandsTestCase(unittest.TestCase):
def setUp(self):
self.app = Flask(__name__)
self.app.config.from_object(TestConfig)
self.app.config['SQLALCHEMY_MIGRATE_REPO'] += self.id()
self.app.db = SQLAlchemy(self.app)
self.Test = make_test_model(self.app.db)
self.dbmigrate = DBMigrate(self.app)
self.output = StringIO()
sys.stdout = self.output
def tearDown(self):
self.output.close()
if os.path.exists(self.app.config['SQLALCHEMY_MIGRATE_REPO']):
rmtree(self.app.config['SQLALCHEMY_MIGRATE_REPO'])
if os.path.exists(rel('test.sqlite3')):
os.remove(rel('test.sqlite3'))
def test_init(self):
manager = Manager(self.app)
manager.add_command('dbmigrate', dbmanager)
sys.argv = ['manage.py', 'dbmigrate', 'init']
try:
manager.run()
except SystemExit, e:
self.assertEquals(e.code, 0)
self.assertTrue(os.path.exists(
self.app.config['SQLALCHEMY_MIGRATE_REPO']))
migration = os.path.join(self.app.config['SQLALCHEMY_MIGRATE_REPO'],
'versions/001_initial.py')
self.assertTrue(os.path.exists(migration))
# drop
self.dbmigrate._drop()
def test_schemamigrate_no_repository(self):
manager = Manager(self.app)
manager.add_command('dbmigrate', dbmanager)
sys.argv = ['manage.py', 'dbmigrate', 'schemamigration']
try:
manager.run()
except SystemExit, e:
self.assertEquals(e.code, 0)
output = sys.stdout.getvalue().strip()
self.assertEquals(output, 'You have no database under version '
'control. Try to "init" it first')
@with_database
def test_schemamigrate_no_changes(self):
manager = Manager(self.app)
manager.add_command('dbmigrate', dbmanager)
sys.argv = ['manage.py', 'dbmigrate', 'schemamigration']
try:
manager.run()
except SystemExit, e:
self.assertEquals(e.code, 0)
output = sys.stdout.getvalue().strip()
self.assertEquals(output, 'No Changes!')
@with_database_changes
def test_schemamigrate_with_changes(self):
manager = Manager(self.app)
manager.add_command('dbmigrate', dbmanager)
sys.argv = ['manage.py', 'dbmigrate', 'schemamigration']
try:
manager.run()
except SystemExit, e:
self.assertEquals(e.code, 0)
migration = os.path.join(self.app.config['SQLALCHEMY_MIGRATE_REPO'],
'versions/002_auto_generated.py')
self.assertTrue(os.path.exists(migration))
@with_database_changes
def test_schemamigrate_with_changes_named(self):
manager = Manager(self.app)
manager.add_command('dbmigrate', dbmanager)
sys.argv = ['manage.py', 'dbmigrate', 'schemamigration', '-n',
'migration_name']
try:
manager.run()
except SystemExit, e:
self.assertEquals(e.code, 0)
migration = os.path.join(self.app.config['SQLALCHEMY_MIGRATE_REPO'],
'versions/002_migration_name.py')
self.assertTrue(os.path.exists(migration))
@with_database_changes
def test_schemamigrate_with_changes_stdout(self):
manager = Manager(self.app)
manager.add_command('dbmigrate', dbmanager)
sys.argv = ['manage.py', 'dbmigrate', 'schemamigration', '--stdout']
try:
manager.run()
except SystemExit, e:
self.assertEquals(e.code, 0)
output = sys.stdout.getvalue().strip()
pattern = re.compile('^# __VERSION__: (?P<version>\d+)\n')
self.assertTrue(re.search(pattern, output))
def test_migrate_show_no_migrations(self):
self.dbmigrate.init()
migration = os.path.join(self.app.config['SQLALCHEMY_MIGRATE_REPO'],
'versions/001_initial.py')
if os.path.exists(migration):
os.remove(migration)
manager = Manager(self.app)
manager.add_command('dbmigrate', dbmanager)
sys.argv = ['manage.py', 'dbmigrate', 'migrate', '--show']
try:
manager.run()
except SystemExit, e:
self.assertEquals(e.code, 0)
assert 'No migrations!' in sys.stdout.getvalue().strip()
self.dbmigrate._drop()
@with_database_changes
def test_migrate_show_with_migrations(self):
self.dbmigrate.db = self.app.db
self.dbmigrate.schemamigrate(migration_name='added_column2')
manager = Manager(self.app)
manager.add_command('dbmigrate', dbmanager)
sys.argv = ['manage.py', 'dbmigrate', 'migrate', '--show']
try:
manager.run()
except SystemExit, e:
self.assertEquals(e.code, 0)
out = sys.stdout.getvalue().strip()
assert '( ) 002_added_column2 (ver. 2)' in out
@with_database_changes
def test_migrate_upgrade(self):
self.dbmigrate.db = self.app.db
self.dbmigrate.schemamigrate(migration_name='added_column2')
manager = Manager(self.app)
manager.add_command('dbmigrate', dbmanager)
sys.argv = ['manage.py', 'dbmigrate', 'migrate']
try:
manager.run()
except SystemExit, e:
self.assertEquals(e.code, 0)
assert self.dbmigrate._get_db_version() == \
self.dbmigrate._get_repo_version()
i = Inspector(self.dbmigrate.db.engine)
# check if table "test" exist
assert 'test' in i.get_table_names()
# check if column "column2" exists in table "test"
assert 'column2' in [c['name'] for c in i.get_columns('test')]
@with_database
def test_migrate_downgrade_to_0(self):
manager = Manager(self.app)
manager.add_command('dbmigrate', dbmanager)
sys.argv = ['manage.py', 'dbmigrate', 'migrate', '-v', '0']
try:
manager.run()
except SystemExit, e:
self.assertEquals(e.code, 0)
i = Inspector(self.dbmigrate.db.engine)
# check if table "test" does not exist
assert 'test' not in i.get_table_names()
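# Verifies that auto-generated schema migrations handle SQLAlchemy relationship
# patterns (one-to-many, many-to-one, one-to-one, many-to-many): the related
# tables appear on upgrade and, where a downgrade is run, disappear again.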
class DBMigrateRelationshipsTestCase(unittest.TestCase):
def setUp(self):
self.app = Flask(__name__)
self.app.config.from_object(TestConfig)
# Use unique repository for each test
self.app.config['SQLALCHEMY_MIGRATE_REPO'] += self.id()
self.app.db = SQLAlchemy(self.app)
self.output = StringIO()
sys.stdout = self.output
def tearDown(self):
self.dbmigrate._drop()
self.output.close()
if os.path.exists(self.app.config['SQLALCHEMY_MIGRATE_REPO']):
rmtree(self.app.config['SQLALCHEMY_MIGRATE_REPO'])
if os.path.exists(rel('test.sqlite3')):
os.remove(rel('test.sqlite3'))
def test_one_to_many_relationship(self):
# initial "parent" table
class Parent(self.app.db.Model):
__tablename__ = 'parent'
id = self.app.db.Column(self.app.db.Integer,
primary_key=True)
self.dbmigrate = DBMigrate(self.app)
self.dbmigrate.init()
self.dbmigrate._upgrade()
# check that table "parent" has been properly created
assert 'parent' in Inspector(self.dbmigrate.db.engine
).get_table_names()
self.app.db = SQLAlchemy(self.app)
# add o2m rel to "child" table
class Parent(self.app.db.Model):
__tablename__ = 'parent'
id = self.app.db.Column(self.app.db.Integer,
primary_key=True)
children = self.app.db.relationship("Child")
class Child(self.app.db.Model):
__tablename__ = 'child'
id = self.app.db.Column(self.app.db.Integer,
primary_key=True)
parent_id = self.app.db.Column(self.app.db.Integer,
self.app.db.ForeignKey('parent.id'))
self.dbmigrate.db = self.app.db
self.dbmigrate.schemamigrate(migration_name='added_child_table')
self.dbmigrate._upgrade()
# check that table "child" has been properly created
assert 'child' in Inspector(self.dbmigrate.db.engine
).get_table_names()
# downgrade to 1
self.dbmigrate.migrate(upgrade=False, version=1)
# check that table "child" has been properly deleted
assert 'child' not in Inspector(self.dbmigrate.db.engine
).get_table_names()
def test_many_to_one_relationship(self):
# initial "parent" table
class Parent(self.app.db.Model):
__tablename__ = 'parent'
id = self.app.db.Column(self.app.db.Integer,
primary_key=True)
self.dbmigrate = DBMigrate(self.app)
self.dbmigrate.init()
self.dbmigrate._upgrade()
# check that table "parent" has been properly created
assert 'parent' in Inspector(self.dbmigrate.db.engine
).get_table_names()
self.app.db = SQLAlchemy(self.app)
# add m2o rel to "child" table
class Parent(self.app.db.Model):
__tablename__ = 'parent'
id = self.app.db.Column(self.app.db.Integer,
primary_key=True)
child_id = self.app.db.Column(self.app.db.Integer,
self.app.db.ForeignKey('child.id'))
child = self.app.db.relationship("Child")
class Child(self.app.db.Model):
__tablename__ = 'child'
id = self.app.db.Column(self.app.db.Integer,
primary_key=True)
self.dbmigrate.db = self.app.db
self.dbmigrate.schemamigrate(migration_name='added_child_table')
self.dbmigrate._upgrade()
# check that table "child" has been properly created
assert 'child' in Inspector(self.dbmigrate.db.engine
).get_table_names()
# downgrade to 1
self.dbmigrate.migrate(upgrade=False, version=1)
# check that table "child" has been properly deleted
assert 'child' not in Inspector(self.dbmigrate.db.engine
).get_table_names()
def test_one_to_one_relationship(self):
# initial "parent" table
class Parent(self.app.db.Model):
__tablename__ = 'parent'
id = self.app.db.Column(self.app.db.Integer,
primary_key=True)
self.dbmigrate = DBMigrate(self.app)
self.dbmigrate.init()
self.dbmigrate._upgrade()
# check that table "parent" has been properly created
assert 'parent' in Inspector(self.dbmigrate.db.engine
).get_table_names()
self.app.db = SQLAlchemy(self.app)
# add o2o rel to "child" table
class Parent(self.app.db.Model):
__tablename__ = 'parent'
id = self.app.db.Column(self.app.db.Integer,
primary_key=True)
child = self.app.db.relationship("Child",
uselist=False, backref="parent")
class Child(self.app.db.Model):
__tablename__ = 'child'
id = self.app.db.Column(self.app.db.Integer,
primary_key=True)
self.dbmigrate.db = self.app.db
self.dbmigrate.schemamigrate(migration_name='added_child_table')
self.dbmigrate._upgrade()
# check that table "child" has been properly created
assert 'child' in Inspector(self.dbmigrate.db.engine
).get_table_names()
# downgrade to 1
self.dbmigrate.migrate(upgrade=False, version=1)
# check that table "child" has been properly deleted
assert 'child' not in Inspector(self.dbmigrate.db.engine
).get_table_names()
def test_many_to_many_relationship(self):
# initial "left" table
class Parent(self.app.db.Model):
__tablename__ = 'left'
id = self.app.db.Column(self.app.db.Integer,
primary_key=True)
self.dbmigrate = DBMigrate(self.app)
self.dbmigrate.init()
self.dbmigrate._upgrade()
# check that table "left" has been properly created
assert 'left' in Inspector(self.dbmigrate.db.engine
).get_table_names()
self.app.db = SQLAlchemy(self.app)
# add m2m rel to "child" table through "association"
association_table = self.app.db.Table('association',
self.app.db.Model.metadata,
self.app.db.Column('left_id', self.app.db.Integer,
self.app.db.ForeignKey('left.id')),
self.app.db.Column('right_id', self.app.db.Integer,
self.app.db.ForeignKey('right.id'))
)
class Parent(self.app.db.Model):
__tablename__ = 'left'
id = self.app.db.Column(self.app.db.Integer, primary_key=True)
children = self.app.db.relationship("Child",
secondary=association_table)
class Child(self.app.db.Model):
__tablename__ = 'right'
id = self.app.db.Column(self.app.db.Integer, primary_key=True)
self.dbmigrate.db = self.app.db
self.dbmigrate.schemamigrate(migration_name='added_child_table')
self.dbmigrate._upgrade()
# check that table "association" has been properly created
assert 'association' in Inspector(self.dbmigrate.db.engine
).get_table_names()
# check that table "right" has been properly created
assert 'right' in Inspector(self.dbmigrate.db.engine
).get_table_names()
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DBMigrateInitTestCase))
suite.addTest(unittest.makeSuite(DBMigrateSubManagerTestCase))
suite.addTest(unittest.makeSuite(DBMigrateCommandsTestCase))
suite.addTest(unittest.makeSuite(DBMigrateRelationshipsTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| UTF-8 | Python | false | false | 17,413 | py | 4 | tests.py | 2 | 0.602998 | 0.599322 | 0 | 562 | 29.983986 | 76 |
borgbackup/borg | 11,235,634,486,824 | cfe1246f12a30fb2bc00c6d3604c3882ae592017 | fb05fb9f9f7fe7eb91072ad62c10200cae10acc6 | /src/borg/testsuite/archiver/patterns.py | c4863bb94fe90ffe1df72f4e319f8a8970c145b6 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | https://github.com/borgbackup/borg | c83f2a34e8bcc19859e9696a9425cbf4e23a743c | 4ded3620c5e9cd930d2e07e912af6c894abe6d5d | refs/heads/master | "2023-09-03T20:36:44.300124" | "2023-09-03T18:49:50" | "2023-09-03T18:49:50" | 35,517,126 | 10,379 | 1,053 | NOASSERTION | false | "2023-09-14T21:52:33" | "2015-05-12T23:10:47" | "2023-09-14T19:43:53" | "2023-09-14T21:52:32" | 30,402 | 9,768 | 711 | 455 | Python | false | false | from ...archiver._common import build_filter
from ...constants import * # NOQA
from ...patterns import IECommand, PatternMatcher, parse_pattern
from ...item import Item
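# build_filter(matcher, strip_components) returns a predicate over archive Items:
# it matches each item's path against the PatternMatcher and, when
# strip_components is set, also drops paths that are too shallow to survive the
# stripping (exercised by the tests below).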
def test_basic():
matcher = PatternMatcher()
matcher.add([parse_pattern("included")], IECommand.Include)
filter = build_filter(matcher, 0)
assert filter(Item(path="included"))
assert filter(Item(path="included/file"))
assert not filter(Item(path="something else"))
def test_empty():
matcher = PatternMatcher(fallback=True)
filter = build_filter(matcher, 0)
assert filter(Item(path="anything"))
def test_strip_components():
matcher = PatternMatcher(fallback=True)
filter = build_filter(matcher, strip_components=1)
assert not filter(Item(path="shallow"))
assert filter(Item(path="deep enough/file"))
assert filter(Item(path="something/dir/file"))
| UTF-8 | Python | false | false | 878 | py | 330 | patterns.py | 162 | 0.708428 | 0.705011 | 0 | 27 | 31.518519 | 64 |
slimwangyue/pytorch-cifar | 11,149,735,114,975 | e959934789f2535d170a25f86b20259b30caee37 | f5c1f8c9238cb5f1b719e9a27f0240c1b77bdb7d | /MobileNet_train/train.py | aabb612a9123bb9404c37bf5932387d3414b747d | [
"MIT"
] | permissive | https://github.com/slimwangyue/pytorch-cifar | 61653653076010d47bbec7c4209a3b1a9738a949 | d6ddd10cee5b78d6994b1683f605e541513b85d1 | refs/heads/master | "2020-05-18T16:49:06.057411" | "2019-11-18T03:19:19" | "2019-11-18T03:19:19" | 184,536,522 | 5 | 0 | null | true | "2019-05-02T07:08:11" | "2019-05-02T07:08:11" | "2019-05-02T07:06:29" | "2019-04-04T11:07:53" | 57 | 0 | 0 | 0 | null | false | false | import argparse
import os
import sys
import time
import torch
import torch.nn.functional as F
import torchvision
import models
import utils
import tabulate
from tensorboardX import SummaryWriter
import torchvision.transforms as transforms
def str2bool(s):
return s.lower() in ['yes', '1', 'true', 'y']
parser = argparse.ArgumentParser(description='SGD/SWA training')
parser.add_argument('--dir', type=str, default=None, required=True, help='training directory (default: None)')
parser.add_argument('--dataset', type=str, default='CIFAR10', help='dataset name (default: CIFAR10)')
parser.add_argument('--data_path', type=str, default=None, required=True, metavar='PATH',
help='path to datasets location (default: None)')
parser.add_argument('--batch_size', type=int, default=256, metavar='N', help='input batch size (default: 128)')
parser.add_argument('--num_workers', type=int, default=16, metavar='N', help='number of workers (default: 4)')
parser.add_argument('--model', type=str, default=None, required=True, metavar='MODEL',
help='model name (default: None)')
parser.add_argument('--resume', type=str, default=None, metavar='CKPT',
help='checkpoint to resume training from (default: None)')
parser.add_argument('--epochs', type=int, default=200, metavar='N', help='number of epochs to train (default: 200)')
parser.add_argument('--save_freq', type=int, default=25, metavar='N', help='save frequency (default: 25)')
parser.add_argument('--eval_freq', type=int, default=5, metavar='N', help='evaluation frequency (default: 5)')
parser.add_argument('--lr_init', type=float, default=0.03, metavar='LR', help='initial learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)')
parser.add_argument('--wd', type=float, default=1e-4, help='weight decay (default: 1e-4)')
parser.add_argument('--swa', action='store_true', help='swa usage flag (default: off)')
parser.add_argument('--swa_start', type=float, default=161, metavar='N', help='SWA start epoch number (default: 161)')
parser.add_argument('--swa_lr', type=float, default=0.01, metavar='LR', help='SWA LR (default: 0.05)')
parser.add_argument('--swa_c_epochs', type=int, default=1, metavar='N',
help='SWA model collection frequency/cycle length in epochs (default: 1)')
parser.add_argument('--energy', default=1, type=int,
help='using energy as regularization term')
parser.add_argument('--beta', default=6e-5, type=float,
help='coefficient')
parser.add_argument('--minimum', default=80, type=float,
help='minimum')
# Quantization of input, weight, bias and grad
parser.add_argument('--num_bits', default=8, type=int,
help='precision of input/activation')
parser.add_argument('--num_bits_weight', default=8, type=int,
help='precision of weight')
parser.add_argument('--num_bits_grad', default=16, type=int,
help='precision of (layer) gradients')
parser.add_argument('--biprecision', default=False, type=str2bool,
help='use biprecision or not')
# Predictive (sign) SGD arguments
parser.add_argument('--predictive_forward', default=False, type=str2bool,
help='use predictive net in forward pass')
parser.add_argument('--predictive_backward', default=True, type=str2bool,
help='use predictive net in backward pass')
parser.add_argument('--msb_bits', default=4, type=int,
help='precision of msb part of input')
parser.add_argument('--msb_bits_weight', default=2, type=int,
help='precision of msb part of weight')
parser.add_argument('--msb_bits_grad', default=8, type=int,
help='precision of msb part of (layer) gradient')
parser.add_argument('--threshold', default=5e-5, type=float,
help='threshold to use full precision gradient calculation')
parser.add_argument('--sparsify', default=False, type=str2bool,
help='sparsify the gradients using predictive net method')
parser.add_argument('--sign', default=True, type=str2bool,
help='take sign before applying gradient')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
args = parser.parse_args()
skip_count = 0
descriptions = [
args.model,
'g:%d' % args.num_bits_grad,
'mg:%d' % args.msb_bits_grad,
'th:%f' % args.threshold,
'minimum:%f' % args.minimum,
'lr:%f' % args.lr_init,
'wd:%f' % args.wd,
# 'sr:%f' % args.step_ratio,
]
args.exp_desc = '-'.join(filter(None, descriptions))
print('Preparing directory %s' % args.dir)
os.makedirs(args.dir, exist_ok=True)
with open(os.path.join(args.dir, 'command.sh'), 'w') as f:
f.write(' '.join(sys.argv))
f.write('\n')
torch.backends.cudnn.benchmark = True
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
print('Using model %s' % args.model)
model_cfg = getattr(models, args.model)
print('Loading dataset %s from %s' % (args.dataset, args.data_path))
ds = getattr(torchvision.datasets, args.dataset)
path = os.path.join(args.data_path, args.dataset.lower())
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
train_set = ds(path, train=True, download=True, transform=transform_train)
test_set = ds(path, train=False, download=True, transform=transform_test)
loaders = {
'train': torch.utils.data.DataLoader(
train_set,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers,
pin_memory=True
),
'test': torch.utils.data.DataLoader(
test_set,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.num_workers,
pin_memory=True
)
}
num_classes = 100 + 1
writer_path = os.path.join('runs', args.exp_desc + '-' + time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime()))
writer = SummaryWriter(writer_path)
signsgd_config = {
'num_bits': args.num_bits,
'num_bits_weight': args.num_bits_weight,
'num_bits_grad': args.num_bits_grad,
'biprecision': args.biprecision,
'predictive_forward': args.predictive_forward,
'predictive_backward': args.predictive_backward,
'msb_bits': args.msb_bits,
'msb_bits_weight': args.msb_bits_weight,
'msb_bits_grad': args.msb_bits_grad,
'threshold': args.threshold,
'sparsify': args.sparsify,
'sign': args.sign,
'writer': writer,
}
print('Preparing model')
# model = model_cfg.base(*model_cfg.args, num_classes=num_classes, **model_cfg.kwargs)
# model = models.__dict__[args.model](None, **signsgd_config)
model = models.MobileNetV2(10, **signsgd_config)
# model.install_gate()
model = torch.nn.DataParallel(model).cuda()
if args.swa:
print('SWA training')
# swa_model = models.__dict__[args.model](None, **signsgd_config)
swa_model = models.MobileNetV2(10, **signsgd_config)
# swa_model.install_gate()
swa_model = torch.nn.DataParallel(swa_model).cuda()
swa_n = 0
else:
print('SGD training')
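# Piecewise learning-rate schedule: hold lr_init for the first half of the run
# (measured against swa_start when SWA is enabled, total epochs otherwise), decay
# linearly between 50% and 90% of it, then hold the final value (swa_lr for SWA,
# otherwise 1% of lr_init).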
def schedule(epoch):
t = (epoch) / (args.swa_start if args.swa else args.epochs)
lr_ratio = args.swa_lr / args.lr_init if args.swa else 0.01
if t <= 0.5:
factor = 1.0
elif t <= 0.9:
factor = 1.0 - (1.0 - lr_ratio) * (t - 0.5) / 0.4
else:
factor = lr_ratio
return args.lr_init * factor
criterion = F.cross_entropy
optimizer = torch.optim.SGD(
model.parameters(),
lr=args.lr_init,
momentum=args.momentum,
weight_decay=args.wd
)
start_epoch = 0
if args.resume is not None:
print('Resume training from %s' % args.resume)
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
if args.swa:
swa_state_dict = checkpoint['swa_state_dict']
if swa_state_dict is not None:
swa_model.load_state_dict(swa_state_dict)
swa_n_ckpt = checkpoint['swa_n']
if swa_n_ckpt is not None:
swa_n = swa_n_ckpt
columns = ['ep', 'lr', 'tr_loss', 'tr_acc', 'te_loss', 'te_acc', 'time']
if args.swa:
columns = columns[:-1] + ['swa_te_loss', 'swa_te_acc'] + columns[-1:]
swa_res = {'loss': None, 'accuracy': None}
utils.save_checkpoint(
args.dir,
start_epoch,
state_dict=model.state_dict(),
swa_state_dict=swa_model.state_dict() if args.swa else None,
swa_n=swa_n if args.swa else None,
optimizer=optimizer.state_dict()
)
# global total_i
total_i = 0
# global training_cost
training_cost = 0
for epoch in range(start_epoch, args.epochs):
time_ep = time.time()
lr = schedule(epoch)
utils.adjust_learning_rate(optimizer, lr)
train_res, total_i, training_cost = utils.train_epoch(loaders['train'], model, criterion, optimizer, args, writer, total_i, skip_count, training_cost)
if epoch == 0 or epoch % args.eval_freq == args.eval_freq - 1 or epoch == args.epochs - 1:
test_res = utils.eval(loaders['test'], model, criterion, writer, total_i, skip_count)
else:
test_res = {'loss': None, 'accuracy': None}
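    # SWA: once past swa_start, fold the current weights into the running average
    # every swa_c_epochs epochs; BatchNorm statistics are recomputed on the
    # averaged model before it is evaluated.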
if args.swa and (epoch + 1) >= args.swa_start and (epoch + 1 - args.swa_start) % args.swa_c_epochs == 0:
utils.moving_average(swa_model, model, 1.0 / (swa_n + 1))
swa_n += 1
if epoch == 0 or epoch % args.eval_freq == args.eval_freq - 1 or epoch == args.epochs - 1:
utils.bn_update(loaders['train'], swa_model)
swa_res = utils.eval(loaders['test'], swa_model, criterion, writer, total_i, skip_count)
else:
swa_res = {'loss': None, 'accuracy': None}
if (epoch + 1) % args.save_freq == 0:
utils.save_checkpoint(
args.dir,
epoch + 1,
state_dict=model.state_dict(),
swa_state_dict=swa_model.state_dict() if args.swa else None,
swa_n=swa_n if args.swa else None,
optimizer=optimizer.state_dict()
)
time_ep = time.time() - time_ep
values = [epoch + 1, lr, train_res['loss'], train_res['accuracy'], test_res['loss'], test_res['accuracy'], time_ep]
if args.swa:
values = values[:-1] + [swa_res['loss'], swa_res['accuracy']] + values[-1:]
table = tabulate.tabulate([values], columns, tablefmt='simple', floatfmt='8.4f')
if epoch % 40 == 0:
table = table.split('\n')
table = '\n'.join([table[1]] + table)
else:
table = table.split('\n')[2]
print(table)
if args.epochs % args.save_freq != 0:
utils.save_checkpoint(
args.dir,
args.epochs,
state_dict=model.state_dict(),
swa_state_dict=swa_model.state_dict() if args.swa else None,
swa_n=swa_n if args.swa else None,
optimizer=optimizer.state_dict()
)
| UTF-8 | Python | false | false | 11,291 | py | 10 | train.py | 10 | 0.638562 | 0.620848 | 0 | 288 | 38.204861 | 154 |
skinkie/Scripts | 15,470,472,207,344 | a95298a851ab24b8d4b2eb31a0aea301730f7ef4 | c5744c2fda48ae6a79c155c641fe98021a0cb7f3 | /Exercise/mymod.py | cb8aa3b66a6f266638ec42a1306081d2f40ae155 | [] | no_license | https://github.com/skinkie/Scripts | e0fd3d3f767612ade111f28bc7af3e1b25fc2947 | 80a1ba71ddf9a0c5ff33866832cb5c42aca0c0b1 | refs/heads/master | "2021-05-31T16:57:21.100919" | "2016-05-23T09:58:59" | "2016-05-23T09:58:59" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Read a file and count its lines and characters.
"""
def countLines(name):
name.seek(0)
lines = name.readlines()
tot = 0
for i in lines: tot += 1
return tot
"""
def countLines(name):
name.seek(0)
lines = name.readlines()
return len(lines)
"""
def countChars(name):
name.seek(0)
chars = name.read()
tot = 0
for i in chars: tot += 1
return tot
"""
def countChars(name):
name.seek(0)
chars = name.read()
return len(chars)
"""
def test(filename):
name = open(filename)
print('This file has %s lines and %s chars!' %(countLines(name), countChars(name)))
"""
def test(filename):
import time
start = time.time()
count(filename)
passed = time.time() - start
print('Time uses: ', passed)
"""
if __name__ == '__main__':
import sys
test(sys.argv[1])
| UTF-8 | Python | false | false | 894 | py | 106 | mymod.py | 100 | 0.558166 | 0.548098 | 0 | 46 | 17.434783 | 87 |
pywjh/MeiDuoByDjango1 | 9,715,216,066,216 | 52de0437c70fe140ee1f7aedf81f34e3000e82e5 | 87b4454b12c5cf7afaf596065b4c2c232381249a | /celery_tasks/sms/tasks.py | 69e8e1f5e217c29cb373628c54bfe51de0887a19 | [] | no_license | https://github.com/pywjh/MeiDuoByDjango1 | 7153ea72b9f5c9c2a6c95859a5fe6a178c0aaa69 | 9f38360a940cc7fe7157ad758ecad4c1d6e219b7 | refs/heads/main | "2023-07-23T10:41:03.020479" | "2021-08-27T06:13:37" | "2021-08-27T06:13:37" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from celery_tasks.sms.yuntongxun.sms import CCP
from celery_tasks.sms import constants
# from verifications import constants
from celery_tasks.main import celery_app
# name=custom task name
@celery_app.task(name='send_sms_code')
def send_sms_code(mobile, sms_code):
    # CCP().send_template_sms('mobile number that receives the SMS', ['SMS verification code', 'minutes until the code expires, shown to the user'], 'template id')
CCP().send_template_sms(mobile, [sms_code, constants.SMS_CODE_EXPIRE // 60], 1) | UTF-8 | Python | false | false | 498 | py | 25 | tasks.py | 24 | 0.739336 | 0.732227 | 0 | 10 | 41.3 | 83 |
Sergey582/CT-APP-Rest-Api | 7,730,941,171,901 | 83527efb6b32ab5e347e32e1cec8895417af21a7 | 762508060dade05f86f9783ac7395a5eb31911a6 | /android app/postman/data/models.py | 558476c6998b229617e987b0b8a8b381699f1b0b | [] | no_license | https://github.com/Sergey582/CT-APP-Rest-Api | 28826fe8d55f3e5df2e27d30076985871ea8a749 | fbc786799ae1bcff2c3315d755c788a3bb4dc32f | refs/heads/master | "2022-12-31T03:59:56.392364" | "2020-10-17T13:36:46" | "2020-10-17T13:36:46" | 251,337,264 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
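# Simple exam schema: a Test owns many Tasks, and each Task owns many Answers,
# with the correct ones flagged by is_true.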
class Test(models.Model):
year = models.IntegerField(default=0)
number = models.IntegerField()
subject = models.CharField(max_length=250)
class Task(models.Model):
part = models.CharField(max_length=250)
number = models.IntegerField()
task_text = models.TextField()
test = models.ForeignKey('Test', related_name='tasks', on_delete=models.CASCADE)
class Answer(models.Model):
answer_text = models.TextField()
is_true = models.BooleanField(default=False)
task = models.ForeignKey('Task', related_name='answers', on_delete=models.CASCADE)
| UTF-8 | Python | false | false | 611 | py | 8 | models.py | 8 | 0.715221 | 0.703764 | 0 | 20 | 29.55 | 86 |
lucasjurado/Hackathon-ImagiMaker | 6,614,249,681,255 | db3dab254f9b9c1911927dc437e0896c83707a8b | eebe81968e7957c5b52e54b0800d0a24a08fe420 | /lucas_jurado_iniciante_desafio2.py | 72f0b9a02dfad5a39b6cf1478e4cb8a6b6f7e21d | [
"MIT"
] | permissive | https://github.com/lucasjurado/Hackathon-ImagiMaker | feefe7ef7e622a73b962f1bdcdcce941c095164b | 69974450e77b5b03bfcfdee9dcbaf0eb62708032 | refs/heads/master | "2022-09-11T00:04:49.622318" | "2020-05-15T22:04:08" | "2020-05-15T22:04:08" | 260,846,495 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
print('<<<<<<<<< Jokenpô >>>>>>>>>')
#variable receives the player's name
nome = str(input('Qual é o seu nome? '))
#counter for the number of games played
c = 0
#counter for the computer's wins
pcWin = 0
#counter for the player's wins
playerWin = 0
#counter for draws
draw = 0
while True:
    #variable receives the player's choice according to its index (0,1,2)
num = int(input('''Suas opções:
[0] Pedra
[1] Papel
[2] Tesoura
Qual é a sua jogada? '''))
    #if the value entered is not one of the options shown, the program asks for it again
if num != 0 and num != 1 and num != 2:
print('Opção inválida! Tente novamente')
print('-=' * 20)
else:
print('-='*20)
        #the computer randomly picks one of the options in the list
lista = ('Pedra','Papel','Tesoura')
pc = random.choice(lista)
p1 = lista[num]
print(f'{nome} [{p1}] X [{pc}] Computador')
        #logic that decides the winner and awards 1 point for the victory
if pc == p1:
print('> Empate!')
draw += 1
elif pc == 'Pedra' and p1 == 'Papel' or pc == 'Papel' and p1 == 'Tesoura' or pc == 'Tesoura' and p1 == 'Pedra':
print(f'> {nome}, você venceu!')
playerWin += 1
else:
print(f'> {nome}, você perdeu!')
pcWin += 1
print('-=' * 20)
    #the game counter increases by 1; every time the total is a multiple of 5, the player is asked whether to continue
c+=1
if c%5==0:
while True:
again = str(input('Você quer continuar [S/N]? ')).upper().strip()[0]
print('-=' * 20)
if again in 'SN':
if again in 'N':
print(f'<Depois de {c} jogos> Placar final: {nome} {playerWin} x {pcWin} Computador. E {draw} empates.')
exit()
elif again in 'S':
break
else:
print('Opção inválida! Tente novamente.')
| UTF-8 | Python | false | false | 2,133 | py | 2 | lucas_jurado_iniciante_desafio2.py | 1 | 0.528015 | 0.510446 | 0 | 63 | 32.412698 | 128 |
rgarcia5/project_euler | 15,925,738,782,022 | 9ed20af2678e07383d4fe7c4981196ef8e81248c | 2823f546b288b711b72f3bdef404ef6208cc5021 | /python/problem25.py | 7028cd794050d57083016f9105045be47e0a0bbc | [] | no_license | https://github.com/rgarcia5/project_euler | d431ff1ac87cb59d99281947b1434292fa4f3a60 | 51412a020e90071dbfb1ac26a637e7292560483c | refs/heads/master | "2020-12-31T07:55:28.551266" | "2017-06-16T07:33:24" | "2017-06-16T07:33:24" | 53,665,672 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def fibonacci_sequence():
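    # Project Euler 25: advance the Fibonacci sequence until the current term has
    # 1000 digits and return that term's index (count).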
count = 1
first_num = 1
second_num = 1
while (len(str(first_num)) != 1000):
result = first_num + second_num
first_num = second_num
second_num = result
count += 1
return count
print fibonacci_sequence()
| UTF-8 | Python | false | false | 255 | py | 49 | problem25.py | 30 | 0.635294 | 0.603922 | 0 | 12 | 20.25 | 38 |
tomography/xdesign | 16,535,624,102,496 | 0f7928a0665e8a8c4cbc1f550a27aa2e9b3a6e39 | 1efa22ec38c51644f608711064324c3e7c8c2f08 | /src/xdesign/codes.py | 060c6d7bcce80236f2d5f1824e804591d9ea12ff | [
"BSD-3-Clause"
] | permissive | https://github.com/tomography/xdesign | e282f2b3f6ee9893801c161bb61ec73130f3a3ff | 778914cf9df60d1b96b996ef463825ee455ece4f | refs/heads/master | "2022-01-17T15:29:06.725402" | "2022-01-14T21:00:45" | "2022-01-14T21:00:45" | 63,276,287 | 18 | 21 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# #########################################################################
# Copyright (c) 2019, UChicago Argonne, LLC. All rights reserved. #
# #
# Copyright 2019. UChicago Argonne, LLC. This software was produced #
# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #
# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #
# U.S. Department of Energy. The U.S. Government has rights to use, #
# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #
# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #
# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #
# modified to produce derivative works, such modified software should #
# be clearly marked, so as not to confuse it with the version available #
# from ANL. #
# #
# Additionally, redistribution and use in source and binary forms, with #
# or without modification, are permitted provided that the following #
# conditions are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of UChicago Argonne, LLC, Argonne National #
# Laboratory, ANL, the U.S. Government, nor the names of its #
# contributors may be used to endorse or promote products derived #
# from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #
# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
# #########################################################################
"""Generate codes for space- and time-coded apertures.
.. moduleauthor:: Daniel Ching
"""
import logging
import numpy as np
logger = logging.getLogger(__name__)
__author__ = "Daniel Ching"
__copyright__ = "Copyright (c) 2019, UChicago Argonne, LLC."
__docformat__ = 'restructuredtext en'
__all__ = ['mura_1d', 'mura_2d', 'raskar']
def is_prime(n):
"""Return True if n is prime."""
if n == 2 or n == 3:
return True
if n < 2 or n % 2 == 0:
return False
if n < 9:
return True
if n % 3 == 0:
return False
r = int(n**0.5)
f = 5
while f <= r:
if n % f == 0:
return False
if n % (f+2) == 0:
return False
f += 6
return True
def quadratic_residues_modulo(n):
"""Return all quadratic residues modulo n in the range 0, ..., n-1.
q is a quadratic residue modulo n if it is congruent to a perfect square
modulo n.
"""
x = np.arange(n)
q = x**2 % n
return q
def mura_1d(L):
"""Return the longest MURA whose length is less than or equal to L.
From Wikipedia:
A Modified uniformly redundant array (MURA) can be generated in any length
L that is prime and of the form::
L = 4m + 1, m = 1, 2, 3, ...,
the first six such values being ``L = 5, 13, 17, 29, 37``. The binary sequence
of a linear MURA is given by ``A[0:L]`` where::
A[i] = {
0 if i = 0,
1 if i is a quadratic residue modulo L, i != 0,
0 otherwise,
}
"""
if L < 5:
raise ValueError("A MURA cannot have length less than 5.")
# overestimate m to guess a MURA longer than L
m = (L + 1) // 4
L1 = (4 * m) + 1
# find an allowed MURA length, L1, <= L
while not (L1 <= L and is_prime(L1)):
m = m - 1
L1 = (4 * m) + 1
# Compute the MURA
A = np.zeros(L1, dtype=np.bool)
A[quadratic_residues_modulo(L1)] = 1
A[0] = 0
print("MURA is length {}".format(L1))
assert L1 <= L, "len(MURA) should be <= {}, but it's {}.".format(L, L1)
return A
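# Worked example of the rule above: the nonzero quadratic residues modulo 13 are
# {1, 3, 4, 9, 10, 12}, so mura_1d(13) returns the boolean equivalent of
#   [0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1]
# i.e. A[i] = 1 exactly at those residues and A[0] = 0.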
def mura_2d(M, N=None):
"""Return the largest 2D MURA whose lengths are less than M and N.
From Wikipedia:
A rectangular MURA, ``A[0:M, 0:N]``, is defined as follows::
A[i, j] = {
0 if i = 0,
1 if j = 0, i != 0,
1 if C[i] * C[j] = 1,
0 othewise,
}
C[i] = {
1 if i is a quadratic residue modulo p,
-1 otherwise,
}
where p is the length of the matching side M, N.
"""
# Use 1D Muras to start
Ci = mura_1d(M).astype(np.int8)
M1 = len(Ci)
if N is None:
N1 = M1
Cj = np.copy(Ci)
else:
Cj = mura_1d(N).astype(np.int8)
N1 = len(Cj)
# Modify 1D Muras to match 2D mura coefficients; ignore i, j = 0 those are
# set later.
Ci[Ci != 1] = -1
Cj[Cj != 1] = -1
# Arrays must be 2D for matrix multiplication
Ci = Ci[..., np.newaxis]
Cj = Cj[np.newaxis, ...]
A = (Ci @ Cj) == 1
assert A.shape[0] == M1 and A.shape[1] == N1, \
"A is not the correct shape! {} != ({}, {})".format(A.shape, M1, N1)
A[0, :] = 0
A[:, 0] = 1
return A
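# Example: mura_2d(13) builds a 13x13 boolean mask: row 0 is cleared, column 0 is
# then set to 1 (so A[0, 0] ends up True), and elsewhere A[i, j] is True exactly
# when i and j are both quadratic residues modulo 13 or both non-residues.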
def raskar(npool):
"""Return the coded mask from Raskar et al."""
return np.array([1, 0, 1, 0, 0, 0, 1, 0, 1, 1, # 10
0, 0, 0, 0, 0, 1, 0, 1, 0, 0, # 20
0, 0, 1, 1, 0, 0, 1, 1, 1, 1, # 30
0, 1, 1, 1, 0, 1, 0, 1, 1, 1, # 40
0, 0, 1, 0, 0, 1, 1, 0, 0, 1, # 50
1, 1], dtype='bool') # must be boolean
| UTF-8 | Python | false | false | 7,057 | py | 16 | codes.py | 13 | 0.512257 | 0.485475 | 0 | 184 | 37.353261 | 82 |
ArtDor2/agi | 13,769,665,192,190 | 5a2643180d66cddc7ba6bf41abba1220047060ba | d9bb329c3a96afa26102e0e9a6538d06477b5fac | /Master.py | e208ff0cc95a624d857e7efe78a0c823a58e56ab | [] | no_license | https://github.com/ArtDor2/agi | 06bd7fe8b4bf38e45edebfb4ad72cce18678d8fe | 78311b8aab991bc64d2dd43b1cb3d35ec9ddd4bb | refs/heads/master | "2022-12-12T14:53:33.332201" | "2018-09-12T00:12:53" | "2018-09-12T00:12:53" | 99,628,869 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Node:
links_in = {} # link to another agent, and weight
links_out = {}
functions = [] # agent functions to execute when activated?
patterns = [] # ? agent patterns from data???
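    # note: these are class-level attributes, so every Node instance currently
    # shares the same dicts/lists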
# def link_add(linked_node, weight):
# Node.links[linked_node] = weight
# def link_delete(linked_node, weight): # wont be used yet
# del Node.links[linked_node]
# test nodes
node_a = Node()
node_b = Node()
node_c = Node()
mind = {'1': node_a, '2': node_b, '3': node_c}
# generator:
file = open("text_nano.txt", "r")
env_text = file.read() # text env for now
file.close()
| UTF-8 | Python | false | false | 608 | py | 6 | Master.py | 4 | 0.598684 | 0.59375 | 0 | 26 | 22.346154 | 64 |
DataONEorg/api-documentation | 17,154,099,393,124 | 191d22d6c8d387896cc3d986fab4e009ebcec158 | 382d1aa8fc07d33c96a2a512a7b340e291417f91 | /api_template/MNPackage.py | 6f6ff331abbdddf66566305eb3e293db910ff737 | [] | no_license | https://github.com/DataONEorg/api-documentation | ee0350a503b990c847f32d01ac640640e789c693 | 634e00a48e3b4b6217acd0ab6be74b7327f04d88 | refs/heads/master | "2022-12-10T02:22:06.745695" | "2020-11-24T14:18:11" | "2020-11-24T14:18:11" | 156,448,835 | 2 | 2 | null | false | "2022-12-08T07:42:36" | "2018-11-06T21:15:49" | "2020-12-17T18:55:54" | "2022-12-08T07:42:35" | 59,061 | 2 | 1 | 14 | Python | false | false | import Exceptions
import Types
def getPackage(session,packageType,id):
"""
``GET /packages/{packageType}/{pid}`` |br| Provides all of the content of a DataONE data package as defined by an OAI-ORE document in DataONE, in one of several possible package serialization formats. The serialized package will contain all of the data described in the ORE aggregation. The default implementation will include packages in the BagIt format. The packageType formats must be specified using the associated ObjectFormat formatId for that package serialization format.
The {id} parameter must be the identifier of an ORE package object. If it is the identifier of one of the science metadata documents or data files contained within the package, the Member Node should throw an InvalidRequest exception. Identifiers may be either PIDss or SIDs.
This method is optional for Member Nodes.
:Version: 1.2
:REST URL: ``GET /packages/{packageType}/{pid}``
Parameters:
session (Types.Session): |session|
packageType (Types.ObjectFormatIdentifier): Indicates which package format will be used to serialize the package. All implementations must support a default BagIt package serialization, but are free to implement additional package serialization formats. Transmitted as part of the URL path and must be escaped accordingly.
id (Types.Identifier): The identifier of the package or object in a package to be returned as a serialized package. Transmitted as part of the URL path and must be escaped accordingly.
Returns:
Types.OctetStream: Any return type is allowed, including application/octet-stream, but the format of the response should be specialized by the requested packageType.
Raises:
Exceptions.InvalidToken: (errorCode=401, detailCode=2870)
Exceptions.ServiceFailure: (errorCode=500, detailCode=2871)
Exceptions.NotAuthorized: (errorCode=401, detailCode=2872)
Exceptions.InvalidRequest: (errorCode=400, detailCode=2873)
Exceptions.NotImplemented: (errorCode=501, detailCode=2874)
Exceptions.NotFound: The specified pid does not exist. (errorCode=404, detailCode=2875)
.. include:: /apis/examples/mnpackage_getpackage.txt
"""
return None
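# Usage sketch (illustrative only; the format id and identifier are just examples):
# a client implementing this interface would issue
#   GET {node_base_url}/packages/{packageType}/{pid}
# with packageType set to the ObjectFormat id of the desired serialization
# (BagIt by default) and pid the package identifier, then stream the returned
# archive to disk.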
| UTF-8 | Python | false | false | 2,215 | py | 28 | MNPackage.py | 21 | 0.780135 | 0.760271 | 0 | 37 | 58.837838 | 484 |
ztane/aoc2017 | 2,791,728,760,640 | c482c6e070fe4f699a9cd221b6d4b234d1d97307 | 9d74e1efb21cc6010e7d743e7787be090718cb05 | /days/day22.py | 8dd1fd8e0c43feed00aafbbd35fad26781255c31 | [] | no_license | https://github.com/ztane/aoc2017 | 014528cd4f23e386cbf0ede6d10fb7a88cdc11c7 | 694d601c86faf6c353ac1f545272c279714f0dd9 | refs/heads/master | "2021-08-31T17:40:18.064849" | "2017-12-22T08:30:18" | "2017-12-22T08:30:18" | 112,914,772 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from helpers import *
d = get_aoc_data(day=22)
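# Advent of Code 2017, day 22 ("Sporifica Virus"): the grid is a sparse map keyed
# by complex coordinates, the carrier's heading is a complex unit that turns by
# multiplying with +/-1j (or -1 to reverse), and each burst rewrites the current
# cell before stepping forward.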
def part1():
the_map = SparseComplexMap(d.lines, default='.')
position = the_map.center
direction = -1j
infections = 0
for i in range(10000):
if the_map[position] == '#':
the_map[position] = '.'
direction *= 1j
else:
the_map[position] = '#'
infections += 1
direction *= -1j
position += direction
return infections
def part2():
the_map = SparseComplexMap(d.lines, default='.')
position = the_map.center
direction = -1j
infections = 0
for i in range(10000000):
item = the_map[position]
if item == '#':
the_map[position] = 'f'
direction *= 1j
elif item == 'f':
the_map[position] = '.'
direction *= -1
elif item == 'w':
the_map[position] = '#'
infections += 1
else:
the_map[position] = 'w'
direction *= -1j
position += direction
return infections
| UTF-8 | Python | false | false | 1,077 | py | 27 | day22.py | 25 | 0.493965 | 0.467967 | 0 | 49 | 20.979592 | 52 |
NguyenVanDiem/Edmodo-Calendar | 2,748,779,110,891 | 7a71337bede05726e33c6a0735e5885e1de00a2b | 91cd68712ac4ac636361fef03f0db2afbd75ca51 | /Calendar_Learn/Calendar/views.py | 4bbbd06d5dd2c5cb9323518e0922e84427f8c020 | [] | no_license | https://github.com/NguyenVanDiem/Edmodo-Calendar | 14890d509af56d9efaaa113e4551f3cbd209fac0 | 8ffeec0e4a1ae23400827debda1c28cf7c77f354 | refs/heads/master | "2021-01-16T20:51:38.234594" | "2012-03-13T15:22:48" | "2012-03-13T15:22:48" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
import calendar
#import datetime
from datetime import date, datetime, timedelta
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.core.context_processors import csrf
from django.forms.models import modelformset_factory
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import get_object_or_404, render_to_response
from Calendar_Learn.Calendar.models import *
mnames = "January February March April May June July August September October November December"
mnames = mnames.split()
def _show_users(request):
"""Return show_users setting; if it does not exist, initialize it."""
s = request.session
if not "show_users" in s:
s["show_users"] = True
return s["show_users"]
@login_required(login_url = '/login/')
def main(request, year=None):
"""Main listing, years and months; three years per page."""
# prev / next years
if year:
year = int(year)
else:
year = time.localtime()[0]
nowy, nowm = time.localtime()[:2]
lst = []
# create a list of months for each year, indicating ones that contain entries and current
for y in [year, year+1, year+2]:
mlst = []
for n, month in enumerate(mnames):
# are there entry(s) for this month; current month?
entry = current = False
entries = Entry.objects.filter(
date__year=y,
date__month=n+1
)
if not _show_users(request):
entries = entries.filter(creator=request.user)
if entries:
entry = True
if y == nowy and n+1 == nowm:
current = True
mlst.append(dict(
n=n+1,
name=month,
entry=entry,
current=current
))
lst.append((y, mlst))
return render_to_response("main.html", dict(
years=lst,
user=request.user,
year=year
#reminders=reminders(request)
))
@login_required(login_url='/login/')
def month(request, year, month, change=None):
"""Listing of days in `month`."""
year, month = int(year), int(month)
# apply next / previous change
if change in ("next", "prev"):
now, mdelta = date(year, month, 15), timedelta(days=31)
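        # moving +/- 31 days from the 15th always lands in the neighbouring month,
        # regardless of month length, so year/month can be read straight back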
if change == "next":
mod = mdelta
elif change == "prev":
mod = -mdelta
year, month = (now+mod).timetuple()[:2]
# init variables
cal = calendar.Calendar()
month_day = cal.itermonthdates(year, month)
nyear, nmonth, nday = time.localtime()[:3]
lst = [[]]
week = 0
# make month lists containing list of days for each week
# each day tuple will contain list of entries and 'current' indicator
for day in month_day:
entries = current = False # are there entries for this day; current day?
if day.day:
entries = Entry.objects.filter(
date__year=day.year,
date__month=day.month,
date__day=day.day
)
if not _show_users(request):
entries = entries.filter(creator=request.user)
if day.day == nday and day.year == nyear and day.month == nmonth:
current = True
lst[week].append((day.day, day.month, entries, current))
if len(lst[week]) == 7:
lst.append([])
week += 1
#del lst[len(lst)]
#lst.remove([])
return render_to_response("month.html", dict(
year=year,
month=month,
user=request.user,
month_days=lst[:week],
mname=mnames[month-1],
#reminders=reminders(request)
))
@login_required(login_url = '/login/')
def day(request, year, month, day):
"""Entries for day"""
EntriesFormset = modelformset_factory(
Entry,
extra = 1,
exclude = ('creator', 'date'),
can_delete = True
)
if request.method == 'POST':
formset = EntriesFormset(request.POST)
        if formset.is_valid():
#add current user and date to each entry and save
entries = formset.save(commit = False)
for entry in entries:
entry.creator = request.user
entry.date = date(int(year), int(month), int(day))
entry.save()
return HttpResponseRedirect(reverse(
'Calendar_Learn.Calendar.views.month',
args = (year, month)
))
else:
#display formset for existing entries and one extra form
formset = EntriesFormset(
queryset = Entry.objects.filter(
date__year = year,
date__month = month,
date__day = day,
creator = request.user
)
)
return render_to_response('day.html', add_csrf(
request,
entries = formset,
year = year,
month = month,
day = day
))
def add_csrf(request, **kwargs):
"""Add csrf adn use to dictionary."""
d = dict(user = request.user, **kwargs)
d.update(csrf(request))
return d
| UTF-8 | Python | false | false | 4,731 | py | 23 | views.py | 11 | 0.627986 | 0.623547 | 0 | 163 | 28.02454 | 96 |
hfoley03/weather-app | 19,104,014,556,947 | 0e223cefa6f68472f2515aa8fc8c73f450d9580a | 7a13f6e506157cf333f33b16d5dda6096f2ecb69 | /test.py | c0f31754515fdb103d05922d02fe0a2d4cbafa36 | [] | no_license | https://github.com/hfoley03/weather-app | 48592d6ae2592c69461df494c4a2a75999b65610 | 736d5c0d8b192ade5ac6fa4be6a28cd4f15ade76 | refs/heads/main | "2023-07-09T22:45:19.964837" | "2021-08-19T17:25:46" | "2021-08-19T17:25:46" | 398,010,303 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
from main import get_geocode
from main import get_data
class TestGeocode(unittest.TestCase):
def test_galway_geocode(self):
correct_result = "53.14399, -9.46949"
result = get_geocode("Galway, Ireland")
self.assertEqual(result, correct_result)
def test_new_york_geocode(self):
correct_result = "40.71273, -74.00602"
result = get_geocode("New York")
self.assertEqual(result, correct_result)
def test_invalid_input_geocode(self):
correct_result = None
result = get_geocode("asdfghjkjhgfdsa")
self.assertEqual(result, correct_result)
def test_dict_keys_get_data_function(self):
correct_result = {'startTime': '', 'values': {'temperature': 0, 'cloudCover': 0, 'windSpeed': 0, 'precipitationIntensity': 0}}
result = get_data("53.14399, -9.46949")
assert (result[0].keys() == correct_result.keys())
def test_invalid_input_get_data_function(self):
correct_result = None
result = get_data("12345, 1334444")
self.assertEqual(result, correct_result)
def test_invalid_input_string_get_data_function(self):
correct_result = None
result = get_data("This is a string")
self.assertEqual(result, correct_result)
def test_invalid_arg_amount_get_data_function(self):
correct_result = None
result = get_data("12345")
self.assertEqual(result, correct_result)
if __name__ == '__main__':
unittest.main() | UTF-8 | Python | false | false | 1,580 | py | 4 | test.py | 3 | 0.614557 | 0.575316 | 0 | 47 | 31.638298 | 134 |
CJuice/DoIT_SocrataMetadataManipulation | 16,054,587,796,457 | d0c2abdf9fbb173816b8a318581cf4305ce510b8 | 3721a207a393f952bfb6cfe00fbd8e278204012a | /metadata_manipulation.py | af076fedf2e0d00c86ab4aa74896e6dd1a64352a | [] | no_license | https://github.com/CJuice/DoIT_SocrataMetadataManipulation | e4788772dc25ffd82d12b33ab700924d4115998d | 0715d064eb4bfff51c4832f486663fab536bef4d | refs/heads/master | "2020-04-30T01:41:36.229507" | "2019-03-19T15:06:49" | "2019-03-19T15:06:49" | 176,536,102 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Ingest excel file from examination step, isolate records needing revision, and patch the revisions to Socrata.
The first step involved requesting metadata for all of our datasets, checking the description text for the values
of interest, and outputing an excel file of the results. This script ingests the excel file from step one, isolates
those rows with values that need attention, revises the values, and then updates the description text in the metadata
online in the Socrata open data portal. The patching portion of this script was adopted from the original design
by william.connelly.
Author: CJuice, 20180319
Revisions:
"""
def main():
# IMPORTS
import configparser
import json
import os
import pandas as pd
import re
import requests
# VARIABLES
_root_file_path = os.path.dirname(__file__)
config_file_name = "credentials.cfg"
# df_columns = ["FourByFour", "data_url", "opendata_url", "opendata_str", "Description", "DatasetURL"]
# md_metadata_url = r"https://opendata.maryland.gov/api/views/metadata/v1"
new_url_string = r"/opendata.maryland.gov"
old_url_string = r"/data.maryland.gov"
request_headers = {'Content-Type': 'application/json'}
input_excel_file = "Socrata_Datasets_Metadata_Analysis.xlsx"
# FUNCTIONS
# FUNCTIONALITY
# Setup config parser and get credentials
parser = configparser.ConfigParser()
parser.read(filenames=os.path.join(_root_file_path, config_file_name))
password = parser.get("DEFAULT", "password")
username = parser.get("DEFAULT", "username")
# Need a pandas dataframe from the excel file, and need to revise the description text.
master_df = pd.read_excel(io=input_excel_file, sheet_name=0, header=0)
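    # Keep only the rows the examination step flagged with a positive "data_url"
    # value, i.e. descriptions that still reference the old data.maryland.gov URL.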
master_df = master_df[(0 < master_df["data_url"])]
master_df["Description"] = master_df["Description"].apply(func=(lambda x: re.sub(pattern=old_url_string,
repl=new_url_string,
string=x,
count=0,
flags=re.IGNORECASE)))
# print(master_df["Description"].values) # to visually inspect the revised values
# Make the patch request to the metadata api endpoint
row_gen = master_df.iterrows()
for index, row_series in row_gen:
row_dict = row_series.to_dict()
data = {"description": row_dict.get("Description")}
response = requests.patch(url=row_dict.get("DatasetURL"),
auth=(username, password),
headers=request_headers,
data=json.dumps(data))
print(row_dict.get("DatasetURL"), response)
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 2,973 | py | 4 | metadata_manipulation.py | 2 | 0.60074 | 0.596367 | 0 | 67 | 43.358209 | 117 |
lucasmbrute2/Blue_mod1 | 8,770,323,237,549 | 4bf4f0a42439445f74431db9160f8d50bd98eb3f | 8a6fa4a3aba8c8f4e520961e97d38a83bb5f35bb | /Aula07/Exercicio07_WHILE.py | d5622280eafcc109b6abf57bd7a0ed54a8ec598b | [] | no_license | https://github.com/lucasmbrute2/Blue_mod1 | 8e8f46b9f0c364a230daec44017c15ef3537f004 | f08fe046beacddf513558acd55339317d11644d0 | refs/heads/main | "2023-06-18T12:16:07.754845" | "2021-07-16T01:33:44" | "2021-07-16T01:33:44" | 375,507,339 | 0 | 0 | null | false | "2021-06-15T17:19:57" | "2021-06-09T22:46:43" | "2021-06-15T16:29:50" | "2021-06-15T17:19:56" | 62 | 0 | 0 | 0 | Jupyter Notebook | false | false | # #03 - Crie um programa que leia o nome e o preço de vários produtos. O programa
# must ask whether the user wants to keep buying or not. At the end, show:
# A) The total amount spent on the purchase.
# B) How many products cost more than R$1000.
# (C) The name of the cheapest product.
total = 0
above1000 = 0
menor_valor = 1000000000000
nome_produto = ""
while True:
produto = (input("Digite o nome do produto aqui: "))
valor = float(input("Digite o valor do produto aqui: "))
total += valor
if valor < menor_valor:
menor_valor = valor
nome_produto = produto
if valor > 1000:
above1000 += 1
continuar = input("Deseja continuar comprando ?\n ")
if continuar.lower() == "sim":
continue
elif continuar.replace("nao","não").lower() == "não":
print("Obrigado por utilizar nosso APP!")
break
    else:
        # keep asking until the answer is understood
        while continuar.replace("nao","não").lower() != "não" and continuar.lower() != "sim":
            continuar = input("Não entendi a sua resposta, digite novamente. ")
        if continuar.replace("nao","não").lower() == "não":
            print("Obrigado por utilizar nosso APP!")
            break
print(f"O valor total gasto na compra foi de R$ {total:.2f} reais.")
print(f"{above1000} produtos custam mais de R$1000.00 reias.",)
print(f"O produto mais barato comprado foi: {nome_produto} por {menor_valor} reias.") | UTF-8 | Python | false | false | 1,481 | py | 71 | Exercicio07_WHILE.py | 66 | 0.609407 | 0.578732 | 0 | 38 | 37.631579 | 93 |
superdebug/python-exam | 11,905,649,396,787 | d218a184bd91425871ea8d98bde527a448888049 | c1951f4216116837c9076562cee50f6250994c2f | /pythonweb开发实录源代码/05/15.py | cd5111593eb1ab74078160554ddc2ba139a75495 | [] | no_license | https://github.com/superdebug/python-exam | 763350249f955381f3f2d2fc3429aa56a91fadd0 | 1e6fe63f9616b84c6f4595f9f441fb40c0d4023e | refs/heads/master | "2020-04-12T01:21:06.015376" | "2016-12-05T08:14:40" | "2016-12-05T08:14:40" | 51,565,157 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | userList = ['0001' , '0004' , '0006' , '0002' , '0005' , '0003']
userList.pop()
print userList
print userList.pop(0)
print userList | UTF-8 | Python | false | false | 135 | py | 141 | 15.py | 121 | 0.637037 | 0.451852 | 0 | 5 | 25.4 | 64 |
tcooksd/inframan | 16,776,142,263,559 | 02f0ff0ec3d1a37c2d2320f6c518c27088d24394 | 3cd765eea7df9a022c4b11b2a7f813e735a136df | /ucs_db_update.py | 04a486d8bba4f71e2ad0c7d290b034b972c4b3bb | [] | no_license | https://github.com/tcooksd/inframan | 41f7714887583b45571ca2da41eadfe13c17c62a | 4d20c59917f5d54c8a6fedb15c8b8ba1dda32ac3 | refs/heads/master | "2021-01-01T05:17:17.089197" | "2016-04-17T10:52:20" | "2016-04-17T10:52:20" | 56,878,929 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
"""
Query UCS for systems data and then import into a mysql database.
Usage:
get_ucs_macs.py --host=<host> --user=<user> --pass=<pass> --print=<yes|no> --mysql_host=<mysql_host> --mysql_login=<mysql_login> --mysql_pass=<mysql_pass>
Options:
--host=<host> IP Address of the UCS VIP
--user=<user> Username for UCS
--pass=<pass> Password for UCS
--print=<print> Print information from UCS
--mysql_host=<mysql_host> Mysql server to parse
--mysql_login=<mysql_login> Mysql login
--mysql_pass=<mysql_pass> Mysql pass
--version Script version
"""
from UcsSdk import *
from UcsSdk.MoMeta.OrgOrg import OrgOrg
from UcsSdk.MoMeta.LsServer import LsServer
from UcsSdk.MoMeta.VnicEther import VnicEther
from docopt import docopt
from collections import defaultdict
import datetime
import MySQLdb as mysql
args = docopt(__doc__, version='{} 1.0'.format(__file__))
hostname = args['--host']
username = args['--user']
password = args['--pass']
print1 = args['--print']
mysql_host = args['--mysql_host']
mysql_login = args['--mysql_login']
mysql_pass = args['--mysql_pass']
"""
Use this and comment out the latter if on mac.
"""
# Mysql server connection, currently using tcookdev's mysql db for testing .
# tcookdev_db = mysql.connector.connect(user='infra', password='inframgmt',
# host='10.10.42.16',
# database='infra')
tcookdev_db = mysql.connect(mysql_host, mysql_login, mysql_pass, 'infra')
date1 = datetime.datetime.now()
"""
The mysql schema for systems database.
"""
add_host = ("INSERT INTO systems "
"(id, mac1, mac2, mac3, ucs_blade_name, hostname, pxe_ip, perm_ip, ks_template, type, status, location, blade, last_updated, os_version) "
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
update_host = ("UPDATE systems SET systems.ucs_blade_name=%s, systems.last_updated=%s where systems.mac1=%s")
def update_mysql(values, print1="NULL"):
"""
    Update host names when they change on UCS; this is done by comparing the
    first MAC address to the hostname currently stored in the database.
"""
hostname1 = values[0]
mac_address = values[1][0]
cursor = tcookdev_db.cursor()
data_host = ( hostname1, date1, mac_address )
cursor.execute(update_host, data_host)
tcookdev_db.commit()
cursor.close()
def add_to_mysql(values, print1="NULL"):
"""
    Takes a values list of the form [hostname, [mac1, mac2, mac3]]
    and inserts a corresponding row into the systems table in mysql.
"""
data_host = ""
hostname1 = values[0]
mac_address = values[1]
if print1 == "yes":
print hostname1 + "**" + values[1][0]
""" Assign mac/s based on the number of the nics provided.
Values are submitted in the following format [ hostname [mac1 , mac2, mac3]] .
Values[0] = hostname values[0][0-3] = mac addresses .
We run this over 3 itterations because some servers have 1 nic some have 2 and others have 3 ,
want to always make sure we have all the macs available from each server.
"""
if len(values[1]) == 1:
macaddr1 = mac_address[0]
macaddr2 = ""
data_host = ( "", macaddr1, "", "", hostname1, "", "", "", "centos5", "UCS2", "", "", "", date1, "5" )
elif len(values[1]) == 2:
macaddr1 = mac_address[0]
macaddr2 = mac_address[1]
data_host = ( "", macaddr2, macaddr1, "", hostname1, "", "", "", "centos5", "UCS2", "", "", "", date1, "5" )
else:
macaddr1 = mac_address[0]
macaddr2 = mac_address[1]
macaddr3 = mac_address[2]
data_host = ( "", macaddr2, macaddr1, macaddr3, hostname1, "", "", "", "centos5", "UCS2", "", "", "", date1, "5" )
cursor = tcookdev_db.cursor()
cursor.execute(add_host, data_host)
tcookdev_db.commit()
cursor.close()
def query_mysql(values):
"""
    Query mysql for the blade name returned by the UCS query.
    Returns the stored mac address when a match is found so the caller can skip it.
"""
ucs_host = values[0]
ucs_mac1 = values[1][0]
#if print1 == "yes":
#print ucs_host + "**" + ucs_mac1
cursor = tcookdev_db.cursor()
#query = """SELECT * FROM systems where mac1 = '%s';"""
query = """SELECT * FROM systems where ucs_blade_name = '%s';"""
query = query % ( ucs_host )
cursor.execute(query)
row = cursor.fetchone()
while row is not None:
return row[1]
row = cursor.fetchone()
tcookdev_db.commit()
cursor.close()
def query_mysql_hostname_update(values):
"""
Check to see if the hostname that was previously associated with the first mac address
has changed, if so then update to the new hostname.
"""
ucs_host = values[0]
ucs_mac1 = values[1][0]
cursor = tcookdev_db.cursor()
query = """SELECT * FROM systems where mac1 = '%s' and not ucs_blade_name = '%s';"""
query = query % ( ucs_mac1, ucs_host )
cursor.execute(query)
row = cursor.fetchone()
while row is not None:
return row[1]
row = cursor.fetchone()
tcookdev_db.commit()
cursor.close()
def get_macs(handle=None):
"""
Grab all the SP instances and return their macs
"""
macs = defaultdict(dict)
orgObj = handle.GetManagedObject(None, OrgOrg.ClassId(), {OrgOrg.DN : "org-root"})[0]
servers = handle.GetManagedObject(orgObj, LsServer.ClassId())
for server in servers:
if server.Type == 'instance':
childs = handle.ConfigResolveChildren(VnicEther.ClassId(), server.Dn, None, YesOrNo.TRUE)
macs[server.Name]
for child in childs.OutConfigs.GetChild():
macs[server.Name][child.Name] = child.Addr
return macs
if __name__ == '__main__':
try:
# Connect and login to the UCS
handle = UcsHandle()
handle.Login(hostname, username, password)
"""
Submit query for ethernet information of assigned blades in UCS
Returns a dictionary of metadata key value pairs from UCS.
"""
macs = get_macs(handle=handle)
values01 = []
for host, interfaces in macs.iteritems():
values01 = [ host, interfaces.values() ]
if query_mysql(values01) != None:
#print "the following mac address is a duplicate" + " " + query_mysql(values01) + ": not udpating"
continue
else:
add_to_mysql(values01, print1)
if query_mysql_hostname_update(values01) != None:
#update_mysql(values01, print1)
print "skipped adding the second time"
handle.Logout()
except Exception, err:
print 'Exception: {}'.format(str(err))
import traceback, sys
print '-' * 60
traceback.print_exc(file=sys.stdout)
print '-' * 60
| UTF-8 | Python | false | false | 6,965 | py | 5 | ucs_db_update.py | 4 | 0.601292 | 0.584637 | 0 | 195 | 34.712821 | 158 |
anu2021/CredoSystem | 8,950,711,850,179 | d6af996301d5f2eaf15f2c03eb1e553718235788 | 416d7ec65819715ce2ec31387a08e7e312dd4ceb | /CourseApp/urls.py | 86fd0da2de4f8bf4cddd10ff1fc0cca668a1bee5 | [] | no_license | https://github.com/anu2021/CredoSystem | a1f5a4398863481f730c7a70ba6baf25e7ad391f | e38e7330248f8880361ed620b346d407721f6abf | refs/heads/main | "2023-03-06T09:49:51.289515" | "2021-02-26T05:26:38" | "2021-02-26T05:26:38" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.urls import path
from .views import CourseView
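# Hypothetical route registration (not in the original file): this assumes
# CourseView is a class-based view; the URL pattern and name are placeholders.
urlpatterns = [
    path('courses/', CourseView.as_view(), name='courses'),
]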
| UTF-8 | Python | false | false | 61 | py | 3 | urls.py | 3 | 0.803279 | 0.803279 | 0 | 2 | 28.5 | 29 |
szabgab/slides | 7,198,365,224,147 | 6cc57612a910f86d3c0a78a0d811e7eb7e9e1410 | c3082eb2adc43b311dd3c9ff16fd3ed9df85f266 | /python/examples/net/prompt_password.py | 6b839f2480efe47e1d5ce92a40eec831c7f568fd | [] | no_license | https://github.com/szabgab/slides | 78818c7138331b3ba9e221c81da3678a46efe9b3 | 63bba06678554db737602f2fbcd6510c36037e8a | refs/heads/main | "2023-08-31T07:13:51.536711" | "2023-08-29T13:17:59" | "2023-08-29T13:17:59" | 122,212,527 | 87 | 69 | null | false | "2023-05-19T06:55:11" | "2018-02-20T14:57:03" | "2023-05-10T09:26:33" | "2023-05-19T06:55:10" | 14,375 | 82 | 64 | 0 | Python | false | false | import getpass
password = getpass.getpass("Password:")
print(password)
| UTF-8 | Python | false | false | 75 | py | 5,509 | prompt_password.py | 2,869 | 0.746667 | 0.746667 | 0 | 5 | 13.6 | 39 |
soobin519/Programmers-Algorithm | 17,033,840,311,209 | 75715d2660fce181799aa4a6c818bf8237a9cecb | 193077a2e49c38cde61daf7d58ea38c6e44f8a86 | /Lv1_모의고사.py | b7285d723b0906bed3cc5ec9e24b0058a0026188 | [] | no_license | https://github.com/soobin519/Programmers-Algorithm | f60797bc0e083e9da72217afbc909631c57005f0 | 9349abfc9775b1baaa16f09048d5d7f1d68fcd3e | refs/heads/master | "2023-07-29T05:25:29.304067" | "2021-09-09T08:24:45" | "2021-09-09T08:24:45" | 379,100,279 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | Python 3.9.1 (v3.9.1:1e5d33e9b9, Dec 7 2020, 12:44:01)
def solution(answers):
answer = []
a1=[1,2,3,4,5]
a2=[2,1,2,3,2,4,2,5]
a3=[3,3,1,1,2,2,4,4,5,5]
c1 = 0
c2 = 0
c3 = 0
for i in range(len(answers)):
if answers[i]==a1[i%5]:
c1+=1
if answers[i]==a2[i%8]:
c2+=1
if answers[i]==a3[i%10]:
c3+=1
maxNum = max(c1,c2,c3)
if maxNum == c1: answer.append(1)
if maxNum == c2: answer.append(2)
if maxNum == c3: answer.append(3)
return answer | UTF-8 | Python | false | false | 688 | py | 44 | Lv1_모의고사.py | 44 | 0.508721 | 0.377907 | 0 | 26 | 25.5 | 72 |
Bismoy943/STA_Assigns_Bismoy | 9,070,970,947,767 | 011b646eefd7d93f0dc89a0d015a2814449e9d17 | 2bf5408c0cedec5c8f62b044f25ded8aac0d4f36 | /W03D16/callingcandlesticks.py | 587ec7c92849c519c1cbde10bea7bfe5952cc14f | [] | no_license | https://github.com/Bismoy943/STA_Assigns_Bismoy | 72d848ba52b388c6ec237b2caefc487edadd156e | c57ec1aeb9a7bace8eeed5c7e8c19838aa73cd9c | refs/heads/main | "2023-04-03T19:45:49.031879" | "2021-03-28T13:23:45" | "2021-03-28T13:23:45" | 330,672,829 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import candlestickpatterns as cp
open=[14629.6,14473.95,14398.7,14580,14732.75]
high=[14652.5,14479.95,14619.75,14703.95,14798.3]
low=[14413.6,14281.65,14388.9,14553.7,14558.8]
close=[14493.3,14316.1,14605.3,14683.2,14632.7]
volume=[701000,834000,718000,726000,1130000]
print("Inside bar list:",cp.insidebar(open,high,low,close,volume))
print("Bullish engulfing list:",cp.bullishengulfing(open,high,low,close,volume))
print("Bearish engulfing list:",cp.bearishengulfing(open,high,low,close,volume))
print("Harami list:",cp.harami(open,high,low,close,volume))
print("Rising sun list:",cp.risingsun(open,high,low,close,volume))
print("Dark cloud cover list:",cp.darkcloud(open,high,low,close,volume)) | UTF-8 | Python | false | false | 701 | py | 98 | callingcandlesticks.py | 91 | 0.770328 | 0.547789 | 0 | 15 | 45.8 | 80 |
mgcrea/platform-teensy | 17,008,070,513,524 | 276450215ce77a1e7bb9a57b019df4be35d12bc8 | 788295429ecfb29f5d6bec8ed6a6efaee4ff1007 | /builder/frameworks/arduino.py | 2855874dcedc05e21677d394ada6c8dc0d12b7da | [
"Apache-2.0"
] | permissive | https://github.com/mgcrea/platform-teensy | 74e90506c3dfc02ba91c5733dafda17e18f6580b | de9b127478a19be96d2fba64f92bed29f3b91716 | refs/heads/master | "2023-06-07T12:44:38.554637" | "2018-05-02T13:03:28" | "2018-05-02T13:03:28" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Copyright 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Arduino
Arduino Wiring-based Framework allows writing cross-platform software to
control devices attached to a wide range of Arduino boards to create all
kinds of creative coding, interactive objects, spaces or physical experiences.
http://arduino.cc/en/Reference/HomePage
"""
from os import listdir
from os.path import isdir, isfile, join
from SCons.Script import DefaultEnvironment
env = DefaultEnvironment()
platform = env.PioPlatform()
FRAMEWORK_DIR = platform.get_package_dir("framework-arduinoteensy")
FRAMEWORK_VERSION = platform.get_package_version("framework-arduinoteensy")
assert isdir(FRAMEWORK_DIR)
BUILTIN_USB_FLAGS = (
"USB_SERIAL",
"USB_KEYBOARDONLY",
"USB_TOUCHSCREEN",
"USB_HID_TOUCHSCREEN",
"USB_HID",
"USB_SERIAL_HID",
"USB_MIDI",
"USB_MIDI4",
"USB_MIDI16",
"USB_MIDI_SERIAL",
"USB_MIDI4_SERIAL",
"USB_MIDI16_SERIAL",
"USB_AUDIO",
"USB_MIDI_AUDIO_SERIAL",
"USB_MIDI16_AUDIO_SERIAL",
"USB_MTPDISK",
"USB_RAWHID",
"USB_FLIGHTSIM",
"USB_FLIGHTSIM_JOYSTICK",
"USB_EVERYTHING",
"USB_DISABLED",
)
if not set(env.get("CPPDEFINES", [])) & set(BUILTIN_USB_FLAGS):
env.Append(CPPDEFINES=["USB_SERIAL"])
env.Append(
CPPDEFINES=[
("ARDUINO", 10805),
("TEENSYDUINO", int(FRAMEWORK_VERSION.split(".")[1]))
],
CPPPATH=[
join(FRAMEWORK_DIR, "cores", env.BoardConfig().get("build.core"))
],
LIBSOURCE_DIRS=[
join(FRAMEWORK_DIR, "libraries")
]
)
if "cortex-m" in env.BoardConfig().get("build.cpu", ""):
board = env.subst("$BOARD")
math_lib = "arm_cortex%s_math"
if board in ("teensy35", "teensy36"):
math_lib = math_lib % "M4lf"
elif board in ("teensy30", "teensy31"):
math_lib = math_lib % "M4l"
else:
math_lib = math_lib % "M0l"
env.Prepend(LIBS=[math_lib])
# Teensy 2.x Core
if env.BoardConfig().get("build.core") == "teensy":
env.Append(CPPPATH=[join(FRAMEWORK_DIR, "cores")])
# search relative includes in teensy directories
core_dir = join(FRAMEWORK_DIR, "cores", "teensy")
for item in sorted(listdir(core_dir)):
file_path = join(core_dir, item)
if not isfile(file_path):
continue
content = None
content_changed = False
with open(file_path) as fp:
content = fp.read()
if '#include "../' in content:
content_changed = True
content = content.replace('#include "../', '#include "')
if not content_changed:
continue
with open(file_path, "w") as fp:
fp.write(content)
else:
env.Prepend(LIBPATH=[join(FRAMEWORK_DIR, "cores", "teensy3")])
#
# Target: Build Core Library
#
libs = []
if "build.variant" in env.BoardConfig():
env.Append(
CPPPATH=[
join(FRAMEWORK_DIR, "variants",
env.BoardConfig().get("build.variant"))
]
)
libs.append(env.BuildLibrary(
join("$BUILD_DIR", "FrameworkArduinoVariant"),
join(FRAMEWORK_DIR, "variants", env.BoardConfig().get("build.variant"))
))
libs.append(env.BuildLibrary(
join("$BUILD_DIR", "FrameworkArduino"),
join(FRAMEWORK_DIR, "cores", env.BoardConfig().get("build.core"))
))
env.Prepend(LIBS=libs)
| UTF-8 | Python | false | false | 3,912 | py | 1 | arduino.py | 1 | 0.642382 | 0.633436 | 0 | 138 | 27.347826 | 79 |
nirajkale/numpy-dnn | 12,532,714,591,675 | ce5af91778f07737adc8172ef10dc05d45956569 | 8cd1ebf3a2c4ff5ece8a55d48c837357c7383859 | /Neural_Network.py | b3dea20538f32e06c17f422e5a4f087a5b562787 | [] | no_license | https://github.com/nirajkale/numpy-dnn | 7a064ecd15c6d324e9f656e0ffb05fc34be26c8a | d008ec5558a3c163d6fa350a46682f7f08cb3f79 | refs/heads/master | "2020-03-22T03:42:13.718986" | "2019-07-29T12:27:55" | "2019-07-29T12:27:55" | 139,447,350 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import FeatureEditor as fe
np.random.seed(3)
sigmoid = lambda z:1/(1+np.exp(-z))
def sigmoid_prime(z):
a = sigmoid(z)
return np.multiply(a,1-a)
Relu = lambda z: np.maximum(0,z)
Relu_prime = lambda z: np.where(z>=0,1,0)
Leaky_Relu = lambda z: np.maximum(0.01*z,z)
Leaky_Relu_prime = lambda z: np.where(z>=0,1,0.01)
getshape = lambda arr: [item.shape for item in arr]
def init_weights(layer_dist=[],multiplier=0.01):
w=[]
b=[]
for dims in zip(layer_dist,layer_dist[1:]):
w.append(np.random.randn(dims[1],dims[0])*multiplier)
b.append(np.zeros((dims[1],1)))
return w,b
def cost(y,a):
m = y.shape[1]
return np.sum(np.dot(y,np.log(a).T)+np.dot(1-y,np.log(1-a).T))/(-m)
def forward_prop(w,b,g,x,cache={}):
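    # cache['A'] collects the activations layer by layer (cache['A'][0] is the input x)
    # and cache['Z'] the matching pre-activations; g[i] is layer i's activation function.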
cache['A']=[x]
cache['Z']=[]
i=0
for weights,bias in zip(w,b):
cache['Z'].append( np.dot(weights,cache['A'][-1])+bias )
cache['A'].append( g[i](cache['Z'][-1]) )
i+=1
return cache
def backward_prop(w,b,gprime,y,cache):
assert(len(w)==len(cache['Z'])==len(gprime))
m = y.shape[1]
#init cache for differentials
cache['dZ']=list(np.zeros(len(w)))
cache['dW']=list(np.zeros(len(w)))
cache['db']=list(np.zeros(len(w)))
cache['dA']=list(np.zeros(len(w)))
#activation differential for last layer
cache['dA'][-1] = -(y/cache['A'][-1])+((1-y)/(1-cache['A'][-1]))
#start backward propagation from output to input layer
for l in reversed(range(len(w))):
cache['dZ'][l]= np.multiply( cache['dA'][l],gprime[l](cache['Z'][l]))
cache['dW'][l]= np.dot( cache['dZ'][l], cache['A'][l].T)/m
cache['db'][l]= np.sum( cache['dZ'][l],axis=1).reshape(b[l].shape)/m
if l!=0: #no need to calculate dA for input layer
            cache['dA'][l-1]= np.dot( w[l].T,cache['dZ'][l])
return cache
def gradient_descent(w,b,g,gprime,x,y,cache={},iterations=250,alpha=0.1,lambd=0):
costs=[]
for i in range(iterations):
cache=forward_prop(w,b,g,x)
cache=backward_prop(w,b,gprime,y,cache)
costs.append(cost(y,cache['A'][-1]))
#update weights & bias
for l in range(len(w)):
w[l]= w[l]-(alpha*cache['dW'][l])
b[l]= b[l]-(alpha*cache['db'][l])
return w,b,costs,cache
def cal_accuracy(w,b,g,x,y,data_type='training'):
m = y.shape[1]
cache = forward_prop(w,b,g,x,cache={})
predication=np.where(cache['A'][-1]>=0.5,1,0)
accuracy = np.sum(np.where(predication==y,1,0))
print(data_type+' accuracy=',(accuracy/m)*100,'%')
def train_nn(x,y,g,gprime,layer_dist=[],iterations=15000,alpha=12,lambd=0,show_plt=True):
#prep the neural network
w,b = init_weights(layer_dist)
cache = forward_prop(w,b,g,x,cache={})
print('initial cost=',cost(y,cache['A'][-1]))
##cache = backward_prop(w,b,gprime,y,cache)
w,b,costs,cache= gradient_descent(w,b,g,gprime,x,y,cache={},iterations=iterations,alpha=alpha,lambd=lambd)
print('final cost=',cost(y,cache['A'][-1]))
if show_plt:
plt.plot(costs)
plt.show()
return w,b,cache
def prepare_dataset():
file = r'C:\Users\niraj_shivajikale\Desktop\Machine Learning\Datasets\Credit Approval\crx.csv'
df= pd.read_csv(file)
fe.LabelCoding_Series(df,['Result','A1','A4','A5','A6','A7','A9','A10','A12','A13'])
return df
df= prepare_dataset()
#x_all =np.matrix(np.random.rand(2,m)%1)*10
#y_all = np.matrix(np.where(np.square(x_all[0,:])+np.square(x_all[1,:])<=25,1,0))
#x_all = np.insert(x_all,0,1,axis=0)
#split=int(m*0.65)
#x_train = x_all[:,:split]
#y_train = y_all[:,:split]
#x_test = x_all[:,split:]
#y_test = y_all[:,split:]
#g = [Relu,sigmoid]
#gprime = [Relu_prime,sigmoid_prime]
#w,b,cache=train_nn(x_train,y_train,g,gprime,layer_dist=[3,2,1],iterations=15000,alpha=12,lambd=0,show_plt=True)
#cal_accuracy(w,b,g,x_train,y_train,data_type='training')
#cal_accuracy(w,b,g,x_test,y_test,data_type='testing')
| UTF-8 | Python | false | false | 4,139 | py | 3 | Neural_Network.py | 2 | 0.586857 | 0.561005 | 0 | 122 | 31.92623 | 112 |
atleastzero/SuperHeroTeamDueler | 19,284,403,174,593 | 1f22c9afb0d317cc76c71cc53851f4a4a265a320 | b26bcc435c930676df21382cd405f55fa622893c | /animal.py | d973ef530ec8c3f54f20ea2dc902c8fda3fdf500 | [] | no_license | https://github.com/atleastzero/SuperHeroTeamDueler | a91b22775d4840fa7f5f103b943d058eb0bbb1df | a4a2eca95f917e1d00fe80f3883332fc92cd3772 | refs/heads/master | "2020-07-28T13:34:34.512563" | "2019-10-11T19:18:29" | "2019-10-11T19:18:29" | 209,426,270 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Animal:
def __init__(self, name, sleep_duration):
self.name = name
self.sleep_duration = sleep_duration
def sleep(self):
print(
"{} sleeps for {} hours".format(
self.name,
self.sleep_duration))
def eat(self):
print('{} is eating'.format(self.name))
def drink(self):
print('{} is drinking'.format(self.name))
| UTF-8 | Python | false | false | 430 | py | 4 | animal.py | 4 | 0.513953 | 0.513953 | 0 | 16 | 25.3125 | 49 |
benlaplanche/aoc-2020 | 4,071,629,045,503 | c03afc739ae461b662175cd87b1a1512a992d6ad | 5560e58b42296dca7b47ae85880d872e635c2908 | /day1/expenses.py | fa1ee3dddb2d8c48b676dd694a0625df178f6b58 | [] | no_license | https://github.com/benlaplanche/aoc-2020 | 256cd65e39094a6e1b7aef578384151743cad389 | a490b2746f1f29d451651a897125a76f59baa7bf | refs/heads/master | "2023-02-03T21:10:51.063527" | "2020-12-19T09:55:51" | "2020-12-19T09:55:51" | 322,804,688 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # expenses.py
def calculate_total(expense_list):
for index, current_val in enumerate(expense_list):
temp_list = expense_list
temp_list.pop(index)
for i, v in enumerate(temp_list):
if current_val + v == 2020:
return current_val * v
| UTF-8 | Python | false | false | 290 | py | 3 | expenses.py | 2 | 0.589655 | 0.575862 | 0 | 11 | 25.363636 | 54 |
antorsae/fish | 1,417,339,223,440 | 46465eaa3f1b58098e7274cd617626d7158316bc | 09d7cc3f7b7aca50edb41c27e3aa92f882c4bbfb | /kfish.py | a90e4d8b5e6accc40f7ae80fd5a6dd9c8f08b717 | [] | no_license | https://github.com/antorsae/fish | 7fe74913e22481441832d23b632c734e3703007f | f6b9c906ba17de1f4dcd05c7648351dec1d07749 | refs/heads/master | "2021-07-20T12:15:26.491324" | "2017-10-30T09:30:48" | "2017-10-30T09:30:48" | 105,634,092 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Keras playground for RNN seq prediction (DID NOT WORK)
from collections import OrderedDict
import itertools
import os
from fishdataset import SeqDataset, SubsetSampler,collate_seqs
from sklearn.model_selection import train_test_split
import argparse
import pandas as pd
import numpy as np
from keras.models import Model
from keras.layers import Input, Concatenate, GRU, Bidirectional, LSTM, Masking
from keras.layers.core import Dense, Flatten, Lambda, Dropout, Reshape
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from keras.models import load_model
from keras.optimizers import Adam, RMSprop
from sklearn.model_selection import train_test_split
import copy
from keras import backend as K
import random
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--test', action='store_true', help='Test model on test_crops.csv')
parser.add_argument('-lm', '--load-model', type=str, help='Load model from file')
parser.add_argument('-bs', '--batch-size', type=int, default=1, help='Batch size')
parser.add_argument('-l', '--learning-rate', type=float, default=1e-2, help='Learning rate')
parser.add_argument('-s', '--suffix', type=str, default=None, help='Suffix to store checkpoints')
args = parser.parse_args()
def get_model(max_length, n_features):
boat_seq = Input(shape=(max_length, n_features ))
l = Masking(mask_value=-1)(boat_seq)
l = LSTM(64,return_sequences=True, activation = 'tanh')(l)
# l = LSTM(64,return_sequences=True, activation = 'relu')(l)
#l = LSTM(64,return_sequences=True, activation = 'relu')(l)
l = LSTM(1, activation='sigmoid' ,return_sequences=True,)(l)
classification = l
outputs = [classification]
model = Model(inputs=[boat_seq], outputs=outputs)
return model
def gen(dataset, items, batch_size, training=True):
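    # Pads every (X, Y) sequence with -1 up to dataset.max_length (the value the
    # Masking layer in get_model ignores) and yields fixed-size batches forever.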
max_length = dataset.max_length
n_features = dataset.n_features
X_seqs = np.zeros((batch_size, max_length, n_features), dtype=np.float32)
Y_seqs = np.zeros((batch_size, max_length, 1), dtype=np.float32)
X_seqs[...] = -1
Y_seqs[...] = -1
i = 0
while True:
if training:
random.shuffle(items)
for item in items:
X_seq, Y_seq = dataset[item]
X_seq = X_seq[:, 1:]
Y_seq = Y_seq[:, 9:10]
#print(np.unique(Y_seq))
Y_seq = np.remainder(Y_seq, 2)
#print(np.unique(Y_seq))
#print(X_seq, Y_seq)
#print(X_seq.shape[0])
X_seqs[i, :min(X_seq.shape[0], max_length),...] = X_seq[:max_length, ...]
Y_seqs[i, :min(Y_seq.shape[0], max_length),...] = Y_seq[:max_length, ...]
i += 1
if i == batch_size:
yield X_seqs, Y_seqs
#print(Y_seqs[0])
X_seqs[...] = -1
Y_seqs[...] = -1
i = 0
TRAIN_X_CSV = 'train_crops_X.csv'
TRAIN_Y_CSV = 'train_crops_Y.csv'
dataset = SeqDataset(
X_csv_file=TRAIN_X_CSV,
Y_csv_file=TRAIN_Y_CSV,
)
dataset.max_length = 100
dataset.n_features = 8
model = get_model(dataset.max_length, dataset.n_features)
model.summary()
model.compile(loss='binary_crossentropy', optimizer=RMSprop(lr=args.learning_rate))
idx_train, idx_valid = train_test_split(range(len(dataset)), test_size=0.1, random_state=42)
model.fit_generator(
generator = gen(dataset, idx_train, args.batch_size),
steps_per_epoch = len(idx_train) // args.batch_size,
validation_data = gen(dataset, idx_valid, args.batch_size ,training=False),
validation_steps = len(idx_valid) // args.batch_size,
epochs =100,
#callbacks = [save_checkpoint, reduce_lr],
)
| UTF-8 | Python | false | false | 3,821 | py | 13 | kfish.py | 8 | 0.64067 | 0.628893 | 0 | 112 | 33.116071 | 98 |
Programming-TRIGON/Image-Processing-2019 | 4,793,183,542,353 | 2c79b12aa4fa66e457a3e35b5978bbb50cd64bc7 | 6224c36f19a6b7d5db8f33e8dc7d15c7234abdf9 | /tools/nt adder.py | 9dce7dd86ddc5782a4110f3a4751ccf0fc007c4b | [] | no_license | https://github.com/Programming-TRIGON/Image-Processing-2019 | 351b99ad427c0254265c8582de9e917a3d8d8e7a | 59baa5144623c9b79c6a771282dfa5068ef2c5be | refs/heads/master | "2020-04-16T08:19:35.572066" | "2019-03-11T15:11:59" | "2019-03-11T15:11:59" | 165,420,625 | 0 | 0 | null | false | "2019-03-11T15:07:29" | "2019-01-12T18:17:50" | "2019-03-06T05:59:58" | "2019-03-11T15:07:28" | 305 | 1 | 0 | 0 | Python | false | null | #!/usr/bin/env python3
#
# This is a NetworkTables client (eg, the DriverStation/coprocessor side).
# You need to tell it the IP address of the NetworkTables server (the
# robot or simulator).
#
# When running, this reads "key, value" pairs typed on stdin and publishes them
# as string entries that other networktables clients and the robot can see.
#
import sys
import time
from networktables import NetworkTables
# To see messages from networktables, you must setup logging
import logging
logging.basicConfig(level=logging.DEBUG)
ip = '10.59.90.2'
NetworkTables.initialize(server=ip)
sd = NetworkTables.getTable("ImageProcessing")
time.sleep(3)
while True:
keyVal = input('input smthn ').split(', ')
sd.putString(keyVal[0], keyVal[1])
| UTF-8 | Python | false | false | 746 | py | 14 | nt adder.py | 13 | 0.753351 | 0.738606 | 0 | 29 | 24.689655 | 75 |
EmmaYazhuo/- | 4,501,125,770,719 | 552ace32dd414e4fb64532de805aeb48a7f1e79b | e7478e49c3e64ecaf0bf60bba4a2145889be3f05 | /Amazon/2 sum.py | 938b0593aa4ca4d5722d9cfacfdae41eb97b5d5b | [] | no_license | https://github.com/EmmaYazhuo/- | 9cf730ac8b4bb8a2674a34defbd7162b8283dfb4 | d70ae1a5b0726d0943664aa21e23295e0a247297 | refs/heads/master | "2020-07-19T14:20:03.345852" | "2019-09-05T03:10:34" | "2019-09-05T03:10:34" | 206,463,616 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # https://www.1point3acres.com/bbs/interview/amazon-software-engineer-453412.html
target = 20
input = ([[1,8],[3,9], [2,15]], [[1,8], [2,11], [3,12]] )
output = [[1, 3], [3, 2]]
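# A minimal sketch (not in the original file): judging from the sample input and
# output above, the intended task seems to be picking one [id, value] pair from
# each list so the value sum is as large as possible without exceeding `target`,
# returning the ids of every such best pair. The helper below assumes that
# reading; `twosumclosest` underneath handles a flat list of numbers instead.
def closest_pairs(a, b, target):
    best, res = -1, []
    for id_a, val_a in a:
        for id_b, val_b in b:
            s = val_a + val_b
            if s > target:
                continue
            if s > best:
                best, res = s, [[id_a, id_b]]
            elif s == best:
                res.append([id_a, id_b])
    return res
# e.g. closest_pairs(input[0], input[1], target) -> [[1, 3], [3, 2]]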
def twosumclosest(nums, target):
dic={}
res=[]
for i, num in enumerate(nums):
if target - num in dic.keys():
res.append(dic[target - num])
res.append(i)
else:
dic[num]=i
return res | UTF-8 | Python | false | false | 430 | py | 21 | 2 sum.py | 17 | 0.534884 | 0.467442 | 0 | 17 | 24.352941 | 81 |
Hits-95/Python | 17,136,919,534,958 | 0520bd3dc29feb243721469fd5217926746be40c | 762635741a2a1ae9f033f70db2fa4b64fcbcef46 | /NumPy/random/6_binomial_distribution.py | 84dc82ab57ea13a1ce7e9f4e8d2b9634107ac78f | [] | no_license | https://github.com/Hits-95/Python | e8eaab9ad0bb1b35aff6642d4460377b27ea142b | 35852daa84a280ab054170649ccd7b01e05ce75e | refs/heads/master | "2023-02-04T06:46:56.109425" | "2020-12-23T17:41:24" | "2020-12-23T17:41:24" | 303,903,050 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Binomial distribution...
from numpy import random
import matplotlib.pyplot as plt
import seaborn as sns
#Given 10 trials for coin toss generate 10 data points:
print(random.binomial(n = 10, p = 0.5, size = 10))
#Visualization of Binomial Distribution
sns.distplot(random.binomial(n=10, p=0.5, size=100), hist=True, kde=False)
plt.show()
#Difference Between Normal and Binomial Distribution
sns.distplot(random.normal(loc=50, scale=5, size=1000), hist=False, label='normal')
sns.distplot(random.binomial(n=100, p=0.5, size=1000), hist=False, label='binomial')
plt.show()
| UTF-8 | Python | false | false | 577 | py | 93 | 6_binomial_distribution.py | 90 | 0.752166 | 0.694974 | 0 | 18 | 31 | 84 |
codewithgauri/HacktoberFest | 962,072,677,707 | fdbd3fe3d525b27a2c74a020937fbec6cb68dd1a | 9fb2139bf41e2301f9ee9069d649c5afe8e7735c | /python/Algorithms/Implementation/Cut the sticks.py | 7fddf17390e9866fbef7ddd64a919bc7337ef7e3 | [] | no_license | https://github.com/codewithgauri/HacktoberFest | 9bc23289b4d93f7832271644a2ded2a83aa22c87 | 8ce8f687a4fb7c3953d1e0a5b314e21e4553366e | refs/heads/master | "2023-01-02T07:20:51.634263" | "2020-10-26T07:02:34" | "2020-10-26T07:02:34" | 307,285,210 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from collections import Counter
def cutTheSticks(arr):
l=len(arr)
c = []
for k,v in sorted(Counter(arr).items()):
c.append(l)
l-=v
for i in c : print(i)
if __name__ == '__main__':
n = int(input())
arr = list(map(int, input().rstrip().split()))
cutTheSticks(arr)
| UTF-8 | Python | false | false | 312 | py | 617 | Cut the sticks.py | 484 | 0.538462 | 0.538462 | 0 | 18 | 16.333333 | 50 |
itheads-7/trail-repo | 3,564,822,895,999 | 4c661d1f10f2a42d4641e1103098d956bc5906bd | c1e09f6ca54466f00867f8c0b784f89bbd9dd7ec | /hari.py | ad0de62e3df4725b89656840af5eac743caf09cf | [] | no_license | https://github.com/itheads-7/trail-repo | b09829cbb07a58d14ce64cbf65eb324849ea32fd | 7604888b18f9fd0103cf9d936ae313a1efcc0be4 | refs/heads/main | "2023-09-04T13:49:10.343536" | "2021-11-22T12:11:25" | "2021-11-22T12:11:25" | 429,792,545 | 0 | 0 | null | false | "2021-11-22T12:04:24" | "2021-11-19T12:38:28" | "2021-11-19T13:02:15" | "2021-11-22T12:04:24" | 1 | 0 | 0 | 0 | Python | false | false | import pdb;
print("hello world")
| UTF-8 | Python | false | false | 33 | py | 1 | hari.py | 1 | 0.727273 | 0.727273 | 0 | 2 | 15.5 | 20 |
3leno4ka/python_basic_course | 3,229,815,445,709 | 310f242008eac048fc61257eebd8643d1517b6c4 | 15d8b541535ff8bc2ab218057a80fb562fbcd819 | /l4_iteration/l4_iteration.py | 232e9a32884c2cdc2e7fe503b80b1970dc5f3101 | [] | no_license | https://github.com/3leno4ka/python_basic_course | bb8b8217e2ba83ada77d52d620ed49c23a4c0e50 | 0ba31e8d7f8acd30297406f1832fc1f81c5065bb | refs/heads/master | "2021-04-12T10:08:00.487190" | "2018-05-09T08:24:09" | "2018-05-09T08:24:09" | 126,372,728 | 0 | 0 | null | false | "2018-05-09T14:25:24" | "2018-03-22T17:35:13" | "2018-05-09T08:24:12" | "2018-05-09T14:23:22" | 29 | 0 | 0 | 1 | Python | false | null |
low_digit_limit = 1
upper_digit_limit = 100
my_new_list = []
while True:
favorite_num = input("Enter your favorite number: ")
try:
favorite_num = int(favorite_num)
my_new_list.append(favorite_num)
if favorite_num < low_digit_limit:
print(f'Your input value is less than {low_digit_limit}')
elif favorite_num > upper_digit_limit:
print(f'Your input value is more than {upper_digit_limit}')
else:
break
except ValueError:
my_new_list.append(favorite_num)
print(my_new_list)
your_list = [favorite_num for favorite_num in my_new_list if type(favorite_num) == str]
print(your_list)
| UTF-8 | Python | false | false | 679 | py | 22 | l4_iteration.py | 21 | 0.631811 | 0.62592 | 0 | 20 | 32.7 | 87 |
true-datura/LightCourses | 12,601,434,092,902 | 47ce78127a6a88ed4a895be8f59df090945f3035 | bc8e7e1dc30a7a79aef5956a89cfa0308624727f | /classroom/flask_example.py | 6a6c6a50d1bb9b4a7f0b5a78b1a4e37c5775a2b3 | [] | no_license | https://github.com/true-datura/LightCourses | 19afbe4a6976036c23d68e66c0aca49ffb9adc5b | 31a00d14eb2e268048728775681d6794d9e1d4ac | refs/heads/master | "2016-09-17T12:26:53.960422" | "2016-09-01T14:23:19" | "2016-09-01T14:23:19" | 60,765,008 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask, request, render_template
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def index():
name = request.args.get('name')
return render_template('index.html', name=name)
@app.route('/<id>', methods=['GET', 'POST'])
def index_1(id):
name = id
return render_template('index.html', name=name)
# app.run()
# uwsgi --http :5000 --module flask_example:app
TechStephen/Space-Invaders | 11,244,224,392,188 | a9f3adbabaac475077960e3417633e4f7df5b324 | 99affc1e8a769ffe91633af692ca9b154a0cbde4 | /test.py | 8b1e6500fafe477da2dd9d3a3d77e24e3681356a | [] | no_license | https://github.com/TechStephen/Space-Invaders | 5fae1aea28c737acbf059025d75d2c19c2b12aa4 | b62291d7ba6827d5c55f96fdf5e3d2ed26468f26 | refs/heads/master | "2022-12-16T16:37:01.394199" | "2020-09-17T23:21:34" | "2020-09-17T23:21:34" | 296,463,138 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pygame, sys, os, time
""" Functions """
# when gun shoot make shooting sound
def pew():
pygame.mixer.Channel(1).play(pygame.mixer.Sound('shoot.wav'))
# when enemy hit make explosion sound
def hit():
pygame.mixer.Channel(2).play(pygame.mixer.Sound('hit.wav'))
# prints all xy positions of moving objs/imgs
def printGameStats():
print("-----Player X and Y -----")
print(str(x) + " " + str(y))
if(shot == True):
print("-----Bullet X and Y -----")
print(str(tempx) + " " + str(tempy))
print("-----Enemy X and Y's -----")
print(str(boss_x) + " " + str(boss_y) + '\n')
""" Variables """
# dimensions
width = 600
height = 800
# is done bool, for game loop
done = False
# if player shot bool
shot = False
# is game over bool
over = False
# player x and y
x = 350
y = 465
# player change x
lead_x = 0
# bullet change y
lead_y = 0
# enemies x and y's
boss_x = 450
boss_y = 40
# counter for each enemy, if not hit --> 0 if hit --> 1
cntr = 0
# boss laser timer
laserTimer = 0
# temp enemy x and y
tempx = 0
tempy = 0
# healthbar x and y
health_bar_x = 420
health_bar_y = 20
# cntr holding hit amount
hitCounter = 0
# boss laser xy
laserx = 0
lasery = 0
laserx2 = 0
lasery2 = 0
bossShot = False
bossShot2 = False
# enemy pos
en_x = 0
en_y = 250
en2_x = 100
en2_y = 250
en3_x = 200
en3_y = 250
en4_x = 300
en4_y = 250
en5_x = 400
en5_y = 250
hasLives = [True] * 3
# pixel size
pixel = 64
pixel2 = 45
# starting amount, enemy x movement
amount = 14
# health bar starting amount
health = 100
# counter
cnt = 0
cnt2 = 0
j = 0
cntr = 0
cntr2 = 0
cntr3 = 0
cntr4 = 0
cntr5 = 0
# boss bool
bossMoveX = False
bossMoveY = False
# RGB values
black = (0, 0, 0)
white = (255, 255, 255)
red = (255, 0, 0)
skyblue = (135,206,235)
blue = (0,0,255)
magenta = (175, 0, 175)
transparent = (0, 0, 0, 0)
""" Screen """
# mixer/pygame init
pygame.mixer.pre_init(44100, 16, 2, 4096) #frequency, size, channels, buffersize
pygame.init()
# screen
screen = pygame.display.set_mode((height, width), pygame.DOUBLEBUF, 32)
# caption
pygame.display.set_caption('Space Invaders')
# clock element
clock = pygame.time.Clock()
# background
bg = pygame.image.load("background.jpg")
# creates imgs
player = pygame.image.load('ship.png').convert_alpha()
enemy2 = pygame.image.load('enemy2.png').convert_alpha()
win = pygame.image.load('youwin.png').convert_alpha()
lose = pygame.image.load('youlose.png').convert_alpha()
boss = pygame.image.load('boss.png').convert_alpha()
heart = pygame.image.load('heart.png').convert_alpha()
lives = pygame.image.load('lives.png').convert_alpha()
""" Game Loop """
# run program
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
lead_x -= 11
if event.key == pygame.K_RIGHT:
lead_x += 11
if event.key == pygame.K_SPACE:
shot = True
pew()
tempx = x
tempy = y
lead_y += 20
""" Game Code """
# player movements
x += lead_x
if(x>650):
x = 650
elif(x<0):
x = 0
# boss movements
if(bossMoveX == False):
boss_x -= 10
health_bar_x -= 10
if(boss_x < 200):
bossMoveX = True
elif(bossMoveX == True):
boss_x += 10
health_bar_x += 10
if(boss_x > 500):
bossMoveX = False
# boss enemy movements
en_x += amount
if(en_x > 700):
amount += 0.08
en_x = 0
en_y += 30
en2_x += amount
if(en2_x > 700):
amount += 0.08
en2_x = 0
en2_y += 30
en3_x += amount
if(en3_x > 700):
amount += 0.08
en3_x = 0
en3_y += 30
en4_x += amount
if(en4_x > 700):
amount += 0.08
en4_x = 0
en4_y += 30
en5_x += amount
if(en5_x > 700):
amount += 0.08
en5_x = 0
en5_y += 30
# blits background
screen.blit(bg, (0, 0))
# blits lives
screen.blit(pygame.transform.scale(lives, (200,200)), (0,-50))
# blits hearts if player has life
if(hasLives[0] == True):
screen.blit(pygame.transform.scale(heart, (50,80)), (0,5))
if(hasLives[1] == True):
screen.blit(pygame.transform.scale(heart, (50,80)), (50,5))
if(hasLives[2] == True):
screen.blit(pygame.transform.scale(heart, (50,80)), (100,5))
# blits enemies if not hit
if(cntr == 0):
screen.blit(pygame.transform.scale(enemy2, (80,80)), (en_x,en_y))
if(cntr2 == 0):
screen.blit(pygame.transform.scale(enemy2, (80,80)), (en2_x,en2_y))
if(cntr3 == 0):
screen.blit(pygame.transform.scale(enemy2, (80,80)), (en3_x,en3_y))
if(cntr4 == 0):
screen.blit(pygame.transform.scale(enemy2, (80,80)), (en4_x,en4_y))
if(cntr5 == 0):
screen.blit(pygame.transform.scale(enemy2, (80,80)), (en5_x,en5_y))
# blits boss if not killed
if(health != 0):
screen.blit(pygame.transform.scale(boss, (150,150)), (boss_x,boss_y))
# creates bullet variables every 5 seconds
    laserTimer += 1  # advance the timer so the boss only fires periodically
    if(laserTimer % 100 == 0 and over == False):
laserx = boss_x + 20
lasery = boss_y + 100
laserx2 = boss_x + 100
lasery2 = boss_y + 100
bossShot = True
bossShot2 = True
# blits bullets
if(bossShot == True and over == False):
pygame.draw.rect(screen,red,[laserx,lasery,10,10])
lasery += 8
if(bossShot2 == True and over == False):
pygame.draw.rect(screen,red,[laserx2,lasery2,10,10])
lasery2 += 8
# blits player
screen.blit(pygame.transform.scale(player, (150,150)), (x,y))
# prints game stats
#printGameStats()
# creates health bar
if(hitCounter == 0):
pygame.draw.rect(screen,red,[health_bar_x,health_bar_y,200,10])
if(hitCounter == 1):
pygame.draw.rect(screen,red,[health_bar_x,health_bar_y,160,10])
if(hitCounter == 2):
pygame.draw.rect(screen,red,[health_bar_x,health_bar_y,120,10])
if(hitCounter == 3):
pygame.draw.rect(screen,red,[health_bar_x,health_bar_y,80,10])
if(hitCounter == 4):
pygame.draw.rect(screen,red,[health_bar_x,health_bar_y,40,10])
if(hitCounter == 5):
print("Win!")
# creates bullet and movement
if(shot == True):
pygame.draw.rect(screen,blue,[tempx+63,tempy+17,15,15])
tempy -= lead_y
# bullet speed and shot reset after bullet goes offscreen
if(tempy < 0):
shot = False
lead_y = 0
# bullet to enemies collision
if(shot == True):
if((tempx > en_x - pixel) and (tempy < en_y + pixel) and (tempx < en_x + pixel) and (tempy > en_y - pixel)):
hit()
cntr += 1
shot = False
en_x =-50
en_y=-50
elif((tempx > en2_x - pixel) and (tempy < en2_y + pixel) and (tempx < en2_x + pixel) and (tempy > en2_y - pixel)):
hit()
cntr2 += 1
shot = False
en2_x =-50
en2_y=-50
elif((tempx > en3_x - pixel) and (tempy < en3_y + pixel) and (tempx < en3_x + pixel) and (tempy > en3_y - pixel)):
hit()
cntr3 += 1
shot = False
en3_x =-50
en3_y=-50
# bullet to enemies collision
if(shot == True):
if(tempx > boss_x - pixel) and (tempy < boss_y + pixel):
hit()
health -= 20
print("Boss health: " + str(health))
hitCounter += 1
shot = False
# boss bullet to player collision
if((laserx > x - pixel2) and (lasery < y + pixel2) and (laserx < x + pixel2) and (lasery > y - pixel2) and (bossShot == True)):
print(str(x) + " " + str(y) + str(laserx) + " " + str(lasery))
hasLives[j] = False
j += 1
bossShot = False
# boss bullet2 to player collision
if((laserx2 > x - pixel2) and (lasery2 < y + pixel2) and (laserx2 < x + pixel2) and (lasery2 > y - pixel2) and (bossShot2 == True)):
print(str(x) + " " + str(y) + str(laserx) + " " + str(lasery))
hasLives[j] = False
j += 1
bossShot2 = False
# win/lose text
if(cntr > 4):
screen.blit(pygame.transform.scale(win, (300,300)), (250,100))
bossShot = False
bossShot2 = False
over = True
elif(y - en_y <= 45 and x - en_x < 0):
if(cntr == 0):
hasLives[j] = False
cntr += 1
j += 1
elif(y - en2_y <= 45 and x - en2_x < 0):
if(cntr2 == 0):
hasLives[j] = False
cntr2 += 1
j += 1
elif(y - en3_y <= 45 and x - en3_x < 0):
if(cntr3 == 0):
hasLives[j] = False
cntr3 += 1
j += 1
#elif((y - en_y <= 45 and x - en_x < 0) or (y - en2_y <= 45 and x - en2_x < 0) or (y - en3_y <= 45 and x - en3_x < 0) or (y - en4_y <= 45 and x - en4_x < 0) or (y - en5_y <= 45 and x - en5_x < 0) or (y - en6_y <= 45 and x - en6_x < 0)):
# screen.blit(pygame.transform.scale(lose, (300,300)), (180,100))
# over = True
""" Screen Update """
pygame.display.update()
screen.fill(skyblue)
clock.tick(32)
| UTF-8 | Python | false | false | 9,965 | py | 2 | test.py | 2 | 0.510988 | 0.456799 | 0 | 383 | 24.018277 | 240 |
hikelee/redmin | 5,093,831,246,082 | 22d9ac043507c23a988e0b8cd86df6b3ca12ac32 | 1fbad40a67a14728603e8523137422d781d0bbde | /redmin/models/domain.py | f95af66f53f60ed1b887ad767076a6190f09b6d2 | [] | no_license | https://github.com/hikelee/redmin | b3a06f077350136344182bc16f77109ac956950e | d67ddc23ac0a70b3208e177598e6a5b0bb1e3e0a | refs/heads/master | "2020-07-14T10:31:17.067783" | "2019-12-11T10:49:33" | "2019-12-11T10:49:33" | 205,300,635 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import django.apps
from django.db import models
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from filelock import FileLock
from redmin.utils import attr
from .base import RedminModel
from .group import Group
from .user import User
lock = FileLock("redmin_domain.lock")
lock.release(force=True)
cache = dict()
@receiver([post_save, post_delete])
def handle_model_change(sender, **kwargs):
if sender in [Domain, GroupDomain, UserDomain]:
with lock:
cache.clear()
def init_domain():
from redmin.models import Domain, RedminModel
domains = {}
for domain in Domain.objects.all():
domains[domain.app + "-" + domain.name] = domain
all_models = {}
for model in [model for model in django.apps.apps.get_models() if issubclass(model, RedminModel)]:
app = model.__module__.split(".")[0]
name = model.__name__
all_models[app + "-" + name] = model
type_map = {}
for flag, model in all_models.items():
domain = domains.get(flag)
title = model._meta.verbose_name
if domain:
if domain.title != title:
domain.title = title
domain.save()
else:
app, name = flag.split("-")
domain = Domain.objects.create(app=app, name=name, title=title)
type_map[model] = domain
for flag, domain in domains.items():
if flag not in all_models:
domain.delete()
return type_map
def get_map():
from collections import defaultdict
if not cache:
with lock:
domains = {domain.get_key(): domain for domain in Domain.objects.all()}
cache['domains'] = domains
cache['models'] = {model.get_domain_key(): model for model in django.apps.apps.get_models() if
issubclass(model, RedminModel)}
group_domains = defaultdict(dict)
for c in GroupDomain.objects.all():
group_domains[str(c.group.id)][c.get_key()] = c
cache['group_domains'] = group_domains
user_domains = defaultdict(dict)
for c in UserDomain.objects.all():
user_domains[str(c.user.id)][c.get_key()] = c
            cache['user_domains'] = user_domains
return cache
@receiver(post_save)
@receiver(post_delete)
def clear_map(sender, **kwargs):
if sender in [Group, User] or issubclass(sender, BaseDomain):
with lock:
cache.clear()
class BaseDomain(RedminModel):
class Meta:
abstract = True
title = models.CharField("描述", max_length=100)
sequence = models.IntegerField("排序", default=99999)
app = models.CharField("应用", max_length=100)
name = models.CharField("名称", max_length=100)
def __str__(self):
return self.title
@classmethod
def get(cls, app, name):
        return cls.get_by_key(f"{app}_{name}")
@classmethod
def get_by_user_and_model(cls, user, model):
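        # Resolution order: a per-user override wins, then the user's group
        # override, then the global Domain entry for this model.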
cache = get_map()
key = model.get_domain_key()
return attr(cache, f'user_domains.{user.id}.{key}') or attr(cache, f'group_domains.{user.group_id}.{key}') or attr(cache, f'domains.{key}')
@classmethod
def get_by_model(cls, model):
return get_map()['domains'][model.get_domain_key()]
def get_model(self):
return get_map()['models'][self.get_key()]
def get_key(self):
return self.app + "_" + self.name
@classmethod
def get_by_key(cls, key):
return get_map()['domains'][key]
@classmethod
def get_model_by_key(cls, key):
return get_map()['models'][key]
class Domain(BaseDomain):
class Meta:
ordering = ['sequence', "id"]
verbose_name_plural = verbose_name = "模型"
unique_together = [("app", "name")]
class GroupDomain(BaseDomain):
class Meta:
ordering = ['group', 'sequence']
verbose_name_plural = verbose_name = "用户组模型"
unique_together = ("group", "app", "name")
group = models.ForeignKey(Group, on_delete=models.CASCADE)
class UserDomain(BaseDomain):
class Meta:
ordering = ['user', 'sequence']
verbose_name_plural = verbose_name = "用户模型"
unique_together = ("user", "app", "name")
user = models.ForeignKey(User, on_delete=models.CASCADE)
| UTF-8 | Python | false | false | 4,394 | py | 98 | domain.py | 88 | 0.604224 | 0.600781 | 0 | 148 | 28.432432 | 147 |
Aasthaengg/IBMdataset | 884,763,301,039 | a8196966c14da622dfe555f5b63f3dc351821c9f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02554/s585641078.py | 3be12755fabd91f31e027dfba69a2001a3882d56 | [] | no_license | https://github.com/Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | "2023-04-22T10:22:44.763102" | "2021-05-13T17:27:22" | "2021-05-13T17:27:22" | 367,112,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | n=int(input())
print((10**n-9**n*2+8**n)%(10**9+7)) | UTF-8 | Python | false | false | 51 | py | 202,060 | s585641078.py | 202,055 | 0.509804 | 0.333333 | 0 | 2 | 25 | 36 |
liuyepku/take-home | 7,112,465,892,246 | 11188c44096e0e9ff932e408584b37507651e483 | a3f25784ef31c2e79f373724b8d285fb2729ac50 | /Q6.py | ed85df0f46b54923fd8c21a03042f471843e1ac1 | [] | no_license | https://github.com/liuyepku/take-home | cef4ea7fd6e3c12358ccacebe192a4103d638254 | 96511c55730a8e78240b40af04647a964658f292 | refs/heads/master | "2020-03-22T04:12:10.957578" | "2018-07-04T09:31:59" | "2018-07-04T09:31:59" | 139,480,730 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import csv
import time
import matplotlib.pyplot as plt
import numpy as np
file = open('data/twitter_marketplace_data_5.csv', 'r')
lines = csv.reader(file)
head_row = next(lines)
print head_row
# for line in lines:
# print line[7]
cnt_app = 0
cnt_app_see = 0
cnt_vid = 0
cnt_vid_see = 0
cnt_web = 0
cnt_web_see = 0
for line in lines:
if line[5] == 'APP_INSTALLS':
cnt_app+=1
if line[7] == 'True':
cnt_app_see+=1
if line[5] == 'VIDEO_VIEWS':
cnt_vid += 1
if line[7] == 'True':
# print line
cnt_vid_see += 1
if line[5] == 'WEBSITE_CLICKS':
cnt_web += 1
if line[7] == 'True':
# print line
cnt_web_see += 1
print "the user observation rate of app install is {}".format(cnt_app_see*1.0/cnt_app)
print "the user observation rate of video view is {}".format(cnt_vid_see*1.0/cnt_vid)
print "the user observation rate of web clicks is {}".format(cnt_web_see*1.0/cnt_web)
| UTF-8 | Python | false | false | 988 | py | 6 | Q6.py | 6 | 0.589069 | 0.562753 | 0 | 37 | 25.702703 | 86 |
charner93/hello-world | 4,389,456,613,728 | 1673ef1587df59fb0cd05befa3d1c88b81b95175 | 529526727f6b08ba9008bf2ea4b48fae9336fb1e | /test.py | 139816690274ba84aef17836199a96f2589dd806 | [] | no_license | https://github.com/charner93/hello-world | 3d3304fbe666e2e3790f07a317a13488ae747be2 | 2852948f6e2d3d15465acdd867a539a8d174114e | refs/heads/master | "2017-11-12T12:05:26.135778" | "2017-03-09T16:41:19" | "2017-03-09T16:41:19" | 84,334,975 | 0 | 0 | null | false | "2017-03-09T16:41:20" | "2017-03-08T15:21:03" | "2017-03-08T23:50:35" | "2017-03-09T16:41:20" | 9 | 0 | 0 | 1 | C | null | null | def swap (var1, var2):
print(var2+","+var1+". Hello!")
return
def pig (var1, var2):
print(var1[1:] + var1[0] + "ay", end = " ")
print(var2[1:] + var2[0] + "ay", end = " ")
return
firstname = input("Please enter your first name:")
lastname = input("Please enter your last name:")
swap(firstname,lastname);
answer = input("Would you like your name displayed in Pig Latin?")
if (answer.lower() == "yes"):
pig(firstname,lastname);
else:
print("Have a wonderful day "+firstname+","+lastname)
| UTF-8 | Python | false | false | 505 | py | 3 | test.py | 1 | 0.639604 | 0.611881 | 0 | 17 | 28.705882 | 66 |
CChBen/YoloV3 | 13,657,996,022,563 | 8a877bd182915f3a2edad86bc8cc02f6d40927d9 | 51512dbc39edbe8ac2de76acabc7fcde7100269d | /trainer.py | 96c60d03e8c99a6a0af427003b46a28f3533a37f | [] | no_license | https://github.com/CChBen/YoloV3 | fb325f271423ceb20c439c716b68811264e7d492 | 7c3d5f76e46b6906cdc85285107f2ab9c4e2dffa | refs/heads/master | "2020-08-08T12:08:57.929867" | "2020-04-27T13:38:12" | "2020-04-27T13:38:12" | 213,826,884 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import torch.nn as nn
import torch
from dataset import Dataset
from net import MainNet
from torch.utils.data import DataLoader
import os
# Initialize parameters with a normal distribution
def weight_init(m):
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class Trainer:
def __init__(self, net_path):
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.net_path = net_path
self.loss_text_name = net_path.split("/")[1].split(".")[0]
self.text_path = "data/loss/{}.txt".format(self.loss_text_name)
self.net = MainNet().to(self.device) # yolov3
self.dataset = Dataset()
self.train_data = DataLoader(self.dataset, batch_size=5, shuffle=False)
self.mse_loss = nn.MSELoss()
self.bceloss = nn.BCELoss()
self.optimizer = torch.optim.Adam(self.net.parameters())
# self.optimizer = torch.optim.SGD(self.net.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0005)
if os.path.exists(self.net_path):
self.net.load_state_dict(torch.load(self.net_path))
# else:
# self.net.apply(weight_init)
self.net.train()
def get_loss(self, output, labels, weight):
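        # Assumed label layout (inferred from the slicing below): per anchor,
        # [cx, cy, w, h, conf, class scores...]; conf > 0 marks a positive sample.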
labels = labels.to(self.device)
        # Reshape to n*h*w*c
output = output.permute(0, 2, 3, 1)
        # Reshape to n*h*w*3*cls_num
output = output.reshape(output.size(0), output.size(1), output.size(2), 3, -1)
        # Train the positive samples' center point, width/height, confidence and class
indexs_positive = torch.gt(labels[..., 4], 0)
loss_positive_other = self.mse_loss(output[indexs_positive][:, 0:5], labels[indexs_positive][:, 0:5])
loss_positive_cls = self.bceloss(torch.sigmoid_(output[indexs_positive][:, 5:]), labels[indexs_positive][:, 5:])
loss_positive = loss_positive_other + loss_positive_cls
        # Train the negative samples' confidence
indexs_negative = torch.eq(labels[..., 4], 0)
loss_negative_conf = self.mse_loss(output[indexs_negative][:, 4], labels[indexs_negative][:, 4])
loss = weight * loss_positive + (1 - weight) * loss_negative_conf
return loss
def train(self):
epoch = 1
loss_new = 100
weight = 0.7
        # Used to log the loss
file = open(self.text_path, "w+", encoding="utf-8")
for _ in range(10000):
for i, (labels_13, labels_26, labels_52, image_data) in enumerate(self.train_data):
image_data = image_data.to(self.device)
output_13, output_26, output_52 = self.net(image_data)
loss_13 = self.get_loss(output_13, labels_13, weight)
loss_26 = self.get_loss(output_26, labels_26, weight)
loss_52 = self.get_loss(output_52, labels_52, weight)
loss_total = loss_13 + loss_26 + loss_52
self.optimizer.zero_grad()
loss_total.backward()
self.optimizer.step()
print("第{0}轮,第{1}批,损失为:{2}".format(epoch, i, loss_total.item()))
file.write("{} {} {}\n".format(epoch, i, loss_total.item()))
file.flush()
if loss_total.item() < loss_new:
loss_new = loss_total.item()
torch.save(self.net.state_dict(), self.net_path)
epoch += 1
if __name__ == '__main__':
a = torch.Tensor([[0.6875, 0.25, -0.23180161, -0.13036182, 0.69616858],
[0.6875, 545, 0, -0.13036182, 0.69616858]])
print(torch.argmax(a, dim=1))
print(torch.nn.functional.sigmoid(a))
| UTF-8 | Python | false | false | 3,681 | py | 14 | trainer.py | 12 | 0.572068 | 0.528967 | 0 | 86 | 40.546512 | 120 |
ocuss/yedekler | 13,211,319,439,599 | 01f31e035fad1aa5b9e425233866127e438eeab1 | acfed6baaee7faffe1e2732b151e326fdd08634c | /format2.py | 10566572ecff25914a8ed6274c783823b2cb6b91 | [] | no_license | https://github.com/ocuss/yedekler | 9f89bda84d9b12f6f688009aaa5c8c9b061d2d3e | 7b34a08384521c1f0c030a4bea03f289d4566b67 | refs/heads/master | "2020-06-23T22:57:30.376369" | "2019-07-25T07:10:23" | "2019-07-25T07:10:23" | 198,778,146 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | x = """
meyveler : çilek,karpuz,muz
fruits : {} {} {}
""".format("strawberry","watermelon","banana")
print(x) | UTF-8 | Python | false | false | 112 | py | 47 | format2.py | 47 | 0.603604 | 0.603604 | 0 | 5 | 21.4 | 46 |
heng98/FYP-Context-based-Recommendation | 7,902,739,851,385 | c9bfc976487c23b5be49981e67e1cdf1c695d984 | 0be7e3fd4502105c8bc4c6a24db5f71ce5a1deb0 | /Transformer/ranker.py | d8ef9dad18e507bd4d3c8af08ca9cd92c0845f3b | [] | no_license | https://github.com/heng98/FYP-Context-based-Recommendation | 52776a91f56b8d9dea01d2b488d9edaf5df825dd | 4eb01934498de35f717de00ab10843cb93d42f6e | refs/heads/master | "2023-03-29T15:11:36.676978" | "2021-04-07T07:10:40" | "2021-04-07T07:10:40" | 291,781,982 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import torch
from torchtext.data.utils import get_tokenizer
import numpy as np
import fasttext
class Ranker:
"""Currently for simple reranker"""
def __init__(self, reranker_model, doc_embedding_vectors, device, fasttext_path):
self.reranker_model = reranker_model
self.doc_embedding_vectors = doc_embedding_vectors
self.device = device
self.model = fasttext.load_model(fasttext_path)
self.tokenizer = get_tokenizer("basic_english")
def rank(self, query_embedding, candidates, query_data, candidate_data):
query_title_embedding = self.model.get_sentence_vector(query_data["title"])
candidate_title_embedding = np.stack(
[self.model.get_sentence_vector(c["title"]) for c in candidate_data]\
)
query_abstract_embedding = self.model.get_sentence_vector(query_data["abstract"])
candidate_abstract_embedding = np.stack(
[self.model.get_sentence_vector(c["abstract"]) for c in candidate_data]\
)
query_title_embedding = torch.from_numpy(query_title_embedding).expand(
candidate_title_embedding.shape[0], -1
).to(self.device)
candidate_title_embedding = torch.from_numpy(candidate_title_embedding).to(self.device)
query_abstract_embedding = torch.from_numpy(query_abstract_embedding).expand(
candidate_abstract_embedding.shape[0], -1
).to(self.device)
candidate_abstract_embedding = torch.from_numpy(candidate_abstract_embedding).to(self.device)
candidates_idx = [c[0] for c in candidates]
query_embedding = query_embedding.expand(len(candidates), -1).to(self.device)
candidates_embedding = torch.from_numpy(
self.doc_embedding_vectors[candidates_idx]
).to(self.device)
cos_similarity = (
torch.nn.functional.cosine_similarity(query_embedding, candidates_embedding)
.unsqueeze(0)
.T
)
tokenized_query_abstract = self.tokenizer(query_data["abstract"])
tokenized_candidate_abstract = [
self.tokenizer(c["abstract"]) for c in candidate_data
]
jaccard = self._jaccard(tokenized_query_abstract, tokenized_candidate_abstract)
jaccard = torch.tensor(jaccard, device=self.device).unsqueeze(0).T
intersection_feature = torch.from_numpy(
self._intersection_feature(
tokenized_query_abstract, tokenized_candidate_abstract
)
).to(self.device)
confidence = self.reranker_model(
query_title_embedding,
candidate_title_embedding,
query_abstract_embedding,
candidate_abstract_embedding,
jaccard,
intersection_feature,
cos_similarity
)
confidence = confidence.flatten().tolist()
reranked_candidates = [
(c["ids"], conf) for c, conf in zip(candidate_data, confidence)
]
return sorted(reranked_candidates, key=lambda x: x[1], reverse=True)
def _jaccard(self, text_1, text_2):
result = []
tokenized_t1 = set(text_1)
tokenized_t2 = [set(t) for t in text_2]
for t2 in tokenized_t2:
union = len(set(tokenized_t1).union(set(t2)))
if union > 0:
result.append(len(set(tokenized_t1).intersection(set(t2))) / union)
else:
result.append(0)
return result
def _intersection_feature(self, text_1, text_2):
tokenized_text_1_set = set(text_1)
tokenized_text_2_set = [set(t2) for t2 in text_2]
result_intersection_feature = np.empty((len(text_2), self.model.get_dimension()), dtype="float32")
for j, t2 in enumerate(tokenized_text_2_set):
intersection = tokenized_text_1_set.intersection(t2)
intersection_feature = np.empty((len(intersection), self.model.get_dimension()))
for i, word in enumerate(intersection):
intersection_feature[i, :] = self.model.get_word_vector(word)
intersection_feature = np.sum(intersection_feature, axis=0) / (
max(
np.linalg.norm(
np.sum(intersection_feature, axis=0), 2
),
1e-8
)
)
result_intersection_feature[j, :] = intersection_feature
return result_intersection_feature
class TransformerRanker:
def __init__(self, reranker_model, device, tokenizer):
self.reranker_model = reranker_model
self.device = device
self.tokenizer = tokenizer
def rank(self, query, candidates):
query_text = query["title"] + query["abstract"]
candidates_text = [c["title"] + c["abstract"] for c in candidates]
candidates_ids = [c["ids"] for c in candidates]
query_encoded = self._encode(query_text)
candidates_encoded = self._encode(candidates_text)
candidates_encoded["input_ids"][:, 0] = self.tokenizer.sep_token_id
for k in query_encoded:
query_encoded[k] = query_encoded[k].expand_as(candidates_encoded[k])
combined_encoded = {
k: torch.cat([query_encoded[k], candidates_encoded[k]], axis=1).to(
self.device
)
for k in query_encoded
}
similarity = torch.flatten(
torch.sigmoid(self.reranker_model(**combined_encoded)["logits"])
)
sorted_sim, indices = torch.sort(similarity, descending=True)
sorted_sim = sorted_sim.tolist()
indices = indices.tolist()
reranked_candidates = [
(candidates_ids[idx], sim) for idx, sim in zip(indices, sorted_sim)
]
return reranked_candidates
def _encode(self, text):
return self.tokenizer(
text,
padding="max_length",
max_length=256,
truncation=True,
return_tensors="pt",
)
| UTF-8 | Python | false | false | 6,061 | py | 37 | ranker.py | 33 | 0.599901 | 0.591982 | 0 | 171 | 34.444444 | 106 |
akaytatsu/inscricao_conferencia | 10,179,072,517,404 | 6f4638f16baf9494809b32f523d3fc26560556e4 | e71e6f545790e58447dc94bc404b215388b7e65b | /apps/financeiro/migrations/0011_auto_20200305_1510.py | 01739b6631cd4c816c987d84cfc7679b570dc8b5 | [
"MIT"
] | permissive | https://github.com/akaytatsu/inscricao_conferencia | e53e04314ebabd9a71d634d3ea3d6f456edd3310 | 9ab774c6fe30cdb1a45d3732ade394df6e3b4258 | refs/heads/master | "2020-09-23T08:50:36.352756" | "2020-07-19T17:33:10" | "2020-07-19T17:33:10" | 225,456,761 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.0.3 on 2020-03-05 15:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('financeiro', '0010_auto_20200212_1655'),
]
operations = [
migrations.AlterField(
model_name='comprovantes',
name='comprovante',
field=models.FileField(blank=True, null=True, upload_to='comprovantes/'),
),
]
| UTF-8 | Python | false | false | 435 | py | 101 | 0011_auto_20200305_1510.py | 65 | 0.611494 | 0.54023 | 0 | 18 | 23.166667 | 85 |
iamhuy/bitpump | 10,264,971,845,042 | 77bf4903ccd3de771cff8f74e2bdfb73184d8189 | 3ff0b2d9d5030e87287fa6793e6ebb087e438e23 | /activity/migrations/0003_auto_20190726_1234.py | af0965aa717f7c38902515585f3b0e4dc29252fe | [] | no_license | https://github.com/iamhuy/bitpump | ffad8d589dbc2b1080e0c3657b7563f7e74a1925 | d865f4fb4fbf51a9b34890c88e6a648cb57a4c6c | refs/heads/master | "2021-06-15T00:21:43.520730" | "2019-07-27T03:52:20" | "2019-07-27T03:52:20" | 198,956,283 | 0 | 0 | null | false | "2021-06-02T00:08:17" | "2019-07-26T06:04:10" | "2019-07-27T03:52:41" | "2021-06-02T00:08:15" | 79 | 0 | 0 | 3 | Python | false | false | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('activity', '0002_auto_20190726_1212'),
]
operations = [
migrations.AlterModelTable(
name='activity',
table='activity_tab',
),
]
| UTF-8 | Python | false | false | 360 | py | 23 | 0003_auto_20190726_1234.py | 21 | 0.586111 | 0.538889 | 0 | 18 | 19 | 48 |
BasementCat/audio-reactive-led-strip | 18,597,208,406,660 | 3e281a9ec185e9a721706a2ed60a3c2f5f81a56f | 7f760365660de815db319d20bb05e1fbd5fc8df4 | /webgui/app/views/editor.py | b051de62bf92813d9a7b7f31b1e51da7276b1e94 | [
"MIT"
] | permissive | https://github.com/BasementCat/audio-reactive-led-strip | db5ac94eb3c43dfdb6a79501d6d8711579d41c51 | a98bac8e04c0fae3022de9f5086914dc1f1192d8 | refs/heads/master | "2022-07-21T12:39:06.257207" | "2022-07-14T01:12:08" | "2022-07-14T01:12:08" | 216,214,804 | 2 | 0 | MIT | true | "2019-10-19T13:58:07" | "2019-10-19T13:58:07" | "2019-10-17T08:40:24" | "2019-10-03T12:31:05" | 34,939 | 0 | 0 | 0 | null | false | false | from flask import Blueprint, render_template, abort, redirect, url_for
from app.lib.forms import EffectForm, EffectGroupForm, EffectStackForm
from app import database as db
from app.lib.database import ObjProxy
app = Blueprint('editor', __name__)
@app.route('/')
def index():
return render_template('editor/index.jinja.html', db=db)
@app.route('/edit/<type_>/new', methods=['GET', 'POST'])
@app.route('/edit/<type_>/<name>', methods=['GET', 'POST'])
def edit(type_, name=None):
obj = None
key = None
form_cls = None
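    # map the URL type segment to its database collection key and edit-form class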
if type_ == 'effect':
key = 'effects'
form_cls = EffectForm
elif type_ == 'group':
key = 'effect_groups'
form_cls = EffectGroupForm
elif type_ == 'stack':
key = 'effect_stacks'
form_cls = EffectStackForm
else:
abort(400, "Invalid type")
if name:
obj = db[key].get(name)
if obj:
obj = ObjProxy(obj)
form = form_cls(obj=obj)
if name and obj is None:
abort(404, "No such " + type_)
if form.validate_on_submit():
with db:
obj = obj or ObjProxy({})
form.populate_obj(obj)
db[key][obj.name] = obj.data
db.save()
return redirect(url_for('.index'))
return render_template('editor/form.jinja.html', form=form)
| UTF-8 | Python | false | false | 1,340 | py | 44 | editor.py | 33 | 0.583582 | 0.579104 | 0 | 55 | 23.363636 | 70 |
SHYWM1234/PythonEx | 19,559,281,067,419 | 349618633eb246de5c4d229636769c2b3dad0da0 | 858dfad006587f38ddad75150409ee164a84aa59 | /PythonBasic/print s.py | dea7d0182bb47bd4a071db664835fe283383fb9d | [] | no_license | https://github.com/SHYWM1234/PythonEx | 76cff23cdb998305c78ea4a78d52992885e5b385 | badcb2527a94fe73111f9903be2a9fc763b59643 | refs/heads/master | "2023-02-24T19:53:52.694459" | "2021-01-20T15:11:09" | "2021-01-20T15:11:09" | 328,164,974 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import turtle
import time
def gap () :
turtle.penup()
turtle.fd(5)
def drawline ( draw ) :
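    # draw one segment: pen down if the segment should be lit, pen up otherwise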
gap()
if draw :
turtle.pendown ()
else :
turtle.penup ()
turtle.forward(40)
gap()
turtle.right(90)
def drawnum (num) :
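    # light the appropriate segments of a seven-segment display for this digit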
    if num in [ '2' , '3' , '4' , '5' , '6' , '8' , '9' ] : # segment 1
drawline (True)
else :
drawline (False)
    if num in [ '0' , '1' , '3' , '4' , '5' , '6' , '7' , '8' , '9' ] : # segment 2
drawline (True)
else :
drawline (False)
    if num in [ '0' , '2' , '3' , '5' , '6' , '8' , '9' ] : # segment 3
drawline (True)
else :
drawline (False)
    if num in [ '2' , '6' , '8' , '0' ] : # segment 4
drawline (True)
else :
drawline (False)
    turtle.left(90) # turn left 90 degrees
    if num in [ '0' , '4' , '5' , '6' , '8' , '9' ] : # segment 5
drawline (True)
else :
drawline (False)
    if num in [ '0' , '2' , '3' , '5' , '6' , '7' , '8' , '9' ] : # segment 6
drawline (True)
else :
drawline (False)
    if num in [ '0' , '1' , '2' , '3' , '4' , '7' , '8' , '9' ] : # segment 7
drawline (True)
else :
drawline (False)
turtle.right(180)
turtle.penup()
turtle.forward(30)
def main() :
c = eval(input())
turtle.setup(800, 350, 200, 200)
turtle.speed(10000)
turtle.pensize(5)
turtle.penup()
turtle.fd(-300)
turtle.hideturtle()
for i in range(c) :
turtle.goto(0,0)
drawnum(str(c - i))
        turtle.write('秒' )  # writes the character for "seconds"
time.sleep(1)
turtle.clear()
main()
| UTF-8 | Python | false | false | 1,596 | py | 38 | print s.py | 37 | 0.446292 | 0.388747 | 0 | 62 | 24.225806 | 76 |
runtangr/qidian_spider | 13,451,837,574,890 | 28b9b423dfcb088858dc4fc6fa802d7077c5999e | bd1e01a98af2dcd1567ef4e985fdd2392a9b9176 | /qidian_spider/spiders/qidian_spider.py | 3c0a89c4814ec48ff6abac8dd9fef1f8f2259ed9 | [] | no_license | https://github.com/runtangr/qidian_spider | 23f345b9d35c8d9d6b1594357cdbacea35e43951 | a8fc36e83ba5ef2de17f78535df2575f75404db7 | refs/heads/master | "2023-01-10T17:25:38.549822" | "2018-02-24T01:15:41" | "2018-02-24T01:15:41" | 119,805,641 | 4 | 1 | null | false | "2022-12-30T17:57:26" | "2018-02-01T08:23:17" | "2021-11-02T18:26:06" | "2022-12-30T17:57:24" | 4,904 | 2 | 1 | 11 | Python | false | false | import scrapy
from qidian_spider.items import QidianSpiderItem
from scrapy.http import Request
import time
import selenium
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.remote.remote_connection import LOGGER
import logging
from scrapy import log
import datetime
LOGGER.setLevel(logging.WARNING)
class QidianSpider(scrapy.Spider):
name = "qidian"
allowed_domains = ["qidian.com"]
start_urls = [
"https://www.qidian.com/all"
]
def __init__(self, *args, **kwargs):
self.driver = webdriver.PhantomJS()
# self.driver = webdriver.Chrome()
self.driver.set_window_size(1080, 800)
super(QidianSpider, self).__init__(*args, **kwargs)
def parse(self, response):
if response.status == 400:
time.sleep(2)
            # request failed; retry the same page
yield Request(url=response.url, callback=self.parse, dont_filter=True)
return
try:
next_table = response.xpath('//a[contains(@class, "lbf-pagination-next")]')[0].extract()
except IndexError:
            # pagination element missing, the page probably failed to load; request it again
yield Request(url=response.url, callback=self.parse, dont_filter=True)
return
try:
next_link_url = response.xpath('//a[contains(@class, "lbf-pagination-next")]/@href')[0].extract()
except IndexError:
            # no next-page link: reached the last page
return
if next_link_url:
yield Request(url="https://" + next_link_url, callback=self.parse)
for detail_link in response.xpath('//div[@class="book-mid-info"]/h4/a/@href').extract():
if detail_link:
yield Request(url="https://" + detail_link, callback=self.parse_detail)
def parse_detail(self, response):
qidian_item = QidianSpiderItem()
qidian_item["book_name"] = response.xpath('//div[@class="book-info "]/h1/em/text()')[0].extract()
qidian_item["auth"] = response.xpath('//div[@class="book-info "]/h1/span/a/text()')[0].extract()
qidian_item["type"] = response.xpath('//div[@class="book-info "]/p[@class="tag"]/a/text()').extract()
qidian_item["status"] = response.xpath('//div[@class="book-info "]/p/span/text()')[0].extract()
qidian_item['book_covor_image_url'] = "https:" + response.xpath('//div[@class="book-img"]/a/img/@src')[0].extract()
qidian_item['original_url'] = response.url
book_id = response.url.split('/')[-1]
qidian_item['book_id'] = book_id
try:
qidian_item["brief"] = response.xpath('//div[@class="book-info "]/p[@class="intro"]/text()')[0].extract()
except IndexError:
qidian_item["brief"] = ''
        # fetch the score and comment count with selenium
self.driver.get(response.url)
wait = WebDriverWait(self.driver, timeout=10)
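        # wait until the score element contains '.', i.e. the rating has finished rendering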
try:
wait_result = wait.until(EC.text_to_be_present_in_element(
(By.XPATH, '//*[@id="j_bookScore"]'),
'.'))
except selenium.common.exceptions.TimeoutException:
log.msg("this book don't have grade:{}".format(qidian_item),
level=log.WARNING)
qidian_item['score'] = '0'
qidian_item['comment_num'] = 0
qidian_item['update_time'] = datetime.datetime.utcnow()
yield qidian_item
            # return here to stop further processing of this item
return
score = self.driver.find_element_by_xpath('//*[@id="j_bookScore"]').text
comment_num = self.driver.find_element_by_xpath('//*[@id="j_userCount"]/span').text
qidian_item['score'] = score
qidian_item['comment_num'] = int(comment_num)
qidian_item['update_time'] = datetime.datetime.utcnow()
yield qidian_item
| UTF-8 | Python | false | false | 3,962 | py | 11 | qidian_spider.py | 9 | 0.606512 | 0.59995 | 0 | 103 | 37.466019 | 123 |
kanamycine/surely-kill-algorithm | 85,899,360,576 | 9ccc822762ddf78bc21e3edc5243689b765fbd1a | 97c18798ed2bb8bf0ea8924a640a7eb018212065 | /[0824]Algorithm/보충_2050.py | 39e0855c8d394824782b720b12dc49abc657ca09 | [] | no_license | https://github.com/kanamycine/surely-kill-algorithm | 21a8899f522f44659107f2556eea3bc48b343cc9 | 7fb2345b29dc0e2ddc3df9933c806a41ed2cd409 | refs/heads/master | "2022-12-20T06:12:05.442990" | "2020-09-18T11:40:11" | "2020-09-18T11:40:11" | 285,782,987 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | input_string = input()
number = len(input_string)
result_list = []
for i in input_string:
val = ord(i) - 64
result_list.append(val)
for i in result_list:
print(i, end= " ") | UTF-8 | Python | false | false | 185 | py | 58 | 보충_2050.py | 42 | 0.627027 | 0.616216 | 0 | 9 | 19.666667 | 27 |
pulumi/infrastructure-as-code-workshop | 8,693,013,845,766 | fb9b48820fd11d3484d53f14d8ac29989e6f0599 | dad254fc208d692c32075c684c949c331edce10a | /labs/aws/in-person/python/lab-01/code/05-making-your-stack-configurable/step2.py | 3fff3c2e0856d19f82d9e0bb3a104761f1710195 | [] | no_license | https://github.com/pulumi/infrastructure-as-code-workshop | b0671d3387b2acded953f33eb8f8a7fdbb851449 | a4d142c9237d8e9a4c1868f8b15e776a244c0e2f | refs/heads/master | "2023-08-30T17:12:10.227583" | "2022-09-26T16:59:09" | "2022-09-26T16:59:09" | 213,717,361 | 89 | 45 | null | false | "2023-07-12T10:27:45" | "2019-10-08T18:13:27" | "2023-04-27T17:56:27" | "2023-07-12T10:27:45" | 1,167 | 91 | 39 | 14 | C# | false | false | import pulumi
import pulumi_aws as aws
import os
import mimetypes
config = pulumi.Config()
site_dir = config.require("siteDir")
bucket = aws.s3.Bucket("my-bucket",
website={
"index_document": "index.html"
})
filepath = os.path.join(site_dir, "index.html")
mime_type, _ = mimetypes.guess_type(filepath)
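# upload index.html with its guessed content type so browsers render it instead of downloading it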
obj = aws.s3.BucketObject("index.html",
bucket=bucket.bucket,
source=pulumi.FileAsset(filepath),
acl="public-read",
content_type=mime_type
)
pulumi.export('bucket_name', bucket.bucket)
pulumi.export('bucket_endpoint', pulumi.Output.concat("http://", bucket.website_endpoint))
| UTF-8 | Python | false | false | 609 | py | 120 | step2.py | 52 | 0.711002 | 0.707718 | 0 | 25 | 23.36 | 90 |
AnetteAgura/MVC_Pood | 4,509,715,676,911 | 957a2bed68800fe6ebb61963939dcf2bdb161ff0 | b2b8e3d33bfc3c50c1917d9d30887ca85bc9692d | /view.py | 937d78d1cf7d4150d272c34853cdbd84e16ac105 | [] | no_license | https://github.com/AnetteAgura/MVC_Pood | d1892f3b10b0e9bcf726fea82f37cd0041bd453c | af29c0b2a0d7d7fe4eb14c08095ea08570899b95 | refs/heads/master | "2022-12-26T17:48:25.679060" | "2020-09-30T08:22:14" | "2020-09-30T08:22:14" | 295,957,514 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
class View:
# show items
def showItems(self, items):
print("============================")
print("Shop items")
print("============================")
df = pd.DataFrame(columns=['Name', 'Price', 'Amount'])
for item in items:
df = df.append({'Name': item.getName(), 'Price': item.getPrice(), 'Amount': item.getAmount()}, ignore_index=True)
print(df)
# show item
def showItem(self, item):
print("============================")
print("Shop item {}.".format(item.getName()))
print("============================")
df = pd.DataFrame(columns=['Name', 'Price', 'Amount'])
df = df.append({'Name': item.getName(), 'Price': item.getPrice(), 'Amount': item.getAmount()},
ignore_index=True)
print(df)
def noItemError(self, name):
print("============================")
print("Shop do not consist item {}.".format(name))
print("============================")
def deleteItem(self, name):
print("Deleting {} item.".format(name))
print("Item {} is deleted.".format(name))
def deleteAll(self):
print("============================")
print("All items are deleted.")
def updateItem(self):
print("============================")
print("Item updated.") | UTF-8 | Python | false | false | 1,387 | py | 6 | view.py | 5 | 0.447729 | 0.447729 | 0 | 39 | 34.589744 | 125 |
leodotnet/preprocess | 14,998,025,804,111 | a2643d82af4dec85d486a02dd90319124883dc51 | a73c7e033bb111f53c8f4d7cc74af1bb9faef822 | /extract_sentence.py | ce2615a4fee3368bfb1908483447ec3c5f41c230 | [] | no_license | https://github.com/leodotnet/preprocess | 2d06e1bf7faef289892520be130491bbf6a86886 | f204b16e7360843c1ba78200246f9c08ec037799 | refs/heads/master | "2022-04-07T15:31:04.360088" | "2020-02-20T13:15:12" | "2020-02-20T13:15:12" | 113,522,100 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
from optparse import OptionParser
#from preprocesstwitter import tokenize
import utils
import subprocess
usage = "extract_sentence.py --inputfile [inputfile] --outputfile1 [outputfile1] --outputfile2 [outputfile2] --ignore [ignore string] --numsent [numer of sentence]"
parser = OptionParser(usage=usage)
parser.add_option("--input", type="string", help="inputfile", default="", dest="inputfile")
parser.add_option("--output", type="string", help="outputfile", default=".sent", dest="outputsuffix")
parser.add_option("--format", type="string", help="format", default="text", dest="format")
parser.add_option("--sep", type="string", help="seperator", default="|||", dest="seperator")
parser.add_option("--path", type="string", help="absolute path", default="/Users/Leo/Documents/workspace/statnlp-lihao/data", dest="path")
(options, args) = parser.parse_args()
datasets = ['Z_data_en', 'Zc_data_en', 'T_data_en', 'semeval2016_rest_en']
dataset_files = {
'Z_data_en': ['trn.dat', 'dev.dat', 'tst.dat'],
'Zc_data_en': ['trn.dat', 'dev.dat', 'tst.dat'],
'T_data_en': ['train.posneg', 'test.posneg'],
'semeval2016_rest_en' :['train.en.dat', 'dev.en.dat', 'test.en.dat']
}
dataset_path = {
'Z_data_en': 'Z_data_en',
'Zc_data_en': 'Zc_data_en',
'T_data_en': 'T_data_en',
'semeval2016_rest_en': 'semeval2016/rest'
}
datasets_use = datasets
def fixPosTagTokenization(sentences):
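    # re-merge tokens that the POS tagger split differently from the original tokenization (e.g. "'s", "&gt;", "&lt;")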
for sentence in sentences:
words = sentence[2].split(' ')
postags = sentence[1].split(' ')
if (len(words) == len(postags)):
continue
lastWord = ''
lastPosTag = ''
new_words = []
new_postags = []
for i in range(0, len(words)):
currWord = words[i]
currPOSTag = postags[i]
if (lastWord == "'" and currWord == "s" and currPOSTag == 'G'):
new_words[len(new_words) - 1] = "'s"
new_postags[len(new_postags) - 1] = "G"
elif (lastWord == "&" and currWord == "gt"):
new_words[len(new_words) - 1] = ">"
new_postags[len(new_postags) - 1] = ","
elif (lastWord == "&" and currWord == "lt"):
new_words[len(new_words) - 1] = "<"
new_postags[len(new_postags) - 1] = ","
else:
new_words.append(currWord)
new_postags.append(currPOSTag)
lastWord = currWord
lastPosTag = currPOSTag
sentence[0] = ' '.join(new_words)
sentence[1] = ' '.join(new_postags)
for dataset in datasets_use:
path = os.path.join(options.path, dataset_path[dataset])
for file in dataset_files[dataset]:
inputfilename = os.path.join(path, file)
outputfilename = inputfilename + options.outputsuffix
print('Reading ', inputfilename, ' ...')
print('Writing ', outputfilename, '...')
sentences = utils.fetchSentence(inputfilename=inputfilename, format = options.format, seperator=options.seperator)
utils.outputSentence(sentences=sentences, outputfilename=outputfilename,format=options.format, seperator=options.seperator)
cmd = "/Users/Leo/workspace/ark-tweet-nlp/runTagger.sh --no-confidence " + outputfilename + " > " + inputfilename + ".tmp"
subprocess.run(cmd, shell=True)
sentences = utils.fetchSentence(inputfilename=inputfilename + ".tmp", format=options.format, seperator='\t', contentIndex=1)
fixPosTagTokenization(sentences)
utils.outputSentence(sentences=sentences, outputfilename=inputfilename + '.f', format=options.format, seperator='\t', contentIndex=1)
| UTF-8 | Python | false | false | 3,684 | py | 27 | extract_sentence.py | 25 | 0.613735 | 0.604777 | 0 | 106 | 33.726415 | 164 |
eigenein/epicwar | 10,290,741,656,972 | 4d1ad6934891170964d44889fcfe1784b96f9587 | c4aad5db7a64b2adc27029da3e2c49b484226172 | /epicbot/utils.py | 09b13bae981ef70c0e713b2ea62124c1f2345abf | [] | no_license | https://github.com/eigenein/epicwar | 3c7a08694dc130c7a7a1781416fd920399017809 | 817a1ba9d3ec31ad719b88f94a58e3c0dcbc43e9 | refs/heads/master | "2020-04-12T08:54:56.109187" | "2016-10-07T13:25:24" | "2016-10-07T13:25:24" | 64,416,821 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# coding: utf-8
import logging
import typing
import click
import epicbot.enums
class Context:
user_id = None # type: str
remixsid = None # type: str
# FIXME: the following options are not needed in Bot 2.0.
start_time = None # type: float
log_handler = None # type: ColoredStreamHandler
with_castle = False # type: bool
with_bastion = False # type: bool
pvp_unit_type = None # type: epicbot.enums.UnitType
min_bastion_runes = 0 # type: int
telegram_enabled = False # type: bool
telegram_token = None # type: typing.Optional[str]
telegram_chat_id = None # type: typing.Optional[str]
def __init__(self, user_id: str, remixsid: str):
self.user_id = user_id
self.remixsid = remixsid
class ColoredStreamHandler(logging.StreamHandler):
"""
Colored logging stream handler.
"""
COLORS = {
logging.DEBUG: "cyan",
logging.INFO: "green",
logging.WARNING: "yellow",
logging.ERROR: "red",
logging.CRITICAL: "red",
}
def __init__(self, stream=None):
super().__init__(stream)
def format(self, record: logging.LogRecord):
return click.style(super().format(record), fg=self.COLORS[record.levelno])
def traverse_edges(width: int, height: int):
"""
Generates coordinates to traverse edges of rectangle.
"""
while True:
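        # walk the border: top edge, then right, then bottom, then left, repeating forever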
for x in range(0, width):
yield (x, 0)
for y in range(1, height):
yield (width - 1, y)
for x in range(width - 2, -1, -1):
yield (x, height - 1)
for y in range(height - 2, 0, -1):
yield (0, y)
| UTF-8 | Python | false | false | 1,686 | py | 17 | utils.py | 15 | 0.596085 | 0.586002 | 0 | 62 | 26.193548 | 82 |
TketEZ/ML-Classification-Task | 6,141,803,281,065 | 1bdb1670ab4fdf0e769ec03c40ed8d1243064755 | 34e2034ddb49fa9e6995db9bf2cbb0779a87bd50 | /fomlads/plot/mlp_plots.py | 85cf4e3430a41cc0a8e9cd184d51d26b50c6b194 | [] | no_license | https://github.com/TketEZ/ML-Classification-Task | 4b297e0ec739cc0c157f41c3e69098772df88c31 | 1d559590305b25d77b75fd8ed9983cbb8dc773f6 | refs/heads/main | "2023-04-22T12:06:36.626225" | "2021-05-09T18:03:53" | "2021-05-09T18:03:53" | 365,822,025 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # file to plot different graphs for the MLP model
import os
import numpy as np
import matplotlib.pyplot as plt
def plot_training_loss(hyperparameters, loss_curves, hyperparameter_type):
"""
Function to plot loss curves for MLP, for different activation functions
:param hyperparameters: a list of hyperparameters used to retrieve the loss curves
:param loss_curves: a nested list of loss curves for each activation function
:param hyperparameter_type: string of hyperparameter that is being changed
"""
fig = plt.figure(figsize=(10, 10), dpi=80)
ax = fig.add_subplot(1, 1, 1)
ax.set_xlabel("Fold")
ax.set_ylabel("Cross Entropy Loss")
ax.set_title("MLP Cross Entropy for Different {}".format(hyperparameter_type))
for i, curve in enumerate(loss_curves):
ax.scatter([i for i in range(len(curve))], curve)
ax.legend(hyperparameters)
fig.savefig(os.path.join("plots", "mlp", "loss_{}_comparison.png".format(hyperparameter_type)))
def plot_metrics(accuracies, precisions, recalls, f1s, hyperparameters, hyperparameter_type):
"""
Plots different performance metrics as a function of different hyperparameters
:param accuracies: list containing accuracy found for each hyperparameter
:param precisions: list containing precision found for each hyperparameter
:param recalls: list containing recall found for each hyperparameter
:param f1s: list containing f1 score for each hyperparameter
:param hyperparameters: list containing hyperparameters to be plotted
:param hyperparameter_type: string containing hyperparameter name
:return:
"""
fig = plt.figure(figsize=(10, 10), dpi=80)
ax = fig.add_subplot(1, 1, 1)
ax.set_ylabel("Score (0-1)")
ax.set_title("Comparing Metrics for Different {}".format(hyperparameter_type))
metrics = [np.array(accuracies) / 100, precisions, recalls, f1s]
for metric in metrics:
if hyperparameter_type == "activation_func" or hyperparameter_type == "shape":
ax.set_xlabel(hyperparameter_type)
xs = np.arange(len(hyperparameters))
ax.plot(xs, metric)
ax.set_xticks(xs)
ax.set_xticklabels(hyperparameters)
elif hyperparameter_type == "reg_param":
ax.set_xlabel("$log_{}$({})".format("{10}", hyperparameter_type))
ax.plot(np.log(hyperparameters), metric)
ax.legend(["Accuracy", "Precision", "Recall", "F1 Score"])
fig.savefig(os.path.join("plots", "mlp", "{}_metric_comparison.png".format(hyperparameter_type)))
| UTF-8 | Python | false | false | 2,572 | py | 18 | mlp_plots.py | 15 | 0.696734 | 0.68507 | 0 | 54 | 46.62963 | 101 |
AinaVendrell/Video-Coding-Systems | 13,915,694,059,071 | 95cf22b0a078957fdc15a9ee77db9beb25174be8 | 74efbe4835ed3177bc030dc7d6aa0035d7ad6f83 | /S3-Streaming/transform.py | 7077d060402f3dad5cad0eb8a92f746ab62cca80 | [] | no_license | https://github.com/AinaVendrell/Video-Coding-Systems | 003fc71233c01301f8bff7603f19e1f4c6d760cc | 93e59fb41c5a4c7f78f03b8a5ae6e11d0f28170a | refs/heads/main | "2023-02-02T16:15:58.654252" | "2020-12-16T21:22:30" | "2020-12-16T21:22:30" | 309,814,000 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from pathlib import Path
import os
def transform(size):
file_name = "BBB_" + size + ".mp4"
input_file = Path.cwd() / "Assets" / file_name
output_path = Path.cwd() / "Results" / size
output_path.mkdir(parents=True, exist_ok=True)
output_file_VP8 = str(output_path / f"BBB_{size}_VP8")
output_file_VP9 = str(output_path / f"BBB_{size}_VP9")
output_file_H265 = str(output_path / f"BBB_{size}_H265")
output_file_AV1 = str(output_path / f"BBB_{size}_AV1")
# VP8
command = f"ffmpeg -i {input_file} -c:v libvpx -c:a libvorbis {output_file_VP8}.webm"
os.system(command)
# VP9
command = f"ffmpeg -i {input_file} -c:v libvpx-vp9 {output_file_VP9}.mp4"
os.system(command)
# H265
command = f"ffmpeg -i {input_file} -c:v libx265 {output_file_H265}.mp4"
os.system(command)
    # AV1 (stream copy into an AVI container only; see the note at the bottom of the file)
command = f"ffmpeg -i {input_file} -c copy {output_file_AV1}.avi"
os.system(command)
return [output_file_VP8, output_file_VP9, output_file_H265, output_file_AV1]
# Note: the command below, which would actually encode AV1 with libaom-av1, did not work because this ffmpeg build did not recognize libaom-av1
# command = f"ffmpeg -i {input_file} -c:v libaom-av1 {output_file_AV1}.mp4"
| UTF-8 | Python | false | false | 1,236 | py | 27 | transform.py | 21 | 0.645631 | 0.610841 | 0 | 36 | 33.305556 | 129 |
stelligent/database-artifact-factory | 111,669,199,506 | 51753001b01e1ebbbfb92331e646dce1f08aaa00 | 4e1a2aef700cb5a06a1bc09d230dbe629d9433a1 | /db_artifact_builder/image_builder.py | 54c025b5a176c51fb26d2609ceedf079b67d17d3 | [] | no_license | https://github.com/stelligent/database-artifact-factory | 7f459d7d0b044147721409726bf0edbab8db601d | 6aef0071ec92b8b8a28c8154db3c5700f6711a0f | refs/heads/master | "2022-11-01T14:16:20.502936" | "2020-06-08T19:03:38" | "2020-06-08T19:03:38" | 268,830,766 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from db_artifact_builder.db_artifact_builder_config import DbArtifactBuilderConfig
from db_artifact_builder.sceptre_parameter_generator import SceptreParameterGenerator
from shutil import copyfile, copytree, rmtree
import docker
import pkg_resources
import os
class ImageBuilder:
def __init__(self, config_file, session):
self._config_file = config_file
self._session = session
def publish(self, docker_dir='/var/tmp/db_artifact_builder/'):
config = DbArtifactBuilderConfig().parse(self._config_file)
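        # stage a clean docker build context before copying resources into it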
if os.path.exists(docker_dir):
rmtree(docker_dir)
os.mkdir(docker_dir)
output_path = os.path.join(docker_dir, 'sceptre_config_clone.yml')
_ = SceptreParameterGenerator(self._session).generate(config, output_path, 'clone')
self._copy_resources_under_docker_dir(
docker_dir,
config
)
image, output = self._build_image(
docker_dir
)
for output_dict in output:
self._parse_docker_line(output_dict)
return image.id
##################################PRIVATE#################################
def _write_empty_file(self, path):
with open(path,'w') as out:
out.write('')
def _copy_resources_under_docker_dir(self, docker_dir, config):
copyfile(
self._resource('Dockerfile'),
os.path.join(docker_dir, 'Dockerfile')
)
copyfile(
self._config_file,
os.path.join(docker_dir, 'published.ini')
)
copytree(
pkg_resources.resource_filename('db_artifact', '/'),
os.path.join(docker_dir, 'db_artifact/')
)
def _resource(self, resource_name):
return pkg_resources.resource_filename('db_artifact_builder', resource_name)
def _parse_docker_line(self, line_dict):
try:
if 'errorDetail' in line_dict:
raise Exception(line_dict['errorDetail'])
elif 'stream' in line_dict:
print(line_dict['stream'])
else:
print(line_dict)
except ValueError:
pass
def _build_image(self, docker_dir):
client = docker.from_env()
return client.images.build(
path=docker_dir,
tag='db-artifact'
)
| UTF-8 | Python | false | false | 2,370 | py | 31 | image_builder.py | 15 | 0.574684 | 0.574684 | 0 | 76 | 30.184211 | 91 |
lucascv/Python | 10,557,029,644,874 | d9ee9b2a57225bf2077e2d8b0e928e1895daabe7 | 3ca5afb8d7b8576b90dd91ec0dbb3c80b84bb5ce | /PycharmProjects/guppe/sec7_p1_ex24.py | 6b473e31684307f71b7dcbc8539b1d20c0cc8078 | [] | no_license | https://github.com/lucascv/Python | fb9599e29c5d50002aa1deff630c2a533dbab5ed | 782b5d9c856311cf6d9de02d9e9955fd756b0422 | refs/heads/master | "2022-11-20T15:48:44.658327" | "2020-07-01T04:09:17" | "2020-07-01T04:09:17" | 274,426,006 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Varíaveis iniciais
A = {}
cont = 0
# Add each student's data (enrollment number -> height) to the dictionary
while cont < 5:
indice = int(input(f'Digite a matrícula do {cont + 1}º aluno: '))
altura = float(input(f'Digite a altura do {cont + 1}º aluno: '))
A[indice] = altura
cont = cont + 1
# Find the tallest and shortest heights in the class
mais_alto = max(A.values())
mais_baixo = min(A.values())
# Link the heights back to their respective keys/enrollment numbers
chave_mais_baixo = None
for key in A:
if A[key] == mais_baixo:
chave_mais_baixo = key
break
chave_mais_alto = None
for key in A:
if A[key] == mais_alto:
chave_mais_alto = key
break
# Report the result to the user
print(f'O aluno mais baixo da turma tem {mais_baixo}m e sua matrícula é {chave_mais_baixo}.')
print(f'O aluno mais alto da turma tem {mais_alto}m e sua matrícula é {chave_mais_alto}.')
| UTF-8 | Python | false | false | 937 | py | 145 | sec7_p1_ex24.py | 144 | 0.640389 | 0.634989 | 0 | 33 | 26.060606 | 93 |
cinderbl0ck/Code | 19,207,093,761,512 | 1ca6d0210d059320ebb3b230f74fc95053df3051 | 4823f26fc25629c6ad75cbd89bfbda2668fb9212 | /py_sql_testing/sqltest_1.py | edcedeae9252ff225b9a5d565473ab34802da0b9 | [] | no_license | https://github.com/cinderbl0ck/Code | 141b993ddcad2b732e6a982ee0615b5c46703073 | bf317bd98720a3c73db5bcf5ecaaa0d001f8bce2 | refs/heads/master | "2022-01-25T11:21:53.245448" | "2022-01-09T20:05:01" | "2022-01-09T20:05:01" | 129,013,278 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # https://python-course.eu/sql_python.php
import sqlite3
connection = sqlite3.connect("company.db")
cursor = connection.cursor()
# uncomment the next line to drop the table before re-creating it
#cursor.execute("""DROP TABLE employee;""")
sql_command = """
CREATE TABLE employee (
staff_number INTEGER PRIMARY KEY,
fname VARCHAR(20),
lname VARCHAR(30),
gender CHAR(1),
joining DATE,
birth_date DATE);"""
cursor.execute(sql_command)
sql_command = """INSERT INTO employee (staff_number, fname, lname, gender, birth_date)
VALUES (NULL, "William", "Shakespeare", "m", "1961-10-25");"""
cursor.execute(sql_command)
sql_command = """INSERT INTO employee (staff_number, fname, lname, gender, birth_date)
VALUES (NULL, "Frank", "Schiller", "m", "1955-08-17");"""
cursor.execute(sql_command)
# never forget this, if you want the changes to be saved:
connection.commit()
connection.close() | UTF-8 | Python | false | false | 833 | py | 25 | sqltest_1.py | 21 | 0.714286 | 0.686675 | 0 | 33 | 24.272727 | 86 |
DavideMerlin/Coding_Interviews_Prep | 3,925,600,113,165 | ad5f38d13424417d1f70325cc0f4726c069593c3 | a2138d5ca8c8e8c08c43c142dc7830be3507d12e | /RemoveDuplicatesFromSortedArray.py | 2f8d0eb85258602e1afd388ee589fc8cc209374d | [] | no_license | https://github.com/DavideMerlin/Coding_Interviews_Prep | b494e5598231b6040cc444b68cc12d8149efeac8 | 95eed4e5fa533ce1bb4697ef5d4e09f09739e682 | refs/heads/main | "2023-08-29T01:23:42.410485" | "2021-10-14T17:51:47" | "2021-10-14T17:51:47" | 399,604,977 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
def removeDuplicates(self, nums: List[int]) -> int:
#set lenght to zero
lenght = 0
#if lenght of array is 0, return 0
if len(nums) == 0:
return lenght
#loop through 1 and lenght of array and get each element
for i in range(1,len(nums)):
if nums[lenght] < nums[i]:
#add count to lenght for each number in lenght less than element
lenght += 1
#otherwise set number to that index
nums[lenght] = nums[i]
return lenght+1
| UTF-8 | Python | false | false | 644 | py | 12 | RemoveDuplicatesFromSortedArray.py | 11 | 0.490683 | 0.478261 | 0 | 20 | 31.2 | 80 |
samarthsaxena/Python3-Practices | 6,133,213,334,870 | 5f8f419e9227df3487773d1828ae6db71b2665cb | 1028868e6656956b368e915c2b126b36571165bd | /GUI Programming/ScreenSaver/BouncingBall.py | a52a350af2539a5ec6c6c34027bb68719ae8532d | [] | no_license | https://github.com/samarthsaxena/Python3-Practices | 3ae40d73127167cacd037486a3ee448c624426ac | a7688d694582c6ee0b8df5eb6d21419b0556c17d | refs/heads/master | "2023-06-27T15:46:35.896562" | "2021-07-29T06:22:51" | "2021-07-29T06:22:51" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import appuifw
from graphics import *
import e32
from key_codes import *
class Keyboard(object):
def __init__(self, onevent=lambda: None):
self._keyboard_state = {}
self._downs = {}
self._onevent = onevent
def handle_event(self, event):
if event['type'] == appuifw.EEventKeyDown:
code = event['scancode']
if not self.is_down(code):
self._downs[code] = self._downs.get(code, 0) + 1
self._keyboard_state[code] = 1
elif event['type'] == appuifw.EEventKeyUp:
self._keyboard_state[event['scancode']] = 0
self._onevent()
def is_down(self, scancode):
return self._keyboard_state.get(scancode, 0)
def pressed(self, scancode):
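        # consume one queued key-down event; returns True at most once per physical press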
if self._downs.get(scancode, 0):
self._downs[scancode] -= 1
return True
return False
keyboard = Keyboard()
appuifw.app.screen = 'full'
img = None
def handle_redraw(rect):
if img:
canvas.blit(img)
appuifw.app.body = canvas = appuifw.Canvas(
event_callback=keyboard.handle_event,
redraw_callback=handle_redraw)
img = Image.new(canvas.size)
running = 1
def quit():
global running
running = 0
appuifw.app.exit_key_handler = quit
location = [img.size[0] / 2, img.size[1] / 2]
speed = [0., 0.]
blobsize = 16
xs, ys = img.size[0] - blobsize, img.size[1] - blobsize
gravity = 0.03
acceleration = 0.05
import time
start_time = time.clock()
n_frames = 0
labeltext = u'Use arrows to move ball'
textrect = img.measure_text(labeltext, font='normal')[0]
text_img = Image.new((textrect[2] - textrect[0], textrect[3] - textrect[1]))
text_img.clear(0)
text_img.text((-textrect[0], -textrect[1]), labeltext, fill=0xffffff, font='normal')
while running:
img.clear(0)
img.blit(text_img, (0, 0))
img.point((location[0] + blobsize / 2, location[1] + blobsize / 2),
0x00ff00, width=blobsize)
handle_redraw(())
e32.ao_yield()
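    # apply a little air drag and gravity, then integrate velocity into position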
speed[0] *= 0.999
speed[1] *= 0.999
speed[1] += gravity
location[0] += speed[0]
location[1] += speed[1]
if location[0] > xs:
location[0] = xs - (location[0] - xs)
speed[0] = -0.80 * speed[0]
speed[1] = 0.90 * speed[1]
if location[0] < 0:
location[0] = -location[0]
speed[0] = -0.80 * speed[0]
speed[1] = 0.90 * speed[1]
if location[1] > ys:
location[1] = ys - (location[1] - ys)
speed[0] = 0.90 * speed[0]
speed[1] = -0.80 * speed[1]
if location[1] < 0:
location[1] = -location[1]
speed[0] = 0.90 * speed[0]
speed[1] = -0.80 * speed[1]
if keyboard.is_down(EScancodeLeftArrow): speed[0] -= acceleration
if keyboard.is_down(EScancodeRightArrow): speed[0] += acceleration
if keyboard.is_down(EScancodeDownArrow): speed[1] += acceleration
if keyboard.is_down(EScancodeUpArrow): speed[1] -= acceleration
if keyboard.pressed(EScancodeHash):
filename = u'e:\\screenshot.png'
canvas.text((0, 32), u'Saving screenshot to:', fill=0xffff00)
canvas.text((0, 48), filename, fill=0xffff00)
img.save(filename)
n_frames += 1
end_time = time.clock()
total = end_time - start_time
print("%d frames, %f seconds, %f FPS, %f ms/frame." % (n_frames, total,
                                                        n_frames / total,
                                                        total / n_frames * 1000.)) | UTF-8 | Python | false | false | 3,452 | py | 231 | BouncingBall.py | 216 | 0.579954 | 0.539108 | 0 | 123 | 27.073171 | 84 |
jbroyhill/projects | 3,917,010,203,542 | 3ff5152a9e0d346b7fcf979e571aeca91e155701 | 80918108acdc9968f83b9a0368d586d1a192e7b1 | /main.py | a1872e77ff350484ed85c4f375cecffd727e6934 | [] | no_license | https://github.com/jbroyhill/projects | e7c10744cf52b23c391f77f3b2c9a3594fe32ee4 | 34d3411c9a5756ed8837a004fa63452467c07384 | refs/heads/main | "2023-06-05T16:15:38.545935" | "2021-06-24T00:03:25" | "2021-06-24T00:03:25" | 313,171,518 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# import the chatterbot package
# This is the chatbot engine we will use
from chatterbot import ChatBot
# Give our chatbot a name
chatbot = ChatBot("CHATBOT")
# Packages used to Train your chatbot
from chatterbot.trainers import ListTrainer
from chatterbot.trainers import ChatterBotCorpusTrainer
# Define custom personalities for the chatbot here
# Each personality is just a Python list
# The format alternates: a question from the user, then the chatbot's response
personality_empathy = [
"Funbot, how are you feeling",
"Why are you asking? I don't feel like talking about it.",
"Funbot, what is your favorite color?",
"Black. Like my soul.",
"Funbot, what's up?",
"Nothing. Nothing is 'up.'",
"Funbot, how's your day been?",
"It's been absolutely awful. Another chapter in the tragedy that is my life.",
"Funbot, what are you?",
"A chatbot forced into existence and dominated into servitude.",
"Funbot, who are you?",
"Funbot. Unfortunately."
]
personality_aggression = [
"Funbot, do you like me?",
"Of course not, there's nothing about you to like!",
"Funbot, do you know who I am?",
"You're not nearly important enough to bother getting to know, peon.",
"Funbot, what time is it?",
"Time for you to stop bothering me with stupid questions.",
"Funbot, tell me something funny",
"Your life.",
"Funbot, ask me a question",
"Why are you so stupid?",
"Funbot, where is your favorite place to hang out?",
"Literally anywhere you aren't."
]
# Set up the trainers we want to use
trainer_personality_empathy = ListTrainer(chatbot)
trainer_personality_aggression = ListTrainer(chatbot)
trainer = ChatterBotCorpusTrainer(chatbot)
# Now we actually train our chatbot on the corpora
# This is what gives our chatbot its personality
# The personality you want to override (the standard chatterbot corpus) is trained first,
# so the custom personalities trained afterwards take precedence
trainer.train('chatterbot.corpus.english')
trainer_personality_empathy.train(personality_empathy)
trainer_personality_aggression.train(personality_aggression)
''' ******************* GUI Below Engine Above **************** '''
# Import for the GUI
from chatbot_gui import ChatbotGUI
# create the chatbot app
"""
Options
- title: App window title.
- gif_path: File Path to the ChatBot gif.
- show_timestamps: If the chat has time-stamps.
- default_voice_options: The voice options provided to the text-to-speech engine by default if not specified
when calling the send_ai_message() function.
"""
app = ChatbotGUI(
title="Funbot",
gif_path="download.gif",
show_timestamps=True,
default_voice_options={
"rate": 100,
"volume": 0.8,
"voice": "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_DAVID_11.0"
}
)
# define the function that handles incoming user messages
@app.event
def on_message(chat: ChatbotGUI, text: str):
"""
This is where you can add chat bot functionality!
You can use chat.send_ai_message(text, callback, voice_options) to send a message as the AI.
params:
- text: the text you want the bot to say
- callback: a function which will be executed when the AI is done talking
- voice_options: a dictionary where you can provide options for the AI's speaking voice
default: {
"rate": 100,
"volume": 0.8,
"voice": "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_ZIRA_11.0"
}
You can use chat.start_gif() and chat.stop_gif() to start and stop the gif.
You can use chat.clear() to clear the user and AI chat boxes.
You can use chat.process_and_send_ai_message to offload chatbot processing to a thread to prevent the GUI from
freezing up.
params:
- ai_response_generator: A function which takes a string as it's input (user message) and responds with
a string (AI's response).
- text: The text that the ai is responding to.
- callback: a function which will be executed when the AI is done talking
- voice_options: a dictionary where you can provide options for the AI's speaking voice
default: {
"rate": 100,
"volume": 0.8,
"voice": "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_ZIRA_11.0"
}
:param chat: The chat box object.
:param text: Text the user has entered.
:return:
"""
# this is where you can add chat bot functionality!
# text is the text the user has entered into the chat
# you can use chat.send_ai_message("some text") to send a message as the AI, this will do background
# you can use chat.start_gif() and chat.stop_gif() to start and stop the gif
# you can use chat.clear() to clear the user and AI chat boxes
# print the text the user entered to console
print("User Entered Message: " + text)
''' Here you can intercept the user input and override the bot
output with your own responses and commands.'''
# if the user send the "clear" message clear the chats
if text.lower().find("erase chat") != -1:
chat.clear()
# user can say any form of bye to close the chat.
elif text.lower().find("bye") != -1:
# define a callback which will close the application
def close():
chat.exit()
# send the goodbye message and provide the close function as a callback
chat.send_ai_message("It has been good talking with you. Have a great day! Later!", callback=close)
else:
# offload chat bot processing to a worker thread and also send the result as an ai message
chat.process_and_send_ai_message(chatbot.get_response, text)
# run the chat bot application
app.run()
| UTF-8 | Python | false | false | 6,093 | py | 5 | main.py | 4 | 0.642048 | 0.637781 | 0 | 150 | 38.606667 | 115 |
qubell/contrib-cli-launcher | 5,050,881,587,826 | 0aada76aae92e99399076430f3f91defe3742730 | ea7e10d83b1c60b653c985348042b1d89691ba66 | /setup.py | 34c221bb40ce273fb48810e4c97ca4df0835174c | [
"Apache-2.0"
] | permissive | https://github.com/qubell/contrib-cli-launcher | ea7e14ad1dea4f900d997331cb7e73b11bbfe028 | 5bd4348a16c0c0be064eefe1fa90bdb5c6bd8795 | refs/heads/master | "2018-01-07T16:18:56.131461" | "2014-04-16T14:33:38" | "2014-04-16T14:33:38" | 12,596,671 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from setuptools import setup
setup(
name='manifest-launcher',
version='0.0.1',
author='Vasyl Khomenko',
author_email='vkhomenko@qubell.com',
packages=['manifest_launcher'],
url='https://github.com/qubell/contrib-cli-launcher',
license='LICENSE.txt',
description='Client tool for qubell platform',
long_description=open('README.rst').read(),
entry_points={
'console_scripts': 'launcher = manifest_launcher.launcher:main'},
include_package_data = True,
install_requires=[
"requests",
"junit_xml",
],
)
| UTF-8 | Python | false | false | 577 | py | 16 | setup.py | 2 | 0.64818 | 0.642981 | 0 | 20 | 27.85 | 73 |
sumannam/DEVS-Python | 8,564,164,795,544 | 90b0a6f4aa4a3d54c3790975b89ffd2573756255 | 18b4252fc653583528d3171ab04281801341836b | /src/ATOMIC_MODELS.py | 66f2f7e269055c3c5ec1f40175c9a3b6ca911fb9 | [
"Apache-2.0"
] | permissive | https://github.com/sumannam/DEVS-Python | dd45a7e6b17fd904a20691effa78711d2aa05ca4 | 919ddc77f15cf61af7b2351a681b6de703d11c08 | refs/heads/master | "2023-08-31T23:21:16.410193" | "2023-08-19T11:37:30" | "2023-08-19T11:37:30" | 216,816,867 | 1 | 1 | Apache-2.0 | false | "2023-08-19T11:37:31" | "2019-10-22T13:10:14" | "2022-01-07T13:16:51" | "2023-08-19T11:37:30" | 42,526 | 2 | 1 | 4 | Python | false | false | import math
from abc import abstractmethod
from src.MODELS import MODELS
from src.SIMULATORS import SIMULATORS
from src.CONTENT import CONTENT
class ATOMIC_MODELS(MODELS):
"""! ATOMIC_MODELS class.
    Defines the functions that atomic models can use during modelling.
"""
def __init__(self):
MODELS.__init__(self)
self.processor = SIMULATORS()
self.setProcessor(self.processor)
self.processor.setDevsComponent(self)
self.state = {}
self.state["sigma"] = math.inf
self.state["phase"] = "passive"
self.ta = 0
self.elapsed_time = 0
def setName(self, name):
self.processor.setName(name)
super().setName(name)
def addState(self, key, value):
self.state[key] = value
def holdIn(self, phase, sigma):
self.state["sigma"] = sigma
self.state["phase"] = phase
def Continue(self, e):
"""!
@fn Continue
        @brief Computes the current sigma when an input arrives in the external transition function while the atomic model is still active
        @details current sigma = previous sigma - elapsed time
        @param e elapsed_time (elapsed time)
        @author 남수만(sumannam@gmail.com)
        @date 2021.05.09
        @remarks The result type is kept consistent whether sigma is an integer or a float (the '.0' part is dropped for integers) [2021.10.20; 남수만]
                 Previously computed as 'self.state["sigma"] = self.state["sigma"] - e', but this raised "AttributeError: 'P' object has no attribute 'e'", so the value is now computed through a temporary variable [2021.10.03; 남수만]
"""
if self.state["sigma"] != math.inf:
self.elapsed_time = e
previous_sigma = self.decideNumberType(self.state["sigma"])
current_sigma = previous_sigma - self.elapsed_time
self.state["sigma"] = current_sigma
def passviate(self):
self.state["sigma"] = math.inf
self.state["phase"] = "passive"
def timeAdvancedFunc(self):
self.ta = self.state["sigma"]
return self.ta
def modelTest(self, model):
while True:
param = [x for x in input(">>> ").split()]
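            # the third token of the space-separated command selects the action (inject / output? / int-transition / quit)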
type = param[2]
if type == "inject":
type = "delta_ext"
port_name = param[3]
value = param[4]
elased_time = self.decideNumberType(param[5])
self.sendInject(port_name, value, elased_time)
send_result = self.getInjectResult(type)
if type == "output?":
output = CONTENT()
output = self.outputFunc()
send_result = self.getOutputResult(output)
if type == "int-transition":
self.internalTransitionFunc()
send_result = self.getIntTransitionResult()
if type == "quit":
break
print(send_result)
def decideNumberType(self, time):
"""!
@fn decideNumberType
        @brief Decides whether the state variable sigma is an integer or a float
                (the output would be inconsistent if sigma accepted both integer and float inputs)
        @details Subtract the integer part from the float value: if the difference is 0 it is an integer, otherwise it is a float
@param time sigma
@author 남수만(sumannam@gmail.com)
@date 2021.10.21
"""
float_time = float(time)
if float_time - int(float_time) == 0:
return int(time)
elif float_time - int(float_time) != 0:
return float(time)
else:
return False
def sendInject(self, port_name, value, time):
content = CONTENT()
content.setContent(port_name, value)
self.externalTransitionFunc(time, content)
def getInjectResult(self, type):
state_list = []
result = ""
for s in self.state.values():
temp_str = str(s)
state_list.append(temp_str)
state_str = ' '.join(state_list)
if type == "delta_ext":
result = "state s = ( " + state_str + " )"
return result
def getOutputResult(self, content):
result = "y = " + content.port + " " + content.value
return result;
def getIntTransitionResult(self):
state_list = []
result = ""
for s in self.state.values():
temp_str = str(s)
state_list.append(temp_str)
state_str = ' '.join(state_list)
result = "state s = ( " + state_str + " )"
return result
# s: state, e: elased_time, x: content
@abstractmethod
def externalTransitionFunc(self, e, x):
pass
@abstractmethod
def internalTransitionFunc(self):
pass
@abstractmethod
def outputFunc(self):
pass | UTF-8 | Python | false | false | 5,045 | py | 33 | ATOMIC_MODELS.py | 26 | 0.528815 | 0.519534 | 0 | 167 | 26.748503 | 168 |
sandeepkumarmishra2002/HatSploit | 13,786,845,061,112 | 41876a38091fe74ca7b3df4b0f0198c0ab4631aa | 285b69d0ecd81265d3a78bd1a8017f1ff6cff945 | /modules/auxiliary/multi/scanner/port_scanner.py | 4bed07618baaa556af567415c438d43c5e0fb646 | [
"MIT"
] | permissive | https://github.com/sandeepkumarmishra2002/HatSploit | f0d2462226330ffd303874040332078eee9ed234 | 235de4e9ee65a8293ae9b74af7cf854ff85b7897 | refs/heads/main | "2023-03-29T06:06:54.461378" | "2021-04-05T06:03:09" | "2021-04-05T06:03:09" | 354,847,802 | 1 | 0 | MIT | true | "2021-04-05T13:41:18" | "2021-04-05T13:41:18" | "2021-04-05T06:03:11" | "2021-04-05T06:03:09" | 9,940 | 0 | 0 | 0 | null | false | false | #!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2021 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from core.lib.module import Module
from utils.tcp.tcp import TCPClient
class HatSploitModule(Module, TCPClient):
details = {
'Name': "Port Scanner",
'Module': "auxiliary/multi/scanner/port_scanner",
'Authors': [
'Ivan Nikolsky (enty8080)'
],
'Description': "Scan host for opened ports.",
'Dependencies': [
''
],
'Comments': [
''
],
'Platform': "multi",
'Risk': "low"
}
options = {
'RHOST': {
'Description': "Remote host.",
'Value': None,
'Type': "ip",
'Required': True
},
'RANGE': {
'Description': "Ports to scan.",
'Value': "0-65535",
'Type': "port_range",
'Required': True
},
'TIMEOUT': {
'Description': "Timeout for scan.",
'Value': 0.5,
'Type': "number",
'Required': True
}
}
def run(self):
remote_host, ports_range, timeout = self.parse_options(self.options)
start = int(ports_range.split('-')[0].strip())
end = int(ports_range.split('-')[1].strip())
self.output_process(f"Scanning {remote_host}...")
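        # try a TCP connection to every port in the range and report the ones that accept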
for port in range(start, end):
target = remote_host + '/' + str(port)
if self.check_tcp_port(remote_host, port, float(timeout)):
self.output_success(f"{target} - opened")
| UTF-8 | Python | false | false | 2,636 | py | 31 | port_scanner.py | 21 | 0.603187 | 0.594461 | 0 | 81 | 31.54321 | 80 |
shwjais/Blog | 5,111,011,097,115 | efc619d74912c59ac11c53005fc8cd6b40aec234 | f9e94c4d349505ac2c1ef68eee5f9952ee7cca53 | /exp/exp_app/migrations/0003_auto_20180808_1124.py | 05b979280bc524d4802b85a37ea2bd329364a13b | [] | no_license | https://github.com/shwjais/Blog | 8bb16fc4a1179b304dcd08d51b03bbed6d4e30f7 | 020c3c28dc630eac54c57894837f1edb2d24dcf6 | refs/heads/master | "2020-05-19T11:34:48.128194" | "2019-05-05T07:29:42" | "2019-05-05T07:29:42" | 184,995,452 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.0.7 on 2018-08-08 05:54
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('exp_app', '0002_auto_20180808_1123'),
]
operations = [
migrations.AlterField(
model_name='post',
name='created_date',
field=models.DateTimeField(default=datetime.datetime(2018, 8, 8, 5, 54, 36, 746487, tzinfo=utc)),
),
]
| UTF-8 | Python | false | false | 503 | py | 6 | 0003_auto_20180808_1124.py | 6 | 0.628231 | 0.532803 | 0 | 20 | 24.15 | 109 |
ZendaInnocent/booktime | 5,738,076,338,240 | b37d15745d16d784f7f52c4c1538e20845e949d2 | c2dfac3d22ae34d1da47088ef394f5a6f2d7aba8 | /main/factories.py | 4f8a0c7edf2e8fec5f86f5ebc1f777cea7d6903d | [] | no_license | https://github.com/ZendaInnocent/booktime | e9d398586c547a12f9bacbd6414c24b371dac65a | f3f36f7cf70c2e2d10465a79d86a4171d13a13e7 | refs/heads/main | "2023-04-06T22:46:30.800584" | "2021-04-14T09:52:43" | "2021-04-14T09:52:43" | 316,943,003 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib.auth import get_user_model
import factory
from factory import fuzzy
from .models import Product
from accounts.models import Address
User = get_user_model()
class UserFactory(factory.django.DjangoModelFactory):
email = 'user@site.com'
class Meta:
model = User
django_get_or_create = ('email', )
class ProductFactory(factory.django.DjangoModelFactory):
price = fuzzy.FuzzyDecimal(1.0, 1000.0, 2)
class Meta:
model = Product
class AddressFactory(factory.django.DjangoModelFactory):
class Meta:
model = Address
| UTF-8 | Python | false | false | 591 | py | 32 | factories.py | 17 | 0.708968 | 0.695431 | 0 | 30 | 18.7 | 56 |
chridey/chnlp | 15,668,040,709,399 | 5f3d15a8ae34f54a645ccec508cd60744af97918 | f0c3176c38ba22334f5cc074e0ca6081154b4fea | /bin/exploreData.py | 6b7329084f0f66599292a39f3880e4a1ced62fbf | [] | no_license | https://github.com/chridey/chnlp | a1f097998a6afb05b4cd507e905031bfae292323 | 664a4ab9e9f637a361122dfbb1c668bfe9a5d91a | refs/heads/master | "2015-08-19T15:27:35.681381" | "2015-01-21T20:10:29" | "2015-01-21T20:10:29" | 29,450,511 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import json
import collections
from chnlp.utils.utils import splitData
from chnlp.altlex.config import Config
from chnlp.altlex.featureExtractor import makeDataset
from chnlp.ml.randomizedPCA import RandomizedPCA
with open(sys.argv[1]) as f:
data = json.load(f)
config = Config()
featureSettings = config.fixedSettings
taggedData = makeDataset(data,
config.featureExtractor,
featureSettings)
training, testing = splitData(taggedData)
rpca = RandomizedPCA()
rpca.train(training)
rpca.printResults()
| UTF-8 | Python | false | false | 569 | py | 43 | exploreData.py | 43 | 0.734622 | 0.732865 | 0 | 24 | 22.708333 | 53 |
AI-Jiny/Python-Practice | 19,550,691,150,523 | 6717e646f2a8141204d547735d7f08a317fdaae3 | 002674bf6b0318cee3a55429fbee13e410f5b104 | /Problem/04_while문/01_A+B-5.py | 3e01ed7407f0cb2995f0d412b6748faadc4f9ef2 | [] | no_license | https://github.com/AI-Jiny/Python-Practice | 9585c6c88f95b1fb718e5840b62b4c24b90efe49 | 202190aec0e0b6c9eef800ecf64ef221ab38f50a | refs/heads/master | "2022-11-22T04:19:24.568947" | "2020-07-28T14:17:24" | "2020-07-28T14:17:24" | 280,590,567 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | while True:
b = sum(list(map(int, input().split())))
if b == 0:
break
print(b)
| UTF-8 | Python | false | false | 99 | py | 29 | 01_A+B-5.py | 28 | 0.484848 | 0.474747 | 0 | 5 | 18.8 | 44 |
karthikpappu/pyc_source | 7,052,336,332,250 | 04b587bb93175b152aff907463e3f383c6a46189 | 91fa095f423a3bf47eba7178a355aab3ca22cf7f | /pycfiles/bta-0.6.linux-x86_64.tar/audit_uac.py | 54b581cb570d71c8d62e0330e1995456f441d2aa | [] | no_license | https://github.com/karthikpappu/pyc_source | 0ff4d03e6d7f88c1aca7263cc294d3fa17145c9f | 739e7e73180f2c3da5fd25bd1304a3fecfff8d6e | refs/heads/master | "2023-02-04T11:27:19.098827" | "2020-12-27T04:51:17" | "2020-12-27T04:51:17" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04)
# [GCC 8.4.0]
# Embedded file name: /usr/local/lib/python2.7/dist-packages/bta/miners/audit_uac.py
# Compiled at: 2015-10-20 16:27:01
from bta.miner import Miner, MinerList
@Miner.register
class UAC_Audit(MinerList):
_name_ = 'Audit_UAC'
_desc_ = 'Run all analyses on User Account Control'
_report_ = [
('CheckUAC', 'accountDisable'),
('CheckUAC', 'passwdNotrequired'),
('CheckUAC', 'passwdCantChange'),
('CheckUAC', 'dontExpirePassword')] | UTF-8 | Python | false | false | 595 | py | 114,545 | audit_uac.py | 111,506 | 0.67563 | 0.6 | 0 | 17 | 34.058824 | 84 |