# === userchaitu/userchaitu :: testfiles/conftest.py (no license) ===
import pytest
from selenium import webdriver
@pytest.fixture(scope='class')
def test_setup(request):
options = webdriver.ChromeOptions()
options.add_argument("--start-maximized")
driver = webdriver.Chrome(executable_path="C:\\Users\\user\\eclipse\\chromedriver_win32\\chromedriver.exe",
options=options)
driver.get("https://rahulshettyacademy.com/angularpractice/")
request.cls.driver = driver
yield
driver.close()
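# Usage note: a test class marked @pytest.mark.usefixtures("test_setup") can
# reach the browser as self.driver, which the fixture sets via request.cls.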
# === DiyanKalaydzhiev23/fundamentals---python :: RegEx- exsercise/Extract Eamils.py (MIT license) ===
import re
pattern = r"(^|(?<=\s))[a-zA-Z0-9]+[\._-]?[a-zA-Z0-9]+@[a-z]+-?[a-z]+(\.[a-z]+)+"
valid_emails = [match.group() for match in re.finditer(pattern, input())]

for email in valid_emails:
    print(email)
# === YourboyTizzyT/HW_codecraft_2018 :: ecs/predictor.py (no license) ===
# -*- coding: utf-8 -*-
import datetime,time,knapsack,math
class PhyInfo:
def __init__(self, cpu, mem, sto):
self.cpu = cpu
self.mem = mem
self.sto = sto
class VMInfo:
def __init__(self):
self.CPU = []
self.MEM = []
self.ID = []
def add_VM(self,VMlist):
self.ID.append(VMlist[0])
self.MEM.append(int(int(VMlist[2])/1024))
self.CPU.append(int(VMlist[1]))
def sort(self):
self.ID=list(reversed(self.ID))
self.MEM=list(reversed(self.MEM))
self.CPU=list(reversed(self.CPU))
def exponential_smoothing(l,a,b):
avg=Math.avg(l)
s,t = [0*i for i in range(len(l))],[0*i for i in range(len(l))]
for i in range(1, len(s)):
s[i] = a*l[i]+(1-a)*(s[i-1]+t[i-1])
if s[i]<0:s[i]=avg
t[i] = b*(s[i]-s[i-1])+(1-b)*t[i-1]
return s,t
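# Note: exponential_smoothing() implements Holt's double (linear-trend)
# smoothing: s[i] = a*x[i] + (1-a)*(s[i-1] + t[i-1]) tracks the level and
# t[i] = b*(s[i] - s[i-1]) + (1-b)*t[i-1] tracks the trend, so a k-step-ahead
# forecast is s[-1] + k*t[-1], which is what predict_vm() uses below.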
class Math:
    @staticmethod
    def avg(l):
        return float(sum(l))/len(l)
    @staticmethod
    def variance(l):
        s1 = 0
        s2 = 0
        for i in l:
            s1 += i**2
            s2 += i
        return math.sqrt(float(s1)/len(l)-(float(s2)/len(l))**2)
def noise(l):
    l1=[]  # series for week i
l1=Denoising(l)
return l1
def Denoising(l):
print Math.avg(l),Math.variance(l)
max_l=Math.avg(l)+3*Math.variance(l)
avg_l=Math.avg(l)
while max(l)>max_l:
l[l.index(max(l))]=max_l
return l
def moving_windows(lists,length=7,step=1):
l=[]
begin=0
while begin<len(lists)-length+1:
l.append(sum(lists[begin:begin+length]))
begin+=step
return l
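# e.g. moving_windows([1, 2, 3, 4, 5], length=3, step=1) -> [6, 9, 12]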
def try_alpha(l,t1):  # t1: number of days to predict
train=l[0:int(-1*t1+1)]
test=l[-1]
a_list=[i*0.1+0.1 for i in range(9)]
b_list=[i*0.1+0.1 for i in range(9)]
error,errors=[],[]
for a in a_list:
for b in b_list:
s,t = exponential_smoothing(train,a,b)
error.append((s[-1]+t[-1]*t1-test)**2)
errors.append(error)
error=[]
mins=[]
mins_index=[]
for i in errors:
mins.append(min(i))
mins_index.append(i.index(min(i)))
return a_list[mins.index(min(mins))],b_list[mins_index[mins.index(min(mins))]]
def predict_vm(ecs_lines, input_lines):
# Do your work from here#
result = []
if ecs_lines is None:
print 'ecs information is none'
return result
if input_lines is None:
print 'input file information is none'
return result
    # read the information from the input file
values=input_lines[0].split(" ")
phyInfo=PhyInfo(int(values[0]),int(values[1]),int(values[2]))
VM_num=int(input_lines[2])
VM=VMInfo()
for i in range(3,3+VM_num):
VM.add_VM(input_lines[i].split(" "))
VM.sort()
DIM=input_lines[3+VM_num+1][0:-1]
predict_daySpan=(str2time(input_lines[3+VM_num+4])-str2time(input_lines[3+VM_num+3][0:-1])).days
    # read the training data
train_begin_time=str2time(ecs_lines[0].split("\t")[-1][0:-1])
train_end_time=str2time(ecs_lines[-1].split("\t")[-1][0:-1])
train_daySpan=(str2time(ecs_lines[-1].split("\t")[-1][0:-1])-str2time(ecs_lines[0].split("\t")[-1][0:-1])).days+1
sequence=[]
start=train_begin_time
ecs=ecs_lines
ecs_list=[]
predict=[]
alpha = 0.60
beta= 0.2
for i in range(len(VM.ID)):
sequence=[j*0 for j in range(train_daySpan)]
for item in ecs:
if item.split("\t")[1]==VM.ID[i]:
sequence[(str2time(item.split("\t")[-1][0:10])-start).days]+=1
else:
ecs_list.append(item)
sequence=noise(sequence)
sequence=moving_windows(sequence,predict_daySpan,1)
#alpha,beta=try_alpha(sequence,predict_daySpan)
s,t = exponential_smoothing(sequence,alpha,beta)
predict.append(int(max(round(s[-1]+t[-1]*predict_daySpan,0),0)))
ecs=ecs_list
ecs_list=[]
result=0
print "predict:",predict
to_kp=knapsack_list(predict,VM)
kp,reuse=knapsack.read_input(to_kp,phyInfo.cpu,phyInfo.mem,DIM)
#if reuse[0]*1.0/PhyInfo.cpu
if ((reuse[0]<0.2*phyInfo.cpu) or (reuse[1]<0.2*phyInfo.mem)) and (len(kp[-1])<=5):
for i in kp[-1]:
predict[VM.ID.index(i)]-=1
kp.pop()
else:
        # add virtual machines to the last physical server
for i in range(len(VM.CPU)):
x=predict[i]//7+1
while(reuse[0]>=VM.CPU[i] and reuse[1]>=VM.MEM[i]):
x-=1
if x==0:
break
reuse[0]-=VM.CPU[i]
reuse[1]-=VM.MEM[i]
kp[-1].append(VM.ID[i])
predict[i]+=1
result=result_writer1(predict,VM)
result+=result_writer2(kp)
return result
def knapsack_list(num_list,vminfo):
cpu,mem,vmid=[],[],[]
for i in range(len(num_list)):
if num_list[i]!=0:
for j in range(num_list[i]):
cpu.append(vminfo.CPU[i]),mem.append(vminfo.MEM[i]),vmid.append(vminfo.ID[i])
return [cpu,mem,vmid]
def result_writer1(num_list,vminfo):
out=[]
out.append(str(sum(num_list)))
for i in range(len(num_list)):
out.append(vminfo.ID[i]+' '+str(num_list[i]))
return out
def result_writer2(lists):
out=[]
out.append('\n'+str(len(lists)))
n=0
for i in lists:
n+=1
lines=str(n)+' '
dic = {}
for j in i:
if i.count(j)>=1:
dic[j] = i.count(j)
for k in dic.items():
lines+=k[0]+' '+str(k[1])+' '
out.append(lines.strip())
del lines
return out
def str2time(inputStr):
    return datetime.datetime.strptime(inputStr[0:10],"%Y-%m-%d")
# === Nejel/coursera-python-specialization-repository :: 3-access-web-data/parse2.py (no license) ===
# To run this, you can install BeautifulSoup
# https://pypi.python.org/pypi/beautifulsoup4
# Or download the file
# http://www.py4e.com/code3/bs4.zip
# and unzip it in the same directory as this file
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
total = 0
import ssl
import re
pagecount = 0
tagcount = 0
html2 = ()
currenttag = ()
sampleurl = 'http://py4e-data.dr-chuck.net/comments_42.xml'
mainurl = 'http://py4e-data.dr-chuck.net/comments_109327.xml'
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
#url = ('http://py4e-data.dr-chuck.net/known_by_Harris.html')
url = mainurl
#html = urllib.request.urlopen(url).read()
# Retrieve all of the anchor tags
#while pagecount < 7 :
html = urllib.request.urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, 'html.parser')
tags = soup('count')
for tag in tags:
if tagcount < 7777:
#print(tag.get('href', None))
print(tag.string)
        total += int(tag.string)
tagcount += 1
#print(tagcount)
'''
elif tagcount == 17:
print(tag.get('href', None))
currenttag = (tag.string)
print(currenttag)
#answer = re.findall('>(.)</a>', currenttag, flags = 0)
#print(answer)
url = (tag.get('href', None))
break
'''
# pagecount += 1
# tagcount = 0
# continue
#else:
print(currenttag)
print(total)
# === avneetkaur1103/workbook :: Algorithms/Greedy/job_sequencing.py (no license) ===
""" Given an array of jobs where every job has a deadline and associated profit if the job is finished before the deadline.
It is also given that every job takes single unit of time, so the minimum possible deadline for any job is 1.
How to maximize total profit if only one job can be scheduled at a time. """
import operator
def schedule(arr, n_slots):
arr = sorted(arr, key=operator.itemgetter(2), reverse=True)
result = list()
slots = [True] * n_slots
for id, deadline, profit in arr:
for i in range(deadline-1, -1, -1):
if slots[i]:
result.append((id, i+1, profit))
slots[i] = False
break
print('Result ', result)
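# Greedy strategy: after the O(n log n) sort by descending profit, each job
# goes into the latest still-free slot at or before its deadline, giving
# O(n * d) placement work overall (d = number of slots).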
# Driver code
arr = [['a', 2, 100], # Job Array
['b', 1, 19],
['c', 2, 27],
['d', 1, 25],
['e', 3, 15]]
print("Following is maximum profit sequence of jobs")
schedule(arr, 3)
# === kopyshev/HackerRankCheckIO :: Others/The_Devil_s_WiFi.py (no license) ===
from decimal import Decimal, getcontext
from math import factorial
getcontext().prec = 1000 #need for sqrt() precision
def devils_wifi(position, n = 800):
# the Chudnovsky brothers' formula
# Each new term of the formula gives an additional 14 digits of pi.
    # by default n = 800
pi = Decimal(0)
k = 0
while n > k:
pi += Decimal((-1)**k*factorial(6*k)*(13591409+545140134*k))/Decimal(factorial(k)**3*(factorial(3*k))*((640320)**3)**k)
k += 1
pi *= Decimal(10005).sqrt()/4270934400
pi = pi**(-1)
return str(pi).split(str(position))[0][-8:]
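# Each Chudnovsky term contributes roughly 14 new digits, so n = 800 terms is
# far more than the 1000-digit Decimal context set above can hold; the
# "password" is the 8 digits of pi immediately before the first occurrence of
# the given digit sequence.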
print(devils_wifi(28638823))
assert devils_wifi(28638823) == "73115956", 'wrong password'
# === DanielPiqueras/Python :: Python/Ebiblio/ebiblio-master/ebiblio-api/Login/models.py (no license) ===
import re
from django.contrib.auth.models import User
from django.contrib.auth.tokens import PasswordResetTokenGenerator
import six
from django.utils.crypto import constant_time_compare
from django.utils.http import base36_to_int
from rest_framework import status, serializers
from rest_framework.permissions import BasePermission, SAFE_METHODS
# Permission classes
class IsUserOwnerOrAdmin(BasePermission):
def has_permission(self, request, view):
if (view.action == 'list' or request.method == 'POST') and \
not request.user.is_staff:
return False
elif request.user.is_authenticated:
return bool(request.user and request.user.is_authenticated)
else:
return False
def check_object_permission(self, user, obj):
return bool(user and user.is_authenticated and
bool(user.is_staff or obj == user))
def has_object_permission(self, request, view, obj):
return self.check_object_permission(request.user, obj)
class IsOwnerOrAdmin(BasePermission):
def has_permission(self, request, view):
if (request.user.pk == view.kwargs['user_id']) or request.user.is_staff:
return True
else:
return False
class IsAdminOrReadListBookOnly(BasePermission):
def has_permission(self, request, view):
return bool(
request.method in SAFE_METHODS or
request.user and
request.user.is_staff
)
class IsAdminOrOnlyRent(BasePermission):
def has_permission(self, request, view):
if (request.method != 'POST' and request.method != 'PUT') and \
not request.user.is_staff:
return False
elif request.user.is_authenticated:
return bool(request.user and request.user.is_authenticated)
else:
return False
# Check if data is valid
def check_is_empty(attrs):
for attribute in attrs:
if attrs[attribute] == 'Not found':
raise serializers.ValidationError(
'The {data} has not been sent'.format(data=attribute))
if isinstance(attrs[attribute], str) is False:
raise serializers.ValidationError(
'The {data} need to be string'.format(data=attribute),
code=status.HTTP_409_CONFLICT)
if len(attrs[attribute]) == 0:
raise serializers.ValidationError(
'The {data} is empty'.format(data=attribute),
code=status.HTTP_409_CONFLICT)
if ' ' in attrs[attribute]:
raise serializers.ValidationError(
'The {data} should not contain spaces'.format(data=attribute))
return True
def check_email(email):
if isinstance(email, str) is False:
raise serializers.ValidationError('The email need to be string')
if re.match(r'^[(a-z0-9\_\-\.)]+@[(a-z0-9\_\-\.)]+\.[(a-z)]{2,15}$',
email.lower()):
return True
else:
raise serializers.ValidationError('Not valid email')
def check_is_in(param):
response = {
True: True,
False: False,
'true': True,
'false': False,
}
if not isinstance(param, str) and not isinstance(param, bool):
raise serializers.ValidationError('Only true or false in string')
if isinstance(param, str):
param = param.lower()
if param not in response:
raise serializers.ValidationError('Only true or false in string')
return response[param]
def check_if_is_valid_post(request):
field = ''
if request.data.get('email', '') != '':
check_email(request.data['email'])
if len(User.objects.filter(email=request.data['email'])) != 0:
raise serializers.ValidationError(
'This email is being used by another user.')
data_to_check = {
'username': request.data.get('username', 'Not found'),
'password': request.data.get('password', 'Not found'),
'repeat_password': request.data.get('repeat_password', 'Not found')
}
check_is_empty(data_to_check)
check_is_staff = request.data.get('is_staff', False)
is_staff = check_is_in(check_is_staff)
if isinstance(request.data.get('first_name', ''), str):
first_name = request.data.get('first_name', '')
else:
field += 'First name, '
if isinstance(request.data.get('last_name', ''), str):
last_name = request.data.get('last_name', '')
else:
field += 'Last name'
if request.data['password'] != request.data['repeat_password']:
raise serializers.ValidationError(
            'The two passwords need to be identical')
if field != "":
raise serializers.ValidationError(
'Invalid data for {fields}'.format(fields=field))
user = User.objects.create_user(
username=request.data['username'],
password=request.data['password'],
)
user.is_staff = is_staff
user.first_name = first_name
user.email = request.data.get('email', user.email)
user.last_name = last_name
user.save()
return user
def check_if_is_valid_put(instance, request):
field = ""
if request.data.get('email', '') != '':
check_email(request.data['email'])
if len(User.objects.filter(email=request.data['email']).exclude(pk=instance.pk)) != 0:
raise serializers.ValidationError(
'This email is being used by another user.')
check_is_staff = request.data.get('is_staff', False)
check_is_active = request.data.get('is_active', True)
is_staff = check_is_in(check_is_staff)
is_active = check_is_in(check_is_active)
if request.data.get('first_name') is not None and isinstance(
request.data.get('first_name', ''), str):
instance.first_name = request.data.get('first_name',
instance.first_name)
else:
field += 'First name, '
instance.email = request.data.get('email', instance.email)
if request.data.get('last_name') is not None and isinstance(
request.data.get('last_name', ''), str):
instance.last_name = request.data.get('last_name',
instance.last_name)
else:
field += 'Last name, '
if request.data.get('username') is not None:
check_is_empty({'username': request.data.get('username', 'Not found')})
instance.username = request.data.get('username',
instance.username)
else:
field += 'Username, '
if request.data.get('password', 'notFound') is None or request.data.get(
'repeat_password', 'notFound') is None:
field += 'Password'
if field != "":
raise serializers.ValidationError(
'Invalid data for {fields}'.format(fields=field))
if request.data.get('password', '') != '' or request.data.get(
'repeat_password', '') != '':
check_is_empty({'password': request.data.get('password', 'Not found'),
'repeat_password': request.data.get('repeat_password',
'Not found')})
if instance.check_password(request.data.get('old_password', '')) and \
request.data['password'] == request.data['repeat_password']:
instance.set_password(request.data['password'])
elif request.data['password'] != request.data['repeat_password']:
raise serializers.ValidationError(
                'The passwords need to be identical')
else:
raise serializers.ValidationError('Incorrect old passwords')
instance.is_active = is_active
instance.is_staff = is_staff
instance.save()
return instance
class TokenGenerator(PasswordResetTokenGenerator):
def _make_hash_value(self, user, timestamp):
return (six.text_type(user.pk)
+ six.text_type(timestamp)
+ six.text_type(user.is_active))
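    # The hash value binds the token to the user's pk, the timestamp, and the
    # is_active flag, so deactivating the account invalidates existing tokens.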
def check_token(self, user, token):
"""
Check that a password reset token is correct for a given user.
"""
if not (user and token):
return False
# Parse the token
try:
ts_b36, _ = token.split("-")
except ValueError:
return False
try:
ts = base36_to_int(ts_b36)
except ValueError:
return False
# Check that the timestamp/uid has not been tampered with
if not constant_time_compare(self._make_token_with_timestamp(user, ts),
token):
return False
if (self._num_days(self._today()) - ts) > 1:
return False
return True
# === RIT-Space-Exploration/URC-Base-Station :: Software/backend/rover_base_station/migrations/0006_auto_20200105_1252.py (no license) ===
# Generated by Django 2.2.5 on 2020-01-05 17:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rover_base_station', '0005_video'),
]
operations = [
migrations.AlterField(
model_name='video',
name='img',
field=models.FileField(upload_to='images'),
),
]
# === CodecoolKRK20173/erp-mvc-2-ccnoobs :: controller/inventory_controller.py (no license) ===
# everything you'll need is imported:
from view import terminal_view
from model.inventory import inventory
from controller import common
def run():
"""
Starts this module and displays its menu.
* User can access default special features from here.
* User can go back to main menu from here.
Returns:
None
"""
table = inventory.get_inventory_table_from_file()
title_list = ["ID", "NAME", "Manufacturer".upper(), "Year".upper(), "Durability".upper()]
options = ["View records",
"Add record",
"Remove record",
"Update record",
"Which items have not exceeded their durability yet?",
"What are the average durability times for each manufacturer?"]
choice = None
while choice != "0":
choice = terminal_view.get_choice_inner_menu(options, "Inventory manager")
if choice == "1":
terminal_view.print_table(table, title_list)
elif choice == "2":
data_input_not_correct = True
while data_input_not_correct:
record = terminal_view.get_inputs(title_list[1::],"Please provide new item data")
if record[2].isdigit() and record[3].isdigit():
table = inventory.add(table, record)
data_input_not_correct = False
else:
terminal_view.print_error_message("Year and durability should be natural numbers!")
elif choice == "3":
id_to_delete_table = terminal_view.get_inputs(["ID"],"Item to delete")
id_to_delete = id_to_delete_table[0]
table = inventory.remove(table, id_to_delete)
elif choice == "4":
records = terminal_view.get_inputs(title_list,"Edit item")
record_id = records[0]
table = inventory.update(table, record_id, records)
elif choice == "5":
available_items = inventory.get_available_items(table)
terminal_view.print_table(available_items, title_list)
#terminal_view.print_result(available_items, "Available items")
elif choice == "6":
average_durability = inventory.get_average_durability_by_manufacturers(table)
list_from_dict = average_durability.items()
dict_headers = ["MANUFACTURER","DURABILITY"]
terminal_view.print_table(list_from_dict, dict_headers)
# terminal_view.print_result(average_durability, "Average durability by manufacturer")
elif choice != "0":
terminal_view.print_error_message("There is no such choice.")
| UTF-8 | Python | false | false | 2,847 | py | 13 | inventory_controller.py | 13 | 0.600983 | 0.596417 | 0 | 67 | 41.477612 | 103 |
# === 418sec/labml :: labml/utils/cache.py (MIT license) ===
import json
from typing import Callable, Any
from labml import lab
def cache(name: str, loader: Callable[[], Any], file_type: str = 'json') -> Any:
cache_path = lab.get_data_path() / 'cache'
if not cache_path.exists():
cache_path.mkdir(parents=True)
path = cache_path / f'{name}.{file_type}'
if path.exists():
with open(str(path), 'r') as f:
if file_type == 'json':
return json.load(f)
else:
raise ValueError(f'Unknown file type: {file_type}')
else:
value = loader()
with open(str(path), 'w') as f:
if file_type == 'json':
json.dump(value, f)
else:
raise ValueError(f'Unknown file type: {file_type}')
return value
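# Example usage (hypothetical loader; the value is computed once, written to
# lab.get_data_path()/cache/<name>.json, and re-read on later calls):
#     settings = cache('settings', lambda: {'lr': 0.01}, file_type='json')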
def cache_get(name: str, file_type: str = 'json') -> Any:
cache_path = lab.get_data_path() / 'cache'
if not cache_path.exists():
cache_path.mkdir(parents=True)
path = cache_path / f'{name}.{file_type}'
if path.exists():
with open(str(path), 'r') as f:
if file_type == 'json':
return json.load(f)
else:
raise ValueError(f'Unknown file type: {file_type}')
else:
return None
# === Sunil-Yaragoppa/pythonRepo :: restaurant.py (no license) ===
'''Restaurant: Make a class called Restaurant.
The __init__() method for Restaurant should store two attributes: a restaurant_name and a cuisine_type.
Make a method called describe_restaurant() that prints these two pieces of information,
and a method called open_restaurant() that prints a message indicating that the restaurant is open.
Make an instance called restaurant from your class. Print the two attributes individually, and then call both methods.'''
'''Ice Cream Stand: An ice cream stand is a specific kind of restaurant.
Write a class called IceCreamStand that inherits from the Restaurant class you wrote in Exercise 9-1 (page 166) or Exercise 9-4 (page 171).
Either version of the class will work; just pick the one you like better.
Add an attribute called flavors that stores a list of ice cream flavors.
Write a method that displays these flavors. Create an instance of IceCreamStand, and call this method.'''
class Restaurant():
def __init__(self , restaurant_name , cuisine_type):
self.restaurant_name = restaurant_name
self.cuisine_type = cuisine_type
def describe_restaurant(self):
print(self.restaurant_name.title())
print(self.cuisine_type.title())
def open_restaurant(self):
print("The restaurant is open!")
#restaurant = Restaurant('the grand budapest' , 'luxury')
#restaurant.describe_restaurant()
#restaurant.open_restaurant()
class IceCreamStand(Restaurant):
def __init__(self , restaurant_name , cuisine_type):
super().__init__( restaurant_name , cuisine_type)
self.flavours = ['strawberry' , 'chocolate' , 'pista' , 'butter scotch']
def show_flavours(self):
print(self.flavours)
restaurant = IceCreamStand('the grand budapest' , 'luxury')
restaurant.show_flavours()
# === razaldahal/eduinfsys :: book/serializers.py (no license) ===
from rest_framework import serializers
from .models import *
class bookSerializer(serializers.ModelSerializer):
class Meta:
model=book
fields=('serial_no','name','author','publisher','edition')
# === joshuaNewman10/ml :: ml/common/helper/data_generator.py (no license) ===
import json
class DataGenerator:
def __init__(self, training_data_file_path, validation_data_file_path, image_loader):
self.training_data_file_path = training_data_file_path
self.validation_data_file_path = validation_data_file_path
self.image_loader = image_loader
self.training_data = self.load_data(self.training_data_file_path)
def get_validation_data(self):
X = []
y = []
validation_data = self.load_data(self.validation_data_file_path)
for datum in validation_data:
X.append(self.image_loader.load(datum['X']))
y.append(datum['y'])
return (X, y)
def get_training_data(self, batch_size, num_batches):
while True:
training_data = self.load_data(self.training_data_file_path)
for batch_num in range(num_batches):
X = []
y = []
for i in range(batch_size):
training_datum = next(training_data)
X.append(self.image_loader.load(training_datum['X']))
y.append(training_datum['y'])
yield (X, y)
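    # Note: the outer `while True` makes this an endless batch generator
    # (suitable for training loops that expect one); the data file is
    # re-read from the top once num_batches batches have been consumed.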
def load_data(self, data_path):
with open(data_path) as f:
data = map(json.loads, f)
for datum in data:
yield datum
# === mentoring-by-guido/hallo-web :: app.py (no license) ===
from flask import Flask
from flask import request
app = Flask(__name__)
@app.route('/')
def hello():
name = request.args.get('name', default = 'world', type = str)
out = f'Hello, {name}!'
return out
@app.route('/sum')
def sum():
x = request.args.get('x')
y = request.args.get('y')
sum = int(x) + int(y)
return str(sum)
@app.route('/square')
def square():
x = request.args.get('x')
square = int(x) * int(x)
return str(square)
@app.route('/sub')
def subtraction():
x = request.args.get('x')
y = request.args.get('y')
sub = int(x) - int(y)
return str(sub)
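# Example requests against the dev server (assumed default port 5000):
#   GET /?name=Ada    -> "Hello, Ada!"
#   GET /sum?x=2&y=3  -> "5"
#   GET /square?x=4   -> "16"
#   GET /sub?x=7&y=2  -> "5"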
print("changes")
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
# === kavindukk/RRT :: AS_.py (no license) ===
import sys
import numpy as np
class Node():
def __init__(self, name, index, h, graph):
self.node_name = name
self.node_index = index
self.h = h
self.g = 9999
self.f = self.g + self.h
self.graph = graph
self.previous_node = None
A = Node('A', 0, 16, {'B':5, 'C':5})
B = Node('B', 1, 17, {'A':5, "C":4, 'D':3})
C = Node('C', 2, 13, {'A':5, 'B':4, 'D':7, 'E':7, 'H':8})
D = Node('D', 3, 16, {'B':3, 'C':7, 'H':11, 'K':16, 'L':13, 'M':14})
E = Node('E', 4, 16, {'C':7, 'H':5, 'F':4})
F = Node('F', 5, 20, {'E':4, 'G':9})
G = Node('G', 6, 12, {'F':9, 'N':12})
H = Node('H', 7, 11, {'C':8, 'D':11, 'E':5, 'I':3})
I = Node('I', 8, 10, {'H':3, 'J':4})
J = Node('J', 9, 8, {'I':4, 'N':3, 'P':8})
K = Node('K', 10, 4, {'D':16, 'L':5, 'N':7, 'P':4})
L = Node('L', 11, 7, {'D':13, 'K':5, 'M':9, 'O':4})
M = Node('M', 12, 10, {'D':14, 'L':9, 'O':5})
N = Node('N', 13, 7, {'G':12, 'J':3, 'K':7, 'P':7 })
O = Node('O', 14, 5, {'L':4, 'M':5})
P = Node('P', 15, 0, {'J':8, 'K':4, 'N':7})
nodes = [A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P]
current_node = 'A'
open_nodes = ['A']
closed_nodes = []
A.g = 0
A.f = A.g + A.h
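# Standard A*: every node keeps f = g + h. A_star_step() relaxes the current
# node's neighbours, moves the current node from the open to the closed list,
# and then picks the open node with the smallest f as the next current node.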
def A_star_step():
global A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P
global current_node, open_nodes, closed_nodes
for node in list(globals()[current_node].graph.keys()):
# N = globals()[node]
if node not in open_nodes and node not in closed_nodes :
open_nodes.append(node)
if node not in closed_nodes:
if globals()[node].previous_node == None and not A:
globals()[node].previous_node = current_node
globals()[node].g = globals()[current_node].g + globals()[node].graph[current_node]
globals()[node].f = globals()[node].g + globals()[node].h
elif globals()[node].f > globals()[current_node].g + globals()[node].graph[current_node] + globals()[node].h:
globals()[node].previous_node = current_node
globals()[node].g = globals()[current_node].g + globals()[node].graph[current_node]
globals()[node].f = globals()[node].g + globals()[node].h
closed_nodes.append(open_nodes.pop(open_nodes.index(current_node)))
acitve_f = []
for node in open_nodes:
acitve_f.append( globals()[node].f)
i = acitve_f.index(min(acitve_f))
current_node = open_nodes[i]
while 'P' not in list(globals()[current_node].graph.keys()):
# for i in range(4):
A_star_step()
print(current_node)
print(open_nodes)
print(closed_nodes)
A_star_step()
print(P.previous_node)
# === DanielSeehausen/pseudo_smart_random_pairing :: db_interface.py (no license) ===
import json
def read_dict(db_name):
try: #little ducktyping -- catches both file not found and file empty (incase it was erased)
with open(db_name, 'r') as f:
print("updating db...db_name")
return json.load(f)
except (FileNotFoundError, ValueError):
print("creating db...")
return init_pairing_dict(student_arr)
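# NOTE: init_pairing_dict and student_arr are assumed to be supplied by the
# importing module; they are not defined in this file.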
def write_dict(db_name, data):
with open(db_name, 'w+') as f:
json.dump(data, f)
# === meatballhat/box-o-sand :: aoc2020/day03/solution.py (MIT license) ===
import functools
import sys
import typing
class Loc(typing.NamedTuple):
x: int
y: int
def main() -> int:
forest_frame = [list(line.strip()) for line in sys.stdin.readlines()]
frame_width = len(forest_frame[0])
frame_height = len(forest_frame)
all_trees_encountered = []
for slope in [
Loc(x=1, y=1),
Loc(x=3, y=1),
Loc(x=5, y=1),
Loc(x=7, y=1),
Loc(x=1, y=2),
]:
loc = Loc(x=0, y=0)
trees_encountered = 0
while loc.y <= (frame_height - 1):
at_loc = forest_frame[loc.y][loc.x]
if at_loc == "#":
trees_encountered += 1
next_x = (loc.x + slope.x) % frame_width
next_y = loc.y + slope.y
next_loc = Loc(x=next_x, y=next_y)
loc = next_loc
print(
f"(slope right={slope.x} down={slope.y}) trees encountered: {trees_encountered}"
)
all_trees_encountered.append(trees_encountered)
trees_encountered_product = functools.reduce(
lambda x, y: x * y, all_trees_encountered
)
print(f"trees encountered product: {trees_encountered_product}")
return 0
if __name__ == "__main__":
sys.exit(main())
| UTF-8 | Python | false | false | 1,240 | py | 21 | solution.py | 14 | 0.533871 | 0.520161 | 0 | 52 | 22.846154 | 92 |
# === jar3b/py-phias :: orchestra/db/__init__.py (BSD-3-Clause license) ===
from .db_filler import DbFiller
# === JeremyJones/jpmctt :: main.py (no license) ===
"""main.py -- tech test by Jeremy Jones
Given text data representing the instructions sent by various clients
to execute in the international market, create a report that shows:
- Amount in USD settled incoming every day
- Amount in USD settled outgoing every day
- Ranking of entities based on incoming and outgoing amount
Usage: python main.py < data.tsv
"""
from sys import stdin
from models import Solution
def main():
"""Process the data provided on standard input and print reports of
the results to standard output.
"""
sol = Solution()
sol.add_data(stdin)
print("=========================================================")
print(sol.report_amount_settled_every_day())
print("=========================================================")
print(sol.report_rank_entities('incoming'))
print("=========================================================")
print(sol.report_rank_entities('outgoing'))
if __name__ == '__main__':
main()
# === vinifmor/bauh :: bauh/gems/arch/proc_util.py (Zlib license) ===
import multiprocessing
import os
import traceback
from pwd import getpwnam
from typing import Callable, Optional, TypeVar
R = TypeVar('R')
class CallAsUser:
def __init__(self, target: Callable[[], R], user: str):
self._target = target
self._user = user
def __call__(self, *args, **kwargs) -> R:
try:
os.setuid(getpwnam(self._user).pw_uid)
return self._target()
except Exception:
traceback.print_exc()
class WriteToFile:
def __init__(self, file_path: str, content: str):
self._file_path = file_path
self._content = content
def __call__(self, *args, **kwargs) -> bool:
try:
with open(self._file_path, 'w+') as f:
f.write(self._content)
return True
except Exception:
traceback.print_exc()
return False
def exec_as_user(target: Callable[[], R], user: Optional[str] = None) -> R:
if user:
with multiprocessing.Pool(1) as pool:
return pool.apply(CallAsUser(target, user))
else:
return target()
def write_as_user(content: str, file_path: str, user: Optional[str] = None) -> bool:
return exec_as_user(target=WriteToFile(file_path=file_path, content=content),
user=user)
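# Example (assumes the caller has privileges to setuid and that an
# unprivileged user such as 'nobody' exists):
#     ok = write_as_user('hello', '/tmp/out.txt', user='nobody')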
# === MaRDI4NFDI/docker-importer :: src/mardi_importer/zbmath/ZBMathConfigParser.py (no license) ===
from configparser import ConfigParser
import sys
from mardi_importer.importer.Importer import AConfigParser
class ZBMathConfigParser(AConfigParser):
"""Config parser for ZBMath data"""
def __init__(self, config_path):
"""
Args:
config_path (string): path to config file
"""
self.config_path = config_path
self.config = ConfigParser()
def parse_config(self):
"""
Overrides abstract method.
        This method reads a config file containing the config for handling ZBMath data.
Returns:
dict: dict of (config_key, value) pairs extracted from config file
"""
config_dict = {}
self.config.read(self.config_path)
if not "ZBMath" in self.config:
sys.exit("Error: Config file does not contain section ZBMath")
try:
config_dict["out_dir"] = self.config["DEFAULT"]["output_directory"]
        except KeyError:
sys.exit("Error: No output_directory in DEFAULT section of config")
try:
config_dict["tags"] = [
x.strip() for x in self.config["ZBMath"]["tags"].split(",")
]
        except KeyError:
sys.exit("Error: No tags in ZBMath section of config.")
for key in ["raw_dump_path", "processed_dump_path", "split_id"]:
try:
val = self.config["ZBMath"][key]
if val == "None":
config_dict[key] = None
else:
config_dict[key] = val
            except KeyError:
sys.exit("Error: No " + key + " in ZBMath section of config.")
for key in ["from_date", "until_date"]:
try:
val = self.config["ZBMath"][key]
if val == "None":
config_dict[key] = None
else:
config_dict[key] = val
            except KeyError:
sys.exit("Error: No " + key + " in ZBMath section of config.")
return config_dict
# === gxyd/competitive-programming :: journey-to-the-moon.py (no license) ===
#!/usr/bin/python3
n, p = map(int, input().split())
X = []
for i in range(p):
a, b = map(int, input().split())
X.append((a, b))
graph = dict([(i, set()) for i in range(n)])
for i, j in X:
graph[i].add(j)
graph[j].add(i)
def dfs(graph, start):
visited, stack = set(), [start]
while stack:
node = stack.pop()
if node not in visited:
visited.add(node)
stack += list(graph[node] - visited)
return visited
components = []
k = set(graph.keys())
while k:
node = k.pop()
visited = dfs(graph, node)
components.append(visited)
k -= visited
t = [len(i) for i in components]
count = 0
l = 0
for j in range(len(t) - 1, 0, -1):
l += t[j]
count += t[j-1]*l
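# The loop sums t[i]*t[j] over all component pairs i < j, i.e. the number of
# astronaut pairs from different countries: (n**2 - sum(s_i**2)) // 2.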
print(count)
# === thezakman/CTF-Heaven :: Toolz/binwalk/src/binwalk/modules/entropy.py (MIT/Unlicense) ===
# Calculates and optionally plots the entropy of input files.
import os
import math
import zlib
import binwalk.core.common
from binwalk.core.compat import *
from binwalk.core.module import Module, Option, Kwarg
class Entropy(Module):
XLABEL = 'Offset'
YLABEL = 'Entropy'
XUNITS = 'B'
YUNITS = 'E'
FILE_WIDTH = 1024
FILE_FORMAT = 'png'
COLORS = ['g', 'r', 'c', 'm', 'y']
DEFAULT_BLOCK_SIZE = 1024
DEFAULT_DATA_POINTS = 2048
DEFAULT_TRIGGER_HIGH = .95
DEFAULT_TRIGGER_LOW = .85
TITLE = "Entropy"
ORDER = 8
# TODO: Add --dpoints option to set the number of data points?
CLI = [
Option(short='E',
long='entropy',
kwargs={'enabled': True},
description='Calculate file entropy'),
Option(short='F',
long='fast',
kwargs={'use_zlib': True},
description='Use faster, but less detailed, entropy analysis'),
Option(short='J',
long='save',
kwargs={'save_plot': True},
description='Save plot as a PNG'),
Option(short='Q',
long='nlegend',
kwargs={'show_legend': False},
description='Omit the legend from the entropy plot graph'),
Option(short='N',
long='nplot',
kwargs={'do_plot': False},
description='Do not generate an entropy plot graph'),
Option(short='H',
long='high',
type=float,
kwargs={'trigger_high': DEFAULT_TRIGGER_HIGH},
description='Set the rising edge entropy trigger threshold (default: %.2f)' % DEFAULT_TRIGGER_HIGH),
Option(short='L',
long='low',
type=float,
kwargs={'trigger_low': DEFAULT_TRIGGER_LOW},
description='Set the falling edge entropy trigger threshold (default: %.2f)' % DEFAULT_TRIGGER_LOW),
]
KWARGS = [
Kwarg(name='enabled', default=False),
Kwarg(name='save_plot', default=False),
Kwarg(name='trigger_high', default=DEFAULT_TRIGGER_HIGH),
Kwarg(name='trigger_low', default=DEFAULT_TRIGGER_LOW),
Kwarg(name='use_zlib', default=False),
Kwarg(name='display_results', default=True),
Kwarg(name='do_plot', default=True),
Kwarg(name='show_legend', default=True),
Kwarg(name='block_size', default=0),
]
# Run this module last so that it can process all other module's results
# and overlay them on the entropy graph
PRIORITY = 0
def init(self):
self.HEADER[-1] = "ENTROPY"
self.max_description_length = 0
self.file_markers = {}
self.output_file = None
if self.use_zlib:
self.algorithm = self.gzip
else:
self.algorithm = self.shannon
# Get a list of all other module's results to mark on the entropy graph
for (module, obj) in iterator(self.modules):
for result in obj.results:
if result.plot and result.file and result.description:
description = result.description.split(',')[0]
if not has_key(self.file_markers, result.file.name):
self.file_markers[result.file.name] = []
if len(description) > self.max_description_length:
self.max_description_length = len(description)
self.file_markers[result.file.name].append((result.offset, description))
# If other modules have been run and they produced results, don't spam
# the terminal with entropy results
if self.file_markers:
self.display_results = False
if not self.block_size:
if self.config.block:
self.block_size = self.config.block
else:
self.block_size = None
def _entropy_sigterm_handler(self, *args):
print ("Fuck it all.")
def run(self):
self._run()
def _run(self):
# Sanity check and warning if matplotlib isn't found
if self.do_plot:
try:
# If we're saving the plot to a file, configure matplotlib
# to use the Agg back-end. This does not require a X server,
# allowing users to generate plot files on headless systems.
if self.save_plot:
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
except ImportError as e:
binwalk.core.common.warning("Failed to import matplotlib module, visual entropy graphing will be disabled")
self.do_plot = False
for fp in iter(self.next_file, None):
if self.display_results:
self.header()
self.calculate_file_entropy(fp)
if self.display_results:
self.footer()
def calculate_file_entropy(self, fp):
# Tracks the last displayed rising/falling edge (0 for falling, 1 for
# rising, None if nothing has been printed yet)
last_edge = None
# Auto-reset the trigger; if True, an entropy above/below
# self.trigger_high/self.trigger_low will be printed
trigger_reset = True
# Clear results from any previously analyzed files
self.clear(results=True)
# If -K was not specified, calculate the block size to create
# DEFAULT_DATA_POINTS data points
if self.block_size is None:
block_size = fp.size / self.DEFAULT_DATA_POINTS
# Round up to the nearest DEFAULT_BLOCK_SIZE (1024)
block_size = int(block_size + ((self.DEFAULT_BLOCK_SIZE - block_size) % self.DEFAULT_BLOCK_SIZE))
else:
block_size = self.block_size
# Make sure block size is greater than 0
if block_size <= 0:
block_size = self.DEFAULT_BLOCK_SIZE
binwalk.core.common.debug("Entropy block size (%d data points): %d" %
(self.DEFAULT_DATA_POINTS, block_size))
while True:
file_offset = fp.tell()
(data, dlen) = fp.read_block()
if dlen < 1:
break
i = 0
while i < dlen:
entropy = self.algorithm(data[i:i + block_size])
display = self.display_results
description = "%f" % entropy
if not self.config.verbose:
if last_edge in [None, 0] and entropy > self.trigger_low:
trigger_reset = True
elif last_edge in [None, 1] and entropy < self.trigger_high:
trigger_reset = True
if trigger_reset and entropy >= self.trigger_high:
description = "Rising entropy edge (%f)" % entropy
display = self.display_results
last_edge = 1
trigger_reset = False
elif trigger_reset and entropy <= self.trigger_low:
description = "Falling entropy edge (%f)" % entropy
display = self.display_results
last_edge = 0
trigger_reset = False
else:
display = False
description = "%f" % entropy
r = self.result(offset=(file_offset + i),
file=fp,
entropy=entropy,
description=description,
display=display)
i += block_size
if self.do_plot:
self.plot_entropy(fp.name)
def shannon(self, data):
'''
Performs a Shannon entropy analysis on a given block of data.
'''
entropy = 0
if data:
length = len(data)
seen = dict(((chr(x), 0) for x in range(0, 256)))
for byte in data:
seen[byte] += 1
for x in range(0, 256):
p_x = float(seen[chr(x)]) / length
if p_x > 0:
entropy -= p_x * math.log(p_x, 2)
return (entropy / 8)
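    # Note: shannon() divides the accumulated entropy by 8, normalizing the
    # result to [0, 1] (a uniform byte distribution has 8 bits per byte).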
def gzip(self, data, truncate=True):
'''
Performs an entropy analysis based on zlib compression ratio.
This is faster than the shannon entropy analysis, but not as accurate.
'''
# Entropy is a simple ratio of: <zlib compressed size> / <original
# size>
e = float(float(len(zlib.compress(str2bytes(data), 9))) / float(len(data)))
if truncate and e > 1.0:
e = 1.0
return e
def plot_entropy(self, fname):
try:
import matplotlib.pyplot as plt
except ImportError as e:
return
i = 0
x = []
y = []
plotted_colors = {}
for r in self.results:
x.append(r.offset)
y.append(r.entropy)
fig = plt.figure()
# axisbg is depreciated, but older versions of matplotlib don't support facecolor.
# This tries facecolor first, thus preventing the annoying depreciation warnings,
# and falls back to axisbg if that fails.
try:
ax = fig.add_subplot(1, 1, 1, autoscale_on=True, facecolor='black')
except AttributeError:
ax = fig.add_subplot(1, 1, 1, autoscale_on=True, axisbg='black')
ax.set_title(self.TITLE)
ax.set_xlabel(self.XLABEL)
ax.set_ylabel(self.YLABEL)
ax.plot(x, y, 'y', lw=2)
# Add a fake, invisible plot entry so that offsets at/near the
# minimum x value (0) are actually visible on the plot.
ax.plot(-(max(x)*.001), 1.1, lw=0)
ax.plot(-(max(x)*.001), 0, lw=0)
if self.show_legend and has_key(self.file_markers, fname):
for (offset, description) in self.file_markers[fname]:
# If this description has already been plotted at a different offset, we need to
# use the same color for the marker, but set the description to None to prevent
# duplicate entries in the graph legend.
#
# Else, get the next color and use it to mark descriptions of
# this type.
if has_key(plotted_colors, description):
color = plotted_colors[description]
description = None
else:
color = self.COLORS[i]
plotted_colors[description] = color
i += 1
if i >= len(self.COLORS):
i = 0
ax.plot([offset, offset], [0, 1.1], '%s-' % color, lw=2, label=description)
ax.legend(loc='lower right', shadow=True)
if self.save_plot:
self.output_file = os.path.join(os.getcwd(), os.path.basename(fname)) + '.png'
fig.savefig(self.output_file)
else:
plt.show()
| UTF-8 | Python | false | false | 11,215 | py | 706 | entropy.py | 336 | 0.531342 | 0.523852 | 0 | 317 | 34.375394 | 123 |
# === UCL/scikit-surgeryfred :: get_results.py (BSD-3-Clause license) ===
from sksurgeryfred.utilities.get_results import get_results
get_results()
| UTF-8 | Python | false | false | 75 | py | 30 | get_results.py | 18 | 0.826667 | 0.826667 | 0 | 3 | 24 | 59 |
tsushiy/competitive-programming-submissions | 16,784,732,232,116 | 2e2c905ce277492c944d24130486d1f117fd71f4 | 16d159d6d3fe69d513717caad3e2c21320f93224 | /AtCoder/ABC/ABC101-150/abc134/abc134d.py | b76a6ee76b6079dc4edbb121de2952045048c509 | [] | no_license | https://github.com/tsushiy/competitive-programming-submissions | d4f068a5157c0de0f1822367e0ca66dd978e43f9 | 9011d855d9252134179cc9cc8f328f6e0ca32407 | refs/heads/master | "2023-04-11T08:34:01.015316" | "2021-04-11T15:16:17" | "2021-04-11T15:17:35" | 175,807,862 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | n = int(input())
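# ABC134 D: choose which boxes get a ball (c[i] = 1) so that, for every i,
# the number of balls in boxes whose index is a multiple of i has parity
# a[i]. Working from the largest index down makes this greedy: by the time
# box i is decided, every larger multiple of i is already fixed, so c[i]
# is forced.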
a = list(map(int, input().split()))
c = [0 for i in range(n)]
for i in range(n, n//2, -1):
if a[i-1]:
c[i-1] = 1
for i in range(n//2, 1, -1):
t = i
tt = 0
cnt = 1
while t<=n:
tt += c[t-1]
cnt += 1
t = i*cnt
if (a[i-1]==1 and tt%2==0) or (a[i-1]==0 and tt%2==1):
c[i-1] = 1
ttt = sum(c)
if (ttt%2==0 and a[0]==1) or (ttt%2==1 and a[0]==0):
c[0] = 1
ttt += 1
#print(c)
ans = []
for i in range(n):
if c[i]:
ans.append(i+1)
print(ttt)
if ttt:
print(*ans) | UTF-8 | Python | false | false | 513 | py | 769 | abc134d.py | 768 | 0.467836 | 0.39961 | 0 | 30 | 16.133333 | 56 |
EEmery/anomaly-detection | 549,755,820,808 | 55775fde7d9fdb14ba0632da6f0d39845322c27e | 94a27c14e31aeac3fe16980240e19634837679a8 | /Google Maps/google-maps-API.py | 6050b04f953f7b1d95f20aa880c1f2d9a44d91d7 | [] | no_license | https://github.com/EEmery/anomaly-detection | 73762045eb317f0dc565f1199b28c61ce8fe1756 | 3fbb098ea483c85a7f0bec46ca33c3b3b417dbbf | refs/heads/master | "2020-04-05T23:28:35.698159" | "2017-09-10T00:35:39" | "2017-09-10T00:35:39" | 83,551,646 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import googlemaps, requests, json
from datetime import datetime
# Reads the Google API key safely
google_API_key_file = open('googleAPI-key.txt', 'r')
google_API_key = google_API_key_file.read()
google_API_key_file.close()
# Set up the Google API
gmaps = googlemaps.Client(key=google_API_key)
# Makes a simple request
geocode_result = gmaps.geocode('1600 Amphitheatre Parkway, Mountain View, CA')
# Saves result
response_file = open('response.json', 'w')
response = json.dump(geocode_result, response_file)
response_file.close() | UTF-8 | Python | false | false | 532 | py | 17 | google-maps-API.py | 15 | 0.753759 | 0.746241 | 0 | 18 | 28.611111 | 78 |
certifiedloud/clido | 3,925,600,129,633 | 6bd982bb2f2fb788311058fa5e49a6dec02e28a6 | 24f409902cb56e352f175100c154f1c31c29ab1a | /clido.py | af05f0cf4a45a318108f8ec59475719cd0eda4be | [
"MIT"
] | permissive | https://github.com/certifiedloud/clido | 09a177de877c83057fd81419a302f2c035d071e5 | eddccbf8b3443e4fb6869c20c22a040086b2989e | refs/heads/master | "2020-05-16T19:41:51.636162" | "2015-05-19T19:58:35" | "2015-05-19T19:58:35" | 35,076,225 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import argparse
from tabulate import tabulate
import sys
from os.path import expanduser
try:
import digitalocean
except ImportError:
print("python-digitalocean is required. pip install it and try again")
# Set the API token for authenticated interaction
# TODO cycle through a list of tokens to avoid rate limiting
config_file = expanduser('~') + '/.clido.cfg'
with open(config_file, 'r') as f:
    api_token = f.readline().split('=')[1].strip()
# Initialize the API
do = digitalocean.Manager(token=api_token)
# Use argparse to set all of the CLI options
description = """
Interact with your DigitalOcean account through their API.
When run without any arguments, you simply get a list of
all the active droplets in the specified account
"""
parser = argparse.ArgumentParser(description=description)
parser.add_argument('operand',
help="specify one of the following: "
"droplets, domains, images, sizes, regions, keys.")
parser.add_argument('-c', '--create', action="store_true",
help="create droplet. -c -n <name> -s <size>"
"-i <image> -r <region>")
parser.add_argument('-d', '--destroy',
help="destroy <operand> by <id>")
parser.add_argument('-n', '--name',
help="name of droplet to create")
parser.add_argument('-u', '--update-domain', action="store_true",
help="update specifc domain with a record")
parser.add_argument('-a', '--ip-address',
help="Specify ip address for domain creation")
parser.add_argument('-s', '--size',
help="size-slug of droplet to create")
parser.add_argument('-i', '--image',
help="image-slug of droplet to create")
parser.add_argument('-r', '--region',
help="region-slug of droplet to create")
parser.add_argument('-k', '--ssh-keys', default=[], nargs='+', type=int,
help="list of ssh key id's to add to new droplets")
parser.add_argument('-l', '--lookup',
help="lookup details of <operand> by <id>")
args = parser.parse_args()
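# Example invocations (illustrative; IDs, names and addresses are placeholders):
#   python clido.py droplets                               # list droplets
#   python clido.py droplets -d 123456                     # destroy droplet by id
#   python clido.py domains -c -n example.com -a 203.0.113.10
#   python clido.py domains -l example.com                 # list a domain's records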
if args.operand == 'sizes':
sizes = do.get_all_sizes()
for size in sizes:
print(size)
elif args.operand == 'regions':
# Print a detailed list of available regions
regions = do.get_all_regions()
for region in regions:
print("{}({})".format(region, region.slug))
sys.exit(0)
elif args.operand == 'keys':
keys = do.get_all_sshkeys()
for key in keys:
print(key)
elif args.operand == 'images':
images = do.get_all_images()
for image in images:
print("{}({})".format(image, image.slug))
elif args.operand == 'droplets':
# check if we want to destroy a droplet
if args.destroy:
try:
droplet_to_destroy = do.get_droplet(args.destroy)
droplet_to_destroy.destroy()
sys.exit(0)
except Exception as e:
print("Unable to destroy droplet: {}".format(e))
sys.exit(1)
droplets = do.get_all_droplets()
for droplet in droplets:
print(droplet)
elif args.operand == 'domains':
# check if we want to create a domain
if args.create:
if not args.name and not args.ip_address:
parser.error("In order to create a domain you must specify both "
"--name and --ip-address")
sys.exit(1)
else:
try:
domain = digitalocean.Domain(token=api_token,
name=args.name,
ip_address=args.ip_address).create()
sys.exit(0)
except Exception as e:
print("Unable to create domain: {}".format(e))
sys.exit(1)
# check if we want to destroy a domain
if args.destroy:
try:
digitalocean.Domain(token=api_token,
name=args.destroy).destroy()
sys.exit(0)
except Exception as e:
print("Couldn't destroy domain: {}".format(e))
sys.exit(1)
# Check if we want to update existing domain
if args.update_domain:
if not args.name:
print("You must specify -n, which domain to update")
sys.exit(1)
# TODO finish domain record updating
if args.lookup:
domain = digitalocean.Domain(token=api_token,
name=args.lookup)
records = domain.get_records()
record_list = []
for record in records:
record_list.append({"Type": record.type,
"Priority": record.priority,
"Name": record.name,
"Data": record.data,
"ID": record.id})
print(tabulate(record_list, headers='keys', stralign='center'))
sys.exit(0)
# If nothing else, just print a list of domains
domains = do.get_all_domains()
for domain in domains:
print(domain)
else:
print("{} is not a valid operand, please choose one of the following:"
.format(args.operand))
print("droplets, domains, keys, regions, sizes, images.")
| UTF-8 | Python | false | false | 5,273 | py | 4 | clido.py | 1 | 0.569884 | 0.567798 | 0 | 147 | 34.870748 | 81 |
jiluhu/dirtysalt.github.io | 3,461,743,667,485 | c4fee36cd15e55edd0db56c00b1940a04bf57929 | 9099ed0407521ac40b88f3b92872307f66c57bf9 | /codes/contest/leetcode/subsets-ii.py | 673281f982e936ee7749215f53f3052e1bd2aa83 | [] | no_license | https://github.com/jiluhu/dirtysalt.github.io | 0cea3f52d2c4adf2bbf5c23b74f4cb1070025816 | c026f2969c784827fac702b34b07a9268b70b62a | refs/heads/master | "2020-08-31T09:32:05.273168" | "2019-10-29T01:53:45" | "2019-10-29T01:53:45" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# coding:utf-8
# Copyright (C) dirlt
class Solution(object):
def subsetsWithDup(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
from collections import defaultdict
d = defaultdict(int)
for n in nums:
d[n] += 1
keys = list(d.keys())
res = [[]]
for k in keys:
res2 = []
for v in range(0, d[k] + 1):
for r in res:
res2.append([k] * v + r)
res = res2
        return res
# Usage sketch: counting multiplicities lets each distinct value contribute
# 0..count copies exactly once, so duplicate subsets never arise:
#   Solution().subsetsWithDup([1, 2, 2])
#   -> [[], [1], [2], [2, 1], [2, 2], [2, 2, 1]]   (element order may differ)
| UTF-8 | Python | false | false | 579 | py | 594 | subsets-ii.py | 550 | 0.445596 | 0.433506 | 0 | 26 | 21.269231 | 44 |
Torniojaws/vortech-backend | 5,257,040,018,210 | 867a2b985540a16c03883cca0021ae1eeed7492b | 5794c1dd7e8ec0ec279ea72c22ccd0cec2f8ead0 | /migrations/versions/0894a7be87c8_added_videos_model.py | 32163c7e934b9c99c475705c2ae477d3ebd20886 | [
"MIT"
] | permissive | https://github.com/Torniojaws/vortech-backend | 7c4365e232cab9dfe5abebd3a544f2e2bba49e91 | 62f8e8e904e379541193f0cbb91a8434b47f538f | refs/heads/master | "2023-05-12T02:47:13.396311" | "2023-03-29T10:38:51" | "2023-03-29T10:38:51" | 102,006,733 | 0 | 0 | MIT | false | "2023-08-17T12:51:56" | "2017-08-31T14:00:50" | "2021-12-22T14:07:22" | "2023-08-17T12:51:55" | 603 | 0 | 0 | 12 | Python | false | false | """Added Videos model
Revision ID: 0894a7be87c8
Revises: d255000b4aa6
Create Date: 2017-09-25 23:09:13.516907
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0894a7be87c8'
down_revision = 'd255000b4aa6'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('VideoCategories',
sa.Column('VideoCategoryID', sa.Integer(), nullable=False),
sa.Column('Category', sa.String(length=200), nullable=False),
sa.PrimaryKeyConstraint('VideoCategoryID')
)
op.create_table('Videos',
sa.Column('VideoID', sa.Integer(), nullable=False),
sa.Column('Title', sa.String(length=200), nullable=False),
sa.Column('URL', sa.Text(), nullable=False),
sa.Column('Created', sa.DateTime(), nullable=True),
sa.Column('Updated', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('VideoID')
)
op.create_table('VideosCategoriesMapping',
sa.Column('VideosCategoriesMappingID', sa.Integer(), nullable=False),
sa.Column('VideoID', sa.Integer(), nullable=False),
sa.Column('VideoCategoryID', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['VideoCategoryID'], ['VideoCategories.VideoCategoryID'], ),
sa.ForeignKeyConstraint(['VideoID'], ['Videos.VideoID'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('VideosCategoriesMappingID')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('VideosCategoriesMapping')
op.drop_table('Videos')
op.drop_table('VideoCategories')
# ### end Alembic commands ###
| UTF-8 | Python | false | false | 1,726 | py | 169 | 0894a7be87c8_added_videos_model.py | 141 | 0.68482 | 0.651217 | 0 | 50 | 33.04 | 88 |
Tanushree28/12-Python-Projects | 515,396,092,297 | 20e66f99632fc2c8a1f88729484f5e0ccaf60328 | b5d4d92c16dfc69f6d8235b9b22530c4b407f6df | /Madlib/madlibs.py | aeb1ffa0981454d62090f670f9fb55a4c1bdfd76 | [] | no_license | https://github.com/Tanushree28/12-Python-Projects | 28277af8b4284af25ffb2e9848d250cf6b416778 | 60c2c2ef071ebb8d8d0203fe9f5a9b2b1cb0e7e0 | refs/heads/master | "2023-04-20T02:36:18.757941" | "2021-05-12T00:54:05" | "2021-05-12T00:54:05" | 366,333,519 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #string concatenation(put strings together)
#suppose we want to create string that says "subscribe to __"
#youtuber = "Coder" #some string variable
#a few ways to do this
#print("subscribe to " + youtuber)
#print("subscribe to {}".format(youtuber)) #in the curly braces the value of youtuber is placed
#print(f"subscribe to {youtuber}") #app string
adj = input("Adjective: ")
verb1 = input("Verb: ")
verb2 = input("Verb: ")
famous_person = input("Famous Person: ")
madlib = f"Computer programming is so {adj}! It makes me so excited all the times because I love to {verb1}. Stay hydrarted and {verb2} like you are {famous_person}!"
print(madlib) | UTF-8 | Python | false | false | 650 | py | 3 | madlibs.py | 3 | 0.721538 | 0.715385 | 0 | 17 | 37.294118 | 166 |
simtb/coding-puzzles | 8,830,452,786,274 | bfbc50a2c2a1eb41eb8848844092cb09f7eef1bd | 942f0b081d2271978ffe20fbbfa8d687b57e5c02 | /coding_problems/tests/test_flattening_a_linked_list.py | 7ce85443a4032f39d868595c375e779acd65047d | [] | no_license | https://github.com/simtb/coding-puzzles | 99762322606bb505d82924d4d5843db1c04aafbd | 9e1d53e35b2117240eb357d7930cdb8cfd891c8e | refs/heads/master | "2021-04-12T15:46:40.181048" | "2021-02-28T23:47:36" | "2021-02-28T23:47:36" | 249,089,264 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
from problems.flattening_a_linked_list import Node, solution_1
class TestSolution1(unittest.TestCase):
head = Node(5)
head.bottom = Node(7)
head.bottom.bottom = Node(8)
head.bottom.bottom.bottom = Node(30)
head.next = Node(10)
head.next.bottom = Node(20)
head.next.next = Node(19)
head.next.next.bottom = Node(22)
head.next.next.bottom.bottom = Node(50)
head.next.next.next = Node(28)
head.next.next.next.bottom = Node(35)
head.next.next.next.bottom.bottom = Node(40)
head.next.next.next.bottom.bottom.bottom = Node(45)
    def test_none_head(self):
test_head = None
output = solution_1(test_head)
self.assertEqual(output, None)
def test_example(self):
test_head = self.head
output = solution_1(test_head)
self.assertEqual(output.value, 5)
self.assertEqual(output.next.value, 7)
self.assertEqual(output.next.next.value, 8)
self.assertEqual(output.next.next.next.value, 10)
self.assertEqual(output.next.next.next.next.value, 19)
self.assertEqual(output.next.next.next.next.next.value, 20)
self.assertEqual(output.next.next.next.next.next.next.value, 22)
self.assertEqual(output.next.next.next.next.next.next.next.value, 28)
self.assertEqual(output.next.next.next.next.next.next.next.next.value, 30)
self.assertEqual(output.next.next.next.next.next.next.next.next.next.value, 35)
self.assertEqual(output.next.next.next.next.next.next.next.next.next.next.value, 40)
self.assertEqual(output.next.next.next.next.next.next.next.next.next.next.next.value, 45)
self.assertEqual(output.next.next.next.next.next.next.next.next.next.next.next.next.value, 50)
self.assertEqual(output.next.next.next.next.next.next.next.next.next.next.next.next.next, None)
if __name__ == "__main__":
unittest.main() | UTF-8 | Python | false | false | 1,932 | py | 281 | test_flattening_a_linked_list.py | 281 | 0.680124 | 0.654244 | 0 | 50 | 37.66 | 103 |
uwaces/DAGit | 9,972,914,102,417 | 8847ffa9875483ba49d448ac49a2082df91878ee | a2a0a918fb382174cc36d25adb214a429d478df5 | /src/kirkpatrick/git_dag.py | 216be89bb0de41d6dfc742b225593a11792c9cc2 | [] | no_license | https://github.com/uwaces/DAGit | d9305d7219a39c5430cb151cda4368aa7c5b6c9f | 7f93cd376c6becdbe50530a9d6ad82d7c6cf06ff | refs/heads/master | "2021-09-02T07:15:19.778023" | "2017-12-31T10:33:04" | "2017-12-31T10:33:04" | 112,979,620 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import git
class DAG:
def __init__(self, root=None):
self.adj_list = dict()
self.root = root
def __str__(self):
ret = "=================================="
ret += "\nDAG ADJACENCY LIST:\n"
for x in self.adj_list.keys():
ret += str(x) + " maps to: ["
for y in self.adj_list[x]:
ret += str(y) + ", "
ret += "] \n"
ret += "=================================="
return ret
def add_root(self, elem):
if elem not in self.adj_list.keys():
self.adj_list[elem] = set()
self.root = elem
def add_directed_edge(self, v1, v2):
if v1 not in self.adj_list:
self.adj_list[v1] = set()
self.adj_list[v1].add(v2)
if v2 not in self.adj_list:
self.adj_list[v2] = set()
def children(self, elem):
return self.adj_list[elem]
    def build_git(self, filepath):
"""
make a new repo with git at the filepath
make a branch for each child in the adj_list
copy adj_list
until adj_list copy is empty...
remove children from adj_list copy (including the references in other nodes)
for each child in copy - merge branches -- and overwrite merge conflict with new triangle
"""
pass
# Use condition_fun to trace to bottom
def find_leaf_where(self, condition_fun):
if not condition_fun(self.root):
return None
cur = self.root
while len(self.children(cur)) > 0:
for v in self.children(cur):
if condition_fun(v):
cur = v
break
return cur
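# Usage sketch (hypothetical node labels; find_leaf_where walks from the
# root to a leaf, always stepping into a child satisfying the predicate):
#   dag = DAG()
#   dag.add_root('t0')
#   dag.add_directed_edge('t0', 't1')
#   dag.add_directed_edge('t0', 't2')
#   dag.find_leaf_where(lambda v: v != 't2')   # -> 't1'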
| UTF-8 | Python | false | false | 1,720 | py | 29 | git_dag.py | 12 | 0.494186 | 0.488953 | 0 | 57 | 29.175439 | 97 |
jdixosnd/jsonl-to-conll | 6,158,983,149,031 | 4ae49bce35a1903ad621b2d2edaa8c3cb96f1f5c | 178f33d92ac6a8ff1c67797183de070a69857f08 | /jsonl_to_conll/convert.py | f7dfa2ab65a785592eb199ded0439996a9a940bc | [
"Apache-2.0"
] | permissive | https://github.com/jdixosnd/jsonl-to-conll | 7be3819261180c0e335df31b0cc2d473492cb6a8 | 3a7c14b7f0cb362abe6b9ddd0ab93d59e12078da | refs/heads/master | "2022-11-08T04:45:54.129788" | "2020-05-30T01:55:42" | "2020-05-30T01:55:42" | 274,192,153 | 1 | 0 | Apache-2.0 | true | "2020-06-22T16:43:12" | "2020-06-22T16:43:11" | "2020-06-21T07:54:25" | "2020-05-30T01:55:47" | 9 | 0 | 0 | 0 | null | false | false | import json
def flatten(data):
output_text = []
beg_index = 0
end_index = 0
text = data["text"]
all_labels = sorted(data["labels"])
for ind in range(len(all_labels)):
next_label = all_labels[ind]
output_text += [(label_word, "O") for label_word in text[end_index:next_label[0]].strip().split()]
label = next_label
beg_index = label[0]
end_index = label[1]
label_text = text[beg_index:end_index]
output_text += [(label_word, "B-" + label[2]) if not i else (label_word, "I-" + label[2]) for i, label_word in enumerate(label_text.split(" "))]
output_text += [(label_word, "O") for label_word in text[end_index:].strip().split()]
return output_text
def flatten_all(datas):
return [flatten(data) for data in datas]
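# Example (hypothetical doccano-style JSONL record; labels are
# [start, end, tag] character spans):
#   data = {"text": "John lives in Paris",
#           "labels": [[0, 4, "PER"], [14, 19, "LOC"]]}
#   flatten(data) -> [("John", "B-PER"), ("lives", "O"), ("in", "O"),
#                     ("Paris", "B-LOC")]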
| UTF-8 | Python | false | false | 764 | py | 9 | convert.py | 6 | 0.632199 | 0.623037 | 0 | 27 | 27.296296 | 148 |
awalshz/masterMemory | 9,706,626,089,685 | 6ca7ccbd2e655ad0ebff17139719ae3cbf8fdc94 | 7c9bd321297a18f99ff069f47492fcb366e535bd | /Memoire.VersionFinal/Population_homogene/comportemental/choix_fenetre.py | 76c5841036fd7a36f71aaee3387589647fbf0788 | [] | no_license | https://github.com/awalshz/masterMemory | 180c84cb55033dd96d25c1aab0de5687ef099037 | c524231c3cde000eb1c4fbfa8a12c3ba8db87f91 | refs/heads/master | "2020-09-24T21:28:29.091520" | "2019-12-04T11:09:29" | "2019-12-04T11:09:29" | 225,847,067 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from stock_pel import stock_pel
import pandas as pd
import numpy as np
import time
from non_parametric_regression import Kernel
# ==============================================================================
# choose the smoothing bandwidth for the account closure rate.
# ==============================================================================
print('Computing R2 for the closure rate')
t0 = time.time()
X = stock_pel.values[:, 0:2]
encours = stock_pel.values[:, 2]
clotures = stock_pel.values[:, 4]
tx_cloture = clotures / encours
# Build a grid of candidate bandwidths h. Each entry will hold the R2
# score on the test data. We pick the bandwidth that maximizes R2.
grille = pd.DataFrame(index=range(1, 12),
columns=[x / 200 for x in range(2, 11)])
RS = np.random.RandomState(13062013)
model = Kernel(X=X, Y=tx_cloture, RS=RS)
for h1 in grille.index:
for h2 in grille.columns:
        print(f'Closures: computing R2 for h=[{h1},{h2}]')
grille[h2][h1] = model.R2(h=[h1, h2])
grille.to_csv('grille_clotures.csv')
t1 = time.time()
print(f'done with the closures in {t1-t0:.2f} seconds \n')
# ==============================================================================
# choose the smoothing bandwidth for the deposit rate.
# ==============================================================================
print('Computing R2 for the deposit rate')
stock_pel = stock_pel[stock_pel['age'] < 120]
X = stock_pel.values[:, 0:2]
encours = stock_pel.values[:, 2]
versements = stock_pel.values[:, 3]
tx_versement = versements / encours
RS = np.random.RandomState(13062013)
model = Kernel(X=X, Y=tx_versement, RS=RS)
for h1 in grille.index:
for h2 in grille.columns:
        print(f'Deposits: computing R2 for h=[{h1},{h2}]')
grille[h2][h1] = model.R2(h=[h1, h2])
grille.to_csv('grille_versements.csv')
t2 = time.time()
print(f'done with the deposits in {t2-t1:.2f} seconds \n')
# ============================================================================
# Comment: from the smoothing grids there is no doubt that the best choice
# of h1 (the smoothing bandwidth for the age variable) is h1 = 1 month. For
# the rate spread it is less clear-cut: the R2 values are very close and may
# depend on the random split chosen. In the file choix_h2.py we fix h1 = 1
# and compute R2 for several random splits and several values of h2.
| UTF-8 | Python | false | false | 2,481 | py | 43 | choix_fenetre.py | 16 | 0.583233 | 0.551794 | 0 | 72 | 33.458333 | 80 |
folomeev1980/kwork_64 | 5,403,068,883,207 | b46a17db47f9c08339cedb8681462181b3b89c05 | f30653d6e808879f97d52dc1443d10783f4982e6 | /RosMed3/doctorsCvs2.py | 87585776049ed474f670547dd3408aa29aac3b02 | [] | no_license | https://github.com/folomeev1980/kwork_64 | 6786cbe76a36e90a293f016c5c07cb7af66c278b | 47169fb64b5bde5436a39469b8bb3e88a0da360d | refs/heads/master | "2022-12-17T18:13:37.082079" | "2020-08-16T17:22:46" | "2020-08-16T17:22:46" | 228,093,574 | 0 | 0 | null | false | "2022-11-22T05:01:10" | "2019-12-14T21:49:26" | "2020-08-16T17:21:10" | "2022-11-22T05:01:10" | 126,908 | 0 | 0 | 6 | Python | false | false | import requests
import csv
from bs4 import BeautifulSoup
from selenium import webdriver
from bs4 import BeautifulSoup
from random import randint
from time import sleep
from progress.bar import Bar
from openpyxl import Workbook, load_workbook
def lst(l):
s = ""
for i in l:
s = s + i + "####"
return (s)
def create_csv():
header = tuple(["Словарная метка",
"ФИО",
"Организация, проводящая КИ",
"Код врача",
"День рождения врача"])
with open("dbs\\doctors.csv", "w", newline='', encoding='utf-8') as f:
writer = csv.writer(f, delimiter=';')
#writer.writerow(header)
def get_html_(url):
page = ""
while page == '':
try:
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36'}
page = requests.get(url, headers=headers)
return page.text
break
except:
print("Connection refused by the doctor server..")
print("Let me sleep for 10 seconds")
# print("ZZzzzz...")
sleep(10)
# print("Was a nice sleep, now let me continue...")
continue
def get_list_of_doctors(url):
list_of_doctors = []
# print(url)
if url != "":
clinics = get_html_(url)
soup = BeautifulSoup(clinics, "lxml")
try:
tds = soup.find("table", class_="ts1").findAll("tr", class_="hi_sys")
for td in tds:
temp = td.findAll("td")
# print(temp)
a = (temp[-2].text.strip().split("-"))
b = (temp[-1].text.strip().split()[1][1:])
c = temp[2]
c = str(c).split(">")[1]
c = c.split("<")[0].strip()
list_of_doctors.append([a[0], a[1], b, c])
# print(list_of_doctors)
except Exception as e:
# print(e, "except by list of doctors",url)
list_of_doctors = []
return list_of_doctors
else:
return list_of_doctors
def get_append_list_of_doctors(url):
list_of_doctors = []
try:
options = webdriver.ChromeOptions()
options.add_argument('headless')
options.add_argument('window-size=1920x935')
driver = webdriver.Chrome('chromedriver.exe', options=options)
driver.get(url)
    except Exception as e:
        # webdriver.Chrome() may have failed before `driver` was bound,
        # so return the empty result instead of crashing below.
        return list_of_doctors
k = 2
while True:
try:
# print("ok")
page = "/html/body/form/table/tbody/tr/td/table/tbody/tr[3]/td/div[2]/table/tbody/tr[3]/td/div/table/tbody/tr[22]/td/table/tbody/tr/td[{}]/a".format(
k)
driver.find_element_by_xpath(page).click()
html = driver.page_source
# print(html)
soup = BeautifulSoup(html, "lxml")
tds = soup.find("table", class_="ts1").findAll("tr", class_="hi_sys")
for td in tds:
temp = td.findAll("td")
c = temp[2]
c = str(c).split(">")[1]
c = c.split("<")[0].strip()
a = (temp[-2].text.strip().split("-"))
b = (temp[-1].text.strip().split()[1][1:])
list_of_doctors.append([a[0], a[1], b, c])
k = k + 1
# time.sleep(2)
except Exception as e:
# print("End pages",e)
break
# driver.quit()
driver.quit()
return list_of_doctors
def get_info_for_each_page_(html):
list_dics = []
soup = BeautifulSoup(html, "lxml")
list_of_page = (soup.findAll("tr", class_="hi_sys poi"))
# print(list_of_page)
for st, page in enumerate(list_of_page):
dic = {}
list_some = []
try:
doctor_link = "https://grls.rosminzdrav.ru/" + \
page.get("onclick").split("'")[1]
doctors_list = get_list_of_doctors(doctor_link)
except:
doctors_list = []
try:
page_data = page.findAll("td")
for td in page_data:
list_some.append(td.text.strip())
# print(list_some[6])
if int(list_some[6]) <= 20:
list_some.append(doctors_list)
else:
list_some.append(doctors_list)
# print(doctor_link)
list_some[-1].extend(get_append_list_of_doctors(doctor_link))
# print(list_some)
dic["code"] = list_some[1]
dic["famaly"] = list_some[2]
dic["name"] = list_some[3]
dic["fathername"] = list_some[4]
dic["numer_of_ki"] = list_some[6]
dic["birthday"] = list_some[7]
dic["doctors_list"] = list_some[8]
# print(dic)
list_dics.append(dic)
except Exception as e:
print(e, "Ошибка ")
dic = {}
list_dics.append(dic)
return list_dics
def writer_csv(data):
for dic in data:
for j in dic["doctors_list"]:
with open("dbs\\doctors.csv", "a", newline='', encoding='utf-8') as f:
writer = csv.writer(f, delimiter=';')
a = "{} {} {}".format(dic["famaly"], dic["name"], dic["fathername"])
b = "{}_{}_{}".format(j[0], j[1], j[2])
c = j[-1]
d = dic["code"]
e = dic["birthday"]
writer.writerow((b, a, c, d, e))
def cvs_doctors():
i = 1
k = 0
sum = 0
create_csv()
while True:
try:
url = "https://grls.rosminzdrav.ru/ciexperts.aspx?F=&N=&P=&D=&ExpertID=&order=fio&orderType=desc&moduleId=2&pageSize=30&pageNum={}".format(
i)
html = get_html_(url)
list_of_dics_one_page = get_info_for_each_page_(html)
if len(list_of_dics_one_page) == 0:
k = k + 1
sum = sum + len(list_of_dics_one_page)
writer_csv(list_of_dics_one_page)
i = i + 1
print(sum)
if k > 5:
break
except Exception as e:
if k > 5:
break
print(i, "Pages\n", sum, "Doctors")
def excel_doctors():
book_new = Workbook()
dic = {}
sheet_new = book_new.active
header = tuple(["ID_PI",
"FIO",
"Organization",
"Email",
"Phone",
"VK",
"Facebook",
"Instagram",
"Birthday",
])
sheet_new.append(header)
with open('dbs\\doctors.csv', newline='', encoding='utf-8') as csv_file_clinics:
csv_reader_clinics = csv.reader(csv_file_clinics, delimiter=';')
for i in csv_reader_clinics:
#print(lst(i[1:]))
dic[lst(i[1:])]=None
# try:
# i=[i[3],i[1],i[2],"","","","","",i[4]]
#
# sheet_new.append(i)
# except:
# pass
for k in dic:
try:
i=k.split("####")[0:-1]
i = [i[2], i[0], i[1], "", "", "", "", "", i[3]]
#print(i)
sheet_new.append(i)
except:
pass
# print(k.split("####")[0:])
#sheet_new.append()
book_new.save("final_xls_tables\\PI.xlsx")
if __name__ == "__main__":
excel_doctors()
# cvs_doctors()
| UTF-8 | Python | false | false | 7,685 | py | 97 | doctorsCvs2.py | 57 | 0.465064 | 0.450223 | 0 | 285 | 25.715789 | 161 |
freelancer1845/rsockets2-try | 14,757,507,676,604 | f61dbfc6276b7339ef1418511c0536b658402737 | a56a8fce70036f2c40d9090077450a83b27173d1 | /rsockets2/frames/frame_abc.py | 91adb9d1943bfff3288bfd9d77bc53ff3c6df38d | [
"Apache-2.0"
] | permissive | https://github.com/freelancer1845/rsockets2-try | c68be4853d2d0b3bf05b32a235dcba01419029bf | 25b8b38e00925c3feb6c6e790624a35bc8689619 | refs/heads/master | "2021-11-19T02:47:21.188595" | "2021-08-30T10:31:29" | "2021-08-30T10:31:29" | 244,369,768 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from abc import ABC, abstractmethod
class Frame_ABC(ABC):
def __init__(self):
super().__init__()
self.stream_id = 0
@abstractmethod
def to_bytes(self):
raise NotImplementedError()
@classmethod
@abstractmethod
def from_data(cls, stream_id: int, flags: int, full_data: bytes):
raise NotImplementedError()
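# A hypothetical concrete subclass, sketching how the two abstract hooks are
# meant to pair up (the field layout here is illustrative, not the real
# RSocket wire format):
#   class KeepaliveFrame(Frame_ABC):
#       def to_bytes(self):
#           return self.stream_id.to_bytes(4, 'big')
#       @classmethod
#       def from_data(cls, stream_id, flags, full_data):
#           frame = cls()
#           frame.stream_id = stream_id
#           return frame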
| UTF-8 | Python | false | false | 364 | py | 40 | frame_abc.py | 39 | 0.620879 | 0.618132 | 0 | 17 | 20.411765 | 69 |
ashutoshdhondkar/basic-python | 4,492,535,841,980 | c1af1a82e22a24f74463695b12511853b83bfb18 | d960ed6ede1e863d931489cca70662ee0703d7f7 | /regexValidEmails.py | 000767b442f7a2572471a90733a17ee1b3c39ddf | [] | no_license | https://github.com/ashutoshdhondkar/basic-python | 4bd7e37969e3145a15d99f2116f640825e1c7a0a | 61e6d733b02f27670319d598b67c8f266ad68d63 | refs/heads/master | "2021-04-03T08:50:01.738137" | "2019-05-05T19:03:21" | "2019-05-05T19:03:21" | 125,065,667 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # program to find valid email addresses
import re
emails = ["john@example.com", "pythonl985ist@python.org", "??%4ly@email.com",'abx@11.com']
pattern = re.compile(r"^[a-zA-Z0-9.?]+@\w+\.\w+") # raw string so the backslashes reach the regex engine intact
for line in emails:
match = re.search(pattern,line)
if(match):
print(match.group(0))
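# Expected output with the list above: john@example.com,
# pythonl985ist@python.org and abx@11.com are printed; "??%4ly@email.com"
# is rejected because '%' cannot appear before the '@' under this pattern.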
| UTF-8 | Python | false | false | 309 | py | 57 | regexValidEmails.py | 48 | 0.605178 | 0.576052 | 0 | 12 | 23.583333 | 90 |
Shunichi09/RNN_practice | 15,307,263,482,397 | 0300d656abab99eb62cad736462b8ea0f60f5370 | 11e3e5cbdd6c21fca1eb593ec7e376eb922e7451 | /5th/RNN_ch5_main.py | 903f7212c1eb4e77364381aa3b9cdc716396061c | [] | no_license | https://github.com/Shunichi09/RNN_practice | df175aaf54825b100babd406f5fd52a5b638af26 | 1ebb3005dfe5f8fc7480dfcf4679168a1322df01 | refs/heads/master | "2020-03-26T01:51:59.047123" | "2018-08-23T13:44:41" | "2018-08-23T13:44:41" | 144,386,344 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
sys.path.append('..')
from NN_ch5 import SimpleRnnlm, SGD # , RnnlmTrainer
from common import ptb # dataset読み込む用
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def main():
    # hyperparameter settings
    batch_size = 10
    wordvec_size = 100 # size of the word embeddings
    hidden_size = 100 # size of the hidden layer
    time_size = 5 # time span unrolled by truncated BPTT, i.e. number of RNN steps
    lr = 0.1 # learning rate
    max_epoch = 100 # maximum number of epochs
    # load the training data (and shrink the dataset)
corpus, word_to_id, id_to_word = ptb.load_data('train')
corpus_size = 1000
    corpus = corpus[:corpus_size] # shrink the corpus
vocab_size = int(max(corpus) + 1)
    xs = corpus[:-1] # inputs: everything but the last token
    ts = corpus[1:] # targets (teacher labels): everything but the first token
data_size = len(xs)
print('corpus size: %d, vocabulary size: %d' % (corpus_size, vocab_size))
    # variables used during training
    max_iters = data_size // (batch_size * time_size) # number of iterations that actually fit in the data (not sampled at random!!); 99 here
time_idx = 0
total_loss = 0
loss_count = 0
ppl_list = []
    # create the model
model = SimpleRnnlm(vocab_size, wordvec_size, hidden_size)
optimizer = SGD(lr)
    # compute the read start position for each sample in the mini-batch
jump = (corpus_size - 1) // batch_size
offsets = [i * jump for i in range(batch_size)]
print('max_iters = {0}'.format(max_iters))
print('offsets = {0}'.format(offsets))
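    # Worked example with the defaults above: corpus_size = 1000 and
    # batch_size = 10 give jump = 999 // 10 = 99, so the batch rows start
    # reading at offsets [0, 99, 198, ..., 891] and wrap around data_size.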
for epoch in range(max_epoch):
for iter in range(max_iters):
            # fetch the mini-batch
batch_x = np.empty((batch_size, time_size), dtype='i')
batch_t = np.empty((batch_size, time_size), dtype='i')
for t in range(time_size):
for i, offset in enumerate(offsets):
                    batch_x[i, t] = xs[(offset + time_idx) % data_size]
                    batch_t[i, t] = ts[(offset + time_idx) % data_size] # even batched, this is effectively batch x time x 1 here
time_idx += 1
# print('batch_t.shape = {0}'.format(batch_t.shape))
# print('batch_x.shape = {0}'.format(batch_x.shape))
            # compute the gradients and update the parameters
loss = model.forward(batch_x, batch_t)
model.backward()
optimizer.update(model.params, model.grads)
total_loss += loss
loss_count += 1
        # evaluate perplexity once per epoch
ppl = np.exp(total_loss / loss_count)
print('| epoch %d | perplexity %.2f'
% (epoch+1, ppl))
ppl_list.append(float(ppl))
total_loss, loss_count = 0, 0
    # draw the graph
x = np.arange(len(ppl_list))
plt.plot(x, ppl_list, label='train')
plt.xlabel('epochs')
plt.ylabel('perplexity')
plt.show()
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 3,220 | py | 18 | RNN_ch5_main.py | 15 | 0.564338 | 0.548897 | 0 | 87 | 30.264368 | 97 |
Olo23/eth2.0-specs | 11,089,605,594,936 | dc012af335e5e5af46efcdb7272e290361741472 | 56e0576622b74f77c4d22269b1db7376b1f06d23 | /tests/generators/fork_choice/main.py | f09bbcc0aa12c41819c526669be33a5def526f26 | [
"CC0-1.0"
] | permissive | https://github.com/Olo23/eth2.0-specs | c0653bfde91978dc9016caccde1151ddda52afbf | 0d22e08a7ec2c40c68d9813503cc01b32fbbd138 | refs/heads/master | "2023-05-02T21:53:45.009244" | "2021-04-06T21:46:36" | "2021-04-06T21:46:36" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.test.context import PHASE0, ALTAIR
specs = (spec_phase0, spec_altair)
if __name__ == "__main__":
phase_0_mods = {key: 'eth2spec.test.phase0.fork_choice.test_' + key for key in [
'get_head',
]}
# No additional Altair specific finality tests, yet.
altair_mods = phase_0_mods
all_mods = {
PHASE0: phase_0_mods,
ALTAIR: altair_mods,
}
run_state_test_generators(runner_name="fork_choice", specs=specs, all_mods=all_mods)
| UTF-8 | Python | false | false | 662 | py | 10 | main.py | 2 | 0.681269 | 0.660121 | 0 | 22 | 29.090909 | 88 |
KamilJerzyWojcik/PopularPythonPackages | 10,350,871,184,303 | 371af43020fa1344aee4840381ef216c90498f2e | 93798867df24c2d469dd97783ff5eadc2b459e9f | /NumPyApp/main2.py | 2b658154368371153e3a7fd272109de7aff26deb | [] | no_license | https://github.com/KamilJerzyWojcik/PopularPythonPackages | 79f568cf44a868bb9d6e6276270a86a7119e86f9 | 72d52a2759ae25c11024c372a2850158c9b5eb01 | refs/heads/master | "2020-05-19T04:25:49.485841" | "2019-05-03T22:09:47" | "2019-05-03T22:09:47" | 184,825,881 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
first = np.array([1, 2, 3])
second = np.array([1, 2, 3])
print(first + second)
print(first + 2)
dimensions_inch = np.array([1, 2, 3])
dimensions_cm = dimensions_inch * 2.54
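# NumPy broadcasting applies the scalar to every element:
#   array([1, 2, 3]) * 2.54 -> array([2.54, 5.08, 7.62])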
| UTF-8 | Python | false | false | 195 | py | 7 | main2.py | 6 | 0.646154 | 0.579487 | 0 | 10 | 18.5 | 38 |
ningliang/bagger | 10,376,641,015,802 | e860517cda36c6c4001b26ed168497c42b376115 | 054c3f0cb8a5046ccbd70c2fb228a934161af440 | /steve/app/flags.py | 76d8bd688433a5866c218b5905ab2c56cac1bc9d | [] | no_license | https://github.com/ningliang/bagger | 16049d96dd65ac863073bf49992d0fcfe6cb602c | 075f3463d6319996399a46971a4da8f054fbf900 | refs/heads/master | "2020-05-19T15:39:26.592662" | "2009-10-25T17:28:50" | "2009-10-25T17:28:50" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Methods to make app-level global definition of flags easy.
TODO(fedele): write example usage.
"""
__author__ = 'stephen.fedele@gmail.com (Stephen Fedele)'
# Note: you probably can't import the prelude here,
# as that would set up a cyclic dependency?
# from cobra.steve.util.prelude import *
import optparse
import re
class FlagParser(object):
def __init__(self):
self.parser = optparse.OptionParser()
self.options = None
self.args = None
def _DoParse(self):
self.options, self.args = self.parser.parse_args()
def __getattr__(self, attr):
if attr.startswith("Define"):
raise AttributeError, "Use 'flags.%s' instead of 'FLAGS.%s'" % (attr, attr)
return getattr(self.options, attr)
def _Define(self, value_type, flag_name, default, description):
# TODO(fedele): output a reasonable error message here
assert re.match(r'^[-A-Za-z_]+$', flag_name)
self.parser.add_option("--%s" % flag_name,
dest=flag_name,
default=default,
help=description,
type=value_type)
def _DefineString(self, *args, **kwargs):
self._Define("string", *args, **kwargs)
def _DefineInteger(self, *args, **kwargs):
self._Define("int", *args, **kwargs)
def _DefineFloat(self, *args, **kwargs):
self._Define("float", *args, **kwargs)
def _DefineBoolean(self, flag_name, default, description):
# TODO(fedele): output a reasonable error message here
assert re.match(r'^[-A-Za-z_]+$', flag_name)
true_name = "--%s" % flag_name
false_name = "--no%s" % flag_name
self.parser.add_option(true_name, action="store_true", dest=flag_name,
help=description)
self.parser.add_option(false_name, action="store_false", dest=flag_name,
default=default)
FLAGS = FlagParser()
DefineBoolean = FLAGS._DefineBoolean
DefineString = FLAGS._DefineString
DefineInteger = FLAGS._DefineInteger
DefineFloat = FLAGS._DefineFloat
DoParse = FLAGS._DoParse
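# Example usage (addressing the TODO above; module path and flag names are
# illustrative only):
#   from cobra.steve.app import flags
#   flags.DefineString("name", "world", "who to greet")
#   flags.DefineBoolean("shout", False, "print the greeting in uppercase")
#   flags.DoParse()
#   greeting = "hello, %s" % flags.FLAGS.name
#   print(greeting.upper() if flags.FLAGS.shout else greeting)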
| UTF-8 | Python | false | false | 2,076 | py | 88 | flags.py | 66 | 0.627649 | 0.627649 | 0 | 64 | 31.4375 | 81 |
bizzyvinci/project-euler | 4,329,327,083,613 | 4e1f0fb5aba72ec6e70a8ea092c00f9ff854f1f2 | c3523f94789d6f6e301f66f06938bcdf718afb49 | /014.py | 0637ea87d2e21db5ef1d80e352055db7e4460767 | [] | no_license | https://github.com/bizzyvinci/project-euler | 382427a43a4d70ef7c535f2c3896ad49eef550f0 | 1b52c872beb9fa564dd1975b6292063fc297031e | refs/heads/master | "2022-09-07T20:23:56.884228" | "2020-06-03T09:34:25" | "2020-06-03T09:34:25" | 264,598,848 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Longest Collatz sequence(conjecture)
Problem: Which starting number, under one million, produces the longest chain?
NOTE: Once the chain starts the terms are allowed to go above one million.
'''
from math import floor
# This code is efficient because it saves every n that is solved in a dictionary
def count_chain(n):
if n in values:
return values[n]
if n%2==0:
        values[n]=1+count_chain(n//2)  # floor division keeps the dict keys integral
    else:
        values[n]=2+count_chain((3*n+1)//2)  # 3n+1 is even here, so this is exact
return values[n]
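# Worked example: the chain starting at 13 is
#   13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1
# which has 10 terms, so count_chain(13) returns 10 (the odd branch jumps
# straight from 13 to 20, counting two steps at once).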
limit = 1000000
longest_chain = 0
answer = -1
values = {1:1}
for x in range(floor(limit/2), limit):
if count_chain(x)>longest_chain:
longest_chain=count_chain(x)
answer=x
print(answer)
| UTF-8 | Python | false | false | 657 | py | 29 | 014.py | 28 | 0.719939 | 0.689498 | 0 | 27 | 23.333333 | 80 |
manuelcarrizo/rss-filter | 8,787,503,130,537 | 8d9f47be986c1d410528f96349d71cdf92da7cf9 | 67754d244493cd1a31b2a489be3fa8941c74eecb | /filter.py | 61e8405575260bd3ab44377de65f47c3053ff6ff | [] | no_license | https://github.com/manuelcarrizo/rss-filter | 5372a8cda94a10e00c199b5706c5283af8bfe4bc | 14173668d13758be55e2c41d5578370324ba60e9 | refs/heads/master | "2021-06-18T11:34:14.452348" | "2021-02-21T22:22:58" | "2021-02-21T22:22:58" | 175,505,447 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import os
import json
from urllib import request
from defusedxml import ElementTree
def usage():
print("python %s CONFIG_FILE" % __file__)
print("Filters URL or FILE xml file with an RSS feed to remove entries that don't match the filters on config.json")
def input(source):
if source is None:
data = sys.stdin.read()
elif os.path.exists(source) and os.path.isfile(source):
with open(source) as f:
data = f.read()
else:
with request.urlopen(source, timeout=10) as f:
data = f.read().decode('utf-8')
return data
def filter_xml(data, config):
et = ElementTree.fromstring(data)
channel = et.find("channel")
for item in channel.findall("item"):
title = item.find("title").text
some_matches = False
for filters in config["filters"]:
some_matches = all(map(lambda w: w in title, tuple(filters["keys"])))
if some_matches:
#if filters.get("category", None):
# elem = '<qbCategory>%s</qbCategory>' % filters["category"]
# item.append((ElementTree.fromstring(elem)))
break
if not some_matches:
channel.remove(item)
print(ElementTree.tostring(et).decode('utf-8'))
if __name__ == "__main__":
if len(sys.argv) != 2:
usage()
exit(1)
config = {}
with open(sys.argv[1], "r") as config_file:
config = json.load(config_file)
source = config.get("source", None)
data = input(source)
filter_xml(data, config) | UTF-8 | Python | false | false | 1,589 | py | 6 | filter.py | 3 | 0.586532 | 0.582127 | 0 | 60 | 25.5 | 120 |
qianpeng-qp/pytest1 | 8,658,654,070,568 | 0ab5f3c7d1094641c2d3da60f09682f457dea345 | 1c4fc7a849df519615b3ae41b7578f1e9fb4fe0d | /src/json_1/number_write1.py | 7a9ca0096a501fa32c855f0b21b29510ee118b8a | [] | no_license | https://github.com/qianpeng-qp/pytest1 | 889b223542f22d6f2121bd12aa2ee770f9ef460d | 6eb3007547ddeabed07b46cecaaffde9d8e32e64 | refs/heads/master | "2021-01-06T10:44:16.612503" | "2020-03-03T13:39:34" | "2020-03-03T13:39:34" | 241,301,054 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
numbers = [2,3,4,5,11,13]
filename = 'number.json'
with open(filename, 'w') as f_obj:
    json.dump(numbers, f_obj) # json.dump stores this list of numbers
with open(filename) as f_obj:
    numbers = json.load(f_obj) # json.load reads the JSON back into memory for use
print(numbers)
# if a username is already stored, read it; otherwise prompt and save it
filename1 = 'username1.json'
# username = input("输入姓名:")
# with open(filename1, 'w') as f_obj1: # 存储到json文件
# json.dump(username, f_obj1)
# print("remember:"+username)
# with open(filename1) as f_obj2: # 存储到内存并读取
# username = json.load(f_obj2)
# print(" user_naem :" + username)
def greet_user():
"""问候用户并指出名字"""
try:
with open(filename1) as f_obj2: # 存储到内存并读取
username = json.load(f_obj2)
except FileNotFoundError:
username = input("输入姓名:")
with open(filename1, 'w') as f_obj1: # 存储到json文件
json.dump(username, f_obj1)
print("remember:"+username)
else:
print(" user_naem :" + username)
greet_user()
def get_stored_username():
"""如果存储了姓名就打出"""
try:
with open(filename1) as f_obj2: # 存储到内存并读取
username = json.load(f_obj2)
except FileNotFoundError:
return None
else:
return username
def get_new_username():
"""提示输入新用户名"""
username = input("输入姓名:")
with open(filename1, 'w') as f_obj1: # 存储到json文件
json.dump(username, f_obj1)
print("remember:" + username)
def greet_user():
"""问候用户并指出名字"""
username = get_stored_username()
if username:
print("welcome"+ username)
else:
username = get_new_username()
greet_user() | UTF-8 | Python | false | false | 1,855 | py | 78 | number_write1.py | 68 | 0.602859 | 0.585457 | 0 | 58 | 26.758621 | 63 |
johnzouzou/somePython | 10,316,511,469,689 | 3ffe691954dddacf90aa53d6d85f8aa6e44d7d9e | ae91771bf4d9f514765bf4312e0aaae4acef2748 | /dadjokes.py | d77af8f9965cf336f322bdb387816534802cc3fb | [] | no_license | https://github.com/johnzouzou/somePython | 403d62799ad48e8ef63c6d8fa6420cc4cc73c51c | 954cc6c0da4060be4d8a2e2c08edca6fb12f28b2 | refs/heads/master | "2020-04-26T01:02:50.217995" | "2019-02-28T22:01:34" | "2019-02-28T22:01:34" | 173,193,734 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
import time
while True:
url = "https://icanhazdadjoke.com/"
response = requests.get(url, headers={"Accept":"application/json"})
data = response.json()
print(data["joke"])
time.sleep(1)
    val = input('\n q to quit or enter for more jokes! \n')
if(val == 'q'):
break
for i in range(5):
time.sleep(1)
print(f"New joke in {5 - i}")
| UTF-8 | Python | false | false | 372 | py | 11 | dadjokes.py | 11 | 0.626344 | 0.615591 | 0 | 14 | 24.5 | 68 |
ahmadly/django_national | 18,004,502,915,156 | eda91450d0bff8728cc780dbcedba586ff27cb1e | fdd32b7ff3a945bf5dcc297f28517ad18ddd46f8 | /django_national/management/commands/process_data.py | b58c18428f455e1e217c9e0fa30c4d5d68688221 | [
"BSD-3-Clause"
] | permissive | https://github.com/ahmadly/django_national | eb36bf904d9ea1bbabc0f141ee77651949d7ffd2 | 4a979187d8ecb9aaecc79825520e8e40f97ebffc | refs/heads/master | "2020-03-28T06:45:06.612791" | "2018-09-15T20:24:34" | "2018-09-15T20:24:34" | 147,857,318 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.core.management import BaseCommand
import csv
from models import Region
class Command(BaseCommand):
def handle(self, *args, **options):
_data = set()
with open('/Users/ahmad/PycharmProjects/WorldCitiesDatabase/all.csv', encoding='utf-8') as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
if 'region' in row:
_data.add(row['region'].strip().lower())
for i in _data:
if i:
Region.objects.create(
type=Region.CONTINENT,
name=i,
)
| UTF-8 | Python | false | false | 630 | py | 9 | process_data.py | 7 | 0.539683 | 0.538095 | 0 | 20 | 30.5 | 108 |
Gabegit/gmtools | 2,224,793,079,780 | 11006791a2c9951481e04d6c16acb3b6e67afc9a | bdca54dfd0c008e461df5da4a66dd73dc0485981 | /inst/onetrans.py | ba8ee165313ecbe5feb3d2499d6fc70bd2dc37ad | [] | no_license | https://github.com/Gabegit/gmtools | d40c57b6d8be2170498637d5c7781f553ccc91be | b0ddfcae9d54b5130e4523e2965c74d82e7a12f5 | refs/heads/master | "2021-01-10T17:20:33.928549" | "2019-02-09T02:46:01" | "2019-02-09T02:46:01" | 55,433,310 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/anaconda3/bin/python
# -*- coding: utf-8 -*-
# python package "translation"
# python onetrans.py "China is cracking down on online moneylenders who target university students, following concerns about the largely unregulated industry." "bing" "zh-CHS"
import sys
from translation import bing # only for bing translator.
def onetrans(string,dest = "zh-CHS"):
if dest == "zh-CN":
dest = "zh-CHS"
#data = bing(string,dst=dest).encode('UTF8') #for python2.7
data = bing(string,dst=dest)
return(data)
# usage: python onetrans.py <string> <dest-lang>
def main():
"""
python onetrans.py '我们爱世界' 'en'
python onetrans.py 'hello world' 'zh-CN'
"""
input = sys.argv[1]
dest1 = sys.argv[2]
output = onetrans(input,dest=dest1)
print(output)
return(output)
if __name__ == '__main__':
#gtrans('China')
main()
| UTF-8 | Python | false | false | 855 | py | 45 | onetrans.py | 43 | 0.662722 | 0.652071 | 0 | 33 | 24.606061 | 175 |
lulu1008-beep/amsterdam-embedding-model | 16,724,602,657,572 | 1dab734d50639ab43ab9596af6661fb9c9c02bb4 | f7a3fb6ea332bffadcd455653af674a56102dd6f | /src/analysis/classifier_full.py | 19ae85c8e6d1dbffe3d7cad5df25b91b7e12cf1f | [] | no_license | https://github.com/lulu1008-beep/amsterdam-embedding-model | d95c2a9c0d1f38b636421ba644b79a9ca7c690a6 | f68fd9529dee047e9e2747d5bb97c1203e6cc68c | refs/heads/master | "2023-04-14T18:25:25.319004" | "2021-04-06T20:05:27" | "2021-04-06T20:05:27" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.metrics import classification_report
from sklearn import metrics
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import load_iris
from scipy.sparse import csr_matrix
from sklearn.linear_model import SGDClassifier
import logging
import json
import embeddingvectorizer
from sklearn.ensemble import ExtraTreesClassifier
import gensim
import os
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
#path_to_embeddings='/home/anne/tmpanne/AEM_small_sample/test'
class classifier_analyzer():
def __init__(self, path_to_data, path_to_embeddings, dataset):
self.nmodel = 0
df = pd.read_pickle(path_to_data + dataset)
logging.info("... loading the data...\n\nthis is length of the dataframe: {}".format(len(df)))
self.test_size = 0.2
self.data = df['text']
self.labels = df['topic']
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.data, self.labels, test_size=self.test_size, random_state=42)
self.basepath = path_to_embeddings
self.names = ["GaussianNB", "Passive Agressive", "SGDClassifier" , "SVM", "ET"]
self.parameters = [
{ 'clf__var_smoothing' : [1e-8, 1e-7, 1e-6, 1e-5, 1e-4] } ,
{'clf__loss': ('hinge', 'squared_hinge'),
'clf__C': (0.01, 0.5, 1.0) ,
'clf__fit_intercept': (True, False) ,
'clf__max_iter': (5 ,10 ,15) } ,
{'clf__max_iter': (20, 30) ,
'clf__alpha': (1e-2, 1e-3, 1e-5),
'clf__penalty': ('l2', 'elasticnet')} ,
{'clf__C': [1, 10, 100, 1000],
'clf__gamma': [0.001, 0.0001],
'clf__kernel': ['rbf', 'linear']},
{ "clf__max_features": ['auto', 'sqrt', 'log2'] }
]
self.classifiers = [GaussianNB(),
PassiveAggressiveClassifier(),
SGDClassifier(),
SVC(),
ExtraTreesClassifier() ]
def get_w2v_model(self):
'''yields a dict with one item. key is the filename, value the gensim model'''
filenames = [e for e in os.listdir(self.basepath) if not e.startswith('.')]
for fname in filenames:
model = {}
path = os.path.join(self.basepath, fname)
logging.info("\nLoading gensim model")
if fname.startswith('w2v'):
mod = gensim.models.Word2Vec.load(path)
else:
mod = gensim.models.KeyedVectors.load_word2vec_format(path)
model['gensimmodel'] = dict(zip(mod.wv.index2word, mod.wv.vectors))
model['filename'] = fname
self.nmodel +=1
logging.info("loaded gensim model nr {}, named: {}".format(self.nmodel, model['filename']))
yield model
def get_vectorizer(self, vectorizer, model):
logging.info("the vectorizer is: {}".format(vectorizer))
vec = {}
vec['filename'] = vectorizer
if vectorizer == 'w2v_count':
s = embeddingvectorizer.EmbeddingCountVectorizer(model['gensimmodel'], 'mean')
elif vectorizer == 'w2v_tfidf':
s = embeddingvectorizer.EmbeddingTfidfVectorizer(model['gensimmodel'], 'mean')
vec['vectorizer'] = s
yield vec
def gridsearch_with_classifiers(self):
class_report = []
results = []
for model in self.get_w2v_model():
for v in ["w2v_count", "w2v_tfidf"]:
for vec in self.get_vectorizer(v, model):
print("loaded the vectorizer: {}".format(vec['filename']))
for name, classifier, params in zip(self.names, self.classifiers, self.parameters):
my_dict = {}
logging.info("Starting gridsearch CV..")
logging.info("Classifier name: {}\n\n\n\n\nModel name:{}\n\n\n\n\nVectorizer: {}\n\n\n\n\nParameter settings: {}\n".format(name, model['filename'], vec['filename'], params))
clf_pipe = Pipeline([ ('vect', vec['vectorizer']), ('clf', classifier), ])
gs_clf = GridSearchCV(clf_pipe, param_grid=params, cv=2)
clf = gs_clf.fit(self.X_train, self.y_train)
score = clf.score(self.X_test, self.y_test)
logging.info("{} score: {}".format(name, score))
#logging.info("{} are the best estimators".format(clf.best_estimator_))
results_to_dict = classification_report((clf.best_estimator_.predict(self.X_test)), self.y_test, output_dict= True)
results_to_dict['classifier'] = name
results_to_dict['parameters'] = clf.best_params_
results_to_dict['vectorizer'] = vec['filename']
results_to_dict['model'] = model['filename']
logging.info("Created dictionary with classification report: \n\n{}".format(results_to_dict))
class_report.append(results_to_dict)
y_hats = clf.predict(self.X_test)
results.append({"predicted": y_hats,
"actual" : self.y_test.values ,
"classifier": name,
"vectorizer":vec['filename'],
"model": model['filename'] } )
return class_report, results
def gridsearch_with_classifiers_baseline(self):
class_report = []
results = []
for vec, n in zip([CountVectorizer(), TfidfVectorizer()], ["Count", "Tfidf"]):
print("loaded the vectorizer: {}\n\n\{}".format(n, vec))
for name, classifier, params in zip(self.names, self.classifiers, self.parameters):
my_dict = {}
logging.info("Starting gridsearch CV..")
logging.info("Classifier name: {}\n classifier:{}\n params{}\n".format(name, classifier, params))
#clf_pipe = Pipeline([ ('vect', vec), ('clf', classifier), ])
clf_pipe = make_pipeline(vec, FunctionTransformer(lambda x: x.todense(), accept_sparse=True), classifier)
gs_clf = GridSearchCV(clf_pipe, param_grid=params, cv=2)
clf = gs_clf.fit(self.X_train, self.y_train)
score = clf.score(self.X_test, self.y_test)
logging.info("{} score: {}".format(name, score))
logging.info("{} are the best estimators".format(clf.best_estimator_))
results_to_dict = classification_report((clf.best_estimator_.predict(self.X_test)), self.y_test, output_dict= True)
results_to_dict['classifier'] = name
results_to_dict['parameters'] = clf.best_params_
results_to_dict['vectorizer'] = n
results_to_dict['model'] = "baseline"
logging.info("Created dictionary with classification report: \n\n{}".format(results_to_dict))
class_report.append(results_to_dict)
y_hats = clf.predict(self.X_test)
results.append({"predicted": y_hats,
"actual" : self.y_test.values ,
"classifier": name ,
"vectorizer": n ,
"model": "baseline" } )
return class_report, results
def clean_df_true_pred(results):
data = pd.DataFrame.from_dict(results)
predicted = data.predicted.apply(pd.Series).merge(data, right_index = True, left_index = True) \
.drop(["predicted"], axis = 1).melt(id_vars = ['classifier'], value_name = "Predicted label")
actual = data.actual.apply(pd.Series).merge(data, right_index = True, left_index = True) \
.drop(["predicted"], axis = 1).melt(id_vars = ['classifier'], value_name = "Actual label")
df = pd.merge(predicted, actual, how = 'inner', left_index = True, right_index = True)
# df['Classifier'] = df['classifier_x']
# df = df[df.variable_x != 'actual']
# df = df[['Predicted label', 'Actual label', 'Classifier', 'model', 'vectorizer']]
return df
#"w2v_count", "w2v_tfidf", "count", "tfidf"
| UTF-8 | Python | false | false | 9,571 | py | 25 | classifier_full.py | 12 | 0.558771 | 0.550935 | 0 | 215 | 43.516279 | 198 |
svspirivm/data_science_it_academy | 17,033,840,320,721 | 221c3f9b9a77137b1501e68673dfcd07d8e08bfc | cb264273c597f65e1fa141801c320eef18bdf2e8 | /hw5_classification_with_API/app.py | cdfe5a2a139df880b08be68db152b19cc864da3e | [] | no_license | https://github.com/svspirivm/data_science_it_academy | 301a00f2b250523e1593a821eb875a74b14b64d0 | 083dfd3f5ee5d9878678ed742a04594a30588070 | refs/heads/main | "2023-04-06T07:37:47.776437" | "2021-04-03T10:15:47" | "2021-04-03T10:15:47" | 324,000,988 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask, request, jsonify
import json
import pickle
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
# creating a Flask application
app = Flask(__name__)
# Load the model
filename = 'scaler.pkl'
with open(filename,'rb') as f:
loaded_scaler = pickle.load(f)
filename = 'extra_trees_classifier.pkl'
with open(filename,'rb') as f:
loaded_model = pickle.load(f)
# creating target
target_class = np.array(['Insufficient_Weight', 'Normal_Weight', 'Obesity_Type_I',
'Obesity_Type_II', 'Obesity_Type_III', 'Overweight_Level_I',
'Overweight_Level_II'], dtype=object)
le = LabelEncoder()
target_asced = le.fit_transform(target_class)
# creating predict url and only allowing post requests.
@app.route('/predict', methods=['POST'])
def predict():
# Get data from Post request
data = request.get_json()
# converting a json request to the model format
df = pd.read_json(data, orient='split')
print('\n\n\n Request \n', df.head())
# Make prediction
df_norm = pd.DataFrame(data=loaded_scaler.transform(df),
columns=['Gender', 'Age', 'Height', 'Weight', 'family_history_with_overweight',
'FAVC', 'FCVC', 'NCP', 'CAEC', 'SMOKE', 'CH2O', 'SCC', 'FAF', 'TUE', 'CALC', 'MTRANS'])
df_norm.drop('SMOKE', axis = 1, inplace = True)
df_norm['BMI'] = df_norm['Weight'] / (df_norm['Height'] ** 2)
pred = le.inverse_transform(loaded_model.predict(df_norm))
print('\n\n\n Prediction ', pred, '\n\n')
# returning a prediction as json
responses = pd.Series(pred).to_json(orient='values')
return (responses)
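# Example client call (hypothetical values; the endpoint expects the POST
# body to be a JSON-encoded string produced by DataFrame.to_json(orient='split')):
#   import requests, pandas as pd
#   df = pd.DataFrame([row], columns=['Gender', 'Age', 'Height', 'Weight',
#       'family_history_with_overweight', 'FAVC', 'FCVC', 'NCP', 'CAEC',
#       'SMOKE', 'CH2O', 'SCC', 'FAF', 'TUE', 'CALC', 'MTRANS'])
#   r = requests.post('http://localhost:5000/predict',
#                     json=df.to_json(orient='split'))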
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True) | UTF-8 | Python | false | false | 1,716 | py | 12 | app.py | 1 | 0.657343 | 0.650932 | 0 | 50 | 33.34 | 94 |
empiredata/empire-python-client | 18,425,409,701,029 | 00b3b9db112416c7b73b532556de394ff92e8f15 | 6f112f770d460f4c5778f0ff59c5a21d58170c3c | /empire/tests.py | df671e040deebaa8438c58067c2c8bc4f21db743 | [
"Apache-2.0"
] | permissive | https://github.com/empiredata/empire-python-client | 83952b060515e2f4146764d5d419440c6e54a128 | 0c662e268839a5e676e94dbc9cc2e14be44dc345 | refs/heads/master | "2020-06-26T19:27:23.565201" | "2014-09-18T01:31:33" | "2014-09-18T01:31:33" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
"""
Empire command-line client tests
"""
import json
import unittest
from empire.client import EmpireException
import httmock
from empire import Empire
class FakeConnection(object):
def close(self):
pass
class EmpireTest(unittest.TestCase):
json_headers = {'content-type': 'application/json'}
def setUp(self):
self.empire = Empire(appkey="MOCK_APPKEY", enduser="MOCK_ENDUSER", api_server='api.empire.co')
self.services = {}
def mock_response_200(self, request, data):
headers = {}
if not isinstance(data, basestring):
data = json.dumps(data)
headers = self.json_headers
response = httmock.response(200, data, headers, None, 5, request)
setattr(response, 'connection', FakeConnection())
return response
def mock_response_500(self, request, message):
response = httmock.response(500, message, {}, None, 5, request)
setattr(response, 'connection', FakeConnection())
return response
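    # The @httmock.urlmatch-decorated methods below intercept any HTTP request whose
    # host/path matches, so these tests never contact a real Empire API server.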
@httmock.urlmatch(netloc='api.empire.co', path='/empire/session/create')
def session_create_mock(self, url, request):
return self.mock_response_200(request, {'status': 'OK', 'sessionkey': 'TESTSESSION'})
@httmock.urlmatch(netloc='api.empire.co', path='/empire/services/salesforce/connect')
def connect_mock(self, url, request):
data = json.loads(request.body)
self.services['salesforce'] = data
return self.mock_response_200(request, {'status': 'OK'})
def test_connect(self):
sf_data = {
"access_token": "MOCK_ACCESS_TOKEN",
"client_id": "MOCK_CLIENT",
"refresh_token": "MOCK_REFRESH_TOKEN",
"endpoint": "https://na15.salesforce.com"
}
with httmock.HTTMock(self.session_create_mock):
with httmock.HTTMock(self.connect_mock):
self.empire.connect("salesforce", sf_data)
self.assertEqual(self.services['salesforce'], sf_data)
@httmock.urlmatch(netloc='api.empire.co', path='/empire/services')
def describe_all_mock(self, url, request):
return self.mock_response_200(request, {'status': 'OK', "name": "salesforce"})
def test_describe_all(self):
with httmock.HTTMock(self.session_create_mock):
with httmock.HTTMock(self.describe_all_mock):
services_data = self.empire.describe()
self.assertEqual(services_data, {'status': 'OK', "name": "salesforce"})
@httmock.urlmatch(netloc='api.empire.co', path='/empire/services/salesforce')
def describe_one_mock(self, url, request):
return self.mock_response_200(request, {'status': 'OK', "name": "salesforce", "tables": ["table1"]})
def test_describe_one(self):
with httmock.HTTMock(self.session_create_mock):
with httmock.HTTMock(self.describe_one_mock):
service_data = self.empire.describe("salesforce")
self.assertEqual(service_data, {'status': 'OK', "name": "salesforce", "tables": ["table1"]})
@httmock.urlmatch(netloc='api.empire.co', path='/empire/services/salesforce/table1')
def describe_table_mock(self, url, request):
return self.mock_response_200(request, {'status': 'OK', "name": "table1"})
def test_describe_table(self):
with httmock.HTTMock(self.session_create_mock):
with httmock.HTTMock(self.describe_table_mock):
service_data = self.empire.describe("salesforce", "table1")
self.assertEqual(service_data, {'status': 'OK', "name": "table1"})
def test_describe_table_without_service(self):
self.assertRaises(ValueError, self.empire.describe, None, "table1")
@httmock.urlmatch(netloc='api.empire.co', path='/empire/services/salesforce/table1')
def describe_table_broken_mock(self, url, request):
return self.mock_response_500(request, "Something is broken")
def test_describe_broken(self):
with httmock.HTTMock(self.session_create_mock):
with httmock.HTTMock(self.describe_table_broken_mock):
self.assertRaises(EmpireException, self.empire.describe, "salesforce", "table1")
@httmock.urlmatch(netloc='api.empire.co', path='/empire/query')
def query_mock(self, url, request):
return self.mock_response_200(request, json.dumps({'col1': 'val1'}) + '\n' + json.dumps({'col2': 'val2'}))
def test_query(self):
with httmock.HTTMock(self.session_create_mock):
with httmock.HTTMock(self.query_mock):
query_result = list(self.empire.query('SELECT * FROM salesforce.account'))
self.assertEqual(query_result, [{'col1': 'val1'}, {'col2': 'val2'}])
@httmock.urlmatch(netloc='api.empire.co', path='/empire/view/viewName')
def view_create_mock(self, url, request):
if request.method == 'PUT':
if json.loads(request.body) == {'query': 'SELECT QUERY'}:
self.view_created = True
return self.mock_response_200(request, {'status': 'OK'})
return self.mock_response_500(request, "Something is broken")
def test_materialize_view(self):
self.view_created = False
self.assertEqual(self.view_created, False)
with httmock.HTTMock(self.session_create_mock):
with httmock.HTTMock(self.view_create_mock):
r = self.empire.materialize_view(name='viewName', sql='SELECT QUERY')
self.assertEqual(r, {'status': 'OK'})
self.assertEqual(self.view_created, True)
@httmock.urlmatch(netloc='api.empire.co', path='/empire/view/viewName')
def view_delete_mock(self, url, request):
if request.method == 'DELETE':
self.view_deleted = True
return self.mock_response_200(request, {'status': 'OK'})
return self.mock_response_500(request, "Something is broken")
def test_drop_view(self):
self.view_deleted = False
self.assertEqual(self.view_deleted, False)
with httmock.HTTMock(self.session_create_mock):
with httmock.HTTMock(self.view_delete_mock):
r = self.empire.drop_view(name='viewName')
self.assertEqual(r, {'status': 'OK'})
self.assertEqual(self.view_deleted, True)
| UTF-8 | Python | false | false | 6,287 | py | 9 | tests.py | 5 | 0.63846 | 0.627962 | 0 | 159 | 38.534591 | 114 |
radiasoft/pykern | 11,897,059,441,528 | 6489d8e251ab91a9a7fa81336c9f19a3bc27d862 | 77ae4c6e2cd813e0fefe3efe4946c2a298882e34 | /tests/mpi_data/p1.py | a6989ff034b6a294c630503f158f27c92043ede6 | [
"Apache-2.0"
] | permissive | https://github.com/radiasoft/pykern | 8a2886680a5f496efacda6df7d7c614bfe7c3844 | 20f63ada9149e287fdae1f2f6f93faa5cc03963d | refs/heads/master | "2023-07-25T10:58:09.937031" | "2023-07-16T20:25:06" | "2023-07-16T20:25:06" | 33,033,605 | 9 | 11 | Apache-2.0 | false | "2023-09-12T23:03:16" | "2015-03-28T13:03:16" | "2023-01-31T16:37:16" | "2023-09-12T23:03:15" | 986 | 6 | 7 | 51 | Python | false | false | # -*- coding: utf-8 -*-
u"""test pykern.mpi
:copyright: Copyright (c) 2017 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import pykern.mpi
def op():
from mpi4py import MPI
import time
import sys
x = sys.argv[1]
print(x)
    if 'normal' in x:
        return
    elif 'exit-1' == x:
        raise SystemExit(1)
    elif 'divide-zero' == x:
        i = 1 / 0  # deliberately raises ZeroDivisionError
    elif 'exit-13-rank-0' == x:
        if MPI.COMM_WORLD and MPI.COMM_WORLD.Get_rank() == 0:
            raise SystemExit(13)
        time.sleep(1)
        return
    elif 'divide-zero-rank-2' == x:
        if MPI.COMM_WORLD and MPI.COMM_WORLD.Get_rank() == 2:
            time.sleep(.2)
            i = 1 / 0  # only rank 2 raises; the other ranks sleep and return normally
        time.sleep(1)
    else:
        raise ValueError('{}: invalid argv'.format(sys.argv))
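# checked_call runs op() under pykern's MPI error handling, the intent being that an
# uncaught exception or exit on one rank brings down the whole job instead of leaving
# the other ranks hanging -- exactly the failure paths the argv cases above exercise.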
pykern.mpi.checked_call(op)
| UTF-8 | Python | false | false | 925 | py | 178 | p1.py | 132 | 0.570811 | 0.542703 | 0 | 38 | 23.342105 | 67 |
ropso/flask_SIP | 3,092,376,503,593 | 94f2fdb99a9b74642b0ac84bf8a4fa9836581a84 | 1971049601dca8c28c74a326fa8fb39b402669f0 | /models.py | 5c91b101d971eb03fdb3e0a3acb7cf1609ca0ee7 | [] | no_license | https://github.com/ropso/flask_SIP | be05ceb42efbbf61ec6758f228c630a041ac57f8 | cf8dc854ad479ebe376bb6ff951835b015c7eea8 | refs/heads/master | "2020-07-06T09:21:45.528770" | "2019-08-20T04:18:55" | "2019-08-20T04:18:55" | 202,968,968 | 0 | 0 | null | false | "2019-08-20T04:18:56" | "2019-08-18T06:37:30" | "2019-08-18T09:38:03" | "2019-08-20T04:18:55" | 8,574 | 0 | 0 | 0 | Python | false | false | from app import db
from sqlalchemy.dialects.postgresql import JSON
class Result(db.Model):
    __tablename__ = 'results'
    id = db.Column(db.Integer, primary_key=True)
    url = db.Column(db.String())
    result_all = db.Column(JSON)
    result_no_stop_words = db.Column(JSON)
    def __init__(self, url, result_all, result_no_stop_words):
        # this method would be called on every result creation
        self.url = url
        self.result_all = result_all
        self.result_no_stop_words = result_no_stop_words
def __repr__(self):
return '<id {}>'.format(self.id)
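# Usage sketch (assumption: a Flask-SQLAlchemy session is available as db.session):
#
#   result = Result(url='https://example.com',
#                   result_all={'word': 2}, result_no_stop_words={'word': 2})
#   db.session.add(result)
#   db.session.commit()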
| UTF-8 | Python | false | false | 569 | py | 7 | models.py | 5 | 0.664323 | 0.664323 | 0 | 18 | 30.222222 | 113 |
wangefan/ert | 6,390,911,351,480 | bb15d04e160deaa4db61134e7357d6deb1308343 | 793f7517a7f9620b16f8178baf0891899d8534e7 | /Configuration.py | 8c5505db4ae81b911b045d49033f7f7355ed2a57 | [] | no_license | https://github.com/wangefan/ert | c0bb5002c0a5596c04435b589a985387ab0fd633 | 474ae0e6957bf6034e2c88b25d689dd61a9e647f | refs/heads/main | "2023-07-23T01:24:38.148815" | "2021-09-07T04:26:39" | "2021-09-07T04:26:39" | 401,659,873 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Configuration:
def __init__(self,
num_landmarks,
train_data_times,
cascade_number,
ferm_number,
ferm_group_number,
ferm_depth,
num_candidate_ferm_node_infos,
feature_pool_size,
shrinkage_factor,
padding,
lamda):
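        # 'lamda' is deliberately misspelled here since 'lambda' is a reserved word in Python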
self._num_landmarks = num_landmarks
self._train_data_times = train_data_times
self._cascade_number = cascade_number
self._ferm_number = ferm_number
self._ferm_num_per_group = ferm_group_number
self._ferm_depth = ferm_depth
self._num_candidate_ferm_node_infos = num_candidate_ferm_node_infos
self._feature_pool_size = feature_pool_size
self._shrinkage_factor = shrinkage_factor
self._padding = padding
self._lamda = lamda | UTF-8 | Python | false | false | 849 | py | 11 | Configuration.py | 10 | 0.580683 | 0.580683 | 0 | 24 | 34.416667 | 71 |
hudongxing/SYAutoTest | 17,334,488,018,558 | 386889b69d776993dc7351cc4e867a14c55822ed | b002515e24b6a3a39ddd32ddb1b9640ce590bc39 | /mk_event/mk_share.py | 44bb53b9c0309de7cf07496bfc24eff762b66888 | [] | no_license | https://github.com/hudongxing/SYAutoTest | 73041b159c875fbe5a8aa371b5ed7c41920e20bb | 8e8dbb577e4c1ee9eb3f78a4cd090f46994ba16f | refs/heads/master | "2021-08-07T22:14:21.572992" | "2017-11-09T03:50:32" | "2017-11-09T03:50:32" | 110,060,617 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = "VioLet"
def share_event(driver, width):
x = width / 10
y = 25
    print('[ RUN ] sending Event : Share->( %s , %s )' % (x, y))
    tap_positions = [(x, y)]  # driver.tap() expects a list of (x, y) coordinate tuples
    driver.tap(tap_positions)
return True | UTF-8 | Python | false | false | 207 | py | 10 | mk_share.py | 10 | 0.507246 | 0.487923 | 0 | 11 | 17.909091 | 63 |
nova-labs/space_switch | 19,215,683,685,669 | 1e00fbb7097525ffb385d4276f6a6ae05fcf610e | e38d93e253b292e09ebdccf182a956362c6337f8 | /test/switch_test.py | e393d6982ebae046adc905a3c1872a9492c7b6f8 | [] | no_license | https://github.com/nova-labs/space_switch | 24668616c176ce0fa197a41421e7c0a4a6fb63bb | 708711596686b9a28027b60f0c795d718238a4b0 | refs/heads/develop | "2021-09-26T12:37:42.002570" | "2018-10-30T09:21:28" | "2018-10-30T09:21:28" | 77,878,657 | 1 | 0 | null | false | "2018-10-30T09:17:13" | "2017-01-03T02:51:55" | "2018-01-13T16:35:01" | "2018-10-30T09:17:13" | 23 | 1 | 0 | 1 | Python | false | null | #!/usr/bin/env python3
#
# Nova Labs space_switch
#import network, requests, json, time, math
import signal
import sys
import time
#from machine import Pin
import RPi.GPIO as GPIO
SWITCH_ONE_GPIO = 22
SWITCH_TWO_GPIO = 27
#GPIO.setmode(GPIO.BOARD)
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(SWITCH_ONE_GPIO, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(SWITCH_TWO_GPIO, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
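# with the internal pull-downs enabled, input() reads 0 until a switch pulls the pin
# high (this assumes each switch connects its GPIO pin to 3.3V when closed)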
print("GPIO pin %d", SWITCH_ONE_GPIO)
print("GPIO mode %d", GPIO.getmode())
def signal_handler(signum, frame):
    sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)  # exit cleanly on Ctrl-C
old_switch_one_state = GPIO.input(SWITCH_ONE_GPIO)
old_switch_two_state = GPIO.input(SWITCH_TWO_GPIO)
print("start switch state: %d", old_switch_state)
while True:
current_switch_one_state = GPIO.input(SWITCH_ONE_GPIO)
current_switch_two_state = GPIO.input(SWITCH_TWO_GPIO)
    if old_switch_one_state == current_switch_one_state and old_switch_two_state == current_switch_two_state:
time.sleep(.01)
continue
old_switch_one_state = current_switch_one_state
old_switch_two_state = current_switch_two_state
print("SWITCH: new values %d - %d" % (current_switch_one_state, current_switch_two_state))
| UTF-8 | Python | false | false | 1,200 | py | 9 | switch_test.py | 7 | 0.713333 | 0.706667 | 0 | 42 | 27.571429 | 107 |
andras-p/pygyver | 14,731,737,837,109 | f79bc4bb946848ef4b6603951eb9a68954757470 | 6c935e865cdba32f1aaa9c3e3cb99339197ae80e | /tests/test_dw.py | f924f1e83f35bba04c4799506f082ac2fad51089 | [
"MIT"
] | permissive | https://github.com/andras-p/pygyver | 8d3416cce6d7d45a48d21a5e0b385cc05b4d4842 | e410b08367f99face487dd9abd4f50d1ad31386a | refs/heads/master | "2023-03-14T11:17:13.559427" | "2021-03-04T11:18:30" | "2021-03-04T11:18:30" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """ DW Tests """
import os
import logging
import unittest
import pandas as pd
from unittest import mock
from pygyver.etl import dw
from google.cloud import bigquery
from google.cloud import exceptions
from pandas.testing import assert_frame_equal
from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool
from pygyver.etl.lib import bq_token_file_path
from pygyver.etl.lib import bq_default_project
from pygyver.etl.dw import BigQueryExecutorError
from pygyver.etl.storage import GCSExecutor
class get_table_attributes(unittest.TestCase):
""" Test """
def setUp(self):
self.db = dw.BigQueryExecutor()
self.db.create_dataset(
dataset_id='test_get_table_attributes'
)
self.db.initiate_table(
dataset_id='test_get_table_attributes',
table_id='partition_table',
schema_path='tests/schema/orig_table.json',
partition=True,
clustering=['fullname']
)
self.db.initiate_table(
dataset_id='test_get_table_attributes',
table_id='partition_table_with_partition_field',
schema_path='tests/schema/orig_table_with_date.json',
partition=True,
partition_field='birthday',
clustering=['fullname']
)
def test_get_table_attributes(self):
""" Test """
attributes = self.db.get_table_attributes(
dataset_id='test_get_table_attributes',
table_id='partition_table'
)
self.assertEqual(
str(attributes),
(
"{'clustering_fields': ['fullname'], 'description': None, 'encryption_configuration': None, "
"'expires': None, 'external_data_configuration': None, 'friendly_name': None, 'labels': {}, "
"'range_partitioning': None, 'require_partition_filter': None, "
"'schema': [SchemaField('fullname', 'STRING', 'NULLABLE', '', (), None), "
"SchemaField('age', 'INTEGER', 'NULLABLE', '', (), None)], 'time_partitioning': TimePartitioning(type=DAY)}"
)
)
attributes = self.db.get_table_attributes(
dataset_id='test_get_table_attributes',
table_id='partition_table_with_partition_field'
)
self.assertEqual(
str(attributes),
(
"{'clustering_fields': ['fullname'], 'description': None, 'encryption_configuration': None, "
"'expires': None, 'external_data_configuration': None, 'friendly_name': None, 'labels': {}, "
"'range_partitioning': None, 'require_partition_filter': None, "
"'schema': [SchemaField('fullname', 'STRING', 'NULLABLE', '', (), None), "
"SchemaField('age', 'INTEGER', 'NULLABLE', '', (), None), SchemaField('birthday', 'DATE', 'NULLABLE', '', (), None)], "
"'time_partitioning': TimePartitioning(field=birthday,type=DAY)}"
)
)
def tearDown(self):
self.db.delete_dataset(
dataset_id='test_get_table_attributes',
delete_contents=True
)
def get_existing_partition_query_mock(dataset_id, table_id):
d = {'partition_id': ["20200101", "20200102", "20200103"]}
return pd.DataFrame(data=d)
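# stand-in for BigQueryExecutor.get_existing_partition_query; patched in with
# mock.patch inside test_get_existing_partition_dates below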
class test_read_sql(unittest.TestCase):
""" Test """
def test_class_read_sql(self):
""" Test """
sql = dw.read_sql(
file="tests/sql/read_sql.sql",
param1="type",
param2="300",
param3="shipped_date",
param4='trying'
)
self.assertEqual(
sql,
'select type, shipped_date from `table1` where amount > 300',
"read_sql unit test"
)
sql = dw.read_sql(
file="tests/sql/read_sql.sql"
)
        self.assertEqual(
            sql,
            'select {param1}, {param3} from `table1` where amount > {param2}',
            "read_sql unit test no opt parameters"
        )
with self.assertRaises(KeyError):
dw.read_sql(
file="tests/sql/read_sql.sql",
partition_date='20200101'
)
def test_class_read_sql_extra_arg(self):
""" Test """
sql = dw.read_sql(
file="tests/sql/read_sql_extra_arg.sql",
param1="type",
param2="300"
)
def test_class_read_sql_with_dataset_prefix(self):
""" Test """
sql = dw.read_sql(
file="tests/sql/read_sql_dataset_prefix.sql",
param1="type",
param2="300",
param3="shipped_date",
param4='trying',
dataset_prefix='1001_'
)
self.assertEqual(
sql,
'select type, shipped_date from `1001_data.table1` and `1001_data.table2` and `1001_data.table3` where amount > 300',
"read_sql add suffix to dataset: ok"
)
class BigQueryExecutorDatasets(unittest.TestCase):
""" Test """
def setUp(self):
""" Test """
try:
self.create_my_dataset()
except exceptions.Conflict as exc:
logging.info(exc)
self.bq_client = dw.BigQueryExecutor()
def create_my_dataset(self):
""" Test """
client = bigquery.Client()
dataset_id = "{}.my_dataset".format(os.environ['BIGQUERY_PROJECT'])
dataset = bigquery.Dataset(dataset_id)
dataset.location = "US"
client.create_dataset(dataset)
def tearDown(self):
""" Test """
client = bigquery.Client()
dataset_id = "{}.my_dataset".format(os.environ['BIGQUERY_PROJECT'])
dataset = bigquery.Dataset(dataset_id)
client.delete_dataset(dataset)
def test_dataset_exists(self):
""" Test """
self.assertTrue(self.bq_client.dataset_exists("my_dataset"),
"Dataset does not exists")
def test_dataset_does_not_exists(self):
""" Test """
self.assertFalse(
self.bq_client.dataset_exists("my_dataset_which_does_not_exists"),
"Dataset exists"
)
class BigQueryExecutorTables(unittest.TestCase):
""" Test """
def setUp(self):
""" Test """
try:
self.create_my_table()
except exceptions.Conflict as exc:
logging.info(exc)
self.bq_client = dw.BigQueryExecutor()
def create_my_table(self):
""" Test """
client = bigquery.Client()
dataset_id = "{}.test_bq_executor_table".format(os.environ['BIGQUERY_PROJECT'])
dataset = bigquery.Dataset(dataset_id)
dataset.location = "US"
client.create_dataset(dataset)
table_ref = dataset.table('my_table')
table = bigquery.Table(table_ref)
table = client.create_table(table)
def test_table_exists(self):
""" Test """
self.assertTrue(
self.bq_client.table_exists(
dataset_id="test_bq_executor_table",
table_id="my_table"
),
"Table does not exists"
)
def test_table_does_not_exists(self):
""" Test """
self.assertFalse(
self.bq_client.table_exists(
dataset_id="test_bq_executor_table_2",
table_id="my_table_2"
),
"Table exists"
)
def test_initiate_table(self):
""" Test """
self.assertFalse(
self.bq_client.table_exists(
dataset_id='test_bq_executor_table',
table_id='test'
)
)
self.bq_client.initiate_table(
dataset_id='test_bq_executor_table',
table_id='test',
schema_path='tests/schema/initiate_table.json'
)
self.assertTrue(
self.bq_client.table_exists(
dataset_id='test_bq_executor_table',
table_id='test'
)
)
def tearDown(self):
""" Test """
self.bq_client.delete_table(
dataset_id='test_bq_executor_table',
table_id='test'
)
class BigQueryExecutorDatasetCreation(unittest.TestCase):
""" Test """
def setUp(self):
""" Test """
self.bq_client = dw.BigQueryExecutor()
def tearDown(self):
""" Test """
client = bigquery.Client()
dataset_id = "{}.test_bq_create_dataset".format(os.environ['BIGQUERY_PROJECT'])
dataset = bigquery.Dataset(dataset_id)
client.delete_dataset(dataset)
def test_create_dataset(self):
self.assertFalse(
self.bq_client.dataset_exists('test_bq_create_dataset')
)
self.bq_client.create_dataset('test_bq_create_dataset')
self.assertTrue(
self.bq_client.dataset_exists('test_bq_create_dataset')
)
class BigQueryExecutorDatasetDeletion(unittest.TestCase):
"""
Testing different scenarios
"""
def setUp(self):
""" Test """
self.bq_client = dw.BigQueryExecutor()
self.bq_client.create_dataset('test_bq_delete_dataset')
def test_delete_empty_dataset(self):
self.bq_client.delete_dataset('test_bq_delete_dataset')
self.assertFalse(
self.bq_client.dataset_exists('test_bq_delete_dataset'),
"Dataset was not deleted"
)
def test_delete_non_empty_dataset(self):
self.bq_client.initiate_table(
dataset_id='test_bq_delete_dataset',
table_id='test',
schema_path='tests/schema/initiate_table.json'
)
with self.assertRaises(exceptions.BadRequest):
self.bq_client.delete_dataset('test_bq_delete_dataset')
self.bq_client.delete_dataset(
dataset_id='test_bq_delete_dataset',
delete_contents=True
)
self.assertFalse(
self.bq_client.dataset_exists('test_bq_delete_dataset'),
"Dataset was not deleted"
)
class BigQueryExecutorExecutes(unittest.TestCase):
""" Test """
def setUp(self):
""" Test """
self.bq_client = dw.BigQueryExecutor()
def test_execute_sql(self):
""" Test """
result = self.bq_client.execute_sql(
"SELECT 'test' AS value"
)
self.assertEqual(
result['value'][0],
'test'
)
class BigQueryExecutorExecutesPatch(unittest.TestCase):
"""
Testing different scenarios
"""
def setUp(self):
""" Test """
try:
self.create_my_dataset()
except exceptions.Conflict as exc:
logging.info(exc)
self.db = dw.BigQueryExecutor()
self.db.initiate_table(
dataset_id='test',
table_id='table_to_patch',
schema_path='tests/schema/orig_table.json'
)
def create_my_dataset(self):
""" Test """
client = bigquery.Client()
dataset_id = "{}.test".format(os.environ['BIGQUERY_PROJECT'])
dataset = bigquery.Dataset(dataset_id)
dataset.location = "US"
client.create_dataset(dataset)
def test_identity_new_fields_no_field(self):
""" Tests that using same schema, no new field identified"""
lf = self.db.identify_new_fields(
dataset_id='test',
table_id='table_to_patch',
schema_path='tests/schema/orig_table.json'
)
self.assertEqual(lf, [])
def test_identity_new_fields_2_fields(self):
""" Tests that using another schema, 2 new fields are identified"""
lf = self.db.identify_new_fields(
dataset_id='test',
table_id='table_to_patch',
schema_path='tests/schema/table_plus_2.json'
)
self.assertEqual(len(lf), 2)
def test_apply_patch_error(self):
""" Tests that if trying to update type, apply_patch fails"""
with self.assertRaises(exceptions.BadRequest):
self.db.apply_patch(
dataset_id='test',
table_id='table_to_patch',
schema_path='tests/schema/orig_table_error.json'
)
def test_apply_patch(self):
""" Tests that apply_patch can add both descriptions and new fields"""
old_schema = self.db.get_table_schema(
dataset_id='test',
table_id='table_to_patch'
)
self.assertEqual(len(old_schema), 2)
self.assertEqual(old_schema[0].description, "")
new_schema_length = self.db.apply_patch(
schema_path='tests/schema/table_plus_2.json',
dataset_id='test',
table_id='table_to_patch'
)
new_schema = self.db.get_table_schema(
dataset_id='test',
table_id='table_to_patch'
)
self.assertEqual(new_schema_length, 4)
self.assertEqual(new_schema[0].description, "My description")
def tearDown(self):
""" Test """
self.db.delete_table(
dataset_id='test',
table_id='table_to_patch'
)
class BigQueryExecutorTableCreationDescription(unittest.TestCase):
"""
Testing different scenarios
"""
def setUp(self):
""" Test """
self.db = dw.BigQueryExecutor()
self.client = bigquery.Client()
self.project = self.client.project
def test_create_table_with_description(self):
self.db.create_table(
dataset_id='test',
table_id='my_table_with_description',
schema_path='tests/schema/orig_table.json',
sql="SELECT 'Beth Harmon' AS fullname, 26 AS age",
description="test_dw table description bar foo"
)
table_ref = self.db.get_table_ref(dataset_id='test', table_id='my_table_with_description', project_id=bq_default_project())
table = self.client.get_table(table_ref) # API request
self.assertTrue(
self.db.table_exists(
dataset_id='test',
table_id='my_table_with_description'
),
"Table was not created"
)
self.assertTrue(
table.description == "test_dw table description bar foo",
"Description is not the same on either side"
)
def tearDown(self):
if self.db.table_exists(dataset_id='test', table_id='my_table_with_description'):
self.db.delete_table(dataset_id='test', table_id='my_table_with_description')
class BigQueryExecutorTableCreation(unittest.TestCase):
"""
Testing different scenarios
"""
def setUp(self):
""" Test """
try:
self.create_my_dataset()
except exceptions.Conflict as exc:
logging.info(exc)
self.db = dw.BigQueryExecutor()
self.client = bigquery.Client()
if self.db.table_exists('test', 'a_table_that_does_not_exists'):
self.db.delete_table('test', 'a_table_that_does_not_exists')
self.db.initiate_table(
dataset_id='test',
table_id='test_table_creation',
schema_path='tests/schema/initiate_table.json'
)
self.db.initiate_table(
dataset_id='test',
table_id='my_partition_table',
schema_path='tests/schema/orig_table.json',
partition=True,
clustering=None
)
self.env = mock.patch.dict('os.environ', {'BIGQUERY_START_DATE': '2020-01-01', 'BIGQUERY_END_DATE': '2020-01-05'})
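        # create_partition_table reads BIGQUERY_START_DATE / BIGQUERY_END_DATE from the
        # environment, so this patch pins the partition range to 2020-01-01..2020-01-05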
def create_my_dataset(self):
""" Test """
client = bigquery.Client()
dataset_id = "{}.test".format(os.environ['BIGQUERY_PROJECT'])
dataset = bigquery.Dataset(dataset_id)
dataset.location = "US"
client.create_dataset(dataset)
def test_set_partition_name(self):
""" Tests that using same schema, no new field identified"""
table_id = 'my_table'
date = '20200101'
partition_name = self.db.set_partition_name(table_id, date)
self.assertEqual(
partition_name,
"my_table$20200101"
)
def test_set_partition_name_invalid_date(self):
""" Tests that using same schema, no new field identified"""
table_id = 'my_table'
date = '2020-01-01'
with self.assertRaises(ValueError):
partition_name = self.db.set_partition_name(table_id, date)
def test_get_existing_partition_dates(self):
with mock.patch('pygyver.etl.dw.BigQueryExecutor.get_existing_partition_query') as mock_get_existing_partition_query:
mock_get_existing_partition_query.side_effect = get_existing_partition_query_mock
existing_partition_dates = self.db.get_existing_partition_dates(
dataset_id='test',
table_id='test_table_creation'
)
self.assertEqual(
existing_partition_dates,
['20200101', '20200102', '20200103'],
"Wrong existing partition dates"
)
def test_get_partition_dates(self):
partition_dates = self.db.get_partition_dates(
start_date="2020-01-01",
end_date="2020-01-05",
existing_dates=['20200101', '20200102', '20200103']
)
self.assertEqual(
partition_dates,
['20200104', '20200105'],
"Wrong partition dates"
)
def test_create_partition_table(self):
with self.env:
self.db.create_partition_table(
dataset_id='test',
table_id="my_partition_table",
description="descriptive text for partition table creation",
sql="SELECT 'Angus MacGyver' AS fullname, 2 AS age"
)
number_of_partitions = self.db.execute_sql(
"SELECT FORMAT_DATE('%Y%m%d', DATE(_PARTITIONTIME)) as partition_id FROM test.my_partition_table GROUP BY 1"
)
table_ref = self.db.get_table_ref(dataset_id='test', table_id='my_partition_table',
project_id=bq_default_project())
table = self.client.get_table(table_ref) # API request
self.assertTrue(
table.description == "descriptive text for partition table creation",
"The description is not the same"
)
self.db.delete_table(
dataset_id='test',
table_id="my_partition_table"
)
self.assertEqual(
number_of_partitions.shape[0],
5,
"Wrong number of partitions created"
)
def test_create_table_raises_errors(self):
with self.assertRaises(BigQueryExecutorError):
self.db.create_table(
dataset_id='test',
table_id='my_normal_table',
schema_path='tests/schema/initiate_table.json'
)
def test_create_table(self):
self.db.create_table(
dataset_id='test',
table_id='my_normal_table',
schema_path='tests/schema/orig_table.json',
sql="SELECT 'Angus MacGyver' AS fullname, 2 AS age"
)
self.assertTrue(
self.db.table_exists(
dataset_id='test',
table_id='my_normal_table'
),
"Table was not created"
)
self.db.delete_table(
dataset_id='test',
table_id='my_normal_table'
)
def test_create_table_with_param(self):
self.db.create_table(
dataset_id='test',
table_id='my_param_table',
schema_path='tests/schema/orig_table.json',
file="tests/sql/sql_with_parameters.sql",
who="'Angus MacGyver'",
)
self.assertTrue(
self.db.table_exists(
dataset_id='test',
table_id='my_param_table'
),
"Table was not created"
)
df1 = self.db.execute_sql("select * from test.my_param_table")
df2 = self.db.execute_sql("SELECT 'Angus MacGyver' AS fullname, 2 AS age")
self.assertTrue(
df1.equals(df2),
"parameter for create table: ok"
)
def tearDown(self):
self.db.delete_table(
dataset_id='test',
table_id='my_param_table'
)
self.db.delete_table(
dataset_id='test',
table_id='my_normal_table'
)
class BigQueryLoadDataframe(unittest.TestCase):
""" Test """
def setUp(self):
self.db = dw.BigQueryExecutor()
self.db.initiate_table(
table_id='load_dataframe',
dataset_id='test',
schema_path='tests/schema/test_load_dataframe.json'
)
def test_load_dataframe_on_existing_table(self):
""" Test """
data = pd.DataFrame(data={'my_date_string': ["20200101", "20200102", "20200103"]})
self.db.load_dataframe(
df=data,
table_id='load_dataframe',
dataset_id='test'
)
result = self.db.execute_sql(
"SELECT * FROM test.load_dataframe"
)
assert_frame_equal(
result,
data
)
def test_load_dataframe_on_non_existing_table_with_schema(self):
""" Test """
data = pd.DataFrame(data={'my_date_string': ["20200101", "20200102", "20200103"]})
self.db.load_dataframe(
df=data,
table_id='load_dataframe_non_existing_schema',
dataset_id='test',
schema_path='tests/schema/test_load_dataframe.json'
)
result = self.db.execute_sql(
"SELECT * FROM test.load_dataframe_non_existing_schema"
)
assert_frame_equal(
result,
data
)
def tearDown(self):
self.db.delete_table(
table_id='load_dataframe',
dataset_id='test'
)
self.db.delete_table(
table_id='load_dataframe_non_existing_schema',
dataset_id='test'
)
class BigQueryLoadJSONfile(unittest.TestCase):
""" Test """
def setUp(self):
self.db = dw.BigQueryExecutor()
self.db.initiate_table(
table_id='load_json_file',
dataset_id='test',
schema_path='tests/schema/test_load_json.json'
)
def test_load_json_file_on_existing_table(self):
""" Test """
self.db.load_json_file(
file='tests/json/test_json_file.json',
table_id='load_json_file',
dataset_id='test'
)
result = self.db.execute_sql(
"SELECT * FROM test.load_json_file"
)
data = pd.read_json('tests/json/test_json_file.json', lines=True)
assert_frame_equal(
result,
data
)
def test_load_json_file_on_non_existing_table(self):
""" Test """
self.db.load_json_file(
file='tests/json/test_json_file.json',
table_id='load_json_file_non_existing_table',
dataset_id='test',
schema_path='tests/schema/test_load_json.json'
)
result = self.db.execute_sql(
"SELECT * FROM test.load_json_file_non_existing_table"
)
data = pd.read_json('tests/json/test_json_file.json', lines=True)
assert_frame_equal(
result,
data
)
def test_load_json_file_on_non_existing_table_without_schema(self):
""" Test """
with self.assertRaises(Exception):
self.db.load_json_file(
file='tests/json/test_json_file.json',
table_id='load_json_non_existing_schema',
dataset_id='test'
)
def tearDown(self):
self.db.delete_table(
table_id='load_json_file',
dataset_id='test'
)
self.db.delete_table(
table_id='load_json_file_non_existing_table',
dataset_id='test'
)
# class BigQueryInsertJSONRows(unittest.TestCase):
# """ Test """
# def setUp(self):
# self.db = dw.BigQueryExecutor()
# self.db.initiate_table(
# table_id='insert_json_rows',
# dataset_id='test',
# schema_path='tests/schema/test_insert_json_rows.json'
# )
# self.data = [
# {"name": "John", "age": 30, "car": ''},
# {"name": "James", "age": 35, "car": 'Toyota'}
# ]
# def test_insert_json_rows(self):
# """ Test """
# self.assertEqual(
# self.db.execute_sql(
# "select count(*) AS row_count FROM test.insert_json_rows"
# )['row_count'][0],
# 0
# )
# self.db.insert_rows_json(
# dataset_id='test',
# table_id='insert_json_rows',
# rows=self.data
# )
# self.assertEqual(
# self.db.execute_sql(
# "select count(*) AS row_count FROM test.insert_json_rows"
# )['row_count'][0],
# 2
# )
# def tearDown(self):
# self.db.delete_table(
# dataset_id='test',
# table_id='insert_json_rows'
# )
class BigQueryLoadJSONData(unittest.TestCase):
""" Test """
def setUp(self):
self.db = dw.BigQueryExecutor()
self.db.initiate_table(
table_id='load_json_flat',
dataset_id='test',
schema_path='tests/schema/test_load_json_flat.json'
)
self.db.initiate_table(
table_id='load_json_nested',
dataset_id='test',
schema_path='tests/schema/test_load_json_nested.json'
)
self.data_flat = [{"name": "John", "age": 30, "car": ''},
{"name": "James", "age": 35, "car": 'Toyota'}]
self.data_nested = [
{
"name": "John",
"age": 30,
"cars": [{
"car": 'Toyota',
"year": 2003},
{
"car": "BMW",
"year": 2010}]
},
{
"name": "Jane",
"age": 35,
"cars": [{
"car": 'Fiat',
"year": 2012},
{
"car": "Kia",
"year": 2015}]
}
]
def test_load_json_data_on_existing_flat_table(self):
""" Test """
old_schema = self.db.get_table_schema(
table_id='load_json_flat',
dataset_id='test'
)
self.db.load_json_data(
json=self.data_flat,
table_id='load_json_flat',
dataset_id='test'
)
new_schema = self.db.get_table_schema(
table_id='load_json_flat',
dataset_id='test'
)
result = self.db.execute_sql(
"SELECT * FROM test.load_json_flat"
)
data = pd.DataFrame(self.data_flat)
assert_frame_equal(
result,
data,
check_like=True
)
self.assertNotEqual(
old_schema,
new_schema
)
def test_load_json_data_on_existing_flat_table_with_schema(self):
""" Test """
old_schema = self.db.get_table_schema(
table_id='load_json_flat',
dataset_id='test'
)
self.db.load_json_data(
json=self.data_flat,
table_id='load_json_flat',
dataset_id='test',
schema_path='tests/schema/test_load_json_flat.json'
)
new_schema = self.db.get_table_schema(
table_id='load_json_flat',
dataset_id='test'
)
self.assertEqual(
old_schema,
new_schema
)
def test_load_json_on_existing_nested_table(self):
""" Test """
self.db.load_json_data(
json= self.data_nested,
table_id='load_json_nested',
dataset_id='test'
)
result = self.db.execute_sql(
"SELECT * FROM test.load_json_nested"
)
data = pd.DataFrame(self.data_nested)
result = result[result.columns.sort_values().values]
data = data[data.columns.sort_values().values]
assert_frame_equal(
result,
data
)
def test_load_json_on_non_existing_flat_table(self):
""" Test """
self.db.load_json_data(
json=self.data_flat,
table_id='load_json_non_existing_flat_table',
dataset_id='test',
schema_path='tests/schema/test_load_json_flat.json'
)
result = self.db.execute_sql(
"SELECT * FROM test.load_json_non_existing_flat_table"
)
data = pd.DataFrame(self.data_flat)
assert_frame_equal(
result,
data,
check_like=True
)
def test_load_json_on_non_existing_nested_table(self):
""" Test """
self.db.load_json_data(
json=self.data_nested,
table_id='load_json_non_existing_nested_table',
dataset_id='test',
schema_path='tests/schema/test_load_json_nested.json'
)
result = self.db.execute_sql(
"SELECT * FROM test.load_json_non_existing_nested_table"
)
data = pd.DataFrame(self.data_nested)
result = result[result.columns.sort_values().values]
data = data[data.columns.sort_values().values]
assert_frame_equal(
result,
data
)
def test_load_json_on_non_existing_table_without_schema(self):
""" Test """
with self.assertRaises(Exception):
self.db.load_json_data(
json=self.data_flat,
table_id='load_json_non_existing_schema',
dataset_id='test'
)
def tearDown(self):
self.db.delete_table(
table_id='load_json_flat',
dataset_id='test'
)
self.db.delete_table(
table_id='load_json_nested',
dataset_id='test'
)
self.db.delete_table(
table_id='load_json_non_existing_flat_table',
dataset_id='test'
)
self.db.delete_table(
table_id='load_json_non_existing_nested_table',
dataset_id='test'
)
class BigQueryLoadGCS(unittest.TestCase):
""" Test """
def setUp(self):
self.gcs = GCSExecutor()
self.db = dw.BigQueryExecutor()
self.data = pd.DataFrame(
data={
"first_name": ["Boris", "Emmanuel", "Angela"],
"age": [55, 42, 65]
}
)
self.gcs.df_to_gcs(
df=self.data,
gcs_path='test-bigquery-load/test.csv'
)
def test_load_gcs_autodetect(self):
""" Test """
self.db.load_gcs(
gcs_path='test-bigquery-load/test.csv',
table_id='load_gcs',
dataset_id='test'
)
result = self.db.execute_sql(
"SELECT * FROM test.load_gcs"
)
assert_frame_equal(
result,
self.data
)
def test_load_gcs_schema(self):
""" Test """
self.db.load_gcs(
gcs_path='test-bigquery-load/test.csv',
table_id='load_gcs',
dataset_id='test',
schema_path='tests/schema/test_load_gcs.json'
)
result = self.db.execute_sql(
"SELECT * FROM test.load_gcs"
)
assert_frame_equal(
result,
self.data
)
def tearDown(self):
self.db.delete_table(
table_id='load_gcs',
dataset_id='test'
)
class BigQueryExportGCS(unittest.TestCase):
""" Test """
def setUp(self):
self.gcs = GCSExecutor()
self.db = dw.BigQueryExecutor()
self.data = pd.DataFrame(
data={
"first_name": ["Boris", "Emmanuel", "Angela"],
"age": [55, 42, 65]
}
)
self.db.load_dataframe(
df=self.data,
dataset_id='test',
table_id='export_gcs'
)
def test_extract_table_to_gcs(self):
""" Test """
self.db.extract_table_to_gcs(
dataset_id='test',
table_id='export_gcs',
gcs_path='test-bigquery-export/test.csv'
)
result = self.gcs.csv_to_df(
gcs_path='test-bigquery-export/test.csv'
)
assert_frame_equal(
result,
self.data
)
def tearDown(self):
self.db.delete_table(
dataset_id='test',
table_id='export_gcs'
)
class BigQueryLoadFromDB(unittest.TestCase):
""" Test """
def setUp(self):
self.bq_client = dw.BigQueryExecutor()
self.test_df = pd.DataFrame(
data={
"fullname": ["Angus MacGyver", "Jack Dalton"],
"age": [2, 4],
"date_of_birth": [pd.Timestamp('2018-01-01'), pd.Timestamp('2016-01-01')],
"iq": [176.5, 124.0]
}
)
def setup_test_db(self, url):
self.engine = create_engine(url, poolclass=NullPool)
self.test_df.to_sql(
"table_1",
con=self.engine,
if_exists="replace",
index=False
)
def tearDown(self):
self.engine.dispose()
self.bq_client.delete_table(
dataset_id='test',
table_id='load_from_db'
)
def test_load_from_db_postgres(self):
url = "postgresql+pg8000://user:password@postgres-test:5432/testing"
self.setup_test_db(url)
self.bq_client.load_from_db(
dataset_id="test",
table_id="load_from_db",
source_url=url,
sql="SELECT * FROM table_1"
)
result_df = self.bq_client.execute_sql(
"SELECT * FROM `test.load_from_db` ORDER BY fullname"
)
assert_frame_equal(
result_df,
self.test_df
)
def test_load_from_db_mysql(self):
url = "mysql+pymysql://user:password@mysql-test:3306/testing"
self.setup_test_db(url)
self.bq_client.load_from_db(
dataset_id="test",
table_id="load_from_db",
source_url=url,
sql="SELECT * FROM table_1"
)
result_df = self.bq_client.execute_sql(
"SELECT * FROM `test.load_from_db` ORDER BY fullname"
)
assert_frame_equal(
result_df,
self.test_df
)
class BigQueryExecutorTableTruncate(unittest.TestCase):
"""
Test
"""
def setUp(self):
""" Test """
self.bq_client = dw.BigQueryExecutor()
self.bq_client.initiate_table(
dataset_id='test',
table_id='bq_truncate_table',
schema_path='tests/schema/orig_table.json',
partition=True,
clustering=None
)
self.env = mock.patch.dict('os.environ', {'BIGQUERY_START_DATE': '2020-01-01', 'BIGQUERY_END_DATE': '2020-01-05'})
with self.env:
self.bq_client.create_partition_table(
dataset_id='test',
table_id='bq_truncate_table',
sql="SELECT 'Angus MacGyver' AS fullname, 2 AS age"
)
def tearDown(self):
""" Test """
self.bq_client.delete_table(
dataset_id='test',
table_id='bq_truncate_table'
)
def test_truncate_table(self):
""" Test """
self.bq_client.truncate_table(
dataset_id='test',
table_id='bq_truncate_table'
)
truncated_table = bigquery.Client().get_table(
self.bq_client.get_table_ref(
dataset_id='test',
table_id='bq_truncate_table'
)
)
self.assertEqual(
truncated_table.time_partitioning,
bigquery.table.TimePartitioning(type_='DAY')
)
self.assertEqual(
len(truncated_table.schema),
2
)
self.assertEqual(
truncated_table.num_rows,
0
)
class BigQueryExecutorCreateTableStructure(unittest.TestCase):
"""
Testing different scenarios
"""
def setUp(self):
""" Test """
self.bq_client = dw.BigQueryExecutor()
if not self.bq_client.dataset_exists('test'):
self.bq_client.create_dataset('test')
self.bq_client.create_table(
dataset_id='test',
table_id='bq_copy_table_source',
schema_path='tests/schema/orig_table.json',
sql="SELECT 'Angus MacGyver' AS fullname, 2 AS age"
)
self.bq_client.create_table(
dataset_id='test',
table_id='bq_copy_table_target',
schema_path='tests/schema/orig_table.json',
sql="SELECT 'Angus MacGyver' AS fullname, 2 AS age"
)
def test_copy_table_succeeded(self):
self.bq_client.copy_table_structure(
source_project_id=os.environ['BIGQUERY_PROJECT'],
source_dataset_id='test',
source_table_id='bq_copy_table_source',
dest_dataset_id='test',
dest_table_id='bq_copy_table_source3'
)
self.assertTrue(
self.bq_client.table_exists(
dataset_id='test',
table_id='bq_copy_table_source3'),
"table structure properly copied"
)
def test_copy_table_does_not_error_and_delete_dest_table(self):
try:
self.bq_client.copy_table_structure(
source_dataset_id='test',
source_table_id='does_not_exist',
dest_dataset_id='test',
dest_table_id='bq_copy_table_target'
)
except:
self.fail("it should not fail!")
self.assertTrue(
not self.bq_client.table_exists(
dataset_id='test',
table_id='bq_copy_table_target'),
"local table deleted even though not in source project"
)
def tearDown(self):
""" Test """
self.bq_client.delete_table(
dataset_id='test',
table_id='bq_copy_table_source'
)
        self.bq_client.delete_table(
            dataset_id='test',
            table_id='bq_copy_table_source3'
        )
        self.bq_client.delete_table(
            dataset_id='test',
            table_id='bq_copy_table_target'
        )
class BigQueryExecutorTableCopy(unittest.TestCase):
"""
Testing different scenarios
"""
def setUp(self):
""" Test """
self.bq_client = dw.BigQueryExecutor()
self.bq_client.create_table(
dataset_id='test',
table_id='bq_copy_table_source',
schema_path='tests/schema/orig_table.json',
sql="SELECT 'Angus MacGyver' AS fullname, 2 AS age"
)
self.bq_client.create_table(
dataset_id='test',
table_id='bq_copy_table_source_2',
schema_path='tests/schema/table_plus_2.json',
sql="SELECT 'Jack Dalton' AS fullname, 4 AS age, 'ABC' AS postcode"
)
def tearDown(self):
""" Test """
self.bq_client.delete_table(
dataset_id='test',
table_id='bq_copy_table_source'
)
self.bq_client.delete_table(
dataset_id='test',
table_id='bq_copy_table_source_2'
)
self.bq_client.delete_dataset(
'test_bq_copy_table',
delete_contents=True
)
def test_copy_table(self):
""" Test """
self.assertFalse(
self.bq_client.dataset_exists('test_bq_copy_table')
)
self.bq_client.copy_table(
source_project_id=os.environ['BIGQUERY_PROJECT'],
source_dataset_id='test',
source_table_id='bq_copy_table_source_2',
dest_dataset_id='test_bq_copy_table',
dest_table_id='dest'
)
self.assertTrue(
self.bq_client.table_exists(
dataset_id='test_bq_copy_table',
table_id='dest'
)
)
self.bq_client.copy_table(
source_project_id=os.environ['BIGQUERY_PROJECT'],
source_dataset_id='test',
source_table_id='bq_copy_table_source',
dest_dataset_id='test_bq_copy_table',
dest_table_id='dest'
)
data = self.bq_client.execute_sql("SELECT fullname FROM `test_bq_copy_table.dest`")
self.assertEqual(data['fullname'][0], "Angus MacGyver")
class BigQueryExecutorListFunctions(unittest.TestCase):
""" Test """
def setUp(self):
""" Test """
self.bq_client = dw.BigQueryExecutor()
def tearDown(self):
""" Test """
self.bq_client.delete_dataset(
'test_bq_list_functions',
delete_contents=True
)
def test_list_datasets(self):
datasets = self.bq_client.list_datasets()
datasets.append('test_bq_list_functions')
expected_outcome = sorted(datasets)
self.bq_client.create_dataset('test_bq_list_functions')
self.assertEqual(
self.bq_client.list_datasets(),
expected_outcome
)
def test_list_tables(self):
self.bq_client.create_dataset('test_bq_list_functions')
self.bq_client.initiate_table(
dataset_id='test_bq_list_functions',
table_id='bq_list_tables_Z',
schema_path='tests/schema/initiate_table.json'
)
self.bq_client.initiate_table(
dataset_id='test_bq_list_functions',
table_id='bq_list_tables_A',
schema_path='tests/schema/initiate_table.json'
)
self.assertEqual(
self.bq_client.list_tables('test_bq_list_functions'),
['bq_list_tables_A', 'bq_list_tables_Z']
)
class BigQueryExecutorCheckDQ(unittest.TestCase):
""" Test """
def setUp(self):
""" Test """
self.bq_client = dw.BigQueryExecutor()
self.bq_client.create_table(
dataset_id='test',
table_id='bq_check_dq',
sql="""
SELECT 'spam' AS col1, 'ham' AS col2 UNION ALL
SELECT 'spam', 'eggs' UNION ALL
SELECT 'ham', 'eggs'
"""
)
def tearDown(self):
""" Test """
self.bq_client.delete_table(
dataset_id='test',
table_id='bq_check_dq'
)
def test_check_dq(self):
""" Test """
self.assertEqual(
self.bq_client.count_rows(
dataset_id='test',
table_id='bq_check_dq'
),
3
)
self.assertEqual(
self.bq_client.count_columns(
dataset_id='test',
table_id='bq_check_dq'
),
2
)
self.assertEqual(
self.bq_client.count_duplicates(
dataset_id='test',
table_id='bq_check_dq',
primary_key=['col1']
),
1
)
with self.assertRaises(AssertionError):
self.bq_client.assert_unique(
dataset_id='test',
table_id='bq_check_dq',
primary_key=['col1']
)
with self.assertLogs(level='WARNING'):
self.bq_client.assert_unique(
dataset_id='test',
table_id='bq_check_dq',
primary_key=['col1'],
ignore_error=True
)
self.bq_client.assert_unique(
dataset_id='test',
table_id='bq_check_dq',
primary_key=['col1', 'col2']
)
class BigQueryCheckSql(unittest.TestCase):
""" Test """
def setUp(self):
""" Test """
self.bq_client = dw.BigQueryExecutor()
def test_check_sql_output_ok(self):
""" Test """
cte = """`staging.table1` AS (
SELECT 'A001' AS id, '102461350' AS order_reference, 'sofa' AS sku UNION ALL
SELECT 'A003' AS id,'1600491919' AS order_reference, 'sofa' AS sku UNION ALL
SELECT 'A002' AS id,'1600491918' AS order_reference, 'chair' AS sku
),
`staging.table2` AS (
        SELECT 100 AS price, 'A001' AS id UNION ALL
SELECT 200 AS price, 'A002' AS id UNION ALL
SELECT 300 AS price, 'A003' AS id
),
`expected_output` AS (
SELECT 'A001' AS id,'sofa' AS sku, 100 AS price, '102461350' AS order_reference UNION ALL
SELECT 'A002' AS id,'chair' AS sku, 200 AS price, '1600491918' AS order_reference UNION ALL
SELECT 'A003' AS id,'sofa' AS sku, 300 AS price, '1600491919' AS order_reference
)"""
sql = """
SELECT a.*, b.price from `staging.table1` a LEFT JOIN `staging.table2` b on a.id = b.id
"""
try:
self.bq_client.assert_acceptance(
sql=sql,
cte=cte
)
except AssertionError:
self.fail("run_checks() raised AssertionError unexpectedly!")
def test_assert_acceptance_ko(self):
""" Test """
cte = """`staging.table1` AS (
SELECT 'A001' AS id,'102461350' AS order_reference,'sofa' AS sku UNION ALL
SELECT 'A002','1600491918','chair'
),
`staging.table2` AS (
SELECT 'A001' AS id, 100 AS price UNION ALL
SELECT 'A002', 200
),
`expected_output` AS (
SELECT 'A001' AS id,'102461350' AS order_reference,'sofa' AS sku, 100 AS price UNION ALL
SELECT 'A003','1600491918','chair', 200) """
sql = """
SELECT a.*, b.price from `staging.table1` a LEFT JOIN `staging.table2` b on a.id = b.id
"""
with self.assertRaises(AssertionError):
self.bq_client.assert_acceptance(
sql=sql,
cte=cte
)
class BigQueryExecutorLoadGoogleSheet(unittest.TestCase):
""" Test """
def setUp(self):
self.bq_exec = dw.BigQueryExecutor()
self.bq_client = bigquery.Client()
        with open('tests/csv/test_load_gs.csv', 'r') as f:
            self.test_csv = f.read()
def test_load_gs_to_bq(self):
self.bq_exec.load_google_sheet(
googlesheet_key='19Jmapr9G1nrMcW2QTpY7sOvKYaFXnw5krK6dD0GwEqU',
sheet_name='input',
table_id='table1',
dataset_id='test',
schema_path='tests/schema/test_load_gs.json',
description='some text description for the table'
)
result = self.bq_exec.execute_sql(
"SELECT * FROM test.table1 ORDER BY date"
)
test_df = pd.read_csv('tests/csv/test_load_gs.csv', parse_dates=['date'])
assert_frame_equal(
result,
test_df
)
table_ref = self.bq_exec.get_table_ref(dataset_id='test', table_id='table1', project_id=bq_default_project())
table = self.bq_client.get_table(table_ref) # API request
self.assertTrue(
table.description == "some text description for the table",
"Description is not the same"
)
def tearDown(self):
self.bq_exec.delete_table(
dataset_id='test',
table_id='table1'
)
class BigQueryExecutorGetTableClusteringFields(unittest.TestCase):
""" Test """
def setUp(self):
self.db = dw.BigQueryExecutor()
def test_get_table_clustering_fields_table1(self):
self.db.initiate_table(
dataset_id='test',
table_id='table1',
schema_path='tests/schema/initiate_table.json'
)
clustering_fields = self.db.get_table_clustering_fields(
dataset_id='test',
table_id='table1'
)
self.assertIsNone(
clustering_fields,
"Clustering fields is not None"
)
def test_get_table_clustering_fields_table2(self):
self.db.initiate_table(
dataset_id='test',
table_id='table2',
schema_path='tests/schema/initiate_table.json',
partition=True
)
clustering_fields = self.db.get_table_clustering_fields(
dataset_id='test',
table_id='table2'
)
self.assertIsNone(
clustering_fields,
"Clustering fields is not None"
)
def test_get_table_clustering_fields_table3(self):
self.db.initiate_table(
dataset_id='test',
table_id='table3',
schema_path='tests/schema/initiate_table.json',
partition=True,
clustering=['survey_id', 'language_code']
)
clustering_fields = self.db.get_table_clustering_fields(
dataset_id='test',
table_id='table3'
)
self.assertEqual(
clustering_fields,
['survey_id', 'language_code'],
"Clustering fields is not ['survey_id', 'language_code']"
)
def tearDown(self):
self.db.delete_table(
dataset_id='test',
table_id='table1'
)
self.db.delete_table(
dataset_id='test',
table_id='table2'
)
self.db.delete_table(
dataset_id='test',
table_id='table3'
)
class BigQueryExecutorGetTablePartititonType(unittest.TestCase):
""" Test """
def setUp(self):
self.db = dw.BigQueryExecutor()
def test_get_table_clustering_fields_table1(self):
self.db.initiate_table(
dataset_id='test',
table_id='table1',
schema_path='tests/schema/initiate_table.json'
)
partition_type = self.db.get_table_partitioning_type(
dataset_id='test',
table_id='table1'
)
self.assertIsNone(
partition_type,
"Partition Type is not None"
)
def test_get_table_clustering_fields_table2(self):
self.db.initiate_table(
dataset_id='test',
table_id='table2',
schema_path='tests/schema/initiate_table.json',
partition=True
)
partition_type = self.db.get_table_partitioning_type(
dataset_id='test',
table_id='table2'
)
self.assertEqual(
partition_type,
"DAY"
)
def tearDown(self):
self.db.delete_table(
dataset_id='test',
table_id='table1'
)
self.db.delete_table(
dataset_id='test',
table_id='table2'
)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
unittest.main()
| UTF-8 | Python | false | false | 51,725 | py | 75 | test_dw.py | 43 | 0.525626 | 0.513794 | 0 | 1,626 | 30.811193 | 135 |
kkretschmer/python-isgri | 8,589,946,969 | 6445ae280e280926fb3ef2e13d6ccb3d51219ec3 | 2266521041285fb2d39c7093eca506de19e3c9e1 | /src/integral_isgri/bgserver.py | 587083c21a426bb74d52a225baac8c3bcd77cdcd | [] | no_license | https://github.com/kkretschmer/python-isgri | 3b70f96bfd8fa55fdc1cf26aed35834cdfaeb713 | 7d50e02013b42d1e7fb761636a5226af48080cdb | refs/heads/master | "2020-05-07T14:59:58.140407" | "2015-11-24T13:20:33" | "2015-11-24T13:20:33" | 40,010,798 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Karsten Kretschmer <kkretsch@apc.univ-paris7.fr>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
"""
Serve ISGRI background cubes via HTTP.
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from future import standard_library
standard_library.install_aliases()
import argparse
import io
import logging
import six
import socketserver
import http.server
try:
from astropy.io import fits
except ImportError:
import pyfits as fits
from .bgcube import BGCube, BGCubeSet
from .bglincomb import BGLinComb
class CubeHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
"""Handle HTTP requests for ISGRI background cubes
"""
def do_GET(self):
"""Serve a GET request."""
f = self.send_head()
if f:
try:
self.wfile.write(f.getvalue())
finally:
f.close()
def do_HEAD(self):
"""Serve a HEAD request."""
f = self.send_head()
if f:
f.close()
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is a BytesIO object containing the output FITS data.
"""
components = self.path.split('/')
if len(components) == 4:
indexing = components[2]
if indexing == 'ijd':
method, ijd = components[1], float(components[3])
if method in self.server.fromijd.keys():
bc = self.server.fromijd[method](ijd)
else:
self.send_error(
404, "Generating method '{}' not found.".format(method))
return None
else:
self.send_error(
404, "Indexing method '{}' not found.".format(indexing))
return None
else:
self.send_error(
404, "URI format not supported.")
return None
blob = six.BytesIO()
bc.writeto(blob, template=self.server.template)
self.send_response(200)
self.send_header('Content-Type', 'application/octet-stream')
self.send_header('Content-Length', str(len(blob.getvalue())))
self.end_headers()
return blob
class ForkingHTTPServer(socketserver.ForkingMixIn,
http.server.HTTPServer):
pass
def serve_cubes():
parser = argparse.ArgumentParser(
description="""Read a template for an ISGRI background model
composed of a linear combination of background cubes, each scaled
by linear interpolation of a light curve over time. Interpolate
it in time and write it to a background cube."""
)
parser.add_argument('inputs', nargs='+',
help='input FITS file(s)')
parser.add_argument('-p', '--http-port', type=int, default=8000,
help='port to listen for HTTP requests')
parser.add_argument('-t', '--template', help='template FITS file')
parser.add_argument('-l', '--outlier-map',
help='FITS file with outlier counts per pixel')
parser.add_argument('-c', '--max-outlier-count', type=int, default=0,
help='maximum allowed outlier count')
parser.add_argument('-e', '--mask-module-edges', type=int, default=0,
metavar='PIXELS',
help='number of pixels to mask around module edges')
parser.add_argument('-v', '--verbose', action='count', default=0)
args = parser.parse_args()
if args.verbose >= 2:
logging.basicConfig(level=logging.DEBUG)
elif args.verbose >= 1:
logging.basicConfig(level=logging.INFO)
server_address = ('', args.http_port)
httpd = ForkingHTTPServer(server_address, CubeHTTPRequestHandler)
httpd.fromijd = {}
stacks = []
httpd.fromijd['zero'] = lambda ijd: BGCube()
for infile in args.inputs:
signature = [t[1] for t in fits.info(infile, output=False)]
if signature[1:5] == ['TIME', 'ENERGY', 'TRACERS', 'CUBES']:
# bglincomb template
#
blc = BGLinComb(file=infile)
httpd.fromijd['lincomb'] = blc.bgcube
logging.info('Initalised method "lincomb" from file: {}'.format(
infile))
elif signature[1:3] == ['COUNTS', 'EXPOSURE']:
# cube stack
#
stacks.append(BGCube.fromstack(infile))
logging.info('Added file to stack set: {}'.format(infile))
if len(stacks) > 0:
bcs = BGCubeSet(stacks)
httpd.fromijd['nearest'] = bcs.nearest
logging.info('Initalised stack set method "nearest".')
httpd.fromijd['linear'] = bcs.linear
logging.info('Initalised stack set method "linear".')
httpd.template = fits.open(args.template, memmap=True)
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
| UTF-8 | Python | false | false | 5,761 | py | 20 | bgserver.py | 14 | 0.605798 | 0.597292 | 0 | 163 | 34.343558 | 81 |
Supper-of-Developers/remindanuki | 15,504,831,956,295 | 49a5efea36c6d544ca5d9d260a404f91cc64c09a | 330cd4bab016a32ed15a52d8a1891fbf8727858c | /app/config.py | f53cc83e4da3cec63af4db24445216f32d631621 | [] | no_license | https://github.com/Supper-of-Developers/remindanuki | 0f3abe857dd053cc4a80ce39b6b71542123e4190 | 9a398d5dfff4b7764d587574f98051d28304cae0 | refs/heads/master | "2020-03-08T02:47:45.021118" | "2018-05-31T07:50:49" | "2018-05-31T07:50:49" | 127,871,046 | 1 | 2 | null | false | "2018-06-05T04:16:36" | "2018-04-03T07:48:32" | "2018-05-31T07:50:51" | "2018-06-05T04:15:33" | 1,138 | 1 | 2 | 20 | Python | false | null | # coding=utf-8
import os
# LINE API情報
ACCESS_TOKEN = os.environ['ACCESS_TOKEN']
CHANNEL_SECRET = os.environ['CHANNEL_SECRET']
# redis接続情報
REDIS_URL = os.environ['REDIS_URL']
REDIS_PORT = os.environ['REDIS_PORT']
# MySQL接続情報
MYSQL_CONFIG= {
"host": os.environ['MYSQL_HOST'],
"port": os.environ['MYSQL_PORT'],
"user": os.environ['MYSQL_USER'],
"password": os.environ['MYSQL_PASS'],
"database": os.environ['MYSQL_DATABASE'],
"charset": 'utf8'
}
| UTF-8 | Python | false | false | 492 | py | 15 | config.py | 11 | 0.650424 | 0.646186 | 0 | 20 | 22.6 | 45 |
assmdx/ml_code | 5,231,270,194,787 | c35a617e84018a3071c72ab7d46657bafb7a82b9 | cda49bd7ea51484e439f5651a909bcdc0be8bbc1 | /huangbaoche/example_code/code/base4.py | 7d6d28adbebb42cd021026e8dd184d62db3d34e6 | [] | no_license | https://github.com/assmdx/ml_code | 58ac17d9f7dac28a4f0d5056384c767aa39ad3d7 | 0e36a0766ee986586fe677e4b4299e09f2d52959 | refs/heads/master | "2021-09-23T08:50:54.549426" | "2018-09-21T03:12:02" | "2018-09-21T03:12:02" | 121,278,684 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn import preprocessing
version = 5
def XGB(train, test):
train_x = train.drop(['orderType', 'userid'], axis=1)
train_y = train.orderType.values
print(train_x.shape)
print(len(train_y))
param = {}
param['booster'] = 'gbtree'
param['objective'] = 'binary:logistic'
param['eval_metric'] = 'auc'
param['stratified'] = 'True'
param['eta'] = 0.02
param['silent'] = 1
param['max_depth'] = 5
param['subsample'] = 0.7
param['colsample_bytree'] = 0.8
# param['lambda'] = 2
# param['min_child_weight'] = 10
param['scale_pos_weight'] = 1
param['seed'] = 1024
param['nthread'] = 16
dtrain = xgb.DMatrix(train_x, label=train_y)
res = xgb.cv(param, dtrain, 3500, nfold=5, early_stopping_rounds=100, verbose_eval=20)
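    # Train on the full training set for the number of boosting rounds kept
    # by CV (res has one row per round that survived early stopping).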
model = xgb.train(param, dtrain, res.shape[0], evals=[(dtrain, 'train')], verbose_eval=500)
test_x = test[train_x.columns.values]
dtest = xgb.DMatrix(test_x)
y = model.predict(dtest)
test['orderType'] = y
test[['userid', 'orderType']].to_csv('../result/xgb_baseline' + str(version) + '.csv', index=False)
imp_f = model.get_fscore()
imp_df = pd.DataFrame(
{'feature': [key for key, value in imp_f.items()], 'fscore': [value for key, value in imp_f.items()]})
imp_df = imp_df.sort_values(by='fscore', ascending=False)
imp_df.to_csv('../imp/xgb_imp' + str(version) + '.csv', index=False)
train = pd.read_csv('../feature/train' + str(version) + '.csv')
test = pd.read_csv('../feature/test' + str(version) + '.csv')
XGB(train, test) | UTF-8 | Python | false | false | 1,664 | py | 19 | base4.py | 11 | 0.613582 | 0.591947 | 0 | 49 | 32.959184 | 110 |
caseyalananderson/thosedanggirls | 773,094,136,245 | 8fa6a3addbaff6064b96c749e7c3947c0dc6dc5f | 4dfeeb1a5a851b368d7a11107789913ac0a92dc6 | /food/urls.py | 9bc95edcad091977eb7eef3aecef1377cd2b527a | [] | no_license | https://github.com/caseyalananderson/thosedanggirls | dbc6a6e46a405099ffdf2ec04494666f56ae9f85 | 10f2c6d0f7760a0634b04aaab8e5715cae91411f | refs/heads/master | "2021-09-10T20:47:05.768447" | "2018-04-02T04:22:08" | "2018-04-02T04:22:08" | 122,774,927 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls import url
from . import views
urlpatterns = [
# Posts
url(r'^post/$', views.foodpost_list, name='foodpost_list'),
url(r'^post/(?P<pk>\d+)/$', views.foodpost_detail, name='foodpost_detail'),
# Recipes
# url(r'^recipe/$', views.recipe_list, name='recipe_list'),
url(r'^recipe/(?P<pk>\d+)/$', views.recipe_detail, name='recipe_detail'),
]
| UTF-8 | Python | false | false | 387 | py | 57 | urls.py | 43 | 0.622739 | 0.622739 | 0 | 14 | 26.642857 | 79 |
SteadBytes/multi-user-blog | 18,382,460,038,612 | 0e649f043216a31f61a86c5e56c1c43a5e33cc55 | 16270d37d819a35777ab6d6a6430cd63551917f1 | /handlers/deletepost.py | 262502e2a5d8eb4e9210b0aef29a0d8c9e2894b4 | [] | no_license | https://github.com/SteadBytes/multi-user-blog | de28673155b81bd22e6cb57825670beca9ffdcf0 | a3269a949753c72fad0915a1a6feafe50d75c4e3 | refs/heads/master | "2021-06-22T13:42:58.381491" | "2017-06-28T15:22:14" | "2017-06-28T15:22:14" | 89,211,765 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from handlers.blog import BlogHandler
from models.blogpost import BlogPost
from models.like import Like
from models.comment import Comment
from helpers import *
from google.appengine.ext import db
class DeletePostHandler(BlogHandler):
@BlogHandler.user_logged_in
@BlogHandler.post_exists
@BlogHandler.user_owns_post
def get(self, post):
        # Remove the post's likes and comments before deleting the post itself.
        likes = post.likes.ancestor(likes_key())
        comments = post.comments.ancestor(comments_key())
        db.delete(likes)
        db.delete(comments)
        post.delete()
self.redirect('/blog')
| UTF-8 | Python | false | false | 598 | py | 37 | deletepost.py | 22 | 0.69398 | 0.69398 | 0 | 20 | 28.9 | 57 |
metocean/SSC | 738,734,396,327 | b54a7c201cf7153fb507faeff8797e04a2a496b0 | f2acfc33b807324a3a5e1fa381c65b1d35a4fca8 | /func/schismIO.py | e2d2ebc747c99ac184be6f4ed85e13b228e29d3b | [] | no_license | https://github.com/metocean/SSC | af92decce4085c438d9f5c0e9f68b067654e24e4 | aaa4cc42825ec49455c867bce3e19bf12630672e | refs/heads/master | "2018-11-22T07:59:01.475485" | "2018-11-21T02:02:31" | "2018-11-21T02:02:31" | 114,040,100 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python2.7
import os
import subprocess
import numpy as np
import netCDF4
soft='combine_output11'
hot='combine_hotstart7'
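# External SCHISM command-line tools that merge per-processor output and
# hotstart files into single files.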
class schismIO:
def __init__(self,dirIN):
self.dir= dirIN
# def get_file(self,ts):
# F=np.ceil(ts/self.file_length)
# return F
# def get_timestep(self,ts):
# file_end=np.ceil(ts/self.file_length)
# timestep=np.floor((ts-((file_end-1)*self.file_length))/self.output_dt)
# return timestep
# def get_first_timestep(self):
# f=os.path.join(self.dir,'mirror.out')
# for line in open(f).readlines():
# if 'TIME=' in line:
# elapse=float(line.split(';')[1].split('\n')[0].replace('TIME=',''))
# break
# return elapse
# def get_last_timestep(self):
# f=os.path.join(self.dir,'mirror.out')
# for line in reversed(open(f).readlines()):
# if 'TIME=' in line:
# elapse=float(line.split(';')[1].split('\n')[0].replace('TIME=',''))
# break
# return elapse
def get_startfile(self):
f=os.path.join(self.dir,'hotstart.nc')
nc=netCDF4.Dataset(f)
ifile=nc.variables['ifile'][0]
if 'P2' in nc.variables:
P2=nc.variables['P2'][0]
else:
P2=1000.
return ifile,P2
def create_nectdf_file(self,file_num):
subprocess.call("%s -b %i -e %i" % (soft,file_num,file_num), cwd="%s" % os.path.join(self.dir,'outputs'),shell = True)
def create_hotstart_file(self,file_num):
subprocess.call("%s -i %i " % (hot,file_num), cwd="%s" % os.path.join(self.dir,'outputs'),shell = True)
if __name__ == "__main__":
sc=schismIO('/home/remy/Buisness/0336_SSC_tides/make_hotstart')
te=sc.get_last_timestep()
sc.create_nectdf_file('dahv.62',to_ts=te)
| UTF-8 | Python | false | false | 1,639 | py | 23 | schismIO.py | 20 | 0.639414 | 0.621721 | 0 | 60 | 26.283333 | 120 |
vitorAmorims/python | 3,453,153,746,057 | 4356d0d0ddc1c32d1ac8d6fc2479d57c5e772a8b | 73df3de550af2f973db1f198f1eb5bcd77519dd5 | /tupla_adicionar.py | 3363459d140b78184e63d47e958f50a6d3647f95 | [] | no_license | https://github.com/vitorAmorims/python | fbe308b2eb1fcb87ae1d19dea37d9b23d4e8e635 | e55d3234956fa833d6a94ab5876c9c08061a478d | refs/heads/master | "2023-06-28T04:32:18.179193" | "2021-07-14T19:05:06" | "2021-07-14T19:05:06" | 301,401,222 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Coleções Python (matrizes)
Existem quatro tipos de dados de coleção na linguagem de programação Python:
Lista é uma coleção ordenada e mutável. Permite membros duplicados.
Tupla é uma coleção ordenada e imutável. Permite membros duplicados.
Set é uma coleção não ordenada e não indexada. Sem membros duplicados.
O dicionário é uma coleção ordenada * e mutável. Sem membros duplicados.
* A partir da versão 3.7 do Python, os dicionários são solicitados . No Python 3.6 e anteriores, os dicionários não são ordenados .
'''
'''
Tuple
Tuples are used to store multiple items in a single variable.
Tuple is one of 4 built-in data types in Python used to store collections of data, the other 3 are List, Set, and Dictionary, all with different qualities and usage.
A tuple is a collection which is ordered and unchangeable.
Tuples are written with round brackets.
'''
# Create a Tuple:
thistuple = ("apple", "banana", "cherry")
print(thistuple)
'''
Tuple Items
Tuple items are ordered, unchangeable, and allow duplicate values.
Tuple items are indexed, the first item has index [0], the second item has index [1] etc.
'''
'''
Example
Tuples allow duplicate values:
'''
thistuple = ("apple", "banana", "cherry", "apple", "cherry")
print(thistuple)
| UTF-8 | Python | false | false | 1,302 | py | 24 | tupla_adicionar.py | 24 | 0.745283 | 0.738994 | 0 | 46 | 26.630435 | 165 |
HandsumeDan/FEASTFREEDOM | 12,060,268,185,008 | 76c28734d5cf65e156df4b1d8e391bc68c4584a3 | 4422da68b4b61a248b8649f13b99816afa9a9277 | /myApp/views.py | ea9f40645c267997deb2cf8f46f4cc941d215460 | [] | no_license | https://github.com/HandsumeDan/FEASTFREEDOM | ef2729e3f0c919ac9fdb92e65cd99f6b1bcb6997 | 7008e54e5099a5e0892506055b7f7b88a43745f1 | refs/heads/master | "2021-04-09T00:12:52.270122" | "2020-03-26T17:02:31" | "2020-03-26T17:02:31" | 248,821,776 | 0 | 1 | null | false | "2020-03-25T21:33:28" | "2020-03-20T18:07:57" | "2020-03-25T21:09:37" | "2020-03-25T21:33:28" | 30,637 | 0 | 1 | 0 | Python | false | false | from django.http import HttpResponse
from django.shortcuts import render
from cart.forms import CartAddProductForm
from cart.cart import Cart
from serviceProviderApp.models import Kitchen2Register
# Create your views here.
def index(request):
#cart_product_form = CartAddProductForm()
cart = Cart(request)
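    # A POST request renders the kitchen list; any other request falls
    # through to the home page.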
if request.POST:
k = Kitchen2Register.objects.all() # filter(available=True)
return render(request, 'userModule/kitchen_list.html', {'kitchens': k, 'cart': cart})
else:
# return render(request,'shop/product/detail.html',{'product': kitchen,'cart_product_form': cart_product_form})
#return render(request, 'userModule/kitchen_detail.html',{'kitchen': kitchen, 'cart_product_form': cart_product_form})
return render(request,'myApp/home.html', {'cart': cart})
| UTF-8 | Python | false | false | 781 | py | 25 | views.py | 18 | 0.758003 | 0.755442 | 0 | 18 | 42.333333 | 120 |
aasparks/cryptopals-py-rkt | 6,193,342,846,141 | 1b5712c9b3e9669fae9dbcb561e28b948e19bfe2 | 3e76f106e145d0c145233c3dae5083d40bb20705 | /cryptopals-py/set5/c38.py | 32357eff05aa6ebd56ad58061794a97ee69f9c34 | [] | no_license | https://github.com/aasparks/cryptopals-py-rkt | b7d0acd5a9f4790e9b310c505407e52bccf7b367 | 5119b857927d604a6e0ab074e5f000f3f2ac4ee1 | refs/heads/master | "2022-04-30T12:22:14.546094" | "2022-03-25T01:35:45" | "2022-03-25T01:35:45" | 106,590,838 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
**Challenge 38**
*Offline Dictionary Attack on Simplified SRP*::
S
x = SHA256(salt || password)
v = g**x % N
C->S
I, A = g**a % N
S->C
salt, B = g**b % N, u = 128-bit random number
C
x = SHA256(salt || password)
S = B**(a + ux) % N
K = SHA256(S)
S
S = (A * v**u)**b % N
K = SHA256(S)
C->S
Send HMAC-SHA256(K, salt)
S->C
Send 'OK' if HMAC-SHA256(K, salt) validates
Note that in this protocol, the server's "B" parameter doesn't depend on the
password (it's just a Diffie-Hellman public key).
Make sure the protocol works given a valid password.
Now, run the protocol as a MITM attacker, pose as the server and use arbitrary
values for b,B,u, and salt.
Crack the password from A's HMAC-SHA256(K, salt)
"""
from c36 import int_to_bytes, hmac_sha256
from hashlib import sha256
import unittest, sys, queue, os, threading
sys.path.insert(0, '../set1')
import c1, c2
DEBUG = False
class SimplifiedSRPServer():
"""
Represents a server that uses a simplified version of Secure Remote
Password to authenticate users.
Attributes:
N (int): A NIST prime
I (bytestring): Email of user
salt (bytestring): Random integer
v (int): Value for the password
"""
def __init__(self, prime, email, password):
"""
Initializes the class with a provided NIST prime, email, and password.
Computes salt, v so that the password does not need to be saved.
Args:
prime (int): The NIST prime used by both client and server
email (bytestring): The email for the user
password (bytestring): The password for the user
"""
self.N = prime
self.g = 2
self.k = 3
self.I = email
self.salt = os.urandom(8)
xH = sha256(self.salt + password).digest()
if DEBUG:
print('SERVER: salt: ' + str(c1.asciitohex(self.salt)))
print('SERVER: xH: ' + str(c1.asciitohex(xH)))
x = int.from_bytes(xH, byteorder='big')
self.v = pow(self.g, x, self.N)
def authenticate(self, email, A, inp, out):
"""
Authenticates the user.
Args:
email (bytestring): The user's email
A (int): The SRP value for authentication
inp (queue): Input queue for communication
out (queue): Output queue for communication
"""
threading.Thread(target=self.__auth, args=(email, A, inp, out)).start()
def __auth(self, email, A, inp, output):
# Send salt, B
b = int.from_bytes(os.urandom(8), byteorder='big')
B = pow(self.g, b, self.N)
if DEBUG:
print('SERVER: B: ' + str(B))
u = int.from_bytes(os.urandom(16), byteorder='big')
output.put([self.salt, B, u])
# Generate S = (A * v**u)**b % N, K
S = pow(A * pow(self.v, u, self.N), b, self.N)
K = sha256(int_to_bytes(S)).digest()
hmac = hmac_sha256(self.salt, K)
if DEBUG:
print('SERVER: S: ' + str(S))
print('SERVER: K: ' + str(c1.asciitohex(K)))
print('SERVER: hmac: ' + str(c1.asciitohex(hmac)))
client_hmac = inp.get()
output.put(hmac == client_hmac)
class MITMSimplifiedSRPServer():
def __init__(self, prime, email, password):
"""
Initializes the class with a provided NIST prime, email, and password.
Computes salt, v so that the password does not need to be saved.
Args:
prime (int): The NIST prime used by both client and server
email (bytestring): The email for the user
password (bytestring): The password for the user
"""
# Setting the password list small for testing but really, you would
# need to sit here and do a long dictionary attack. I'm not wasting
# my time.
self.password_list = [b'password', b'password1', b'imready', b'ready',
b'krustykrab', b'1234567890', b'invalid']
self.N = prime
self.g = 2
self.k = 3
self.I = email
self.salt = os.urandom(8)
xH = sha256(self.salt + password).digest()
if DEBUG:
print('SERVER: salt: ' + str(c1.asciitohex(self.salt)))
print('SERVER: xH: ' + str(c1.asciitohex(xH)))
x = int.from_bytes(xH, byteorder='big')
self.v = pow(self.g, x, self.N)
def authenticate(self, email, A, inp, out):
"""
Authenticates the user.
Args:
email (bytestring): The user's email
A (int): The SRP value for authentication
inp (queue): Input queue for communication
out (queue): Output queue for communication
"""
threading.Thread(target=self.__auth, args=(email, A, inp, out)).start()
def __auth(self, email, A, inp, output):
self.salt = b'0' * 8
b = 1
B = 2 # x needs to not be canceled out by bad math
u = 1 # these two values make it S = g**(a+x) % N
output.put([self.salt, B, u])
        # The MITM never computes the real S or K; it only needs the client's
        # HMAC, which is cracked offline in __dictionary_attack below.
client_hmac = inp.get()
self.__dictionary_attack(client_hmac, self.salt, A, u)
output.put(False)
def __dictionary_attack(self, client_hmac, salt, A, u):
for password in self.password_list:
xH = sha256(salt + password).digest()
x = int.from_bytes(xH, byteorder='big')
v = pow(self.g, x, self.N)
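            # With B = g, b = 1, u = 1 the client computes S = g**(a+x) % N,
            # which equals A * v % N -- checkable without knowing a.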
S = (A * v) % self.N
K = sha256(int_to_bytes(S)).digest()
hmac = hmac_sha256(salt, K)
if hmac == client_hmac:
self.password = password
return
raise RuntimeError('Password not found')
class SimplifiedSRPClient():
def __init__(self, prime, server):
"""
Represents a simplified SRP Client.
Args:
prime (int): The NIST prime used by both client and server
server (SRPServer): The server to talk to
"""
self.N = prime
self.g = 2
self.k = 3
self.server = server
def login(self, email, password):
"""
Attempts to log into the SRP server with the given credentials.
Args:
email: The email of the user
password: The password of the user
Returns:
True if successful login
"""
a = int.from_bytes(os.urandom(8), byteorder='big')
A = pow(self.g, a, self.N)
# Send I, A
out = queue.Queue()
inp = queue.Queue()
self.server.authenticate(email, A, out, inp)
# S->C salt, B, u
salt, B, u = inp.get()
if DEBUG:
print('CLIENT: salt: ' + str(c1.asciitohex(salt)))
print('CLIENT: B: ' + str(B))
print('CLIENT: u: ' + str(u))
# Generate xH, K, S= (B - k * g**x)**(a + u*x) % N
xH = sha256(salt + password).digest()
x = int.from_bytes(xH, byteorder='big')
S = pow(B, (a+u*x), self.N)
K = sha256(int_to_bytes(S)).digest()
hmac = hmac_sha256(salt, K)
if DEBUG:
print('CLIENT: xH: ' + str(c1.asciitohex(xH)))
print('CLIENT: S: ' + str(S))
print('CLIENT: K: ' + str(c1.asciitohex(K)))
print('CLIENT: HMAC: ' + str(c1.asciitohex(hmac)))
out.put(hmac)
auth = inp.get()
return auth
class TestSRP(unittest.TestCase):
def test_simple_srp(self):
p = "0xffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024"
p += "e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd"
p += "3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec"
p += "6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f"
p += "24117c4b1fe649286651ece45b3dc2007cb8a163bf0598da48361"
p += "c55d39a69163fa8fd24cf5f83655d23dca3ad961c62f356208552"
p += "bb9ed529077096966d670c354e4abc9804f1746c08ca237327fff"
p += "fffffffffffff"
p = int(p, 16)
email = b'ssquarepants@krustyk.com'
password = b'imready'
server = SimplifiedSRPServer(p, email, password)
client = SimplifiedSRPClient(p, server)
self.assertTrue(client.login(email, password))
self.assertFalse(client.login(email, b'imnotready'))
def test_mitm_srp(self):
p = "0xffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024"
p += "e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd"
p += "3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec"
p += "6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f"
p += "24117c4b1fe649286651ece45b3dc2007cb8a163bf0598da48361"
p += "c55d39a69163fa8fd24cf5f83655d23dca3ad961c62f356208552"
p += "bb9ed529077096966d670c354e4abc9804f1746c08ca237327fff"
p += "fffffffffffff"
p = int(p, 16)
email = b'ssquarepants@krustyk.com'
password = b'imready'
server = MITMSimplifiedSRPServer(p, email, password)
client = SimplifiedSRPClient(p, server)
self.assertFalse(client.login(email, password))
self.assertTrue(server.password == password)
if __name__ == "__main__":
unittest.main() | UTF-8 | Python | false | false | 9,702 | py | 200 | c38.py | 54 | 0.562461 | 0.503504 | 0 | 268 | 35.205224 | 79 |
thoas/django-data-exporter | 10,625,749,107,918 | 6bc15aa86c17fb7f97c1108cd83be76e797a0ec9 | 74021503513ba14ec8b5cfaa563fa4c6b2bae337 | /data_exporter/base.py | bfc422f71091a8e0aaad3d18c4a08474998aca21 | [] | no_license | https://github.com/thoas/django-data-exporter | c2284fed5d69ec1ed8d56ccfd977d0a4e195692d | 01017509feb2e77566e6af672bc995da514f8d93 | refs/heads/master | "2021-06-02T03:29:51.291307" | "2013-09-12T17:15:58" | "2013-09-12T17:15:58" | 5,936,878 | 1 | 0 | null | false | "2019-10-03T04:38:45" | "2012-09-24T15:54:15" | "2015-04-06T11:31:12" | "2019-10-03T04:38:44" | 216 | 5 | 0 | 1 | Python | false | false | import os
import tablib
from datetime import datetime
from django.core.files.storage import get_storage_class
from django.core.files.base import ContentFile
from . import settings
from .signals import export_done, combine_done
class Export(object):
columns = ()
headers = ()
filename = None
directory = None
date_format = '%Y/%m/%d'
filename_format = '%(filename)s'
def __init__(self, *args, **kwargs):
storage_class = get_storage_class(kwargs.pop('storage_class',
settings.DATA_EXPORTER_STORAGE_CLASS))
self.storage = kwargs.pop('storage',
storage_class(location=settings.DATA_EXPORTER_DIRECTORY))
self.args = args
self.kwargs = kwargs
def get_query(self, *args, **kwargs):
raise NotImplementedError
def get_count(self):
raise NotImplementedError
def format(self, key, obj, default=None):
value = getattr(obj, key, default)
if callable(value):
return unicode(value() or default)
return unicode(value or default)
def get_directory(self):
return os.path.join(self.directory, self.get_formatted_date())
def get_filename_format(self):
return self.filename_format % {
'filename': self.filename,
}
def get_file_root(self, mimetype, offset=None, limit=None):
filename_format = self.get_filename_format()
extension = '.%s' % mimetype
chunk = ''
if not offset is None and not limit is None:
chunk = '_%s_%s' % (offset, limit)
return os.path.join(self.get_directory(),
filename_format + chunk + extension)
def get_formatted_date(self):
return datetime.now().strftime(self.date_format)
def write(self, data, mimetype, offset=None, limit=None, signal=True):
self.write_dataset(tablib.Dataset(*data), mimetype,
offset=offset,
limit=limit,
signal=signal)
def write_dataset(self, dataset, mimetype, offset=None, limit=None, signal=True):
self.pre_export(dataset, mimetype, offset=offset, limit=limit)
file_root = self.get_file_root(mimetype, offset, limit)
self.storage.save(file_root, ContentFile(getattr(dataset, mimetype)))
if signal:
export_done.send(sender=self, file=file)
self.post_export(file, dataset, mimetype, offset=offset, limit=limit)
def combine(self, offsets, mimetype, signal=True):
self.pre_combine(offsets, mimetype)
file_root = self.get_file_root(mimetype)
parts = [getattr(tablib.Dataset([], headers=self.headers), mimetype)]
for i, current_offset in enumerate(offsets):
offset, limit = current_offset
with self.storage.open(self.get_file_root(mimetype, offset, limit)) as file:
for chunk in file:
parts.append(chunk)
self.storage.save(file_root, ContentFile(''.join(parts)))
if signal:
combine_done.send(sender=self, file=file)
self.post_combine(file, offsets, mimetype)
def pre_export(self, dataset, mimetype, offset=None, limit=None):
pass
def post_export(self, file, dataset, mimetype, offset=None, limit=None):
pass
def pre_combine(self, offsets, mimetype):
pass
def post_combine(self, file, offsets, mimetype):
pass
| UTF-8 | Python | false | false | 3,549 | py | 12 | base.py | 8 | 0.606086 | 0.606086 | 0 | 119 | 28.823529 | 91 |
ezio647/algorithm | 5,360,119,215,264 | a1e7e05fe4183239a4210a902fae80005b96e8fa | 666c3ddd1724d2b58555af16eb9b76609f57af70 | /selection_sort.py | 399cf95db0dd839f22d8435c7685efcffdcd8695 | [] | no_license | https://github.com/ezio647/algorithm | 168f0f8e32e8430663cdfc733ba699bc22ae9d3c | d5241fd295497753a51f7d383db8cf772f16c610 | refs/heads/master | "2021-01-10T19:26:22.228371" | "2015-04-09T21:43:22" | "2015-04-09T21:43:22" | 33,482,634 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def selection_sort(num_list):
length = len(num_list)
for i in range (0, length):
min = i
for j in range(i + 1 , length):
print "comparing", num_list[j], "and", num_list[min]
if num_list[j] < num_list[min]:
min = j
num_list[i], num_list[min] = num_list[min], num_list[i]
return num_list
num = [4, 3, 7,5,1,2]
selection_sort(num)
print num | UTF-8 | Python | false | false | 456 | py | 22 | selection_sort.py | 22 | 0.484649 | 0.467105 | 0 | 16 | 27.5625 | 64 |
ryanwersal/crosswind | 15,187,004,387,266 | d3e3fb168715d2e8c1ebd1a76a4e3c925b0f722f | 2c341caeb9fab68e89021014620713f22aa649e6 | /fixer_suites/defuturize/tests/test_past.py | e9686d97c967c132ff36c6d5f2cf63541a6db208 | [
"Apache-2.0",
"Python-2.0"
] | permissive | https://github.com/ryanwersal/crosswind | cb111506b1238e68cfeb027ba3be39f5fa4fafbe | 87d746479f9984e510f6ae36d417639c4fb83a82 | refs/heads/master | "2020-08-07T02:51:01.645914" | "2020-06-15T01:28:32" | "2020-06-15T01:44:13" | 213,267,864 | 13 | 4 | NOASSERTION | false | "2019-11-08T18:59:12" | "2019-10-07T00:49:27" | "2019-11-08T16:36:02" | "2019-11-08T18:59:12" | 1,062 | 5 | 0 | 17 | Python | false | false | import pytest
@pytest.fixture(name="fixer")
def fixer_fixture(defuturize_test_case):
return defuturize_test_case("past")
def test_warns_on_past_usage(fixer):
u = "from past.foo import bar"
fixer.warns_unchanged(u, "'bar' imported from past.foo")
| UTF-8 | Python | false | false | 262 | py | 162 | test_past.py | 152 | 0.709924 | 0.709924 | 0 | 11 | 22.818182 | 60 |
HelloMorrisMoss/python | 16,329,465,676,654 | 4c92b2a6fa0bb7f772437a3c7ec0a9ad20a21ff0 | 82d75b75953594b681e9405171be3c04ee20f833 | /xls2xlsx_export2img.py | 1d517f889d611c721c594d61e4d9c55a314ca0e6 | [] | no_license | https://github.com/HelloMorrisMoss/python | efbbfa4fee9416c8d5e8f1e2f900cb3c90e255bc | e5d562c8a6fc92b951e4ff14cb05031dce37b516 | refs/heads/master | "2020-09-04T17:33:45.690895" | "2019-11-08T22:18:32" | "2019-11-08T22:18:32" | 219,832,742 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import shutil
import zipfile
import sys
import excel2img # https://github.com/glexey/excel2img
import os
import datetime
import ntpath
import xmltodict as xmltodict
import win32com.client as win32
dt = datetime.datetime
def xport(xlfile, outDir='', imgType='.png'):
justpath = os.path.dirname(xlfile)
justname = os.path.basename(xlfile)
barename = os.path.splitext(justname)[0]
bname = ntpath.basename(xlfile)
outDir = justpath
fname = bname[:len(bname)-5]
outImg = os.path.join(outDir, fname + imgType)
# sheetName = 1
sheetName = "49510"
print('xport: filepath', xlfile, 'output dir', outDir, 'image name', outImg, 'image type', imgType)
excel2img.export_img(xlfile, outImg, sheetName, None)
# "C:/my documents/nh-putups/LW NH Putup T-C 49506 PE-7100T.xls","image.png/image.bmp","91517!a1:e41")\
# except Exception as e:
# print(e)
## this failed to work out
# def xport(xlfile, outDir, imgType='.png'):
# # try:
# # xlfile = "C:/my documents/nh-putups/LW NH Putup T-C 49506 PE-7100T.xls"
# # outImg = str(dt.now()) + '.png'
# bname = ntpath.basename(xlfile)
# fname = bname[:len(bname)-5]
# outImg = os.path.join(outDir, fname + imgType)
# sheetName = 1
# print('xport: filepath', xlfile, 'output dir', outDir, 'image name', outImg, 'image type', imgType)
# excel2img.export_img(xlfile, outImg, sheetName, None)
# # "C:/my documents/nh-putups/LW NH Putup T-C 49506 PE-7100T.xls","image.png/image.bmp","91517!a1:e41")\
# # except Exception as e:
# # print(e)
# def xport(filepath, imgtype='png'):
# justpath = os.path.dirname(filepath)
# justname = os.path.basename(filepath)
# barename = os.path.splitext(justname)[0]
# # filepath = 'r' + filepath
# # Dispatch = win32.gencache.EnsureDispatch('Excel.Application')
# xlApp = win32.gencache.EnsureDispatch('Excel.Application')
# print(repr(xlApp))
# # xlApp = Dispatch('Excel.Application')
# opn = xlApp.Workbooks.Open
# print('justpath: ', justpath, 'justname: ', justname, 'barename: ', barename)
# print('open is %r' % opn)
# with opn(filepath, False, True, None):
# # with win32.gencache.EnsureDispatch('Excel.Application').Workbooks.Open(filepath, False, True, None, ''):
#
# # xlApp.Sheets("Sheet1").Select()
#
# xlSheet1 = xlApp.Sheets(1)
# urange = xlSheet1.UsedRange
# urange.Export(os.path.join(justpath, barename + imgtype))
# def xport(filepath, imgtype='png'):
# justpath = os.path.dirname(filepath)
# justname = os.path.basename(filepath)
# barename = os.path.splitext(justname)[0]
# win32c = win32.client.constants
#
# Dispatch = win32.gencache.EnsureDispatch('Excel.Application')
# xlApp = win32.gencache.EnsureDispatch('Excel.Application')
# xlApp.
# ws.Range(ws.Cells(1, 1), ws.Cells(66, 16)).CopyPicture(Format=win32c.xlBitmap)
#
# img = ImageGrab.grabclipboard()
# imgFile = os.path.join(path_to_img, 'test.jpg')
# img.save(imgFile)
def get_sheet_details(file_path, export_path=None):
'''
Parses an excel file's xml metadata for sheet info, returns a dictionary.
originally from https://stackoverflow.com/a/56320206/10941169
:param file_path:
:return dictionary of key=sheet name, value :
'''
try:
sheets = []
file_name = os.path.splitext(os.path.split(file_path)[-1])[0]
# Make a temporary directory with the file name
# directory_to_extract_to = os.path.join(settings.MEDIA_ROOT, file_name)
export_path = os.path.dirname(file_path)
directory_to_extract_to = os.path.join(export_path, file_name)
# try:
# os.mkdir(directory_to_extract_to)
# except Exception as e:
# exc_type, exc_obj, exc_tb = sys.exc_info()
# fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
# print('mkdir error ', e, exc_type, fname, exc_tb.tb_lineno)
# print('after inner try')
# Extract the xlsx file as it is just a zip file
print(file_path)
zip_ref = zipfile.ZipFile(file_path, 'r')
zip_ref.extractall(directory_to_extract_to)
zip_ref.close()
print('after zip')
# Open the workbook.xml which is very light and only has meta data, get sheets from it
path_to_workbook = os.path.join(directory_to_extract_to, 'xl', 'workbook.xml')
with open(path_to_workbook, 'r') as f:
xml = f.read()
dictionary = xmltodict.parse(xml)
sheet_details = dictionary['workbook']['sheets']['sheet']['@name']
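            # Assumes a single-sheet workbook: xmltodict returns a dict (not a
            # list) for 'sheet' in that case, so '@name' is the lone sheet name.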
shutil.rmtree(directory_to_extract_to)
return sheet_details
# sheets.append(sheet_details)
# for sheet in dictionary['workbook']['sheets']['sheet']['@name']:
# sheet_details = {
# 'id': sheet['@sheetId'], # can be @sheetId for some versions
# 'name': sheet['@name'] # can be @name
# # }
# print(type(sheet))
# sheet_details = str(sheet)
# sheets.append(sheet_details)
# Delete the extracted files directory
shutil.rmtree(directory_to_extract_to)
return sheets
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
print(e)
return None
def errInfo():
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
return exc_type, fname, exc_tb.tb_lineno
def linenum():
exc_type, exc_obj, exc_tb = sys.exc_info()
return exc_tb.tb_lineno
def dbug(*args):
printstring = ''
for arg in args:
printstring += repr(arg) + ' '
print(dt.now(), printstring)
def xls2xlsx(full_path_to_file, full_path_to_export):
"""
Takes a path to an xls file and a directory for export, saves a copy of the xls file
to the directory in xlsx format.
example: xls2xlsx('c:\\documents\\bills.xlsx', 'c:\\documents\\new')
:param full_path_to_file: full path and filename of xls file
:param full_path_to_export: directory path to copy the file into
:return: nothing
"""
import win32com.client as win32
fname = full_path_to_file
print(fname, full_path_to_export)
sname = ntpath.basename(full_path_to_file)
excel = win32.gencache.EnsureDispatch('Excel.Application')
savename = os.path.join(full_path_to_export, sname + "x")
print('fname ', fname, 'sname', sname, 'save name ', savename)
wb = excel.Workbooks.Open(fname)
wb.SaveAs(savename, FileFormat=51) # FileFormat = 51 is for .xlsx extension
wb.Close() # FileFormat = 56 is for .xls extension
# excel.Application.Quit()
print(dt.now(), '====================================================')
strDirOfFiles = 'C:\\my documents\\nh-putups\\' # directory containing files
dirOfFiles = os.listdir(strDirOfFiles) # list of files
str_xlsxDir = os.path.join(strDirOfFiles, 'xlsx') # for export folder
strNewDir = os.path.join(strDirOfFiles, 'images') # new folder for images
#
# try:
# os.mkdir(str_xlsxDir) # folder for xlsx files, for making images
# os.mkdir(strNewDir) # folder for the images
#
# except:
# print(errInfo())
# for filename in dirOfFiles:
# fullName = os.path.join(strDirOfFiles, filename)
# print('------------------------------------------')
# if filename.endswith('.xls'):
# print(' file name ends .xls', repr(dirOfFiles), type(filename), fullName)
# xls2xlsx(fullName, xlsxDir)
# # xport(filename)
# if filename.endswith('.xlsx'):
# print(' file name ends .xlsx', repr(dirOfFiles), type(filename), fullName)
# shutil.move(fullName, os.path.join(strDirOfFiles, 'xlsx', filename))
# testing
# path = "C:\\my documents\\nh-putups\\xlsx\\xLW NH Putup T-C 49510 PE-7100T.xlsx"
# # path = r"C:\my documents\nh-putups\xlsx\xLW NH Putup T-C 49510 PE-7100T.xlsx"
# sheetname = "49510"
# # xlrange = sheetname + "!A1:E55"
# # sheetname = None
# xlrange = None
# # print('sheet name:')
# # print(repr(get_sheet_details(path)))
# # excel2img.export_img(path, '49510-image.png', sheetname, xlrange)
# xport(path)
print('finished first for loop')
xlsxDir = os.listdir(str_xlsxDir) # list of files
print('str new dir', strNewDir, ' xlsxDir ', xlsxDir)
for filesname in xlsxDir:
fullName = os.path.join(str_xlsxDir, ntpath.basename(filesname))
print('fullname :', fullName)
print(filesname)
sheetname = get_sheet_details(fullName, str_xlsxDir)
xport(fullName, strNewDir)
#
# sheets = {}
# with open("C:\\my documents\\nh-putups\\LW NH Putup T-C 49510 PE-7100T\\xl\workbook.xml", 'r') as f:
# xml = f.read()
# dictionary = xmltodict.parse(xml)
# # for sheet in dictionary['workbook']['sheets']['sheet']:
# # sheet_details = {
# # 'id': sheet['sheetId'], # can be @sheetId for some versions
# # 'name': sheet['name'] # can be @name
# # }
# # sheets.append(sheet_details)
# print(dictionary['workbook']['sheets']['sheet']['@name'])
# for filename in os.listdir('./'):
# # xport(filename)
# print(repr(get_sheet_details(filename, "./img")))
| UTF-8 | Python | false | false | 9,410 | py | 2 | xls2xlsx_export2img.py | 1 | 0.624548 | 0.606908 | 0.000531 | 263 | 34.779468 | 116 |
defyattack/pybo | 14,233,521,628,400 | b272822a00bacc220abfb5edc49db21dcfb174d4 | 79fcffd1d0e6cfc1ad92d0663090d0655dc00e92 | /pybo/migrations/0009_category.py | d6aadb0514da158fabeb2831044bfd235b69c432 | [] | no_license | https://github.com/defyattack/pybo | cb86e196baad294385bb2f696b9db741669bb38e | 136e6f7421f6cfa1b93ebd31c9e9327f72f2d425 | refs/heads/master | "2022-12-01T00:50:15.284302" | "2020-08-18T04:32:15" | "2020-08-18T04:32:15" | 287,217,438 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.0.8 on 2020-08-07 03:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pybo', '0008_auto_20200803_1540'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cname', models.CharField(max_length=100)),
('create_date', models.DateTimeField()),
('modify_date', models.DateTimeField(blank=True, null=True)),
],
),
]
| UTF-8 | Python | false | false | 649 | py | 20 | 0009_category.py | 12 | 0.563945 | 0.511556 | 0 | 22 | 28.5 | 114 |
JRafaelNascimento/8Puzzle | 10,196,252,408,267 | f8f9b11ce9ca5e64d5cdc9a528a6ef2ad116c3f1 | a14cde57c9a50aac35bd78b760d3f565ef8d0f63 | /main.py | 981cc1a8a0afd55c9e8a1fee6e01da6955d1104a | [] | no_license | https://github.com/JRafaelNascimento/8Puzzle | 2d34df418e89c1ce1385770c8a80cf2276eaea60 | d2a56063dac116dca6a81178091bf1aea6cab6c7 | refs/heads/master | "2020-03-27T09:48:48.825483" | "2018-08-28T01:27:38" | "2018-08-28T01:27:38" | 146,374,604 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
size = 3
total_size = pow(size, 2)
answer = range(1, total_size)
answer.append(0)
def initial_state():
start_matrix = (answer)[:]
for _ in range(10):
start_matrix = get_possibility(start_matrix)
return start_matrix
def expand_matrix(matrix):
position_expands = {}
for key in range(total_size):
position_expands[key] = get_values(key)
pos = matrix.index(0)
moves = position_expands[pos]
expanded_states = []
for mv in moves:
nstate = matrix[:]
(nstate[pos + mv], nstate[pos]) = (nstate[pos], nstate[pos +
mv])
expanded_states.append(nstate)
return expanded_states
def print_matrix(matrix):
for (index, value) in enumerate(matrix):
print ' %s ' % value,
if index in [x for x in range(size - 1, total_size,
size)]:
print
print
def get_possibility(matrix):
exp_matrix = expand_matrix(matrix)
rand_matrix = random.choice(exp_matrix)
return rand_matrix
def get_distance(matrix):
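    # Heuristic: estimate each tile's displacement from its goal slot via the
    # linear-index offset, split into row jumps and column steps.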
mdist = 0
for node in matrix:
if node != 0:
gdist = abs(answer.index(node) - matrix.index(node))
(jumps, steps) = (gdist // size, gdist % size)
mdist += jumps + steps
return mdist
def get_next_state(matrix):
exp_matrices = expand_matrix(matrix)
m_distances = []
for matrix in exp_matrices:
m_distances.append(get_distance(matrix))
m_distances.sort()
short_path = m_distances[0]
if m_distances.count(short_path) > 1:
least_paths = [
matrix for matrix in exp_matrices if get_distance(matrix) == short_path]
return random.choice(least_paths)
else:
for matrix in exp_matrices:
if get_distance(matrix) == short_path:
return matrix
def get_values(key):
values = [1, -1, size, -size]
valid_values = []
for x in values:
if 0 <= key + x < total_size:
if x == 1 and key in range(size - 1, total_size,
size):
continue
if x == -1 and key in range(0, total_size, size):
continue
valid_values.append(x)
return valid_values
def is_game_over(matrix):
return matrix == answer
def solve(matrix):
while not is_game_over(matrix):
matrix = get_next_state(matrix)
print_matrix(matrix)
print 'Initial State:'
start = initial_state()
print_matrix(start)
print 'Step by Step:'
print_matrix(start)
solve(start)
| UTF-8 | Python | false | false | 2,634 | py | 1 | main.py | 1 | 0.566439 | 0.559226 | 0 | 103 | 24.572816 | 84 |
hieunguyen1815/ai | 18,038,862,656,073 | 06869c592ab21087b9eff6f2139509c786a78dab | 7d800a5d62c4fa8b397ff788a67a17a9fcd848e6 | /tictactoe/tictactoe.py | 25c9f00175c72d53dc4a226585fe0e2bee337b1c | [
"MIT"
] | permissive | https://github.com/hieunguyen1815/ai | e033164ce62b3777f68397da33c6d39f52700e67 | 533f445a49bd1ca38a838c69860ec85df514924b | refs/heads/master | "2022-04-21T10:08:30.235416" | "2020-04-20T03:47:12" | "2020-04-20T03:47:12" | 256,777,558 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Tic Tac Toe Player
"""
import copy
X = "X"
O = "O"
EMPTY = None
def initial_state():
"""
Returns starting state of the board.
"""
return [[EMPTY, EMPTY, EMPTY],
[EMPTY, EMPTY, EMPTY],
[EMPTY, EMPTY, EMPTY]]
def player(board) -> str:
"""
Returns player who has the next turn on a board.
"""
x_amount = 0
o_amount = 0
for row in board:
for column in row:
if column == X:
x_amount += 1
elif column == O:
o_amount += 1
return X if x_amount == o_amount else O
def actions(board) -> set:
"""
Returns set of all possible actions (i, j) available on the board.
"""
set_actions = set()
for i, row in enumerate(board):
for j, column in enumerate(row):
if column == EMPTY:
set_actions.add((i, j))
return set_actions
def result(board, action):
"""
Returns the board that results from making move (i, j) on the board.
"""
val_at_action = board[action[0]][action[1]]
if val_at_action != EMPTY:
raise Exception('invalid action')
next_board = copy.deepcopy(board)
next_board[action[0]][action[1]] = player(board)
return next_board
def winner(board) -> any:
"""
Returns the winner of the game, if there is one.
"""
numeric_board = [[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]
total_horizon = [0, 0, 0]
total_vertical = [0, 0, 0]
total_diagonally = [0, 0]
for i, row in enumerate(board):
for j, column in enumerate(row):
if column == X:
numeric_board[i][j] = 1
total_horizon[i] += 1
total_vertical[j] += 1
elif column == O:
numeric_board[i][j] = -1
total_horizon[i] += -1
total_vertical[j] += -1
n = len(numeric_board)
total_diagonally[0] = sum(numeric_board[i][i] for i in range(n))
total_diagonally[1] = sum(numeric_board[i][n - i - 1] for i in range(n))
if 3 in total_horizon or 3 in total_vertical or 3 in total_diagonally:
return X
elif -3 in total_horizon or -3 in total_vertical or -3 in total_diagonally:
return O
else:
return None
def terminal(board) -> bool:
"""
Returns True if game is over, False otherwise.
"""
if winner(board) is not None:
return True
for row in board:
for column in row:
if column == EMPTY:
return False
return True
def utility(board) -> int:
"""
Returns 1 if X has won the game, -1 if O has won, 0 otherwise.
"""
winner_player = winner(board)
if winner_player == X:
return 1
elif winner_player == O:
return -1
else:
return 0
def minimax(board) -> (int, int):
"""
Returns the optimal action for the current player on the board.
"""
if terminal(board):
return None
if board == initial_state():
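        # Opening shortcut: the centre square is an optimal first move, so
        # skip the full game-tree search on an empty board.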
return 1, 1
optimal_action = None
max_v = -1000
min_v = 1000
person = player(board)
if person == X:
for action in actions(board):
v = min_value(result(board, action))
if v > max_v:
max_v = v
optimal_action = action
else:
for action in actions(board):
v = max_value(result(board, action))
if v < min_v:
min_v = v
optimal_action = action
return optimal_action
def max_value(board) -> int:
if terminal(board):
return utility(board)
v = -1000
for action in actions(board):
v = max(v, min_value(result(board, action)))
return v
def min_value(board) -> int:
if terminal(board):
return utility(board)
v = 1000
for action in actions(board):
v = min(v, max_value(result(board, action)))
return v
| UTF-8 | Python | false | false | 3,977 | py | 2 | tictactoe.py | 1 | 0.530551 | 0.514458 | 0 | 169 | 22.532544 | 79 |
dl942702882/myRepro | 16,801,912,064,182 | 6d35bbe72efb3e2a9d171e15f1c170ce8e6e60fa | 8bca8fd132612eda84d7729bf619adec4e7a6c7a | /unittest2.py | 0585de1fd22eb3e95b9b3ee3ca38cf194f3fe757 | [] | no_license | https://github.com/dl942702882/myRepro | 7ae57a06d1f68673c763b47d947490a455ee0971 | c73b269c6ae19d12e766471e026370e09d32c2ef | refs/heads/master | "2019-04-10T00:02:37.537152" | "2017-05-17T05:55:39" | "2017-05-17T05:55:39" | 90,240,838 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
class simple_test(unittest.TestCase):
@classmethod
def setUpClass(self):
self.foo = list(range(10))
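        # setUpClass runs once for the whole class, so both tests pop from
        # this one shared list (9 first, then 8, given alphabetical test order).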
def test_1st(self):
self.assertEqual(self.foo.pop(),9)
def test_2nd(self):
self.assertEqual(self.foo.pop(),8)
if __name__ == '__main__':
unittest.main() | UTF-8 | Python | false | false | 316 | py | 11 | unittest2.py | 10 | 0.610759 | 0.591772 | 0 | 15 | 20.133333 | 42 |
raqso/steelseries-oled | 15,582,141,359,493 | bbc703e0479d45c1b753360fe117b9129ad0bd84 | 1b42f03961a87ad95ed8a43ab87b11ebe7c6a5dc | /profile.py | ad85af8775cd83c47048128e45ff7683875c56b8 | [
"MIT"
] | permissive | https://github.com/raqso/steelseries-oled | 5714465692a4d1ac27551d73d8fc32fd0bcaeebf | cb3a98d02589baed05bd4343b4a200460012785c | refs/heads/master | "2023-06-03T15:12:12.795861" | "2021-06-18T17:57:05" | "2021-06-18T17:57:05" | 377,982,473 | 1 | 0 | MIT | true | "2021-06-17T23:20:18" | "2021-06-17T23:20:18" | "2021-06-08T05:31:26" | "2021-02-02T11:31:32" | 122 | 0 | 0 | 0 | null | false | false | #!/usr/bin/env python3
from easyhid import Enumeration
import sys
# Check for arguments
if len(sys.argv) < 2:
print("Usage: profile.py profile_number\n")
sys.exit(0)
# Stores an enumeration of all the connected USB HID devices
en = Enumeration()
# Return a list of devices based on the search parameters / Hardcoded to Apex 7
devices = en.find(vid=0x1038, pid=0x1612, interface=1)
if not devices:
devices = en.find(vid=0x1038, pid=0x1618, interface=1)
if not devices:
print("No devices found, exiting.")
sys.exit(0)
# Use first device found with vid/pid
dev = devices[0]
dev.open()
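# Feature report: command byte 0x89, the 16-byte profile number, then zero
# padding (layout inferred from the report that the device accepts).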
data = bytearray([0x89]) + int(sys.argv[1]).to_bytes(16, sys.byteorder) + bytearray([0x00] * 62)
dev.send_feature_report(data)
dev.close()
| UTF-8 | Python | false | false | 750 | py | 8 | profile.py | 7 | 0.708 | 0.656 | 0 | 28 | 25.785714 | 96 |
ankushsaini44/WebTemp | 15,444,702,421,089 | 99deb9ef4a0ce7cbf2998abff71bc046d0069d3d | a77a99db7e3257cc890d00b5d17edea27e40a742 | /core/migrations/0001_initial.py | d15532a3a990cdb439d1188d2327346f58349a09 | [] | no_license | https://github.com/ankushsaini44/WebTemp | 4141c6b9a4f163d08c03f8b5671ea9255a5cad9e | 9b1846097891195f5cdf77793f78280401664389 | refs/heads/master | "2022-05-09T02:38:12.280749" | "2019-09-20T07:41:02" | "2019-09-20T07:41:02" | 209,730,747 | 0 | 0 | null | false | "2022-04-22T22:30:15" | "2019-09-20T07:23:06" | "2019-09-20T07:41:05" | "2022-04-22T22:30:12" | 33 | 0 | 0 | 2 | Python | false | false | # Generated by Django 2.2 on 2019-04-25 11:00
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='City',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('name', models.CharField(blank=True, max_length=100, null=True)),
('source_id', models.CharField(blank=True, max_length=10, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='PostalCodeMapper',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('postal_code', models.CharField(max_length=255)),
('latitude', models.CharField(blank=True, max_length=255, null=True)),
('longitude', models.CharField(blank=True, max_length=255, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='State',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('name', models.CharField(blank=True, max_length=100, null=True)),
('short_name', models.CharField(blank=True, max_length=5, null=True)),
('timezone', models.CharField(blank=True, max_length=100, null=True)),
('source_id', models.CharField(blank=True, max_length=10, null=True)),
('cities', models.ManyToManyField(blank=True, related_name='included_cities', to='core.City')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Country',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('name', models.CharField(blank=True, max_length=100, null=True, verbose_name='country_name')),
('short_name', models.CharField(blank=True, max_length=5, null=True)),
('source_id', models.CharField(blank=True, max_length=10, null=True)),
('states', models.ManyToManyField(blank=True, related_name='included_states', to='core.State')),
],
options={
'abstract': False,
},
),
]
| UTF-8 | Python | false | false | 3,218 | py | 15 | 0001_initial.py | 13 | 0.541641 | 0.528278 | 0 | 72 | 43.694444 | 114 |
gauenk/lpirc2017 | 10,694,468,592,555 | f3b0d99fa6822e88841837cc72e981aeffcfa18d | 81454a7c0d267cabbe42c11cc1954217fcf1deb0 | /get_zip.py | 88bab451ab80f89b019d4ac89b6d4f6df43b3355 | [] | no_license | https://github.com/gauenk/lpirc2017 | 86c3ebd19efac20fef3cbc9b9517df370ab5aacc | f4593ac077855e3bff8210aab50b42b011aab470 | refs/heads/master | "2021-01-20T12:55:29.295034" | "2017-05-06T00:52:12" | "2017-05-06T00:52:12" | 90,427,638 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #general
import os,sys,zipfile,shutil
import numpy as np
from PIL import Image
#networking
import pycurl
from urllib.parse import urlencode as urlencode
#file io
from io import StringIO as StringIO
from io import BytesIO as BytesIO
#subprocessing
import subprocess
from utils import *
def parse_cmd_line():
network_dict = {}
import getopt,sys, time
try:
opts, args = getopt.getopt(sys.argv[1:], "hw:p:", ["help", "host_ipaddress=", "host_port=", "username=", "password=", "image_directory=","temp_directory=","token=","csv_filename="])
except getopt.GetoptError as err:
usage()
sys.exit(2)
for switch, val in opts:
if switch in ("-h", "--help"):
usage()
sys.exit()
elif switch in ("-w", "--host_ipaddress"):
network_dict["host_ipaddress"] = val
elif switch in ("-p", "--host_port"):
network_dict["host_port"] = val
elif switch == "--username":
network_dict["username"] = val
elif switch == "--password":
network_dict["password"] = val
elif switch in ("-csv","--csv_filename"):
network_dict["csv_filename"] = val
elif switch == "--image_directory":
network_dict["image_directory"] = val
elif switch == "--temp_directory":
network_dict["temp_directory"] = val
elif switch == "--token":
network_dict["token"] = val
else:
assert False, "unhandled option"
# print("\nhost = "+network_dict["host_ipaddress"]+":"+network_dict["host_port"]+"\nUsername = "+network_dict["username"]+"\nPassword = "+network_dict["password"]+"")
return network_dict
def network_buffer(network,count):
c = pycurl.Curl()
c.setopt(c.URL, network["host_ipaddress"]+':'+network["host_port"]+'/zipimages')
post_data = {'token':network["token"], 'image_name':str(count)}
postfields = urlencode(post_data)
c.setopt(c.POSTFIELDS,postfields)
my_buffer = BytesIO()
c.setopt(c.WRITEDATA,my_buffer)
#c.setopt(c.WRITEDATA, f)
c.perform()
status = c.getinfo(pycurl.HTTP_CODE)
c.close()
if status == 401:
#Server replied 401, Unauthorized Access, remove the temporary file
print("Invalid")
elif status == 406:
#Server replied 406, Not Acceptable, remove the temporary file
print("Invalid")
return my_buffer
if __name__ == "__main__":
network = parse_cmd_line()
prev_count,count = 0,1
    ## FILE IO FOR DEBUGGING
# my_buffer = BytesIO()
# with open('foo.zip', 'rb') as f:
# shutil.copyfileobj(f,my_buffer)
# my_buffer.seek(0)
# with open('foo.zip', 'wb') as f:
# shutil.copyfileobj(my_buffer,f)
args = ""
while(True):
# try:
# print("try")
# args = input()
# except:
# print("except")
# args = ""
# if len(args) > 0:
# #count = int(args)
count = 1
if prev_count != count:
my_buffer = network_buffer(network,count)
my_buffer.seek(0)
my_z = zipfile.ZipFile(my_buffer)
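        # Each zip member is an image: decode it into a numpy array, pass it
        # to numpy_to_memory(), and report the returned filename on stdout.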
for n in my_z.namelist():
mbuffer = BytesIO(my_z.read(n))
img = np.asarray(Image.open(mbuffer), dtype=np.uint8)
#sys.stdout.write(str(img.shape)+'\n')
filename = numpy_to_memory(img)
print(filename)
sys.stdout.write(filename+'\n')
sys.stdout.flush()
prev_count = count
print("alive")
| UTF-8 | Python | false | false | 3,662 | py | 5 | get_zip.py | 5 | 0.554888 | 0.549153 | 0 | 117 | 30.299145 | 189 |
AndreasScharf/IotAdapter | 2,791,728,752,477 | f56f1adb2e5d1a762e27756a6b9f364fde584a91 | 610b36801b59f5c7b94cb97e833e04175844499e | /updates/update0.1/updater.py | a02e0521a960567bcebc2afd857f5a449aaa6588 | [] | no_license | https://github.com/AndreasScharf/IotAdapter | 0df2445e8f6d417a4819ebd5310435e943cc568c | 7543e7e5679ab7f5982b8893d33fce3ff9be437c | refs/heads/master | "2023-07-27T19:03:34.071444" | "2023-07-26T11:47:59" | "2023-07-26T11:47:59" | 199,659,553 | 0 | 0 | null | false | "2022-12-13T01:42:07" | "2019-07-30T13:42:43" | "2021-12-17T16:09:43" | "2022-12-13T01:42:06" | 737 | 0 | 0 | 6 | Python | false | false | #im gleichen verzeichnis muss die updater.json liegen um
# updater.json must live in the same directory to enable regular updates
import os
import git
import sys
import requests
from colorama import init, Fore, Style
path = '/home/pi/Documents'
seperator = '/'
update_folder = ''
def main():
global path
#path = os.getcwd()
check_folder = False
for file in os.listdir(path):
if file == 'IotAdapter':
check_folder = True
if not check_folder:
print('need to move')
move_updater()
print('execute again')
exit()
print('checking for updates...')
need_to_update = len(sys.argv) <= 1
if not need_to_update :
need_to_update = (sys.argv[1] == '-force' or sys.argv[1] == '-f')
else:
need_to_update = check_version()
if need_to_update == 2:
print('no network')
if need_to_update == 1:
print('update...')
update()
else:
print('up to date')
#wait 24hours
def move_updater():
global path
new_path = '/'.join(path.split(seperator)[:-1])
order = 'cp ' + path + '/updater.py ' + new_path + '/updater.py'
os.system(order)
order = 'cp ' + path + '/updater.json ' + new_path + '/updater.json'
os.system(order)
order = 'python ' + new_path + '/updater.py'
os.system(order)
    # register the updater in rc.local or in a cronjob
def check_version():
path_of_updates = path + '/IotAdapter/updates/'
lastest_version = '0'
for file in os.listdir(path_of_updates):
version = file.replace('update', '')
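        # Note: version strings are compared lexicographically, not numerically.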
if version > lastest_version:
lastest_version = version
    # send an HTTPS request to license.enwatmon.de to compare versions
url = 'https://license.enwatmon.de/version'
myobj = {'version': (lastest_version)}
x = {}
try:
x = requests.post(url, data = myobj)
except Exception as e:
print(e)
if not x:
return 2
print(x.text)
return x.text == 'new version available'
def update():
    # git pull must not touch the config (keep it in .gitignore)
g = git.cmd.Git(path + '/IotAdapter')
g.stash()
g.pull()
lastest_version = ''
for file in os.listdir(path + '/IotAdapter/updates'):
version = file.replace('update', '')
if version > lastest_version:
lastest_version = version
global update_folder
update_folder = path + '/IotAdapter/updates/' + file
if not update_folder or update_folder == '':
print('no updates available')
return
print(update_folder + '/update.sh')
f = open(update_folder + '/update.sh')
orders = f.readlines()
for order in orders:
print('\n' + order)
print(Fore.WHITE + 'Order executing...')
res = os.popen(order).read()
print(res)
print(Fore.GREEN + 'Order done\n')
print('done')
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 2,917 | py | 76 | updater.py | 41 | 0.58519 | 0.581762 | 0 | 112 | 25.044643 | 73 |
cohesity/management-sdk-python | 12,859,132,101,945 | 757cfff4cc5c0caf8d72c19e65deb2c277eee9b4 | 09f8a3825c5109a6cec94ae34ea17d9ace66f381 | /cohesity_management_sdk/models/deploy_vms_to_cloud_task_state_proto.py | 37cad97ef22300288e54312ab19beee1ab060551 | [
"Apache-2.0"
] | permissive | https://github.com/cohesity/management-sdk-python | 103ee07b2f047da69d7b1edfae39d218295d1747 | e4973dfeb836266904d0369ea845513c7acf261e | refs/heads/master | "2023-08-04T06:30:37.551358" | "2023-07-19T12:02:12" | "2023-07-19T12:02:12" | 134,367,879 | 24 | 20 | Apache-2.0 | false | "2023-08-31T04:37:28" | "2018-05-22T06:04:19" | "2023-08-28T20:41:21" | "2023-08-31T04:37:26" | 55,712 | 20 | 18 | 5 | Python | false | false | # -*- coding: utf-8 -*-
# Copyright 2023 Cohesity Inc.
import cohesity_management_sdk.models.deploy_vms_to_cloud_params
class DeployVMsToCloudTaskStateProto(object):
"""Implementation of the 'DeployVMsToCloudTaskStateProto' model.
TODO: type description here.
Attributes:
deploy_vms_to_cloud_params (DeployVMsToCloudParams): This captures all
the necessary information required to deploy VMs to cloud.
"""
# Create a mapping from Model property names to API property names
_names = {
"deploy_vms_to_cloud_params":'deployVmsToCloudParams',
}
def __init__(self,
deploy_vms_to_cloud_params=None,
):
"""Constructor for the DeployVMsToCloudTaskStateProto class"""
# Initialize members of the class
self.deploy_vms_to_cloud_params = deploy_vms_to_cloud_params
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
deploy_vms_to_cloud_params = cohesity_management_sdk.models.deploy_vms_to_cloud_params.DeployVMsToCloudParams.from_dictionary(dictionary.get('deployVmsToCloudParams')) if dictionary.get('deployVmsToCloudParams') else None
# Return an object of this model
return cls(
deploy_vms_to_cloud_params
) | UTF-8 | Python | false | false | 1,790 | py | 1,430 | deploy_vms_to_cloud_task_state_proto.py | 1,405 | 0.660894 | 0.658101 | 0 | 57 | 30.421053 | 229 |
fjguaita/codesignal | 6,811,818,140,476 | 751da9fbfe05c8ad0c959ee4402321801969de99 | f37add4d162d92dc9b07eac49c3bbdde0c162a7f | /alphabeticShift.py | e52a98fe4040b06343291c08147c49be9bf29354 | [] | no_license | https://github.com/fjguaita/codesignal | 9c497a3c0012d7adf605794d93835c99d853f33c | ea5325f2c8234d549717a9bd7eeec974d82155d2 | refs/heads/main | "2023-04-20T05:22:32.300918" | "2021-04-28T01:30:27" | "2021-04-28T01:30:27" | 359,280,938 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def alphabeticShift(s):
return "".join([chr(ord(i)+1) if ord(i) <122 else 'a' for i in s])
def alphabeticShift2(s):
return "".join(chr((ord(i)-96)%26+97) for i in s)
def alphabeticShift3(s):
return ''.join((chr(ord(i)+1) if i!="z" else "a" for i in s)) | UTF-8 | Python | false | false | 268 | py | 85 | alphabeticShift.py | 85 | 0.604478 | 0.55597 | 0 | 10 | 25.9 | 70 |
eschanet/leetcode | 2,078,764,193,778 | 19a8b937a7c59cc42a295873c3fbf665b30b21d6 | ca8a39e0f5b4f23a03738599f724748f9fd3a6a8 | /pascals-triangle/pascals-triangle.py | 6f0f4c31206455b92d8d2ace7a303aab6d8e2767 | [] | no_license | https://github.com/eschanet/leetcode | 01bafec756267a17dbac75dba80b86ef527b7fdc | 9fe6714d440a445ca0c7b5e2f2a2c0410157ae63 | refs/heads/main | "2023-07-18T10:29:56.502817" | "2021-08-29T16:13:23" | "2021-08-29T16:13:23" | 370,804,844 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
def generate(self, numRows: int) -> List[List[int]]:
ans = [[1],[1,1]]
if numRows == 1:
return ans[:1]
elif numRows == 2:
return ans
for i in range(2,numRows):
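            # Pairwise sums of adjacent entries in the previous row give the
            # interior entries of the current row.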
curr_row = list(map(sum, zip(ans[i-1], ans[i-1][1:])))
ans.append([1]+curr_row+[1])
return ans
| UTF-8 | Python | false | false | 408 | py | 52 | pascals-triangle.py | 52 | 0.42402 | 0.394608 | 0 | 14 | 27.285714 | 72 |
open-mmlab/mmagic | 17,360,257,827,101 | 1861b88df2b91a45e061ebc71d4d94f6ec128d05 | af101b467134e10270bb72d02f41f07daa7f57d8 | /configs/real_esrgan/realesrgan_c64b23g32_4xb12-lr1e-4-400k_df2k-ost.py | 187f96385deba22763abb13d4994833ec36da536 | [
"Apache-2.0"
] | permissive | https://github.com/open-mmlab/mmagic | 4d864853417db300de4dfe7e83ce380fd1557a23 | a382f143c0fd20d227e1e5524831ba26a568190d | refs/heads/main | "2023-08-31T14:40:24.936423" | "2023-08-30T05:05:56" | "2023-08-30T05:05:56" | 203,999,962 | 1,370 | 192 | Apache-2.0 | false | "2023-09-14T11:39:18" | "2019-08-23T13:04:29" | "2023-09-14T11:22:31" | "2023-09-14T11:39:17" | 26,511 | 5,729 | 962 | 22 | Jupyter Notebook | false | false | _base_ = './realesrnet_c64b23g32_4xb12-lr2e-4-1000k_df2k-ost.py'
experiment_name = 'realesrgan_c64b23g32_4xb12-lr1e-4-400k_df2k-ost'
work_dir = f'./work_dirs/{experiment_name}'
save_dir = './work_dirs/'
# load_from = None  # path to the pre-trained Real-ESRNet checkpoint (left unset here)
scale = 4
# model settings
model = dict(
type='RealESRGAN',
generator=dict(
type='RRDBNet',
in_channels=3,
out_channels=3,
mid_channels=64,
num_blocks=23,
growth_channels=32,
upscale_factor=scale),
discriminator=dict(
type='UNetDiscriminatorWithSpectralNorm',
in_channels=3,
mid_channels=64,
skip_connection=True),
pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
perceptual_loss=dict(
type='PerceptualLoss',
layer_weights={
'2': 0.1,
'7': 0.1,
'16': 1.0,
'25': 1.0,
'34': 1.0,
},
vgg_type='vgg19',
perceptual_weight=1.0,
style_weight=0,
norm_img=False),
gan_loss=dict(
type='GANLoss',
gan_type='vanilla',
loss_weight=1e-1,
real_label_val=1.0,
fake_label_val=0),
is_use_sharpened_gt_in_pixel=True,
is_use_sharpened_gt_in_percep=True,
is_use_sharpened_gt_in_gan=False,
is_use_ema=True,
train_cfg=dict(start_iter=1000000),
test_cfg=dict(),
data_preprocessor=dict(
type='DataPreprocessor',
mean=[0., 0., 0.],
std=[255., 255., 255.],
))
train_cfg = dict(
_delete_=True,
type='IterBasedTrainLoop',
max_iters=400_000,
val_interval=5000)
# optimizer
optim_wrapper = dict(
_delete_=True,
constructor='MultiOptimWrapperConstructor',
generator=dict(
type='OptimWrapper',
optimizer=dict(type='Adam', lr=1e-4, betas=(0.9, 0.99))),
discriminator=dict(
type='OptimWrapper',
optimizer=dict(type='Adam', lr=1e-4, betas=(0.9, 0.99))),
)
# learning policy
param_scheduler = None
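# Added note: configs like this one are plain Python loaded through mmengine,
# e.g. (sketch, assuming mmengine is installed):
#   from mmengine.config import Config
#   cfg = Config.fromfile('realesrgan_c64b23g32_4xb12-lr1e-4-400k_df2k-ost.py')
#   print(cfg.model.generator.num_blocks)  # -> 23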
| UTF-8 | Python | false | false | 2,026 | py | 1,249 | realesrgan_c64b23g32_4xb12-lr1e-4-400k_df2k-ost.py | 952 | 0.577986 | 0.520237 | 0 | 78 | 24.974359 | 70 |
Upabjojr/rubi_generated | 9,216,999,864,430 | 34ebc0ec054bbad176c8011cccbf62b21d7065b5 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part009579.py | 87e1e479abfefec69ecc745a09d852f23a7e7ef8 | [] | no_license | https://github.com/Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | "2020-07-25T17:26:19.227918" | "2019-09-15T15:41:48" | "2019-09-15T15:41:48" | 208,357,412 | 4 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher50724(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({0: 1}), [
(VariableWithCount('i3.1.2.2.1.0', 1, 1, S(1)), Mul)
]),
1: (1, Multiset({1: 1}), [
(VariableWithCount('i3.1.2.2.1.0', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher50724._instance is None:
CommutativeMatcher50724._instance = CommutativeMatcher50724()
return CommutativeMatcher50724._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 50723
if len(subjects) >= 1 and isinstance(subjects[0], Pow):
tmp1 = subjects.popleft()
subjects2 = deque(tmp1._args)
# State 50725
if len(subjects2) >= 1:
tmp3 = subjects2.popleft()
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i3.1.2.2.1.1', tmp3)
except ValueError:
pass
else:
pass
# State 50726
if len(subjects2) >= 1 and subjects2[0] == Integer(2):
tmp5 = subjects2.popleft()
# State 50727
if len(subjects2) == 0:
pass
# State 50728
if len(subjects) == 0:
pass
# 0: x**2
yield 0, subst1
subjects2.appendleft(tmp5)
subjects2.appendleft(tmp3)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i3.1.2.2.1.2.0', S(0))
except ValueError:
pass
else:
pass
# State 52873
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i3.1.2.2.1.2.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 52874
if len(subjects2) >= 1:
tmp8 = subjects2.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i3.1.2.2.1.2.1.0', tmp8)
except ValueError:
pass
else:
pass
# State 52875
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp10 = subjects2.popleft()
# State 52876
if len(subjects2) == 0:
pass
# State 52877
if len(subjects) == 0:
pass
# 1: 1/(f + x*g)
yield 1, subst3
subjects2.appendleft(tmp10)
subjects2.appendleft(tmp8)
if len(subjects2) >= 1 and isinstance(subjects2[0], Mul):
tmp11 = subjects2.popleft()
associative1 = tmp11
associative_type1 = type(tmp11)
subjects12 = deque(tmp11._args)
matcher = CommutativeMatcher52879.get()
tmp13 = subjects12
subjects12 = []
for s in tmp13:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp13, subst1):
pass
if pattern_index == 0:
pass
# State 52880
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp14 = subjects2.popleft()
# State 52881
if len(subjects2) == 0:
pass
# State 52882
if len(subjects) == 0:
pass
# 1: 1/(f + x*g)
yield 1, subst2
subjects2.appendleft(tmp14)
subjects2.appendleft(tmp11)
if len(subjects2) >= 1 and isinstance(subjects2[0], Add):
tmp15 = subjects2.popleft()
associative1 = tmp15
associative_type1 = type(tmp15)
subjects16 = deque(tmp15._args)
matcher = CommutativeMatcher52884.get()
tmp17 = subjects16
subjects16 = []
for s in tmp17:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp17, subst0):
pass
if pattern_index == 0:
pass
# State 52890
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp18 = subjects2.popleft()
# State 52891
if len(subjects2) == 0:
pass
# State 52892
if len(subjects) == 0:
pass
# 1: 1/(f + x*g)
yield 1, subst1
subjects2.appendleft(tmp18)
subjects2.appendleft(tmp15)
subjects.appendleft(tmp1)
return
yield
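        # Note (added): the unreachable `yield` after `return` makes
        # get_match_iter a generator even when no pattern can match, which is
        # the idiom the matchpy code generator relies on.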
from matchpy.matching.many_to_one import CommutativeMatcher
from .generated_part009581 import *
from .generated_part009580 import *
from collections import deque
from matchpy.utils import VariableWithCount
from multiset import Multiset | UTF-8 | Python | false | false | 6,934 | py | 287 | generated_part009579.py | 285 | 0.424286 | 0.373089 | 0 | 172 | 39.319767 | 83 |
penguin138/purerpc | 7,859,790,163,404 | faf869d087b9b745b75c1a11eeedad66a7b72709 | a72e1fd4496b6b5db863822344fd10f849dd3497 | /tests/test_buffers.py | aa3d66002319cc8f31a6d4b4f66c8687dec99fba | [] | no_license | https://github.com/penguin138/purerpc | daadad13d047de9d6cfc49ecc30dae1e3a9b3ab7 | f5580872e2f0faf5d8c76e35914bc3be80757ae6 | refs/heads/master | "2020-03-19T14:27:40.437544" | "2018-06-09T14:50:17" | "2018-06-09T14:50:17" | 136,623,813 | 0 | 0 | null | true | "2018-06-08T13:39:33" | "2018-06-08T13:39:32" | "2018-06-08T13:39:26" | "2018-06-07T14:20:45" | 94 | 0 | 0 | 0 | null | false | null | import zlib
import unittest
import random
import struct
from purerpc.grpclib.buffers import ByteBuffer, MessageReadBuffer
class TestByteBuffer(unittest.TestCase):
def test_byte_buffer_random(self):
byte_buffer = ByteBuffer()
byte_array = bytearray()
for i in range(1000):
data = bytes(random.randint(0, 255) for _ in range(random.randint(0, 100)))
byte_buffer.append(data)
byte_array.extend(data)
self.assertEqual(len(byte_buffer), len(byte_array))
num_elements = min(random.randint(0, 100), len(byte_buffer))
self.assertEqual(byte_array[:num_elements], byte_buffer.popleft(num_elements))
byte_array = byte_array[num_elements:]
def test_byte_buffer_large_reads(self):
byte_buffer = ByteBuffer()
byte_array = bytearray()
for i in range(1000):
for j in range(100):
data = bytes([(i + j) % 256])
byte_buffer.append(data)
byte_array.extend(data)
self.assertEqual(len(byte_array), len(byte_buffer))
num_elements = min(random.randint(0, 100), len(byte_buffer))
self.assertEqual(byte_array[:num_elements], byte_buffer.popleft(num_elements))
byte_array = byte_array[num_elements:]
def test_byte_buffer_large_writes(self):
byte_buffer = ByteBuffer()
byte_array = bytearray()
data = bytes(range(256)) * 10
for i in range(250):
byte_buffer.append(data)
byte_array.extend(data)
for j in range(10):
self.assertEqual(len(byte_array), len(byte_buffer))
num_elements = min(random.randint(0, 100), len(byte_buffer))
self.assertEqual(byte_array[:num_elements], byte_buffer.popleft(num_elements))
byte_array = byte_array[num_elements:]
class TestMessageReadBuffer(unittest.TestCase):
def test_message_read_buffer(self):
buffer = bytearray()
for i in range(100):
data = bytes(range(i))
compress_flag = False
if i % 2:
data = zlib.compress(data)
compress_flag = True
buffer.extend(struct.pack('>?I', compress_flag, len(data)))
buffer.extend(data)
read_buffer = MessageReadBuffer(message_encoding="gzip")
messages = []
while buffer:
if random.choice([True, False]):
num_bytes = random.randint(0, 50)
read_buffer.data_received(bytes(buffer[:num_bytes]))
buffer = buffer[num_bytes:]
else:
messages.extend(read_buffer.read_all_complete_messages())
messages.extend(read_buffer.read_all_complete_messages())
self.assertEqual(len(messages), 100)
for idx, message in enumerate(messages):
self.assertEqual(message, bytes(range(idx)))
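# Added for standalone runs (assumption: the file is executed directly rather
# than through a test runner):
if __name__ == '__main__':
    unittest.main()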
| UTF-8 | Python | false | false | 2,957 | py | 24 | test_buffers.py | 23 | 0.589449 | 0.571187 | 0 | 73 | 39.493151 | 94 |
karthikpappu/pyc_source | 8,031,588,870,347 | c0cbe97376569a4e9038f8181ab96a0647760302 | 91fa095f423a3bf47eba7178a355aab3ca22cf7f | /pycfiles/django-direct-render-0.1.2.macosx-10.10-intel.tar/views.py | 522849ee79019c2c0dc020a020f268e517dcc5c3 | [] | no_license | https://github.com/karthikpappu/pyc_source | 0ff4d03e6d7f88c1aca7263cc294d3fa17145c9f | 739e7e73180f2c3da5fd25bd1304a3fecfff8d6e | refs/heads/master | "2023-02-04T11:27:19.098827" | "2020-12-27T04:51:17" | "2020-12-27T04:51:17" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04)
# [GCC 8.4.0]
# Embedded file name: /Users/tim/Projects/styo/venv/lib/python2.7/site-packages/direct_render/views.py
# Compiled at: 2015-05-23 10:14:47
from django.shortcuts import render
import os.path
def direct_render(request, template_name):
return render(request, os.path.join('direct_render', template_name)) | UTF-8 | Python | false | false | 442 | py | 114,545 | views.py | 111,506 | 0.742081 | 0.640271 | 0 | 11 | 39.272727 | 102 |
BhathiyaTK/Automated-Lecturers-Time-Table-Generator | 9,002,251,499,726 | 44199e6d64232ae0d1f5610d5abd84b61a0542d4 | 6b38ec447eb7a4e9ecb18bde71d7878665475029 | /altg_app/models.py | 10033a40b8018be970a05dfa3c26bc297cfb2f2b | [] | no_license | https://github.com/BhathiyaTK/Automated-Lecturers-Time-Table-Generator | 02999b64a4b8e7e387828d017fd4ddb8aaeb41d7 | 85d333c20d337d73a020c7d1a2b9e1214c26b4d2 | refs/heads/master | "2023-05-28T20:57:52.595321" | "2021-10-19T15:34:55" | "2021-10-19T15:34:55" | 225,145,578 | 6 | 1 | null | false | "2023-05-23T01:28:20" | "2019-12-01T10:50:03" | "2022-09-12T11:10:16" | "2023-05-23T01:28:20" | 8,712 | 5 | 1 | 3 | CSS | false | false | from django.db import models
from django.contrib.auth.models import AbstractUser
from django.contrib.auth import get_user_model
from PIL import Image
class User(AbstractUser):
USER_ROLES = (
('admin', 'Admin'),
('guest', 'Guest'),
)
TITLES = (
('Prof', 'Prof'),
('Dr', 'Dr'),
('Mr', 'Mr'),
('Mrs', 'Mrs'),
('Miss', 'Miss'),
)
POSITION = (
('lecturer', 'Lecturer'),
('demo', 'Demonstrator'),
('naStaff', 'Non Acedamic Staff'),
('other', 'Other'),
)
user_title = models.CharField(max_length=30, choices=TITLES, default=None)
first_name = models.CharField(max_length=224, default=None)
last_name = models.CharField(max_length=224, default=None)
lecturer_name = models.CharField(max_length=224, default=None)
email = models.EmailField()
username = models.CharField(max_length=150, unique=True)
password = models.CharField(max_length=100)
lecturer_code = models.CharField(max_length=224)
user_position = models.CharField(max_length=30, choices=POSITION, default=None)
def get_profile_img(self):
try:
return Profiles.objects.filter(username=self.username).values_list('user_profile_img', flat=True)[0]
except:
return None
class ProcessData(models.Model):
BATCH = (
('1', '1st Year'),
('2', '2nd Year'),
('3', '3rd Year'),
('4', '4th Year'),
)
HALL = (
('NLH', 'NLH'),
('204', '204'),
('104', '104'),
('cis1', 'CIS Lab 1'),
('cis2', 'CIS Lab 2'),
)
lecturer_name = models.CharField(max_length=100)
    # the choices store string keys ('1'…'4', 'NLH', …), so these must be
    # CharFields; an IntegerField cannot hold them
    batch = models.CharField(max_length=20, choices=BATCH)
    hall = models.CharField(max_length=20, choices=HALL)
subject = models.CharField(max_length=50)
students = models.IntegerField()
class AllLectureHalls(models.Model):
hall_number = models.CharField(max_length=224)
hall_name = models.CharField(max_length=224)
hall_capacity = models.IntegerField()
class AllSubjects(models.Model):
BATCH = (
('1', '1st Year'),
('2', '2nd Year'),
('3', '3rd Year'),
('4', '4th Year'),
)
SEMESTERS = (
('1', 'Semester I'),
('2', 'Semester II'),
)
subject_code = models.CharField(max_length=100)
subject_name = models.CharField(max_length=224)
related_batch = models.CharField(max_length=20, choices=BATCH, default=None)
semester = models.CharField(max_length=20, choices=SEMESTERS, default=None)
related_lecturer = models.CharField(max_length=224, default='')
std_count = models.IntegerField()
class AllBatches(models.Model):
batch_no = models.CharField(max_length=20)
batch_name_suffix = models.CharField(max_length=20)
no_of_students = models.CharField(max_length=50)
class AllSemesters(models.Model):
semester_no = models.CharField(max_length=20)
semester_name_suffix = models.CharField(max_length=20)
class AllTimeSlots(models.Model):
slot_id = models.CharField(max_length=50)
time_slot = models.CharField(max_length=100)
class Profiles(models.Model):
username = models.CharField(max_length=224)
user_profile_img = models.ImageField(upload_to='users/', blank=True, null=True)
    def save(self, *args, **kwargs):
        super().save(*args, **kwargs)
img = Image.open(self.user_profile_img.path)
if img.height > img.width:
left = 0
right = img.width
top = (img.height - img.width)/2
bottom = (img.height + img.width)/2
img = img.crop((left, top, right, bottom))
elif img.width > img.height:
left = (img.width - img.height)/2
right = (img.width + img.height)/2
top = 0
bottom = img.height
img = img.crop((left, top, right, bottom))
        if img.height > 300 or img.width > 300:
output_size = (300, 300)
img.thumbnail(output_size)
img.save(self.user_profile_img.path)
class SavedSchedules(models.Model):
lecturer_name = models.CharField(max_length=224)
semester = models.CharField(max_length=20)
hall_n_time = models.CharField(max_length=1000)
schedule = models.CharField(max_length=1000)
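# Minimal usage sketch (added; field values are invented for illustration):
#   AllSubjects.objects.create(
#       subject_code='CS101', subject_name='Intro to CS', related_batch='1',
#       semester='1', related_lecturer='Dr Smith', std_count=120)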
| UTF-8 | Python | false | false | 4,260 | py | 28 | models.py | 12 | 0.608216 | 0.5777 | 0 | 128 | 32.28125 | 112 |
ppesh/SoftUni-Python-Developer | 9,088,150,843,631 | b49af0172827fc34841b3c7d663c12d86fd814ef | 3fd7e066e68b528ef21a861f4ab4955fcf70ad22 | /Python-OOP/05-Inheritance/03-Players-and-Monsters/project/blade_knight.py | 50bba2d6ff97429d2c699ed6d24d931c444996b1 | [] | no_license | https://github.com/ppesh/SoftUni-Python-Developer | 3123f2789c71340cf659205a967b61cb05df0997 | f56ad6e391de6c2b67c4bd402a7d42111f7b7df8 | refs/heads/main | "2023-06-28T05:33:41.833126" | "2021-07-25T17:46:58" | "2021-07-25T17:46:58" | 324,375,684 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #from players_and_monsters_3.project.dark_knight import DarkKnight
from project.dark_knight import DarkKnight
class BladeKnight(DarkKnight):
pass | UTF-8 | Python | false | false | 148 | py | 110 | blade_knight.py | 109 | 0.831081 | 0.824324 | 0 | 6 | 23.833333 | 66 |
denisjackman/Python | 18,915,035,998,005 | beded35948bf78bf3706e3b87b0f1c5c7f3ef3b5 | eb389a52b5fb6ea53d287eea03378b42fbf56ee2 | /Code/Library/Codewars/codewars/functions/CamelCase.py | c10af0a813bd60969b80602ab743e7e267b1baa5 | [
"CC-BY-4.0"
] | permissive | https://github.com/denisjackman/Python | b2f13ba0990abbf65decd5d4d8db19b2f3c9456d | a24a05c38d59d3e6a65d01c616c30e949d1db5c2 | refs/heads/master | "2019-08-20T19:59:42.096219" | "2019-05-02T16:00:11" | "2019-05-02T16:00:11" | 42,041,826 | 1 | 0 | null | false | "2018-09-10T13:18:17" | "2015-09-07T08:47:41" | "2018-09-10T10:19:40" | "2018-09-10T13:18:17" | 16,297 | 0 | 0 | 0 | HTML | false | null | def to_camel_case(text):
    # convert snake_case / kebab-case text to camelCase
result = ""
store = text.replace("_","-")
newstore = store.split("-")
counter = 0
for item in newstore:
if counter == 0:
counter += 1
result += item
else:
result += item.capitalize()
return result
print(to_camel_case(''))
print(to_camel_case("the_stealth_warrior"))
print(to_camel_case("The-Stealth-Warrior"))
print(to_camel_case("A-B-C"))
| UTF-8 | Python | false | false | 461 | py | 317 | CamelCase.py | 277 | 0.563991 | 0.557484 | 0 | 21 | 20.952381 | 42 |
10yung/Ml-SOM-hw | 8,899,172,255,485 | d981e2a7ac84608c44659cadb0f4ba8c26b83859 | f57c00da53aa5fc87d431de9eca36fd3b9276518 | /model/reformat.py | 43b407467b0e4d80a8446a48a72fc5a8829f39e7 | [] | no_license | https://github.com/10yung/Ml-SOM-hw | 15a7fce398445398648570606cc4319a33a9abce | 6702cf379f8cbf7b760235cf232c02f1723289ca | refs/heads/master | "2021-01-19T22:36:31.972686" | "2020-05-29T05:50:51" | "2020-05-29T05:50:51" | 88,830,884 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
# concatenate the first nChar characters of each nominal column per row
def title_concat(df, nChar):
    # build labels of the form _Ove_TPC_TOS... (one per row, shape (1, n_rows))
    label_abbr = np.empty([1, len(df.index)], dtype=str)
    label_abbr[:] = ''
    for column in df:
        first_chars = np.array(df[column].astype(str).str[:nChar])
        first_chars = ['_' + x for x in first_chars]
        label_abbr = np.core.defchararray.add(label_abbr, first_chars)
    return label_abbr
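# Hypothetical usage (added; column names and values are invented):
#   df = pd.DataFrame({'c1': ['Overcast', 'Sunny'], 'c2': ['TPC', 'TOS']})
#   title_concat(df, 3) -> array([['_Ove_TPC', '_Sun_TOS']], ...)  # shape (1, n_rows)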
| UTF-8 | Python | false | false | 499 | py | 10 | reformat.py | 5 | 0.631263 | 0.629259 | 0 | 16 | 30.1875 | 72 |
sputnikpetrinets/project_sputnik | 10,445,360,498,873 | 9a0ceb13e98faa8c430276cfc76f33c3cf19b12e | 94dd0e49160a4a48e3cb8d4a987f4e6c7ede8822 | /view_configuration_simulation.py | f1e77df2b292b6df749bc42b41e3c92f1070c26a | [] | no_license | https://github.com/sputnikpetrinets/project_sputnik | fdccabc4ca2cdd6e9d99e9f8a6354b076d615c08 | e9dfefb35a99f3a5c4c6a8492050a63bc9f509b2 | refs/heads/master | "2016-09-10T09:18:11.560512" | "2013-03-26T15:32:00" | "2013-03-26T15:32:00" | 8,856,426 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
import pygtk
import gtk
import view
class ViewConfigurationSimulation(view.View):
""" The ViewConfigurationSimulation class is a specific view that inherits from the general View class and is used as a parent class to visualise the configuration window for the simulations and contains a ControllerConfigurationSimulation object. """
A_GILLESPIE = 0
A_TAULEAP = 1
_algorithm = None
    def __init__(self, model = None, controller = None):
        """ Constructor of ViewConfigurationSimulation. """
        # Python keeps only the last of two identically named methods, so the
        # original pair of __init__ definitions is merged into this one.
        # call constructor of parent class
        view.View.__init__(self, model, controller)
        # set title
        self._window.set_title("Configuration: Simulation")
def show(self):
""" Interface to create and display the GUI on the screen. """
pass
def update(self):
""" Interface to notify MVCObserver objects about a general data change. """
pass
def update_component(self, key):
""" Interface to notify Observer objects about a data change of a component. """
pass
def update_output(self):
""" Interface to notify Observer objects about a data change of simulation results. """
pass
def undo(self):
""" Interface to notify Observer objects about an undo. """
pass
if __name__ == "__main__":
app = ViewConfigurationSimulation()
app.show()
gtk.main()
| UTF-8 | Python | false | false | 1,680 | py | 138 | view_configuration_simulation.py | 83 | 0.639881 | 0.63869 | 0 | 57 | 28.473684 | 255 |
hoangnm/fastapi-example-shorten-url | 7,559,142,451,200 | 081ce1f81ea844075d204674c193b18f6cc321d2 | 360ff9af7ea923f4f96016369ccc167550ea543a | /app/models/ShortenUrl.py | c5dcd2811c96f53296e83e626fc9907440475d28 | [] | no_license | https://github.com/hoangnm/fastapi-example-shorten-url | c821514d62f2196fbc686834ab34f59cf4b5d5b1 | 61f63e963e18674b22a33af5b8da730700bcb4de | refs/heads/main | "2023-06-17T16:25:09.562780" | "2021-07-22T08:15:41" | "2021-07-22T08:15:41" | 387,736,862 | 0 | 0 | null | false | "2021-07-22T08:15:41" | "2021-07-20T09:15:17" | "2021-07-20T11:53:11" | "2021-07-22T08:15:41" | 11 | 0 | 0 | 0 | Python | false | false | from dataclasses import dataclass, field
import hashlib
@dataclass
class ShortenUrl:
id: int = field(init=False)
origin_url: str
generated_url: str = field(init=False)
def __post_init__(self):
self.generated_url = hashlib.md5(self.origin_url.encode()).hexdigest()
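# Example (added): the shortened key is just the md5 hex digest of the URL.
#   link = ShortenUrl(origin_url='https://example.com')
#   link.generated_url -> 32-character md5 hex string, deterministic per URL
#   link.id            -> unset here; presumably assigned by the persistence layer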
| UTF-8 | Python | false | false | 291 | py | 15 | ShortenUrl.py | 10 | 0.690722 | 0.687285 | 0 | 11 | 25.363636 | 78 |
sw2227/PS4 | 14,173,392,108,710 | 041c1a9365161a7996510b8462b3461978a5c8a9 | ff408bd6d970d68882a525e5416d9f58b693fa4e | /PS4_1c.py | ac9c0d205068be0428cb90073ea1cfc7c87cc913 | [] | no_license | https://github.com/sw2227/PS4 | de9f389cb81bfb8e8a694cc9b665856d134148da | 59ed39c1fa88b6f885b1376c12101edbe12b952f | refs/heads/master | "2022-04-26T05:08:37.743005" | "2020-04-28T17:01:32" | "2020-04-28T17:01:32" | 259,621,787 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Problem 1 - part c
import numpy
import scipy.optimize
from scipy.optimize import fsolve
import matplotlib.pyplot
K1 = 0.1
K2 = 10
mesh = 1000
KDi = numpy.linspace(0.1,100,mesh)
sitaB = KDi/(1+KDi)
# For x*
xnorm_K1 = numpy.zeros(mesh)
xnorm_K2 = numpy.zeros(mesh)
for a in range(mesh):
def for_x(E):
x1 = E[0]
x2 = E[1]
Z = numpy.empty((2))
Z[0] = 5*sitaB[a]*(1-x1)/(K1+1-x1)-x1/(K1+x1)
Z[1] = 5*sitaB[a]*(1-x2)/(K2+1-x2)-x2/(K2+x2)
return Z
xnorm_K1[a],xnorm_K2[a] = fsolve(for_x, numpy.array([1,1]))
# For y*
ynorm_K1 = numpy.zeros(mesh)
ynorm_K2 = numpy.zeros(mesh)
for b in range(mesh):
def for_y(F):
y1 = F[0]
y2 = F[1]
Z = numpy.empty((2))
Z[0] = 10*xnorm_K1[b]*(1-y1)/(K1+1-y1)-y1/(K1+y1)
Z[1] = 10*xnorm_K2[b]*(1-y2)/(K2+1-y2)-y2/(K2+y2)
return Z
ynorm_K1[b],ynorm_K2[b] = fsolve(for_y, numpy.array([1,1]))
#Plot
matplotlib.pyplot.figure(figsize = (10,8))
matplotlib.pyplot.title(
'Responses vs. Non-dimensional Input',fontsize = 30)
matplotlib.pyplot.xlabel('$1 /'+'\kappa_D$',fontsize = 15)
matplotlib.pyplot.ylabel('',fontsize = 15)
matplotlib.pyplot.xlim(0,100)
matplotlib.pyplot.ylim(0,1.1)
matplotlib.pyplot.plot(KDi,sitaB,color='black',label='$\Theta _b$ ($\kappa$ = 0.1)');
matplotlib.pyplot.plot(KDi,xnorm_K1,color='red',label='x* ($\kappa$ = 0.1)');
matplotlib.pyplot.plot(KDi,xnorm_K2,color='red',linestyle='dashed',label='x* ($\kappa$ = 10)');
matplotlib.pyplot.plot(KDi,ynorm_K1,color='blue',label='y* ($\kappa$ = 0.1)');
matplotlib.pyplot.plot(KDi,ynorm_K2,color='blue',linestyle='dashed',label='y* ($\kappa$ = 10)');
matplotlib.pyplot.legend(loc='lower right',fontsize = 20);
#Problem 1 - part c: fit Hill functions to the three responses
# (rewritten to fix the original's MATLAB-style calls: curve_fit takes
# (f, xdata, ydata, p0), `hold` is obsolete, and hillfit needs 4 parameters)
def hillfit(x, a, b, c, d):
    return a + (b - a)/(1 + 10**((c - x)*d))

b00 = [0.1, 1.0, 1.0, 0.1]  # initial guess for (a, b, c, d); chosen, not fitted

# Fit and plot x*
popt_x, _ = scipy.optimize.curve_fit(hillfit, KDi, xnorm_K1, p0=b00)
Ag = numpy.linspace(min(KDi), max(KDi))
matplotlib.pyplot.figure(2)
matplotlib.pyplot.plot(KDi, xnorm_K1)
matplotlib.pyplot.plot(Ag, hillfit(Ag, *popt_x))
matplotlib.pyplot.xlabel('1/kD')
matplotlib.pyplot.ylabel('x*')
matplotlib.pyplot.legend(['Data', 'Hill Function Fit'], loc='lower right')

# Fit and plot y*
popt_y, _ = scipy.optimize.curve_fit(hillfit, KDi, ynorm_K1, p0=b00)
matplotlib.pyplot.figure(3)
matplotlib.pyplot.plot(KDi, ynorm_K1)
matplotlib.pyplot.plot(Ag, hillfit(Ag, *popt_y))
matplotlib.pyplot.xlabel('1/kD')
matplotlib.pyplot.ylabel('y*')
matplotlib.pyplot.legend(['Data', 'Hill Function Fit'])

# Fit and plot theta_b
popt_t, _ = scipy.optimize.curve_fit(hillfit, KDi, sitaB, p0=b00)
matplotlib.pyplot.figure(4)
matplotlib.pyplot.plot(KDi, sitaB)
matplotlib.pyplot.plot(Ag, hillfit(Ag, *popt_t))
matplotlib.pyplot.xlabel('1/kD')
matplotlib.pyplot.ylabel('$\Theta_b$')
matplotlib.pyplot.legend(['Data', 'Hill Function Fit'])
| UTF-8 | Python | false | false | 3,295 | py | 4 | PS4_1c.py | 4 | 0.631259 | 0.578452 | 0 | 94 | 33.053191 | 96 |
tperrier/mwachx | 18,571,438,618,742 | 6560634a834a1e69e789d249e1483f73725fd48d | f97a6c73a594829c78ef605dc73ec77eddd6a73f | /contacts/views/ajax.py | 00c3bc037f7aac1e9820eeb20078ceb30dce37b6 | [
"Apache-2.0"
] | permissive | https://github.com/tperrier/mwachx | 220c6a8a0206f0968aaff9b74bd78bf0425b3f0c | 94616659dc29843e661b2ecc9a2e7f1d4e81b5a4 | refs/heads/master | "2021-06-30T12:42:04.125431" | "2020-09-13T19:38:20" | "2020-09-13T19:38:20" | 30,064,531 | 3 | 6 | Apache-2.0 | false | "2021-06-10T19:44:21" | "2015-01-30T09:05:00" | "2020-09-13T19:38:32" | "2021-06-10T19:44:20" | 2,662 | 2 | 7 | 14 | Python | false | false | #Django Imports
from django.shortcuts import render
from django.views.decorators.csrf import csrf_protect, ensure_csrf_cookie
from django.contrib.auth.decorators import login_required
from django.conf import settings
@csrf_protect
@ensure_csrf_cookie
@login_required()
def angular_view(request):
FAKE_DATE = getattr(settings,'FAKE_DATE', True)
return render(request, 'app/index.html', context={'config': {
'SHOW_DATE':FAKE_DATE,
'user':request.user
}})
| UTF-8 | Python | false | false | 496 | py | 163 | ajax.py | 116 | 0.731855 | 0.731855 | 0 | 15 | 32.066667 | 73 |
JoonBeomLee/Algorithm_Python | 2,645,699,899,169 | 66fb2ca4cfdfeb13f8c5acc4f33c146d1a8cf0a3 | f6b5799c13fad2382d638a1208f4972ce818174a | /site/BAEKJOON/src/python/5_Ex01/2523.py | 602885d2be6d06ee4e91d7228439e04eb02d63ce | [] | no_license | https://github.com/JoonBeomLee/Algorithm_Python | 6bf0cc29ffaf75156bfa44ea531c33b3d2b2a129 | 185fb39d535573c374f1d0d88f728f97086a4246 | refs/heads/master | "2023-06-11T10:27:10.228151" | "2021-07-05T14:59:40" | "2021-07-05T14:59:40" | 193,500,999 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | star_cnt = int(input())
for i in range(1, star_cnt*2):
if i <= star_cnt:
for j in range(i):
print("*", end="")
else:
for j in range(i, star_cnt*2):
print("*", end="")
print() | UTF-8 | Python | false | false | 240 | py | 198 | 2523.py | 195 | 0.425 | 0.4125 | 0 | 11 | 20.909091 | 38 |
robbiejdunne/Scrape-Whoscored-Event-Data | 8,426,725,847,637 | fd7e238fabb97a69eb9bb1b757eb76b4714da193 | ad492df8364b6a388b993ea672ae4250bdbeb0d8 | /tutorial.py | ac319d595ec372c47c44caf8e9f00c02fefb64e5 | [
"MIT"
] | permissive | https://github.com/robbiejdunne/Scrape-Whoscored-Event-Data | 62013a4b50cd1db0b73dd23f26f3fc2be1b2809d | f4e3d64351a6790912e5e3398181a8e3254dcc53 | refs/heads/main | "2023-08-14T01:26:18.167456" | "2021-09-25T15:22:50" | "2021-09-25T15:22:50" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 14 14:28:34 2020
@author: aliha
@twitter: @rockingAli5
"""
"""
Tutorial on getting hands on the event data for a single match.
New: Now added xG data for shots from Understat.com(only available for top 5 european leagues since 2014-15).
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from selenium import webdriver
import main
import visuals
import seaborn as sns
### Get Match Data (Run from line 29 to line 35 together) ###
if __name__ == "__main__":
driver = webdriver.Chrome('chromedriver.exe')
# whoscored match centre url of the required match (Example: Barcelona vs Sevilla)
url = "https://www.whoscored.com/Matches/1491995/Live/Spain-LaLiga-2020-2021-Barcelona-Sevilla"
match_data = main.getMatchData(driver, url, close_window=True)
# Match dataframe containing info about the match
matches_df = main.createMatchesDF(match_data)
# Events dataframe
events_df = main.createEventsDF(match_data)
# match Id
matchId = match_data['matchId']
# Information about respective teams as dictionary
home_data = matches_df['home'][matchId]
away_data = matches_df['away'][matchId]
### Get EPV for successful passes ###
events_df = main.addEpvToDataFrame(events_df)
### Get data for multiple matches ###
# getting competition urls
league_urls = main.getLeagueUrls()
# getting match urls for that competition and season
match_urls = main.getMatchUrls(comp_urls=league_urls, competition='LaLiga', season='2021/2022')
# getting match urls for a specific team
team_urls = main.getTeamUrls(team='Barcelona', match_urls=match_urls)
# getting match data for the required urls(eg. first 5 matches of Barcelona)
matches_data = main.getMatchesData(match_urls=team_urls[:5])
# getting events dataframe for required matches
events_ls = [main.createEventsDF(match) for match in matches_data]
# adding EPV column
events_list = [main.addEpvToDataFrame(match) for match in events_ls]
events_dfs = pd.concat(events_list)
# saving events as csv
events_dfs.to_csv('events.csv')
### Pass Network Examples from Barcelona vs Sevilla game ###
team = 'Barcelona'
teamId = 65
opponent = 'Sevilla'
venue = 'home'
team_players_dict = {}
for player in matches_df['home'][match_data['matchId']]['players']:
team_players_dict[player['playerId']] = player['name']
# Total Passes
passes_df = events_df.loc[events_df['type']=='Pass'].reset_index(drop=True)
passes_df = passes_df.loc[passes_df['outcomeType']=='Successful'].reset_index(drop=True)
passes_df = passes_df.loc[passes_df['teamId'] == teamId].reset_index(drop=True)
### Get Passes For Different Durations ###
# Cut in 2
first_half_passes = passes_df.loc[passes_df['period']=='FirstHalf']
second_half_passes = passes_df.loc[passes_df['period']=='SecondHalf'].reset_index(drop=True)
# Cut in 4 (quarter = 25 mins)
first_quarter_passes = first_half_passes.loc[first_half_passes['minute'] <= 25]
second_quarter_passes = first_half_passes.loc[first_half_passes['minute'] > 25].reset_index(drop=True)
third_quarter_passes = second_half_passes.loc[second_half_passes['minute'] <= 70]
fourth_quarter_passes = second_half_passes.loc[second_half_passes['minute'] > 70].reset_index(drop=True)
### Get Team Total Passes ###
visuals.getTeamTotalPasses(events_df, teamId, team, opponent, pitch_color='#000000')
### Get Completed Box Passes by Team ###
# You can select more cmaps here: https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html
# opp_transparent/opp_comet are manually added by me to this visual, you can change it in visuals.py
# If you get an error regarding 'opp_transparent/opp_comet' you probably haven't replaced pitch.py/linecollection.py file
visuals.getTeamSuccessfulBoxPasses(events_df, teamId, team, pitch_color='#000000', cmap='YlGn')
# Create Pass Network
# you can change marker_label to 'name' as well
fig,ax = plt.subplots(figsize=(16,11))
visuals.createPassNetworks(match_data, events_df, matchId=match_data['matchId'], team='Barcelona', max_line_width=6,
marker_size=1500, edgewidth=3, dh_arrow_width=25, marker_color='#0e5cba',
marker_edge_color='w', shrink=24, ax=ax, kit_no_size=25)
# Create Progressive Pass Network
# you can change marker_label to 'name' as well
fig,ax = plt.subplots(figsize=(16,11))
visuals.createAttPassNetworks(match_data, events_df, matchId=match_data['matchId'], team='Barcelona', max_line_width=6,
marker_size=1300, edgewidth=3, dh_arrow_width=25, marker_color='#0e5cba',
marker_edge_color='w', shrink=24, ax=ax, kit_no_size=25)
### Get Shot map for a team ###
fig,ax = plt.subplots(figsize=(16,11))
visuals.createShotmap(match_data, events_df, team='Barcelona', pitchcolor='black', shotcolor='white',
goalcolor='red', titlecolor='white', legendcolor='white', marker_size=300, fig=fig, ax=ax)
### Get Net PV formation map for a team ###
# Choose your color palette from here: https://seaborn.pydata.org/tutorial/color_palettes.html
fig,ax = plt.subplots(figsize=(16,11))
visuals.createPVFormationMap(match_data, events_df, team='Barcelona', color_palette=sns.color_palette("flare", as_cmap=True),
markerstyle='h', markersize=1000, markeredgewidth=2, labelsize=7, labelcolor='w', ax=ax)
| UTF-8 | Python | false | false | 5,493 | py | 5 | tutorial.py | 3 | 0.698161 | 0.674131 | 0 | 172 | 30.837209 | 125 |
jpolitz/lambda-py | 5,703,716,571,246 | 4b90026a78771f96721641f53782a673368803cd | 18a51a897441d33155dda9b1969c1d4898820530 | /base/pylib/file.py | 4189595d5f93b2ad5bd8b4d263348baff1ab0985 | [
"Apache-2.0"
] | permissive | https://github.com/jpolitz/lambda-py | 6fc94e84398363c6bb2e547051da9db6466ab603 | a2d1be7e017380414429a498435fb45975f546dd | refs/heads/master | "2019-04-21T09:36:42.116912" | "2013-04-16T13:30:44" | "2013-04-16T13:30:44" | 7,525,003 | 1 | 0 | null | true | "2013-01-09T22:32:47" | "2013-01-09T16:54:10" | "2013-01-09T22:32:47" | "2013-01-09T16:54:57" | 176 | null | 0 | 0 | Racket | null | null | class file:
def __new__(self, *args):
path = args.__getitem__(0)
mode = args.__getitem__(1)
if mode == "r":
if ___delta("existing-file?", path):
return ___delta("file-open", path, mode)
else:
raise IOError("No such file: " + path)
else:
return ___delta("file-open", path, mode)
def __init__(self, path, mode):
self.path = path
self.mode = mode
def read(self, *args):
___assign('%str', str)
if ___delta("num=", args.__len__(), 0):
return ___delta("file-readall", self, str)
elif ___delta("num=", args.__len__(), 1):
size = ___delta("tuple-getitem", args, 0)
return ___delta("file-read", self, size, str)
def readline(self):
___assign('%str', str)
return ___delta("file-readline", self, str)
def write(self, data):
return ___delta("file-write", self, data)
def close(self):
return ___delta("file-close", self)
def __str__(self):
return "<file '" + self.path + "', mode '" + self.mode + "'>"
___assign('%file', file)
___assign('open', file)
___assign('%open', file)
| UTF-8 | Python | false | false | 1,182 | py | 142 | file.py | 68 | 0.494924 | 0.490694 | 0 | 40 | 28.55 | 69 |
bytevictor/DAI | 8,907,762,208,218 | 97d0b97ee03c81855afbeb0b11b58cbdd903a588 | 74ef75b85106b3e8e2af0743ad86656110de8af0 | /practicas/practica 6/mi_aplicacion/urls.py | 1f834a06844a58b60b0adf2ea6f14573ac420189 | [] | no_license | https://github.com/bytevictor/DAI | fba713f298b3f3d8388c9466a4d921c01b0856ef | c859507d464d574c5b44011bbe7f58542876dc9a | refs/heads/master | "2023-03-03T06:42:23.895477" | "2021-02-16T07:12:11" | "2021-02-16T07:12:11" | 306,115,841 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # mi_aplicacion/urls.py
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('test_template', views.test_template, name='test_template'),
path('lista_autores', views.lista_autores, name='lista_autores'),
path('anadir_autor', views.anadir_autor, name='anadir_autor'),
path('borrar_autor', views.borrar_autor, name='borrar_autor'),
path('modificar_autor/<int:id_autor>', views.modificar_autor, name='modificar_autor'),
path('lista_libros', views.lista_libros, name='lista_libros'),
path('anadir_libro', views.anadir_libro, name='anadir_libro'),
path('borrar_libro', views.borrar_libro, name='borrar_libro'),
path('modificar_libro/<int:id_libro>', views.modificar_libro, name='modificar_libro'),
path('lista_prestamos', views.lista_prestamos, name='lista_prestamos'),
path('anadir_prestamo', views.anadir_prestamo, name='anadir_prestamo'),
path('borrar_prestamo', views.borrar_prestamo, name='borrar_prestamo'),
path('modificar_prestamo/<int:id_prestamo>', views.modificar_prestamo, name='modificar_prestamo'),
] | UTF-8 | Python | false | false | 1,093 | py | 39 | urls.py | 20 | 0.723696 | 0.723696 | 0 | 21 | 51.095238 | 100 |
Shikhar-S/TreeCodeGen | 2,972,117,403,464 | 7b462ea0b668a314b602c2f6f3a82088a3db18d8 | c625157aa5f39541dc5dd86977c0eb2299db1780 | /args.py | 34ebec54f63cb977ead528d3172f845ee91e7a45 | [] | no_license | https://github.com/Shikhar-S/TreeCodeGen | e56eb8ec13e42fbae642f880388dd985e7fc531e | 572aa96fabb98d62335bc43084acafbe4bd93fe5 | refs/heads/main | "2023-08-31T12:52:24.776300" | "2021-09-28T18:14:05" | "2021-09-28T18:14:05" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
import argparse  # needed by the __main__ block below; missing in the original
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options
from fairseq import utils
from fairseq.modules import (
AdaptiveInput, AdaptiveSoftmax, CharacterTokenEmbedder, LearnedPositionalEmbedding, MultiheadAttention,
SinusoidalPositionalEmbedding
)
from fairseq.models import (
FairseqIncrementalDecoder, FairseqEncoder, FairseqLanguageModel, FairseqModel, register_model, FairseqEncoderModel,
register_model_architecture,
)
from modules.encoder import *
from modules.decoder import *
from modules.attention import *
from modules.embeddings import *
from model import *
from modules.nstack_tree_attention import *
def nstack2seq_base(args):
nstack_class_base(args)
def nstack_class_base(args):
args.encoder_type = getattr(args, 'encoder_type', Encoder)
args.dptree_class = getattr(args, 'dptree_class', NodeStackOnValueAttention)
args.placeholder_const = getattr(args, 'placeholder_const', False)
args.pretrain_embed_mode = getattr(args, 'pretrain_embed_mode', 'const')
args.on_seq = getattr(args, 'on_seq', 'key')
args.divide_src_len = getattr(args, 'divide_src_len', True)
args.src_len_norm = getattr(args, 'src_len_norm', 'none')
args.nstack_pos_embed = getattr(args, 'nstack_pos_embed', False)
args.nstack_pos_embed_learned = getattr(args, 'nstack_pos_embed_learned', False)
args.cum_node = getattr(args, 'cum_node', 'sum')
args.nstack_linear = getattr(args, 'nstack_linear', False)
args.wnstack_include_leaves = getattr(args, 'wnstack_include_leaves', True)
args.wnstack_norm = getattr(args, 'wnstack_norm', 'none')
args.wnstack_up_norm = getattr(args, 'wnstack_up_norm', 'none')
args.nstack_mask_fn = getattr(args, 'nstack_mask_fn', 'default')
args.nstack_mask_df_layer = getattr(args, 'nstack_mask_df_layer', None)
args.nstack_hier_embed = getattr(args, 'nstack_hier_embed', False)
args.nstack_hier_embed_max_horiz = getattr(args, 'nstack_hier_embed_max_horiz', 100)
args.nstack_hier_embed_max_ver = getattr(args, 'nstack_hier_embed_max_ver', 1024)
args.nstack_hier_embed_share = getattr(args, 'nstack_hier_embed_share', False)
args.take_full_dim = getattr(args, 'take_full_dim', False)
args.hier_embed_right = getattr(args, 'hier_embed_right', False)
args.dwstack_proj_act = getattr(args, 'dwstack_proj_act', 'none')
args.node_embed_init = getattr(args, 'node_embed_init', 'embed')
args.embed_pretrained_no_scale = getattr(args, 'embed_pretrained_no_scale', False)
args.first_layer_nonodes = getattr(args, 'first_layer_nonodes', False)
args.vanilla_layers = getattr(args, 'vanilla_layers', 0)
args.transition_act = getattr(args, 'transition_act', 'none')
args.transition_dropout = getattr(args, 'transition_dropout', 0.0)
args.mutual_ancestor_level = getattr(args, 'mutual_ancestor_level', 5)
args.sep_dwstack_proj_act = getattr(args, 'sep_dwstack_proj_act', 'tanh')
args.nstack_cross = getattr(args, 'nstack_cross', True)
#TODO: turn it back on
# args.nstack_cross=False
args.input_dropout = getattr(args, 'input_dropout', 0)
print(base_architecture)
base_architecture(args)
#TODO: change the dimensions
def add_iwslt(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 640)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 640)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
def dwnstack_merge2seq_node_iwslt_onvalue_base_upmean_mean_mlesubenc_allcross_hier(args):
args.encoder_type = getattr(args, 'encoder_type', Encoder)
args.decoder_type = getattr(args, 'decoder_type', Decoder)
args.dptree_class = getattr(args, 'dptree_class', MergeStackNodesOnValueAttention)
args.wnstack_norm = getattr(args, 'wnstack_norm', 'mean')
args.wnstack_up_norm = getattr(args, 'wnstack_up_norm', 'mean')
args.cross_nstack_mask_fn = getattr(args, 'cross_nstack_mask_fn', WeightMask.ALL_ALL)
args.nstack_mask_fn = getattr(args, 'nstack_mask_fn', WeightMask.LEAVES_SUBTREE)
args.nstack_hier_embed = getattr(args, 'nstack_hier_embed', True)
args.nstack_hier_embed_max_horiz = getattr(args, 'nstack_hier_embed_max_horiz', 100)
args.nstack_hier_embed_max_ver = getattr(args, 'nstack_hier_embed_max_ver', 50)
add_iwslt(args)
nstack2seq_base(args)
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--workers', default=0, type=int)
args = parser.parse_args()
dwnstack_merge2seq_node_iwslt_onvalue_base_upmean_mean_mlesubenc_allcross_hier(args)
| UTF-8 | Python | false | false | 4,999 | py | 14 | args.py | 13 | 0.714343 | 0.706341 | 0 | 112 | 43.580357 | 119 |
StadlerMaximilian/dataset_preperation | 16,904,991,307,535 | 9ae7e8b83836a6afcadf4d0d2bffe2ff92407000 | 0a2ad88a8e6700d5bd39b5b40639dd3e9dc3bffe | /dataset_conversion/json_dataset_combine_sets.py | 3f974849e22b389626c89b0871628b03b22b82fe | [] | no_license | https://github.com/StadlerMaximilian/dataset_preperation | 6b245f2eba13d0930d1622c33a60995933965e52 | 403ed82bc5390921bc3dac39124a95bee409c1ac | refs/heads/master | "2020-03-26T02:06:01.793093" | "2018-09-03T20:33:35" | "2018-09-03T20:33:35" | 144,396,719 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
import sys
import os
import argparse
"""
Utilities to combine datasets. Currently only vkitti is supported (intentionally);
the combination works whenever all images live in the same image directory and the
image names of the different datasets are distinct.
"""
def parse_args():
parser = argparse.ArgumentParser(
        description='Combine the train json files of multiple datasets into one'
)
parser.add_argument(
'--json',
dest='json_files',
action='append',
type=str,
help='Include here the path to the train_json file of your dataset that should be combined.',
required=True
)
parser.add_argument(
'--name',
dest='name',
help='name for combined datasets',
default='dataset_combined'
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def create_coco_dataset_dict(new_images, new_annotations, categories, licenses, info):
dataset_dict = {"info": info,
"images": new_images,
"annotations": new_annotations,
"type": "instances",
"licenses": licenses,
"categories": categories
}
return dataset_dict
def create_modified_coco_images(image, new_id):
image['id'] = new_id
return image
def create_modified_coco_annotation(annotation, new_id, new_img_id):
annotation['id'] = new_id
annotation['image_id'] = new_img_id
return annotation
def main():
args = parse_args()
for file in args.json_files:
if not os.path.exists(file):
raise ValueError("FILE {} does not exist!!!".format(file))
anno_dir = os.path.split(args.json_files[0])[0]
json_files = []
categories = []
annotations = []
images = []
licenses = []
info = []
for file in args.json_files:
json_file = json.loads(open(file).read())
json_files.append(json_file)
categories.append(json_file['categories'])
annotations.append(json_file['annotations'])
images.append(json_file['images'])
licenses.append(json_file['licenses'])
info.append(json_file['info'])
image_counter = 0
annotation_counter = 0
new_images = []
new_annotations = []
# TODO implement checking whether datasets can be combined, i.e. if categories are the same
# TODO current workaround: simply take first entry of list of files to be combined
categories = categories[0]
info = info[0]
licenses = licenses[0]
    for idx in range(len(json_files)):  # renamed from `set` to avoid shadowing the builtin
        for img in images[idx]:
            annos_per_im = [x for x in annotations[idx] if x['image_id'] == img['id']]
new_images.append(create_modified_coco_images(img, image_counter))
for anno in annos_per_im:
new_annotations.append(create_modified_coco_annotation(anno,
annotation_counter,
image_counter))
annotation_counter += 1
image_counter += 1
combined_dict = create_coco_dataset_dict(new_images, new_annotations, categories, licenses, info)
combined_dataset_name = os.path.join(anno_dir, args.name + '.json')
with open(combined_dataset_name, 'w') as fp:
json.dump(combined_dict, fp)
print("wrote file {}".format(combined_dataset_name))
if __name__ == '__main__':
main()
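    # Example invocation (added; file names are hypothetical):
    #   python json_dataset_combine_sets.py \
    #       --json anno/train_a.json --json anno/train_b.json --name combined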
| UTF-8 | Python | false | false | 3,553 | py | 20 | json_dataset_combine_sets.py | 19 | 0.588517 | 0.585421 | 0 | 120 | 28.6 | 117 |
moe9195/wishlist-api | 8,400,956,068,391 | ed78111821c98b3a637f20999b879de44a23fa65 | 812c25c505edbfac9456dfad16b7772d4eba01ed | /api/serializers.py | d0fdef523599243f96e36810f8880a7b269eae53 | [] | no_license | https://github.com/moe9195/wishlist-api | 9d1f9aa10cdb9a591fd38176cb3dd1ed61441c0b | cce4700e64ed5ca4f81b7ceb09f5e10cfbacd255 | refs/heads/master | "2021-01-16T02:46:19.372494" | "2020-02-25T12:32:09" | "2020-02-25T12:32:09" | 242,950,094 | 0 | 0 | null | true | "2020-02-25T08:37:40" | "2020-02-25T08:37:39" | "2020-02-24T13:08:16" | "2020-02-24T13:08:14" | 12,120 | 0 | 0 | 0 | null | false | false | from items.models import Item, FavoriteItem
from rest_framework import serializers
from django.contrib.auth.models import User
class RegisterSerializer(serializers.ModelSerializer):
password = serializers.CharField(write_only=True)
class Meta:
model = User
fields = ['username', 'password', 'first_name', 'last_name']
def create(self, validated_data):
username = validated_data['username']
password = validated_data['password']
first_name = validated_data['first_name']
last_name = validated_data['last_name']
new_user = User(username=username, first_name=first_name, last_name=last_name)
new_user.set_password(password)
new_user.save()
return validated_data
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ["first_name", "last_name"]
class ListSerializer(serializers.ModelSerializer):
detail = serializers.HyperlinkedIdentityField(
view_name = "api-detail",
lookup_field = "id",
lookup_url_kwarg = "item_id"
)
added_by = UserSerializer()
favourited = serializers.SerializerMethodField()
class Meta:
model = Item
fields = ['name', 'id', 'detail', 'added_by', 'favourited']
def get_favourited(self, obj):
        return obj.favoriteitem_set.count()
class DetailSerializer(serializers.ModelSerializer):
favourited_by = serializers.SerializerMethodField()
class Meta:
model = Item
fields = ['name', 'description', 'image', 'favourited_by', 'id']
    def get_favourited_by(self, obj):
        # return usernames (plain strings) so the field is JSON-serializable
        return [fav.user.username for fav in obj.favoriteitem_set.all()]
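# Shape sketch of one serialized item (added; values are placeholders):
#   ListSerializer -> {'name': ..., 'id': ..., 'detail': '<url to api-detail>',
#                      'added_by': {'first_name': ..., 'last_name': ...},
#                      'favourited': <int>}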
| UTF-8 | Python | false | false | 1,802 | py | 3 | serializers.py | 2 | 0.654273 | 0.654273 | 0 | 55 | 31.763636 | 86 |
python-attrs/cattrs | 6,227,702,606,673 | 283ebc47adfd664ba56160bf8b250abfd9d6c103 | d00b999cab00ca302f03f3ef6aecea77a2c24741 | /tests/test_dataclasses.py | 0f86c7a0fd39334c92b3a9dc4d2b4dbc228b7d89 | [
"MIT"
] | permissive | https://github.com/python-attrs/cattrs | ad4df8fa8a5cff3ecf7a073fd62406aa8a8de51f | acd3d1052776e9f815fdadf49cff02aa6c5a7a91 | refs/heads/main | "2023-08-16T22:38:55.362864" | "2023-08-13T11:42:21" | "2023-08-13T11:42:21" | 66,779,450 | 317 | 52 | MIT | false | "2023-08-31T22:35:41" | "2016-08-28T16:45:58" | "2023-08-31T01:21:24" | "2023-08-31T22:35:40" | 1,777 | 681 | 98 | 64 | Python | false | false | import dataclasses
from typing import List
import attr
from cattrs import BaseConverter
@dataclasses.dataclass
class Foo:
bar: str
@attr.define
class Container:
foos: List[Foo]
def test_dataclasses_in_attrs(converter: BaseConverter):
struct = Container([Foo("bar")])
unstruct = {"foos": [{"bar": "bar"}]}
assert converter.unstructure(struct) == unstruct
assert converter.structure(unstruct, Container) == struct
def test_dataclasses_in_container(converter: BaseConverter):
struct = [Foo("bar"), Foo("bat")]
unstruct = [{"bar": "bar"}, {"bar": "bat"}]
assert converter.unstructure(struct) == unstruct
assert converter.structure(unstruct, List[Foo]) == struct
def test_dataclasses(converter: BaseConverter):
struct = Foo("bar")
unstruct = {"bar": "bar"}
assert converter.unstructure(struct) == unstruct
assert converter.structure(unstruct, Foo) == struct
| UTF-8 | Python | false | false | 927 | py | 90 | test_dataclasses.py | 67 | 0.68932 | 0.68932 | 0 | 43 | 20.55814 | 61 |