repo_name (string, len 7-111) | __id__ (int64, 16.6k-19,705B) | blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 5-151) | content_id (string, len 40) | detected_licenses (sequence) | license_type (string, 2 classes) | repo_url (string, len 26-130) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, len 4-42) | visit_date (unknown) | revision_date (unknown) | committer_date (unknown) | github_id (int64, 14.6k-687M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 12 classes) | gha_fork (bool, 2 classes) | gha_event_created_at (unknown) | gha_created_at (unknown) | gha_updated_at (unknown) | gha_pushed_at (unknown) | gha_size (int64, 0-10.2M, nullable) | gha_stargazers_count (int32, 0-178k, nullable) | gha_forks_count (int32, 0-88.9k, nullable) | gha_open_issues_count (int32, 0-2.72k, nullable) | gha_language (string, len 1-16, nullable) | gha_archived (bool, 1 class) | gha_disabled (bool, 1 class) | content (string, len 10-2.95M) | src_encoding (string, 5 classes) | language (string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 10-2.95M) | extension (string, 19 classes) | num_repo_files (int64, 1-202k) | filename (string, len 4-112) | num_lang_files (int64, 1-202k) | alphanum_fraction (float64, 0.26-0.89) | alpha_fraction (float64, 0.2-0.89) | hex_fraction (float64, 0-0.09) | num_lines (int32, 1-93.6k) | avg_line_length (float64, 4.57-103) | max_line_length (int64, 7-931) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ShannonTully/data-structures-and-algorithms | 12,283,606,469,524 | 59dc1afd081d0911947f411ad958c2e61fd867cb | 1efd977af1638b7afa63c43a36e964ffe5db295a | /challenges/multi-bracket-validation/test_multi_bracket_validation.py | 778e33b27d0b67fd8da4b2bdd2d3655aa5cbdfe8 | [
"MIT"
] | permissive | https://github.com/ShannonTully/data-structures-and-algorithms | 847c8fa99b395efa254c364345302ccafaa6e802 | f43f24f9f60beb37a2717f387c1050c9fd1ece60 | refs/heads/master | "2021-09-14T06:51:48.510869" | "2018-05-09T03:24:26" | "2018-05-09T03:24:26" | 125,955,350 | 0 | 0 | MIT | false | "2018-05-07T21:25:50" | "2018-03-20T03:27:42" | "2018-05-07T21:08:20" | "2018-05-07T21:25:50" | 32,336 | 0 | 0 | 0 | Python | false | null | import multi_bracket_validation
import pytest
def test_basic():
assert multi_bracket_validation.multi_bracket_validation('()') is True
def test_fail():
assert multi_bracket_validation.multi_bracket_validation('(}') is False
def test_complex():
assert multi_bracket_validation.multi_bracket_validation('[][Code][Fellows](())') is True
| UTF-8 | Python | false | false | 352 | py | 47 | test_multi_bracket_validation.py | 46 | 0.730114 | 0.730114 | 0 | 14 | 24.142857 | 93 |
carden-code/python | 2,473,901,188,308 | 49500361d80edfa1c0a18a39cf3c780c3ac8f1b5 | b6fa182321756b891b84958e2b2c01e63b3f88b2 | /stepik/minute_of_genetics.py | 47ff69a096d269f31de471f1a7f40f7cf50f22e6 | [] | no_license | https://github.com/carden-code/python | 872da0dff5466070153cf945c428f1bc8309ea2b | 64e4df0d9893255ad362a904bb5d9677a383591c | refs/heads/master | "2023-07-05T05:14:16.479392" | "2021-08-22T21:27:36" | "2021-08-22T21:27:36" | 305,476,509 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # The program receives a string of genetic code as input,
# consisting of the letters А (adenine), Г (guanine), Ц (cytosine), Т (thymine).
# Write a program that counts how much adenine, guanine, cytosine and thymine
# the given genetic-code string contains.
#
# Input format
# The input is a genetic-code string consisting of the characters А, Г, Ц, Т, а, г, ц, т.
#
# Output format
# The program must print how much guanine, thymine, cytosine and adenine the given genetic-code string contains.
#
# Note: the string contains no characters other than А, Г, Ц, Т, а, г, ц, т.
string = input().lower()
a = string.count('а')  # Cyrillic characters: the task's input alphabet is А, Г, Ц, Т
b = string.count('г')
c = string.count('ц')
d = string.count('т')
print(f'Adenine: {a}')
print(f'Guanine: {b}')
print(f'Cytosine: {c}')
print(f'Thymine: {d}')
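# Equivalent one-pass tally with collections.Counter (a sketch, not part of
# the original submission):
# from collections import Counter
# counts = Counter(string)
# print(f"Adenine: {counts['а']}, Guanine: {counts['г']}, "
#       f"Cytosine: {counts['ц']}, Thymine: {counts['т']}")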
| UTF-8 | Python | false | false | 1,284 | py | 295 | minute_of_genetics.py | 292 | 0.711443 | 0.711443 | 0 | 21 | 37.285714 | 112 |
dplepage/dfiance | 9,320,079,040,356 | f6e4e8f6f11945286c0848e752dafc19ed1cea44 | 8f0387b86bde8eb44e593ec16a96338843c83ca0 | /dfiance/datetypes.py | cc6a2a0076a3d8368d878e4afa95a9931a4d95cc | [] | no_license | https://github.com/dplepage/dfiance | 84a083b325288fb8730f4b20700d738db3792364 | 3c4c916eadb9fdef1c762491ca40d726f3575258 | refs/heads/master | "2016-08-11T10:14:51.126704" | "2013-08-15T18:08:25" | "2013-08-15T18:08:25" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import datetime
try:
from dateutil.parser import parse as parse_datetime
_have_dateutil = True
except ImportError:
_have_dateutil = False
from base import Invalid, Dictifier
class DateTime(Dictifier):
"""
Dictifier for datetimes.
The 'format' argument determines what format will be used for serializing,
and if the 'require_format' argument is True, only strings in that format
will be deserialized. 'require_format' must be true if the dateutil module
is not installed.
If you have dateutil installed, however, then 'require_format' defaults to
False, and DateTime will use dateutil.parser.parse to process dates, so any
string dateutil can parse can be undictified:
>>> dtfier = DateTime()
>>> dtfier.undictify("2003-12-25")
datetime.datetime(2003, 12, 25, 0, 0)
>>> dtfier.undictify("2003-12-25T14:25:03.1")
datetime.datetime(2003, 12, 25, 14, 25, 3, 100000)
>>> dtfier.undictify("Dec 25, 2003 2:25:03.1 pm")
datetime.datetime(2003, 12, 25, 14, 25, 3, 100000)
Invalid strings get specific error messages:
>>> dtfier.undictify("200003-12-25")
Traceback (most recent call last):
...
Invalid: bad_year
>>> dtfier.undictify("2003-13-25")
Traceback (most recent call last):
...
Invalid: bad_month
>>> dtfier.undictify("2003-12-32")
Traceback (most recent call last):
...
Invalid: bad_day
>>> dtfier.undictify("2003-12-25T26:25:03.1")
Traceback (most recent call last):
...
Invalid: bad_hour
>>> dtfier.undictify("2003-12-25T14:71:03.1")
Traceback (most recent call last):
...
Invalid: bad_minute
>>> dtfier.undictify("2003-12-25T14:25:93.1")
Traceback (most recent call last):
...
Invalid: bad_second
Non-numbers get generic "bad_format" error messages:
>>> dtfier.undictify("2003-hello-12")
Traceback (most recent call last):
...
Invalid: bad_format
>>> dtfier.undictify("Not even remotely a datetime.")
Traceback (most recent call last):
...
Invalid: bad_format
>>> dtfier.undictify(["this", "isn't", "even", "a", "string"])
Traceback (most recent call last):
...
Invalid: type_error
Dictifying will serialize to self.format, which defaults to
"%Y-%m-%dT%H:%M:%S.%f" but can be set to anything.
>>> d = datetime.datetime(2003, 12, 25, 14, 21, 3, 100500)
>>> DateTime().dictify(d)
u'2003-12-25T14:21:03.100500'
>>> DateTime(format='%I:%M:%S.%f%p on %h %e, %Y').dictify(d)
u'02:21:03.100500PM on Dec 25, 2003'
Be warned, though, that you can fail at formatting by entering something
which neither datetime.strptime nor dateutil can parse:
>>> dumb_format = 'At %I:%M:%S.%f%p on day number %e of the year %Y, in %h'
>>> dtfier = DateTime(format=dumb_format)
>>> dtfier.dictify(d)
u'At 02:21:03.100500PM on day number 25 of the year 2003, in Dec'
>>> dtfier.undictify(dtfier.dictify(d))
Traceback (most recent call last):
...
Invalid: bad_format
Or by entering something that throws away data:
>>> dtfier = DateTime(format="%h %e %Y")
>>> dtfier.dictify(d)
u'Dec 25 2003'
>>> dtfier.undictify(dtfier.dictify(d))
datetime.datetime(2003, 12, 25, 0, 0)
>>> dtfier.undictify(dtfier.dictify(d)) == d
False
Validate only approves of datetimes:
>>> DateTime().validate(datetime.datetime.now())
>>> DateTime().validate("")
Traceback (most recent call last):
...
Invalid: type_error
Like all good dictifiers, undictify is idempotent:
>>> dtfier = DateTime()
>>> dt = dtfier.undictify("2003-1-1")
>>> dtfier.undictify(dt) == dt
True
"""
_dt_type = datetime.datetime
    def __init__(self, format="%Y-%m-%dT%H:%M:%S.%f", require_format=not _have_dateutil):
        self.format = format
        self.require_format = require_format
        if not require_format and not _have_dateutil:
            msg = ("Can't parse dates or times with require_format=False"
                   " unless dateutil is installed.")
            raise ValueError(msg)
def parse_datetime(self, value):
'''Parse a formatted datetime.
Tries datetime.datetime.strptime; if that fails it falls back to
dateutil if present.
'''
if not isinstance(value, basestring):
raise Invalid("type_error")
try:
return datetime.datetime.strptime(value, self.format)
        except ValueError, e:
            if not _have_dateutil or self.require_format:
                raise Invalid("bad_format")
        # If we get here, require_format is off and we have dateutil - try to use it!
try:
return parse_datetime(value)
except ValueError, e:
if e.message.lower() == 'year is out of range':
raise Invalid("bad_year")
elif e.message.lower() == 'month must be in 1..12':
raise Invalid("bad_month")
elif e.message.lower() == 'day is out of range for month':
raise Invalid("bad_day")
elif e.message.lower() == 'hour must be in 0..23':
raise Invalid('bad_hour')
elif e.message.lower() == 'minute must be in 0..59':
raise Invalid('bad_minute')
elif e.message.lower() == 'second must be in 0..59':
raise Invalid('bad_second')
else:
raise Invalid("bad_format")
def undictify(self, value, **kwargs):
if isinstance(value, self._dt_type):
return value
return self.parse_datetime(value)
def validate(self, value, **kwargs):
if not isinstance(value, self._dt_type):
raise Invalid("type_error")
def dictify(self, value, **kwargs):
return unicode(value.strftime(self.format))
class Date(DateTime):
"""
Dictifier for dates.
All the notes about DateTime apply:
Dateutil parsing:
>>> date = Date()
>>> date.undictify("2003-12-25")
datetime.date(2003, 12, 25)
Error msgs for each field that can fail, contingent on dateutil recognizing
that something is the field in question:
>>> date.undictify("2003-13-25")
Traceback (most recent call last):
...
Invalid: bad_month
>>> date.undictify("2003-12-32")
Traceback (most recent call last):
...
Invalid: bad_day
>>> date.undictify("200000-12-25")
Traceback (most recent call last):
...
Invalid: bad_year
>>> date.undictify("2003-hi-12")
Traceback (most recent call last):
...
Invalid: bad_format
>>> date.undictify("Not a date")
Traceback (most recent call last):
...
Invalid: bad_format
Formats work, but don't choose something dateutil can't read back:
>>> date = datetime.date(2003, 12, 25)
>>> datefier = Date(format="%h %e '%g")
>>> datefier.dictify(date)
u"Dec 25 '03"
>>> datefier.undictify(datefier.dictify(date))
datetime.date(2003, 12, 25)
>>> datefier = Date(format="Day %e of month %h in the year %g")
>>> datefier.dictify(date)
u'Day 25 of month Dec in the year 03'
>>> datefier.undictify(datefier.dictify(date))
Traceback (most recent call last):
...
Invalid: bad_format
Validate passes only dates:
>>> Date().validate(datetime.date.today())
>>> Date().validate(datetime.time())
Traceback (most recent call last):
...
Invalid: type_error
As a side effect of dateutil, the Date dictifier WILL undictify strings
dateutil parses as full datetimes, discarding the time:
>>> Date().undictify('2003-12-25 14:21:03.100500')
datetime.date(2003, 12, 25)
"""
_dt_type = datetime.date
def __init__(self, format="%Y-%m-%d"):
super(Date, self).__init__(format)
def undictify(self, value, **kwargs):
if isinstance(value, self._dt_type):
return value
return self.parse_datetime(value).date()
class Time(DateTime):
"""
>>> Time().undictify("14:25:03.1")
datetime.time(14, 25, 3, 100000)
>>> Time().undictify("26:25:03.1")
Traceback (most recent call last):
...
Invalid: bad_hour
>>> Time().undictify("14:71:03.1")
Traceback (most recent call last):
...
Invalid: bad_minute
>>> Time().undictify("14:25:93.1")
Traceback (most recent call last):
...
Invalid: bad_second
>>> Time().undictify("Not a time")
Traceback (most recent call last):
...
Invalid: bad_format
>>> Time().dictify(datetime.time(14, 25, 3, 100000))
u'14:25:03.100000'
>>> Time(format="%I:%M%p").dictify(datetime.time(14, 25, 3, 1))
u'02:25PM'
>>> Time().undictify(datetime.time(14, 25, 3, 1))
datetime.time(14, 25, 3, 1)
"""
_dt_type = datetime.time
def __init__(self, format="%H:%M:%S.%f"):
super(Time, self).__init__(format)
def undictify(self, value, **kwargs):
if isinstance(value, self._dt_type):
return value
return self.parse_datetime(value).time()
if __name__ == '__main__':
import doctest
doctest.testmod() | UTF-8 | Python | false | false | 9,082 | py | 18 | datetypes.py | 17 | 0.605263 | 0.555384 | 0 | 297 | 29.582492 | 89 |
guillaume-havard/testdjango | 3,547,643,025,015 | 6e61c6075490a98a9ce3ba7c46e84ffd2ea70ff9 | ecdbbfbbff48a1199e124daf5c97a26826301c72 | /sitetest/blog/forms.py | 008239a485aa2cff4b6584316e89603539b9d74c | [
"MIT"
] | permissive | https://github.com/guillaume-havard/testdjango | 41aa84570259f511cd2773c9c691e88df1b0b691 | 66176f81660715dcb3ef4da083e4ea584762c27d | refs/heads/master | "2016-08-04T09:57:21.119528" | "2014-12-17T21:12:54" | "2014-12-17T21:12:54" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django import forms
from blog.models import Article
class ContactForm(forms.Form):
sujet = forms.CharField(max_length=100)
message = forms.CharField(widget=forms.Textarea)
    envoyeur = forms.EmailField(label="Your email address")
    renvoi = forms.BooleanField(help_text="Check this box if you would like a copy of the sent mail.", required=False)
    # To validate a single field, define a method whose name starts with "clean_"
    # followed by the name of the field to check.
    # It is called after Form.cleaned_data has been built.
    """
    def clean_message(self):
        message = self.cleaned_data['message']
        if "pizza" in message:
            raise forms.ValidationError("We don't want to hear about pizza!")
        return message # Don't forget to return the field's content
    """
    # To validate several fields against each other, override the clean() method.
def clean(self):
cleaned_data = super(ContactForm, self).clean()
sujet = cleaned_data.get('sujet')
message = cleaned_data.get('message')
        if sujet and message: # Are sujet and message both valid?
            if "pizza" in sujet and "pizza" in message:
                # A generic error shown at the top of the form:
                #raise forms.ValidationError("You mention pizza in the subject AND the message? Come on!")
                # To attach the error to the right field instead:
                msg = "You already mention pizza in the subject; don't mention it again in the message!"
                #self.add_error("message", msg)
                # or
                self.add_error("message",
                               forms.ValidationError("You mention pizza in the subject AND the message? Come on!"))
        return cleaned_data # Don't forget to return the data if everything is OK
#To build a form from a model (/!\ note the different base class).
class ArticleForm(forms.ModelForm):
class Meta:
model = Article
        #exclude = ('auteur','categorie','slug') # Would exclude the fields named "auteur", "categorie" and "slug"
fields = ('titre', 'auteur', 'categorie', 'contenu')
# Data can be bound from a POST, or directly from a model instance:
#form = ArticleForm(instance=article) # article being any Article object from the database
# The fields are then pre-filled.
# Once a valid ModelForm has been received, calling save() is enough to store it in the database.
# /!\ If checks/changes are needed before saving, you can do:
#form = ArticleForm(donnees) # No need to specify the other fields, they were excluded
#article = form.save(commit=False) # Does not save the article to the database yet
#article.categorie = Categorie.objects.all()[0] # We add the missing attributes
#article.auteur = "Jean-Albert"
#article.save()
#For images.
class NouveauContactForm(forms.Form):
nom = forms.CharField()
adresse = forms.CharField(widget=forms.Textarea)
photo = forms.ImageField()
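# Usage sketch in a view (hypothetical view name; note that uploaded files
# arrive in request.FILES, not request.POST):
# def nouveau_contact(request):
#     form = NouveauContactForm(request.POST, request.FILES)
#     if form.is_valid():
#         photo = form.cleaned_data['photo']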
| UTF-8 | Python | false | false | 3,159 | py | 26 | forms.py | 16 | 0.674984 | 0.673704 | 0 | 69 | 44.231884 | 122 |
b-liw/cryptopals | 171,798,735,030 | c3954b5fdfc304e00636365154dacc9cf960311a | 7955547961ddde1c5ea1a1a7390538d9c0d0effb | /set2/byte_at_a_time_ecb_decryption_harder.py | 78b1b67343fcced5cf97ab0fbcbacf6a8e09e61e | [] | no_license | https://github.com/b-liw/cryptopals | 83b444f30aa459b2ee5d0ec7081b6ca155234def | 14e19cfc63ff93a55bec498a1c0b2d4746d639f4 | refs/heads/master | "2020-03-27T04:03:52.427917" | "2018-09-09T16:48:20" | "2018-09-09T16:48:20" | 145,909,514 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import base64
from Crypto.Random import get_random_bytes
from set2.aes_ecb import encrypt_aes_128_ecb
from set2.pkcs7_pad import pkcs7_pad
oracle_global_key = get_random_bytes(16)
def brute_force_block_size(ciphertext):
for block_size in range(2, 64):
if len(ciphertext) % block_size == 0:
blocks = [ciphertext[i * block_size:i * block_size + block_size] for i in
range(0, len(ciphertext) // block_size)]
for i in range(len(blocks) - 1):
if blocks[i] == blocks[i + 1]:
return block_size
return None
def ecb_suffix_oracle(plaintext):
random_prefix = b"tEOW49TgR2bg7HRhJdFLpjAXX7Ju6iZgxJRbyUdvoWQAETI1Gt5x5Gyp47rjZw"
secret_suffix = base64.b64decode(
"Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkgaGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBqdXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUgYnkK")
return encrypt_aes_128_ecb(pkcs7_pad(random_prefix + plaintext + secret_suffix, 16), oracle_global_key)
def get_block_size():
ciphertext = ecb_suffix_oracle(b"A" * 100)
return brute_force_block_size(ciphertext)
def get_offset_and_pad_count_of_prefix(block_size):
for pad_count in range(1, block_size + 1):
pad_to_align_prefix_to_block_size = b"B" * pad_count
ciphertext = ecb_suffix_oracle(pad_to_align_prefix_to_block_size + b"A" * block_size * 2)
blocks = [ciphertext[i * block_size:i * block_size + block_size] for i in
range(0, len(ciphertext) // block_size)]
for i in range(0, len(blocks) - 1):
if blocks[i] == blocks[i + 1]:
return pad_count, i * block_size
return None
def break_ecb_suffix_oracle_harder():
retrieved_secret = b""
try:
block_size = get_block_size()
pad_count, offset = get_offset_and_pad_count_of_prefix(block_size)
num_of_blocks = 10
for n in range(1, num_of_blocks * block_size):
encrypt_dict = dict()
prefix_template = b"B" * pad_count + b"A" * ((block_size * num_of_blocks) - n)
for unknown_byte in range(0, 256):
prefix = prefix_template + retrieved_secret + chr(unknown_byte).encode()
k = ecb_suffix_oracle(prefix)
encrypt_dict[k[offset:offset + block_size * num_of_blocks]] = unknown_byte
retrieved_secret += chr(
encrypt_dict[ecb_suffix_oracle(prefix_template)[offset:offset + block_size * num_of_blocks]]).encode()
    except (KeyError, ValueError):
        # The dictionary lookup misses once the attack starts eating into the
        # PKCS#7 padding, whose value shifts between oracle calls -- at that
        # point the whole secret has been recovered.
        pass
    return retrieved_secret
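if __name__ == '__main__':
    # Minimal usage sketch (assumes the set2 helper modules imported above are
    # available on the path): recover the secret suffix byte by byte and print it.
    print(break_ecb_suffix_oracle_harder())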
| UTF-8 | Python | false | false | 2,661 | py | 32 | byte_at_a_time_ecb_decryption_harder.py | 31 | 0.640737 | 0.612176 | 0 | 64 | 40.578125 | 195 |
Gunjankushwaha/hackthon2 | 16,492,674,455,208 | aa168b49737aa48d65c8ad896bf7389c6e22a583 | 96f000d16e29fd60199fea8585af515da7818b03 | /gunjan.py | 14ab45213550b08e4ecd2ded6c0ec1006ccaf74d | [] | no_license | https://github.com/Gunjankushwaha/hackthon2 | d80e7b9450398ed1abdca2fc9a49f1cf9c6bc46f | bf7fab859fc45260f7c7aa6f2378deb30d6e616d | refs/heads/main | "2023-09-04T22:32:23.670930" | "2021-11-21T15:08:27" | "2021-11-21T15:08:27" | 430,399,336 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | basic_salary = int(input("enter the basic_salary"))
# Gross salary = basic + HRD + DA. The slab rates below are reconstructed
# from the commented-out fragments of the original draft:
#   basic <= 10000 -> HRD 20%, DA 80%
#   basic <= 20000 -> HRD 20%, DA 90%
#   basic >  20000 -> HRD 30%, DA 95%
if basic_salary <= 10000:
    hrd = basic_salary * 20 / 100
    da = basic_salary * 80 / 100
elif basic_salary <= 20000:
    hrd = basic_salary * 20 / 100
    da = basic_salary * 90 / 100
else:
    hrd = basic_salary * 30 / 100
    da = basic_salary * 95 / 100
salary = basic_salary + hrd + da
print("total salary:", salary)
| UTF-8 | Python | false | false | 1,436 | py | 16 | gunjan.py | 15 | 0.489554 | 0.438022 | 0 | 72 | 18.708333 | 60 |
lenchevsky/organikos | 16,518,444,253,059 | ac8972fc2694be1fe320b84409dabfc4ec140ab5 | 4b15b39222699e85178c88232fb128a90a9e4c08 | /organikos_cms/core/views.py | 6b55e94c491b356596c54992dcd96ed65eff4b48 | [
"MIT"
] | permissive | https://github.com/lenchevsky/organikos | 118a051bc2c61a04c8eb675ba4167b131cde56b5 | bc54234152a0b6c5a0cc68f07119ce8bdcfd1024 | refs/heads/master | "2018-03-24T07:40:29.375011" | "2017-03-29T00:32:18" | "2017-03-29T00:32:18" | 38,467,574 | 0 | 0 | null | false | "2017-03-28T23:49:39" | "2015-07-03T02:28:41" | "2016-02-24T03:15:33" | "2017-03-28T23:49:39" | 1,202 | 0 | 0 | 5 | JavaScript | null | null | # @Project: Organikos Git Repositories (OGR)
# @Version: 0.6b
# @Author: Oleg Snegirev <ol.snegirev@gmail.com>
# @Functionality: Views
from django.shortcuts import render
from django.http import HttpResponse
from .models import OGRCore, OGRRepo
def allReposIndex(request):
# List all repositories found on this Core
try:
default_core = OGRCore.objects.get(host__exact="localhost")
except OGRCore.DoesNotExist:
default_core = OGRCore(name="Local Repository", host="localhost")
default_core.save()
default_core.populateRepoList()
all_cores = OGRCore.objects.all()
context = {'core': default_core, 'cores_list': all_cores}
return render(request, 'core/repo_list.html', context)
def repoIndex(request,repo_id):
    return HttpResponse("Please see particular repo with id %s." % repo_id)
def repoSource(request, repo_id, branch_name):
# Display root content of the repository within specified branch
repo = OGRRepo.objects.get(id=repo_id)
rootSource = repo.fetchRepo(branch_name)
context = {'folders': rootSource['folders'], 'files': rootSource['files'], 'repo': repo}
return render(request, 'core/source.html', context)
def repoSourceByHash(request, repo_id, hash, branch_name):
repo = OGRRepo.objects.get(id=repo_id)
rootSource = repo.getChilds(hash)
context = {'folders': rootSource['folders'], 'files': rootSource['files'], 'repo': repo}
return render(request, 'core/source.html', context)
def fileContent(request, repo_id, hash):
repo = OGRRepo.objects.get(id=repo_id)
file_source = repo.getFileContent(hash)
context = {'source': file_source}
return render(request,'core/content.html',context)
| UTF-8 | Python | false | false | 1,737 | py | 15 | views.py | 7 | 0.694301 | 0.693149 | 0 | 44 | 38.477273 | 92 |
cliffc2/cardano-api-tests | 2,757,369,045,411 | 9312ead5d4a3e51be7ccca6d77e9ae2c1046d59d | 3f91ee669b928b0cbd82e93c644370066b60bbf7 | /ada-ticker.py | c93f355dd9add7d17adf73ce76c66fe9576d119d | [] | no_license | https://github.com/cliffc2/cardano-api-tests | 569ceee3f2a736d338473016e293c06aca05a5ca | 12120c5e592fc677d0953324988a9cb2acc336a1 | refs/heads/master | "2020-11-26T10:12:00.803371" | "2019-12-19T16:37:34" | "2019-12-19T16:37:34" | 229,039,496 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #this is a cardano (ADA) price api test, adapted from a bitcoin example:
#https://medium.com/@randerson112358/get-bitcoin-price-in-real-time-using-python-98b7393b6152
#Import the requests library
import requests
TICKER_API_URL = 'https://api.coinmarketcap.com/v1/ticker/'
def get_latest_crypto_price(crypto):
response = requests.get(TICKER_API_URL+crypto)
response_json = response.json()
return float(response_json[0]['price_usd'])
#Test the function to see if it returns the current price of the crypto currency.
print(get_latest_crypto_price('cardano'))
| UTF-8 | Python | false | false | 529 | py | 6 | ada-ticker.py | 1 | 0.758034 | 0.724008 | 0 | 18 | 28.388889 | 93 |
MaximHirschmann/project-euler-solutions | 9,354,438,818,190 | 1c2cfef21d5016073cb5ee842cdac7b2bb120b8e | aeb8f555ccead132a59553977958954b28ac5350 | /Python/798.py | bae2740b3cef3d6cc38f67b93fe4c251faac95ea | [] | no_license | https://github.com/MaximHirschmann/project-euler-solutions | 25ed23e945894654c9c90b7d3dbb3c9c4d0f944e | ca5fff4498d6e884d122c97804a6cc37ca6c2feb | refs/heads/master | "2022-10-13T06:35:19.931133" | "2022-10-11T11:34:48" | "2022-10-11T11:34:48" | 157,978,850 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # NOT FINISHED
from collections import defaultdict
import functools
import itertools
from tabulate import tabulate
from time import time
from sympy import factorint
start = time()
n = 4
s = 6
def other(player):
return -player
def filter_deck(visible, deck):
smallest_of_suit = [n] * s
for num, suit in visible:
smallest_of_suit[suit] = min(smallest_of_suit[suit], num)
new_deck = []
for num, suit in deck:
if num <= smallest_of_suit[suit]:
continue
else:
new_deck.append((num, suit))
return new_deck
def filter_visible(visible, deck):
biggest_of_suit = [-1] * s
for num, suit in deck:
biggest_of_suit[suit] = max(biggest_of_suit[suit], num)
new_visible = []
for num, suit in visible:
if num >= biggest_of_suit[suit]:
continue
else:
new_visible.append((num, suit))
return new_visible
# players turn
def rec(visible, deck, player = 1, depth = 0):
if visible == [] or deck == []:
return other(player)
#deck = filter_deck(visible, deck)
#visible = filter_visible(visible, deck)
for i in range(len(deck)):
num1, suit1 = deck[i]
for j in range(len(visible)):
num2, suit2 = visible[j]
if suit2 != suit1 or num2 > num1: # you cant put that card on top of the other one
continue
new_visible = [(num1, suit1) if k == j else visible[k] for k in range(len(visible))]
new_deck = [deck[k] for k in range(len(deck)) if k != i ]
#print(" " * depth, (num1, suit1), "on", (num2, suit2))
res = rec(new_visible, new_deck, other(player), depth + 1)
if res == player:
return player
return other(player)
@functools.lru_cache(None)
def rec3(higher, depth = 0):
higher = tuple(i for i in higher if i != 0)
if higher == ():
return -1
grouped = defaultdict(int)
for i in higher:
grouped[i] += 1
g = tuple(sorted(grouped.items()))
# iterate over moves
for v, _ in g:
for i in range(v):
new = []
once = False
for elem in higher:
if elem == v:
if once:
new.append(elem - 1)
else:
new.append(i)
once = True
elif elem < v:
if i < elem:
new.append(elem - 1)
else:
new.append(elem)
else:
new.append(elem - 1)
new.sort()
new = tuple(new)
res = rec3(new, depth + 1)
if res == -1:
return 1
return -1
def filter_zeros(higher):
new = []
for suit in higher:
new2 = []
for elem in suit:
if elem != 0:
new2.append(elem)
if new2:
new.append(tuple(new2))
return tuple(new)
def base2(higher):
new = []
for suit in higher:
highest = max(suit)
c = 0
for i in suit:
if i == highest:
c += 1
c = min(c, 2)
new_suit = [highest for _ in range(c)]
new.append(tuple(new_suit))
return tuple(new)
mem = {}
# input of form ((2, 3, 4), (1, 2), (2))
def rec3_2(higher):
higher = filter_zeros(higher)
if higher in mem:
return mem[higher]
if higher == ():
return -1
for idx, suit in enumerate(higher):
last = None
for i in suit:
if i == last:
continue
last = i
for value in range(i):
new = []
once = False
for elem in suit:
to_add = 0
if elem == i:
if once:
to_add = elem - 1
else:
to_add = value
once = True
elif elem < i:
if value < elem:
to_add = elem - 1
else:
to_add = elem
else:
to_add = elem - 1
if to_add != 0:
new.append(to_add)
new.sort()
new = tuple(new)
new2 = [higher[j] if j != idx else new for j in range(len(higher))]
new2.sort()
#print(new2)
res = rec3_2(tuple(new2))
if res == -1:
mem[higher] = 1
return 1
mem[higher] = -1
return -1
def who_wins(visible, deck, player = 1):
ret = []
for suit in range(s):
new_visible = [(num1, suit1) for num1, suit1 in visible if suit1 == suit]
new_deck = [(num1, suit1) for num1, suit1 in deck if suit1 == suit]
res = who_wins_one_suite(new_visible, new_deck, player)
match res:
case (False, True):
new = 0
case (True, False):
new = 1
case (True, True):
new = 2
ret.append(new)
return ret
def split_to_result(splitted):
count = [0, 0, 0]
for i in splitted:
count[i] += 1
if count[1] % 2 == 0 and count[2] % 2 == 0:
return -1
else:
return 1
def who_wins_one_suite(visible, deck, player = 1):
# can i force a win?
win = winner_best_play(visible, deck) == 1
# can i force a loss
loss = winner_worst_play(visible, deck) == -1
return (win, loss)
def winner_best_play(visible, deck, player=1):
if visible == [] or deck == []:
return other(player)
for card_chosen in deck:
for stack_chosen in visible:
if stack_chosen[1] != card_chosen[1] or stack_chosen[0] > card_chosen[0]: # you cant put that card on top of the other one
continue
new_visible = [card_chosen if card == stack_chosen else card for card in visible]
new_deck = [card for card in deck if card != card_chosen]
res = winner_best_play(new_visible, new_deck, other(player))
if res == player:
return player
return other(player)
def winner_worst_play(visible, deck, player=1):
if visible == [] or deck == []:
return other(player)
move_available = False
for card_chosen in deck:
for stack_chosen in visible:
if stack_chosen[1] != card_chosen[1] or stack_chosen[0] > card_chosen[0]: # you cant put that card on top of the other one
continue
move_available = True
new_visible = [card_chosen if card == stack_chosen else card for card in visible]
new_deck = [card for card in deck if card != card_chosen]
res = winner_worst_play(new_visible, new_deck, other(player))
if res == other(player):
return other(player)
if not move_available:
return other(player)
return player
cards = list(itertools.product(range(n), range(s)))
count = 0
count2 = 0
table = []
for i in range(2**(len(cards))):
b = bin(i)[2:].rjust(len(cards), "0")
visible = []
deck = []
for j in range(len(cards)):
if b[j] == "0":
deck.append(cards[j])
else:
visible.append(cards[j])
#winner = rec(visible, deck)
higher = []
for suit in range(s):
add = []
for card in visible:
if card[1] != suit:
continue
count = 0
for card2 in deck:
if card2[1] != suit:
continue
if card2[0] > card[0]:
count += 1
add.append(count)
if add:
higher.append(tuple(add))
higher.sort()
higher = tuple(higher)
higher = filter_zeros(higher)
higher = base2(higher)
w = rec3_2(higher)
if w == -1:
count2 += 1
#table.append((higher, w))
# table.sort()
# print(tabulate(table))
print(count2)
print(time() - start, "s") | UTF-8 | Python | false | false | 8,501 | py | 162 | 798.py | 160 | 0.473474 | 0.457828 | 0 | 303 | 27.059406 | 134 |
scadding/MyUniverse | 12,936,441,532,273 | d82449329850b834d598df68200cf1e5ed0f6969 | 6ffde6c22c923e773f791925fa7df6341ccc1402 | /Data.old/Tables/Generators/Sites - Mystic Places.py | bd76bb4cf67127ccc7db264ff2e5bcdc9b703c15 | [] | no_license | https://github.com/scadding/MyUniverse | e2163b57aefbdc5f2dd2533cf836db26fc19d95c | d2b196bae19592784bfe4315baf76a6954261391 | refs/heads/master | "2021-01-15T09:20:21.411904" | "2018-07-03T08:05:31" | "2018-07-03T08:05:31" | 35,813,452 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from random import choice
from random import randint
class generator:
def version(self):
return 1.0
def start(self):
result = ''
groups = { 0: self.place() + ' of ' + self.thing(),
1: self.adj() + ' ' + self.place(),
2: self.adj() + ' ' + self.place() + ' of ' + self.thing(),
}
result = groups[randint(0,2)]
return result
def adj(self):
elements = ["Bright", "Burning", "Dead", "Eternal", "Forgotten", "Golden", "Living", "Lost", "Shining", "Silver", "Sunken", "Undying"]
return choice(elements)
def place(self):
elements = ["Castle", "Cave", "Chasm", "City", "Coast", "Desert", "Field", "Forest", "Fortress", "Fountain", "Garden", "Grove", "Hamlet", "Hill", "Isle", "Jungle", "Lake", "Mansion", "Marsh", "Mesa", "Mine", "Mire", "Mountain", "Pillar", "River", "Ruins", "Sea", "Shores", "Shrine", "Source", "Spire", "Stone", "Swamp", "Temple", "Tower", "Valley"]
return choice(elements)
def thing(self):
elements = ["the Abyss", "Blood", "Bronze", "Dawn", "the Dead", "Doom", "Dragons", "Dread", "Dust", "Eternal Peril", "Fire", "Flame", "Frost", "the Gods", "Gold", "Heaven", "Ice", "the Inferno", "Iron", "Lightning", "Maps Edge", "Mists", "Mithril", "No Return", "Runes", "Scales", "the Seven", "Shadow", "Silver", "Skulls", "the Sky", "Stone", "Swords", "Thunder", "Wind", "Youth"]
return choice(elements)
#x = generator()
#print(x.start())
l1berov/Switch | 11,407,433,170,866 | 41403d6ec7921068f0e0596cfa51c07af8811d3b | 5a97f04444c4a05c1e9170a4b751972c550847fa | /liberscripts/fff.py | d226a296f5b609662e49128956f2964325ae93ef | [] | no_license | https://github.com/l1berov/Switch | 1aa20b8229a35355d0ded96b81ae5a750ee8badb | 3d12c0e132cc06cfd83bf9b7d770c98d91e9787d | refs/heads/main | "2023-01-24T00:43:24.084726" | "2020-11-22T18:55:07" | "2020-11-22T18:55:07" | 315,108,179 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import colorama, os, time, vk_api, sys
from os import system
from colorama import Fore , Back , Style
os.system('clear')
print(Fore.YELLOW+'''
______ _ _
| ____| (_) | |
| |__ _ __ _ ___ _ __ __| |
| __| '__| |/ _ \ '_ \ / _` |
| | | | | | __/ | | | (_| |
|_| |_| |_|\___|_| |_|\__,_| by batyarimskiy
''')
with open("ids.txt") as f:
user_ids = f.read().splitlines()
session = vk_api.VkApi(token = input(Fore.GREEN+'Enter token'+Fore.WHITE+' ~# '))
vk = session.get_api()
for user_id in user_ids:
vk.friends.add(
user_id=user_id
)
time.sleep(40) | UTF-8 | Python | false | false | 676 | py | 12 | fff.py | 11 | 0.426205 | 0.423193 | 0 | 27 | 23.62963 | 83 |
OpenDataCubePipelines/opendatacubepipeline.tesp | 10,101,763,100,227 | e83c22a9544c2c1a0f8ffd52dd368e85a3df3b21 | 6e86c22122a5a0a1bb0119844e4d1244d3b962ee | /bin/ard_pbs | dfbf3fd063891a40dde69b0cf9d5fd1aba7c3a50 | [] | no_license | https://github.com/OpenDataCubePipelines/opendatacubepipeline.tesp | 4464160cd48e6eacb9921a13653337dbf8d81410 | 0c20d54adc01f8c6cb370635ab7f4f9c0c4d5929 | refs/heads/master | "2021-04-29T20:11:39.089807" | "2021-04-19T02:53:46" | "2021-04-21T03:48:08" | 121,593,602 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
"""
PBS submission scripts.
"""
import os
import re
import json
import logging
from os.path import join as pjoin, dirname, exists
from pathlib import Path
import subprocess
import uuid
import click
import math
from wagl.tiling import scatter
PBS_RESOURCES = ("""#!/bin/bash
#PBS -P {project}
#PBS -W umask=017
#PBS -q {queue}
#PBS -l walltime={walltime},mem={memory}GB,ncpus={ncpus},jobfs={jobfs}GB,other=pernodejobfs
#PBS -l wd
#PBS -l storage={filesystem_projects}
#PBS -me
{email}
""")
NODE_TEMPLATE = ("""{pbs_resources}
source {env}
{daemon}
luigi --module tesp.workflow ARDP --level1-list {scene_list} --workdir {outdir} --pkgdir {pkgdir} --workers {workers} --parallel-scheduling
""")
SUMMARY_TEMPLATE = ("""{pbs_resources}
#PBS -W depend=afterany:{jobids}
source {env}
batch_summary --indir {indir} --outdir {outdir}
# jq queries
# concatenate logs to enable querying on a single file
find -type f -name 'task-log.jsonl' | xargs cat >> batch-{batchid}-task-log.jsonl
find -type f -name 'status-log.jsonl' | xargs cat >> batch-{batchid}-status-log.jsonl
# summaries on success and failure records
jq 'select(.status == "success") | {{task, level1}}' batch-{batchid}-task-log.jsonl | jq --slurp 'unique_by(.level1, .task) | group_by(.task) | map({{task: .[0].task, count: length}})' > batch-{batchid}-success-task-summary.json
jq 'select(.status == "failure") | {{task, level1}}' batch-{batchid}-task-log.jsonl | jq --slurp 'unique_by(.level1, .task) | group_by(.task) | map({{task: .[0].task, count: length}})' > batch-{batchid}-failure-task-summary.json
# capture failures and report the exception, level1, task and granule id
jq 'select(.status == "failure") | {{level1, exception, task, granule: .params.granule}}' batch-{batchid}-task-log.jsonl | jq --slurp 'unique_by(.level1, .task, .granule)' > batch-{batchid}-exception-log.json
# get a listing of which level1-lists from which jobid's had 100% success (all scenes processed through to packaging)
jq 'select(.task == "ARDP") | {{task, level1_list: .params.level1_list, status}}' batch-{batchid}-task-log.jsonl | jq --slurp 'unique_by(.level1_list)' > batch-{batchid}-ardp-job-sucess.json
# get a listing of level1 datasets and the granule id that packaged successfully (Package is the last task to be done as defined in the luigi workflow) and report that "ard processing is complete"
jq 'select(.status == "success" and .task == "Package") | {{level1, granule: .params.granule, notification: "ard processing complete"}}' batch-{batchid}-task-log.jsonl | jq --slurp 'unique_by(.level1, .granule)' > batch-{batchid}-package-success.json
# compile a list of successfully packaged datasets (and their path) to pass over to indexing
jq 'select(.event == "packaged dataset") | .dataset_path' batch-{batchid}-status-log.jsonl | jq -sr 'unique | .[]' > batch-{batchid}-datasets-to-index.txt
""")
INDEXING_TEMPLATE = ("""{pbs_resources}
#PBS -W depend=afterany:{jobid}
source {env}
# indexing
cat batch-{batchid}-datasets-to-index.txt | parallel -j 47 -m -n 20 --line-buffer datacube dataset add --no-verify-lineage
""")
ARCHIVING_TEMPLATE = ("""{pbs_resources}
source {env}
# archiving
cat {archive_list} | parallel -j 47 -m -n 20 --line-buffer datacube dataset archive
""")
FMT1 = 'batchid-{batchid}'
FMT2 = 'jobid-{jobid}'
FMT3 = 'level1-scenes-{jobid}.txt'
FMT4 = 'jobid-{jobid}.bash'
FMT5 = 'batch-{batchid}-summary.bash'
FMT6 = 'batch-{batchid}-indexing.bash'
FMT7 = 'scratch/{f_project}+gdata/{f_project}'
FMT8 = 'archiving-{batchid}.bash'
DAEMON_FMT = 'luigid --background --logdir {}'
def _calc_nodes_req(granule_count, walltime, workers, hours_per_granule=1.5):
""" Provides estimation of the number of nodes required to process granule count
>>> _calc_nodes_req(400, '20:59', 28)
2
>>> _calc_nodes_req(800, '20:00', 28)
3
"""
    hours = int(walltime.split(':')[0])  # accepts both 'hh:mm' and 'hh:mm:ss'
return int(math.ceil(float(hours_per_granule * granule_count) / (hours * workers)))
def _get_project_for_path(path: Path):
"""
Get the NCI project used to store the given path, if any.
>>> _get_project_for_path(Path('/g/data/v10/some/data/path.txt'))
'v10'
>>> _get_project_for_path(Path('/g/data4/fk4/some/data/path.txt'))
'fk4'
>>> _get_project_for_path(Path('/scratch/da82/path.txt'))
'da82'
>>> _get_project_for_path(Path('/tmp/other/data'))
"""
posix_path = path.as_posix()
if posix_path.startswith('/g/data'):
return posix_path.split('/')[3]
if posix_path.startswith('/scratch/'):
return posix_path.split('/')[2]
return None
def _filesystem_projects(level1_list: list,
env: str,
logdir: str,
workdir: str,
pkgdir: str):
"""
Collect all the filesystem projects into a set.
"""
fs_projects = {None}
fs_projects.add(_get_project_for_path(Path(workdir)))
fs_projects.add(_get_project_for_path(Path(logdir)))
fs_projects.add(_get_project_for_path(Path(pkgdir)))
fs_projects.add(_get_project_for_path(Path(env)))
fs_projects.add(_get_project_for_path(Path(click.__file__)))
with open(level1_list, 'r') as src:
paths = [p.strip() for p in src.readlines()]
for pathname in paths:
fs_projects.add(_get_project_for_path(Path(pathname)))
fs_projects.remove(None)
return fs_projects
# pylint: disable=too-many-arguments
def _submit_multiple(scattered, env, batch_logdir, batch_outdir, pkgdir,
workers, pbs_resources, test):
"""Submit multiple PBS formatted jobs."""
nci_job_ids = []
# setup and submit each block of scenes for processing
for block in scattered:
jobid = uuid.uuid4().hex[0:6]
jobdir = pjoin(batch_logdir, FMT2.format(jobid=jobid))
job_outdir = pjoin(batch_outdir, FMT2.format(jobid=jobid))
if not exists(jobdir):
os.makedirs(jobdir)
if not exists(job_outdir):
os.makedirs(job_outdir)
# write level1 data listing
out_fname = pjoin(jobdir, FMT3.format(jobid=jobid))
with open(out_fname, 'w') as src:
src.writelines(block)
pbs = NODE_TEMPLATE.format(pbs_resources=pbs_resources, env=env,
daemon=DAEMON_FMT.format(jobdir),
scene_list=out_fname, outdir=job_outdir,
pkgdir=pkgdir, workers=workers)
# write pbs script
out_fname = pjoin(jobdir, FMT4.format(jobid=jobid))
with open(out_fname, 'w') as src:
src.write(pbs)
if test:
click.echo("Mocking... Submitting Job: {} ...Mocking".format(jobid))
click.echo("qsub {}".format(out_fname))
continue
os.chdir(dirname(out_fname))
click.echo("Submitting Job: {}".format(jobid))
try:
raw_output = subprocess.check_output(['qsub', out_fname])
except subprocess.CalledProcessError as exc:
logging.error('qsub failed with exit code %s', str(exc.returncode))
logging.error(exc.output)
raise
if hasattr(raw_output, 'decode'):
matches = re.match(r'^(?P<nci_job_id>\d+\.gadi-pbs)$', raw_output.decode('utf-8'))
if matches:
nci_job_ids.append(matches.groupdict()['nci_job_id'])
# return a list of the nci job ids
return nci_job_ids
def _submit_summary(indir, outdir, batch_id, pbs_resources, env, job_ids, test):
"""Summarise the jobs submitted within the batchjob."""
jobids = ":".join([j.split('.')[0] for j in job_ids])
pbs = SUMMARY_TEMPLATE.format(pbs_resources=pbs_resources, env=env,
indir=indir, outdir=outdir, jobids=jobids,
batchid=batch_id)
out_fname = pjoin(indir, FMT5.format(batchid=batch_id))
with open(out_fname, 'w') as src:
src.write(pbs)
if test:
click.echo("Mocking... Submitting Summary Job for batch: {} ...Mocking".format(batch_id))
click.echo("qsub {}".format(out_fname))
return
os.chdir(dirname(out_fname))
click.echo("Submitting Summary Job for batch: {}".format(batch_id))
try:
raw_output = subprocess.check_output(['qsub', out_fname])
except subprocess.CalledProcessError as exc:
logging.error('qsub failed with exit code %s', str(exc.returncode))
logging.error(exc.output)
raise
if hasattr(raw_output, 'decode'):
matches = re.match(r'^(?P<nci_job_id>\d+\.gadi-pbs)$', raw_output.decode('utf-8'))
if matches:
job_id = matches.groupdict()['nci_job_id']
return job_id
def _submit_index(indir, outdir, batch_id, pbs_resources, env, job_id, test):
"""Submit a job that adds datasets to a datacube index."""
if job_id:
jobid = job_id.split('.')[0]
else:
jobid = ''
pbs = INDEXING_TEMPLATE.format(pbs_resources=pbs_resources, env=env,
indir=indir, outdir=outdir, jobid=jobid,
batchid=batch_id)
out_fname = pjoin(indir, FMT6.format(batchid=batch_id))
with open(out_fname, 'w') as src:
src.write(pbs)
if test:
click.echo("Mocking... Submitting Indexing Job for batch: {} ...Mocking".format(batch_id))
return
os.chdir(dirname(out_fname))
click.echo("Submitting Indexing Job for batch: {}".format(batch_id))
try:
raw_output = subprocess.check_output(['qsub', out_fname])
except subprocess.CalledProcessError as exc:
logging.error('qsub failed with exit code %s', str(exc.returncode))
logging.error(exc.output)
raise
if hasattr(raw_output, 'decode'):
matches = re.match(r'^(?P<nci_job_id>\d+\.gadi-pbs)$', raw_output.decode('utf-8'))
if matches:
job_id = matches.groupdict()['nci_job_id']
return job_id
def _submit_archive(indir, outdir, archive_list, batch_id, pbs_resources, env, test):
"""Submit a job that archives datasets given a file listing UUIDs."""
pbs = ARCHIVING_TEMPLATE.format(pbs_resources=pbs_resources, env=env,
indir=indir, outdir=outdir,
archive_list=archive_list)
out_fname = pjoin(indir, FMT8.format(batchid=batch_id))
with open(out_fname, 'w') as src:
src.write(pbs)
if test:
click.echo("Mocking... Submitting Archiving Job for batch: {} ...Mocking".format(batch_id))
return
os.chdir(dirname(out_fname))
click.echo("Submitting Archiving Job for batch: {}".format(batch_id))
try:
raw_output = subprocess.check_output(['qsub', out_fname])
except subprocess.CalledProcessError as exc:
logging.error('qsub failed with exit code %s', str(exc.returncode))
logging.error(exc.output)
raise
if hasattr(raw_output, 'decode'):
matches = re.match(r'^(?P<nci_job_id>\d+\.gadi-pbs)$', raw_output.decode('utf-8'))
if matches:
job_id = matches.groupdict()['nci_job_id']
return job_id
@click.command()
@click.option("--level1-list", type=click.Path(exists=True, readable=True),
help="The input level1 scene list.")
@click.option("--workdir", type=click.Path(file_okay=False, writable=True),
help="The base output working directory.")
@click.option("--logdir", type=click.Path(file_okay=False, writable=True),
help="The base logging and scripts output directory.")
@click.option("--pkgdir", type=click.Path(file_okay=False, writable=True),
help="The base output packaged directory.")
@click.option("--env", type=click.Path(exists=True, readable=True),
help="Environment script to source.")
@click.option("--workers", type=click.IntRange(1, 48), default=30,
help="The number of workers to request per node.")
@click.option("--nodes", default=0, help="The number of nodes to request.")
@click.option("--memory", default=192,
help="The memory in GB to request per node.")
@click.option("--jobfs", default=50,
help="The jobfs memory in GB to request per node.")
@click.option("--project", required=True, help="Project code to run under.")
@click.option("--queue", default='normal',
help="Queue to submit the job into, eg normal, express.")
@click.option("--walltime", default="48:00:00",
help="Job walltime in `hh:mm:ss` format.")
@click.option("--email", default="",
help="Notification email address.")
@click.option("--index-datacube-env", type=click.Path(exists=True, readable=True),
help="Datacube specific environment script to source.")
@click.option("--archive-list", type=click.Path(exists=True, readable=True),
help="UUID's of the scenes to archive. This uses the environment specified in index-datacube-env.")
@click.option("--test", default=False, is_flag=True,
help=("Test job execution (Don't submit the job to the "
"PBS queue)."))
# pylint: disable=too-many-arguments
def main(level1_list, workdir, logdir, pkgdir, env, workers, nodes, memory,
jobfs, project, queue, walltime, email, index_datacube_env, archive_list, test):
"""
Equally partition a list of scenes across n nodes and submit
n jobs into the PBS queue for ARD processing.
"""
with open(level1_list, 'r') as src:
scenes = src.readlines()
if nodes == 0:
nodes = _calc_nodes_req(len(scenes), walltime, workers)
scattered = scatter(scenes, nodes)
batchid = uuid.uuid4().hex[0:10]
batch_logdir = pjoin(logdir, FMT1.format(batchid=batchid))
batch_outdir = pjoin(workdir, FMT1.format(batchid=batchid))
fs_projects = _filesystem_projects(level1_list, env, logdir, workdir,
pkgdir)
fsys_projects = '+'.join([FMT7.format(f_project=f) for f in fs_projects])
# optionally set pbs email string
pbs_resources = PBS_RESOURCES.format(project=project, queue=queue,
walltime=walltime, memory=memory,
ncpus=workers, jobfs=jobfs,
filesystem_projects=fsys_projects,
email=('#PBS -M ' + email) if email else "")
if test:
click.echo("Mocking... Submitting Batch: {} ...Mocking".format(batchid))
else:
click.echo("Submitting Batch: {}".format(batchid))
click.echo("Executing Batch: {}".format(batchid))
nci_job_ids = _submit_multiple(scattered, env, batch_logdir, batch_outdir,
pkgdir, workers, pbs_resources, test)
# job resources for batch summary
pbs_resources = PBS_RESOURCES.format(project=project, queue='express',
walltime="00:10:00", memory=6,
ncpus=1, jobfs=2,
                                         filesystem_projects=fsys_projects,
email=('#PBS -M ' + email) if email else "")
job_id = _submit_summary(batch_logdir, batch_logdir, batchid,
pbs_resources, env, nci_job_ids, test)
nci_job_ids.append(job_id)
if index_datacube_env:
pbs_resources = PBS_RESOURCES.format(project=project, queue='normal',
walltime="00:30:00", memory=192,
ncpus=48, jobfs=20,
                                             filesystem_projects=fsys_projects,
email=('#PBS -M ' + email) if email else "")
index_job_id = _submit_index(batch_logdir, batch_logdir, batchid,
pbs_resources, index_datacube_env, job_id, test)
nci_job_ids.append(index_job_id)
if archive_list:
if index_datacube_env:
pbs_resources = PBS_RESOURCES.format(project=project, queue='normal',
walltime="00:30:00", memory=192,
ncpus=48, jobfs=20,
                                                 filesystem_projects=fsys_projects,
email=('#PBS -M ' + email) if email else "")
archive_job_id = _submit_archive(batch_logdir, batch_logdir, archive_list, batchid,
pbs_resources, index_datacube_env, test)
nci_job_ids.append(archive_job_id)
else:
logging.error('Archive list given but --index-datacube-env not specified.')
job_details = {
'ardpbs_batch_id': batchid,
'nci_job_ids': nci_job_ids
}
# Enable the job details to be picked up by the calling process
click.echo(json.dumps(job_details))
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 17,168 | 27 | ard_pbs | 21 | 0.60374 | 0.594478 | 0 | 425 | 39.395294 | 250 |
|
Shakil-1501/Session6 | 10,557,029,628,480 | 3b8e785de4480e9a0c6eabd0c69d7702af9d3ba0 | 669677ac5cccf328426ef141cfdb8e99e8127452 | /session6.py | 8fa1404add9e5fbe962589bfba800e3d8d033e30 | [] | no_license | https://github.com/Shakil-1501/Session6 | f6679ff5c2530e2c2c3b6ff7ae765a2378a6a2f4 | 7886602e463b3cb6c01bff7f8d0cb1d1d7bebdf6 | refs/heads/master | "2022-12-06T10:34:29.550328" | "2020-08-24T15:37:13" | "2020-08-24T15:37:13" | 289,886,461 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from typing import List
import time
import sys
import weakref
import random
from math import tan, pi
def poker(vals,suits):
'''
returns the list of 52 created cards in a deck
'''
k=[]
#l=[]
for i in vals:
for j in suits:
k.append((i+j))
return k
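# Usage sketch (illustrative ranks/suits; 13 x 4 = 52 cards):
# vals = ['2','3','4','5','6','7','8','9','10','jack','queen','king','ace']
# suits = ['hearts', 'clubs', 'spades', 'diamonds']
# deck = poker(vals, suits)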
def pokerstar(no_of_card:'int',no_of_player:'int',sequence_cardsA:'list',sequence_cardsB:'list') ->'winner of the game':
"""returns p
some additional documentation
Inputs: number of cards,number of player,sequence_cards for A,sequence_cards for B
Outputs: winner of the match
"""
a=['acehearts','kinghearts','queenhearts','jackhearts','10hearts']
b=['10clubs','9clubs','8clubs','7clubs','6clubs']
c=['queenclubs','queenhearts','queenspades','queendiamonds','5clubs']
d=['acediamonds','acespades','acediamonds','kingspades','kingdiamonds']
e=['kinghearts','8hearts','6hearts','4hearts','2hearts']
f=['8diamonds','7clubs','6diamonds','5spades','4diamonds']
g=['queenclubs','queenhearts','queenspades','7diamonds','2spades']
h=['jackdiamonds','jackspades','9spades','9diamonds','5clubs']
i=['kinghearts','kingspades','9daimonds','8spades','4hearts']
j=['acehearts','queenclubs','6hearts','4spades','2diamonds']
if len(sequence_cardsA)==len(sequence_cardsB):
if no_of_player==2:
if no_of_card==3:
if sequence_cardsA == a[0:3]:
p="Player A is winner"
elif sequence_cardsA == b[0:3] and sequence_cardsB!=a[0:3] :
p="Player A is winner"
elif sequence_cardsA == c[0:3] and (sequence_cardsB!=a[0:3] or sequence_cardsB!=b[0:3]):
p="Player A is winner"
elif sequence_cardsA == d[0:3] and (sequence_cardsB!=a[0:3] or sequence_cardsB!=b[0:3] or sequence_cardsB!=c[0:3]):
p="Player A is winner"
elif sequence_cardsA == e[0:3] and (sequence_cardsB!=a[0:3] or sequence_cardsB!=b[0:3] or sequence_cardsB!=c[0:3] or sequence_cardsB!=d[0:3]):
p="Player A is winner"
elif sequence_cardsA == f[0:3] and (sequence_cardsB!=a[0:3] or sequence_cardsB!=b[0:3] or sequence_cardsB!=c[0:3] or sequence_cardsB!=d[0:3] or sequence_cardsB!=e[0:3]):
p="Player A is winner"
elif sequence_cardsA == g[0:3] and (sequence_cardsB == h[0:3] or sequence_cardsB==i[0:3] or sequence_cardsB==j[0:3]):
p="Player A is winner"
elif sequence_cardsA == h[0:3] and (sequence_cardsB == i[0:3] or sequence_cardsB==j[0:3]):
p="Player A is winner"
elif sequence_cardsA ==i[0:3] and sequence_cardsB == j[0:3]:
p="Player A is winner"
else:
p="Player B is winner"
elif no_of_card==4:
if sequence_cardsA == a[0:4]:
p="Player A is winner"
elif sequence_cardsA == b[0:4] and sequence_cardsB!=a[0:4] :
p="Player A is winner"
elif sequence_cardsA == c[0:4] and (sequence_cardsB!=a[0:4] or sequence_cardsB!=b[0:4]):
p="Player A is winner"
elif sequence_cardsA == d[0:4] and (sequence_cardsB!=a[0:4] or sequence_cardsB!=b[0:4] or sequence_cardsB!=c[0:4]):
p="Player A is winner"
elif sequence_cardsA == e[0:4] and (sequence_cardsB!=a[0:4] or sequence_cardsB!=b[0:4] or sequence_cardsB!=c[0:4] or sequence_cardsB!=d[0:4]):
p="Player A is winner"
elif sequence_cardsA == f[0:4] and (sequence_cardsB!=a[0:4] or sequence_cardsB!=b[0:4] or sequence_cardsB!=c[0:4] or sequence_cardsB!=d[0:4] or sequence_cardsB!=e[0:4]):
p="Player A is winner"
elif sequence_cardsA == g[0:4] and (sequence_cardsB == h[0:4] or sequence_cardsB==i[0:4] or sequence_cardsB==j[0:4]):
p="Player A is winner"
elif sequence_cardsA == h[0:4] and (sequence_cardsB == i[0:4] or sequence_cardsB==j[0:4]):
p="Player A is winner"
elif sequence_cardsA ==i[0:4] and sequence_cardsB == j[0:4]:
p="Player A is winner"
else:
p="Player B is winner"
elif no_of_card==5:
if (sequence_cardsA == a) and (sequence_cardsB!=b or sequence_cardsB!=c or sequence_cardsB!=d or sequence_cardsB!=e or sequence_cardsB!=f or sequence_cardsB!=g or sequence_cardsB!=h or sequence_cardsB!=i or sequence_cardsB!=j):
p="Player A is winner"
elif (sequence_cardsA == b) and (sequence_cardsB != a) :
p="Player A is winner"
elif (sequence_cardsA == c) and (sequence_cardsB!=a or sequence_cardsB!=b):
print("hello2")
p="Player A is winner"
elif sequence_cardsA == d and (sequence_cardsB!=a or sequence_cardsB!=b or sequence_cardsB!=c):
p="Player A is winner"
elif sequence_cardsA == e and (sequence_cardsB!=a or sequence_cardsB!=b or sequence_cardsB!=c or sequence_cardsB!=d):
p="Player A is winner"
elif sequence_cardsA == f and (sequence_cardsB!=a or sequence_cardsB!=b or sequence_cardsB!=c or sequence_cardsB!=d or sequence_cardsB!=e):
p="Player A is winner"
elif sequence_cardsA == g and (sequence_cardsB == h or sequence_cardsB==i or sequence_cardsB==j):
p="Player A is winner"
elif sequence_cardsA == h and (sequence_cardsB == i or sequence_cardsB==j):
p="Player A is winner"
elif sequence_cardsA ==i and sequence_cardsB == j:
p="Player A is winner"
else:
p="Player B is winner"
else:
p="please enter correct number of card"
else:
p="please enter correct number of players"
else:
p="please enter the same length for sequence of cards for both players"
return p | UTF-8 | Python | false | false | 6,352 | py | 2 | session6.py | 2 | 0.541877 | 0.517317 | 0 | 128 | 47.640625 | 243 |
FeelmyLife-droid/BOT_TG | 8,813,272,902,698 | c62e5483ff2c0adc9ec88052dcd0816039fe2b1e | 5548be9aa1bbc8640e35f3e4d43c32876aaea1f7 | /Keyboards/default/key_start.py | 82cd314be18d267c3c9f3796a162d30fec88984a | [] | no_license | https://github.com/FeelmyLife-droid/BOT_TG | 3dab8bfdde9df9493dcf3e3cb287c42161a5e08a | 8aec6c83886d0f0fbeb4af020919ae42bbed07ba | refs/heads/master | "2023-03-02T02:31:45.215366" | "2021-02-08T06:58:21" | "2021-02-08T06:58:21" | 335,944,264 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from aiogram.types import ReplyKeyboardMarkup, KeyboardButton
key_start = ReplyKeyboardMarkup(
keyboard=[
[
            KeyboardButton(text='Новые адреса'),  # "New addresses"
            KeyboardButton(text='Блокировка Фирм')  # "Firm blocking"
        ],
        [
            KeyboardButton(text='Данные по ИНН'),  # "Data by INN (tax ID)"
            KeyboardButton(text='Выписка из ЕГРЛ')  # "EGRL registry extract"
]
],
resize_keyboard=True)
| UTF-8 | Python | false | false | 449 | py | 44 | key_start.py | 43 | 0.5725 | 0.5725 | 0 | 14 | 26.571429 | 61 |
jonasguan/FSND-movie-project | 19,198,503,824,629 | bd970a4bda3eb13b19a519ef088558a6cebb7dfb | 9dcb1e586397536db8e0672056be30fe859c9440 | /entertainment_center.py | 22ae7ff24707af891c28c868504281bf6066c40e | [] | no_license | https://github.com/jonasguan/FSND-movie-project | 89e51e73052d9c425dc44603e6617a518943249f | a6d6aa222a30664a649d4700dbea61e9498a874c | refs/heads/master | "2021-01-12T15:14:52.878410" | "2016-10-23T23:51:24" | "2016-10-23T23:51:24" | 71,732,733 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import media
wreck_it_ralph = media.Movie( "Wreck-it Ralph",
"A disliked video game character gains popularity by wrecking things.",
"https://upload.wikimedia.org/wikipedia/en/1/15/Wreckitralphposter.jpeg",
"https://www.youtube.com/watch?v=OzMJkRwOxjs")
the_dark_knight = media.Movie( "The Dark Knight",
"Batman faces a new supervillian who throws Gotham into utter chaos.",
"https://upload.wikimedia.org/wikipedia/en/8/8a/Dark_Knight.jpg",
"https://www.youtube.com/watch?v=u843KNE-exo")
interstellar = media.Movie( "Interstellar",
"A group of scientists journey into space to save mankind from the brink of extinction.",
"https://upload.wikimedia.org/wikipedia/en/b/bc/Interstellar_film_poster.jpg",
"https://www.youtube.com/watch?v=lZMzf-SDWP8")
the_wolf_of_wall_street = media.Movie( "The Wolf of Wall Street",
"Based on the real life of Jordan Belfort, a man rises to wealth but gets"
"ultimately consumed by his hubris.",
"https://upload.wikimedia.org/wikipedia/en/1/1f/WallStreet2013poster.jpg",
"https://www.youtube.com/watch?v=BzpIB5TJ7LI")
pulp_fiction = media.Movie( "Pulp Fiction",
"Several storylines of bad motherfuckers intertwine in a black comedy masterpiece.",
"https://upload.wikimedia.org/wikipedia/en/thumb/3/3b/Pulp_Fiction_%281994%29_poster.jpg/220px-Pulp_Fiction_%281994%29_poster.jpg",
"https://www.youtube.com/watch?v=t7cuDVbN_mg")
| UTF-8 | Python | false | false | 1,433 | py | 3 | entertainment_center.py | 2 | 0.727146 | 0.69993 | 0 | 27 | 52.074074 | 135 |
sadxiaohu/icbckg | 2,078,764,183,232 | d7640043f97c642b73e9c032619446fff93b2f29 | 44e8497b108806d98a2d9bda293cc425ef3305fa | /tools/spider&extract/spider_from_web_demo/getting_nodes_fromweb.py | f7b930b2cfa1e5af5823aafe71b22731e97a5bbb | [] | no_license | https://github.com/sadxiaohu/icbckg | 138dacdb581b5bb81508250e437ed70a2918a3ac | d005aaf4d46e0d82a34a3a9318a4021d77a1b5da | refs/heads/master | "2020-04-05T11:03:39.040811" | "2018-11-16T11:03:25" | "2018-11-16T11:03:25" | 156,820,824 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #coding=utf-8
import requests
from bs4 import BeautifulSoup
import json
import codecs
def webNodesGet(word_url_dict):
nodes = []
id = 0
for word in word_url_dict:
url = 'http://www.icbc.com.cn'
url = url+word_url_dict[word]
wb_data = requests.get(url).content
soup = BeautifulSoup(wb_data,'lxml')
datalist1 = soup.select('.more')
info = {}
attr = []
for data in datalist1 :
attr.append(data.get_text().replace('\n',''))
info[data.get_text().replace('\n','')]=''
datalist2 = soup.select('table > tbody > tr > td ')
if len(datalist1) != 0 and len(datalist2) != 0:
string=''
for i in range(2,len(datalist2)):
string += datalist2[i].get_text().strip().replace(' ','')
string = string.replace('\r','')
string = string.replace('\n','')
string = string.replace('\t','')
string = string.replace('[返回页首]','')
string = string.replace('\u3000','')
length = len(attr)
for i in range(0,length-1):
begin = string.find(attr[i])
end = string.find(attr[i+1])
info[attr[i]] = string[begin+len(attr[i]):end]
info[attr[-1]] = string[(string.find(attr[-1]))+len(attr[-1]):]
info['name'] = word
info['id'] = str(id)
#print(info)
nodes.append(info)
id += 1
    json_data = json.dumps({"nodes":nodes},ensure_ascii=False)
    with codecs.open('nodesfromweb.json','w',encoding='utf-8') as foo:
        foo.write(json_data)
#print(id)
# personal online banking section info
url1 = 'http://www.icbc.com.cn/ICBC/电子银行/电子银行产品/金融a家产品/个人网上银行/个人网上银行/'
wb_data = requests.get(url1).content
soup = BeautifulSoup(wb_data,'lxml')
datalist1 = soup.select('li > ul > li > ul > li > a')
datalist2 = soup.select('li > ul > li > a')
datalist1.extend(datalist2)
# corporate online banking section info
url2 = 'http://www.icbc.com.cn/ICBC/电子银行/企业电子银行产品/工行财e通产品/企业网上银行/'
wb_data = requests.get(url2).content
soup = BeautifulSoup(wb_data,'lxml')
datalist3 = soup.select('li > ul > li > ul > li > a')
datalist4 = soup.select('li > ul > li > a')
datalist3.extend(datalist4)
datalist1.extend(datalist3)
entity = {}
for data in datalist1:
if data.get('href') != None:
entity[data.get_text().replace(' >','')] = data['href']
webNodesGet(entity)
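# Quick sanity check of the scraped output (sketch):
# with codecs.open('nodesfromweb.json', encoding='utf-8') as f:
#     print(len(json.loads(f.read())['nodes']))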
| UTF-8 | Python | false | false | 2,564 | py | 903 | getting_nodes_fromweb.py | 48 | 0.565452 | 0.548882 | 0 | 65 | 36.138462 | 75 |
bridgecrewio/checkov | 16,518,444,250,443 | 20cedb19c9ce77746822d00300ab8389373c2d63 | 45e376ae66b78b17788b1d3575b334b2cb1d0b1c | /tests/terraform/checks/data/example_external_dir/extra_checks/DummyExternalDataCheck.py | 62f6e4377b92be2f288e9121981992a3d142d5d0 | [
"Apache-2.0"
] | permissive | https://github.com/bridgecrewio/checkov | aeb8febed2ed90e61d5755f8f9d80b125362644d | e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d | refs/heads/main | "2023-08-31T06:57:21.990147" | "2023-08-30T23:01:47" | "2023-08-30T23:01:47" | 224,386,599 | 5,929 | 1,056 | Apache-2.0 | false | "2023-09-14T20:10:23" | "2019-11-27T08:55:14" | "2023-09-14T18:58:30" | "2023-09-14T20:06:30" | 79,102 | 5,891 | 951 | 149 | Python | false | false | from checkov.terraform.checks.data.base_check import BaseDataCheck
from checkov.common.models.enums import CheckResult, CheckCategories
class DummyExternalDataCheck(BaseDataCheck):
def __init__(self):
name = "check for terraform data entity"
id = "CKV_AWS_999"
supported_resources = ['aws_iam_policy_document']
categories = [CheckCategories.IAM]
super().__init__(name=name, id=id, categories=categories, supported_data=supported_resources)
def scan_data_conf(self, conf):
return CheckResult.PASSED
scanner = DummyExternalDataCheck()
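# Usage sketch: keep this file in a directory of custom checks and point
# checkov at it, e.g.
#   checkov -d /path/to/terraform --external-checks-dir ./extra_checks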
| UTF-8 | Python | false | false | 594 | py | 4,040 | DummyExternalDataCheck.py | 1,890 | 0.713805 | 0.708754 | 0 | 17 | 33.941176 | 101 |
Taeheon-Lee/Programmers | 7,825,430,450,894 | fe80b9fbf0ac32c9ddb90c1eae2900cdb84bc470 | 9b96c37db1f61065094d42bc5c8ad6eb3925961b | /level1/phoneketmon.py | 6d1df017d3d2f70bcb0d8c7792409844d7434c9a | [] | no_license | https://github.com/Taeheon-Lee/Programmers | a97589498c866c498c1aa9192fdf8eec9f8e31f4 | c38b1c7dc4114c99191b77e5d19af432eaf6177e | refs/heads/master | "2023-07-09T21:10:25.064947" | "2021-08-30T05:17:49" | "2021-08-30T05:17:49" | 394,327,802 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | "폰켓몬"
# Problem link: "https://programmers.co.kr/learn/courses/30/lessons/1845"
def solution(nums):
    select_num = len(nums) // 2 # number of pokemon we are allowed to pick
    types_num = len(set(nums)) # number of distinct pokemon types
    answer = types_num if types_num < select_num else select_num # with fewer types than picks, we can take at most one of each type; otherwise the pick count is the cap
return answer | UTF-8 | Python | false | false | 552 | py | 58 | phoneketmon.py | 57 | 0.53125 | 0.514423 | 0 | 9 | 45.333333 | 140 |
PuHaoran/jdata | 10,187,662,430,808 | c91752a90a325707a56906543e44380553f1e432 | 9e908f9ae9aebbc6242706249b9ed65c87d1a6c8 | /src/data/concat_action_files.py | 8bb21811173e9f62e572bf9bce2074d6c344e2a1 | [
"BSD-3-Clause"
] | permissive | https://github.com/PuHaoran/jdata | fda13c92b65fc75e7795c64c6988ff91fd40e288 | bb8839d7f7a4a10f3ebcdd8f4d1ce2fa29562868 | refs/heads/master | "2022-04-19T02:41:57.855479" | "2020-03-25T01:49:10" | "2020-03-25T01:49:10" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Concat JData Action Files And Save
"""
import pandas as pd
from .. import DATA_DIR
def main(input_dir=DATA_DIR.joinpath("raw"), output_dir=DATA_DIR.joinpath("interim")):
file_list = list(input_dir.glob("JData_Action*.csv"))
df_list = [pd.read_csv(p) for p in file_list]
actions = pd.concat(df_list)
actions.to_csv("{}/All_Action.csv".format(output_dir), index=False)
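    # NOTE: the eager read/concat above holds everything in memory; for very
    # large dumps, pd.read_csv(..., chunksize=...) with incremental appends
    # to the output file would keep memory bounded.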
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 431 | py | 32 | concat_action_files.py | 21 | 0.645012 | 0.645012 | 0 | 18 | 22.944444 | 86 |
julianfloores/AprendiendoPython | 3,848,290,714,518 | 46f89d0e759d1a80bfea3aa9e941f251fd5bcd84 | 8c810aeb40c76e081982fde262e41133440f5b6f | /Tabla.py | 54dc04d672d7f36b3d6ea883d5665f9eaab54a9e | [] | no_license | https://github.com/julianfloores/AprendiendoPython | 7e0635c79ab8eb3d506ef1103c5705a6cd32710b | 4dad650cfde4ab29c8efce9adf7bfc022704f4fd | refs/heads/master | "2020-08-01T10:27:51.543923" | "2019-09-26T01:04:17" | "2019-09-26T01:04:17" | 210,966,863 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | n=input("Dame un numero del 1 al 9: ")
n = int(n)  # convert the input string to an integer before multiplying
for i in range(1, 11):
    salida = "{} X {} = {}"
    print(salida.format(n, i, i*n))
# Name: Francisco Julian Flores Hernandez
# Student ID: 1678204 | UTF-8 | Python | false | false | 202 | py | 9 | Tabla.py | 8 | 0.613861 | 0.554455 | 0 | 8 | 23.5 | 42 |
greseam/module5_python_practice_SMG | 3,556,232,944,931 | 20bdcb605ff642ee1d18206ac24f6ff550165eeb | b769a3992c2bf0b50f8f5860b5a41def32d4764e | /Module 5/mod5_hw1_task4_SMG.py | af51dff2df501bfefe8acb03726ee10f75075083 | [] | no_license | https://github.com/greseam/module5_python_practice_SMG | 540178db60af02bfeeb2e61d56e7c707b6fe0541 | f13e5422866a07e4a9ba06d0599ce6f0dfedbea3 | refs/heads/main | "2023-05-19T05:29:53.786620" | "2021-06-08T21:39:47" | "2021-06-08T21:39:47" | 375,149,924 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ######
#every third word begin
#
# Sean Gregor
#
#desc: takes a string and only prints every third word
######
def SpaceBegone():
userString = input("Enter in a sentence at least 3 words long: ").split(" ")
userEndString = len(userString)
for i in range (2, userEndString, 3):#the range is set to grab the third word starting at position 2 in the list and then every three words
decode = userString[i]
print(decode, end=" ")
if __name__ == "__main__":
SpaceBegone() | UTF-8 | Python | false | false | 515 | py | 9 | mod5_hw1_task4_SMG.py | 7 | 0.629126 | 0.621359 | 0 | 17 | 28.411765 | 143 |
AhmedSeemal/Eclipsegirls | 12,575,664,277,718 | c339fce952354ca96be2236548e0c76bc8d28d48 | e4c97366f620cf4419b2cee370a6a594af90faf8 | /djangogirls/urls.py | 096b23849f3d2158ffa75499bf47b7be344f1a16 | [] | no_license | https://github.com/AhmedSeemal/Eclipsegirls | 476115293298b6d555af15f54d00161f89008f4b | 1ab03fffb0721804a5ca27e93716d5b775a76eef | refs/heads/master | "2021-05-11T10:46:38.051043" | "2018-01-19T10:46:54" | "2018-01-19T10:46:54" | 118,113,425 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """djangogirls URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include,url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'', include('blog.urls')),
url(r'^dismantle/', include('dismantle.urls')),
url(r'^poll/', include('poll.urls')),
url(r'^home/', include('newapp.urls')),
url(r'^homes/', include('dum.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| UTF-8 | Python | false | false | 1,264 | py | 12 | urls.py | 10 | 0.691456 | 0.684335 | 0 | 31 | 39.774194 | 86 |
devonproudfoot/vandura | 506,806,166,985 | 6d15a9f5a758500ad9a4ab7c20e4797a305a7c48 | edcce4c7febeba37a91a249a69828f085b84015f | /archivesspace_defaults.py | 1fa4fe54cfed4a30d699bc2bcea2fce26cf541a7 | [] | no_license | https://github.com/devonproudfoot/vandura | 40748819948d5ca526c4d7ed79e9473c0513e865 | d1182f8c442510bf7185567707f59d234bdcf42c | refs/heads/master | "2019-08-08T23:55:53.682532" | "2016-06-29T19:36:54" | "2016-06-29T19:36:54" | 37,989,794 | 0 | 0 | null | true | "2015-06-24T14:32:34" | "2015-06-24T14:32:34" | "2015-06-10T16:39:10" | "2015-06-10T16:39:27" | 28,671 | 0 | 0 | 0 | null | null | null | from vandura.shared.scripts.archivesspace_authenticate import authenticate
from vandura.config import aspace_credentials
import requests
import time
import json
import getpass
from pprint import pprint
def make_note_multipart(note_type, note_content):
note_multipart = {"type":note_type,
"jsonmodel_type":"note_multipart",
"publish":True,
"subnotes":[{
"content":note_content,
"jsonmodel_type":"note_text",
"publish":True
}]}
return note_multipart
def resource_template():
resource_template = {
"record_type":"resource",
"defaults":{
"title":"[Enter a title]",
"publish":True,
"suppressed":False,
"level":"collection",
"language":"eng",
"finding_aid_title":"Finding aid for the [collection title]",
"notes":[
make_note_multipart("accessrestrict","This collection is open for research"),
make_note_multipart("bioghist", "Enter a biographical note"),
make_note_multipart("accruals", "REMOVE ONE:\r\nNo accruals expected\r\nAccruals expected"),
make_note_multipart("prefercite","[item],[box],[folder],[collection], etc.")]
}
}
return json.dumps(resource_template)
def add_enum_values(session, aspace_url, enum_set_id, new_values_to_add):
enum_address = aspace_url + '/config/enumerations/{}'.format(enum_set_id)
existing_enums_json = session.get(enum_address).json()
unique_values = [value for value in new_values_to_add if value not in existing_enums_json["values"]]
existing_enums_json["values"].extend(unique_values)
print(session.post(enum_address, data=json.dumps(existing_enums_json)).json())
def post_defaults(aspace_url, username, password):
s = authenticate(aspace_url, username, password)
s.headers.update({"Content-type":"application/json"})
bhl_repo = {
'name':'Bentley Historical Library',
'org_code':'MiU-H',
'repo_code':'BHL',
'parent_institution_name':'University of Michigan'
}
post_repo = s.post(aspace_url + '/repositories',data=json.dumps(bhl_repo)).json()
    print(post_repo)
mhc_classification = {'title':'Michigan Historical Collections','identifier':'MHC'}
uarp_classification = {'title':'University Archives','identifier':'UA'}
faculty_classification = {'title':'Faculty Papers', 'identifier':'Faculty'}
rcs_classification = {'title':'Records Center Storage','identifier':'RCS'}
for classification in [mhc_classification, uarp_classification, faculty_classification, rcs_classification]:
classification_post = s.post(aspace_url + '/repositories/2/classifications',data=json.dumps(classification)).json()
        print(classification_post)
add_enum_values(s, aspace_url, 23, ['lcnaf', 'lctgm', 'aacr2', 'lcgft', 'ftamc', 'fast']) # subject sources
add_enum_values(s, aspace_url, 4, ['lcnaf']) # name sources
add_enum_values(s, aspace_url, 55, ["on file", "pending", "sent", "n/a", "other"]) # user defined enum 1 values (gift agreement status)
add_enum_values(s, aspace_url, 14, ["TB"]) # extent
instance_types = s.get("{}/config/enumerations/22".format(aspace_url)).json()
for value in instance_types["enumeration_values"]:
value_uri = value["uri"]
s.post("{0}{1}/suppressed?suppressed=true".format(aspace_url, value_uri)).json()
repo_preferences = {
'repository':{'ref':'/repositories/2'},
'defaults':{'publish':True, 'default_values':True}
}
repo_preferences_post = s.post(aspace_url + '/repositories/2/preferences',data=json.dumps(repo_preferences)).json()
    print(repo_preferences_post)
resource_template_post = s.post("{}/repositories/2/default_values/resource".format(aspace_url), data=resource_template()).json()
    print(resource_template_post)
s.post("{}/logout".format(aspace_url))
def main():
aspace_url, username, password = aspace_credentials()
post_defaults(aspace_url, username, password)
if __name__ == "__main__":
main() | UTF-8 | Python | false | false | 3,864 | py | 1 | archivesspace_defaults.py | 1 | 0.695652 | 0.691253 | 0 | 102 | 36.892157 | 137 |
vvspearlvvs/CodingTest | 13,743,895,385,261 | 5b98146395ad29c6511925bbc47a3402f1251fa2 | 1e168ced1a4bdb53967021e082b98027aea9d38a | /1.알고리즘정리/정렬/삽입정렬.py | 6e0f94afc79ed7d33b51a468d14c6182e85e3d68 | [] | no_license | https://github.com/vvspearlvvs/CodingTest | 3ebf921308570ac11eb87e6660048ccfcaf90ce4 | fc61b71d955f73ef8710f792d008bc671614ef7a | refs/heads/main | "2023-07-13T15:57:11.312519" | "2021-08-25T02:15:28" | "2021-08-25T02:15:28" | 354,232,513 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #삽입정렬
arr = [7, 5, 9, 0, 3, 1, 6, 2, 4, 8]
for i in range(len(arr)):
for j in range(i,0,-1):
        if arr[j]<arr[j-1]: # shift the smaller element one step to the left
arr[j],arr[j-1]=arr[j-1],arr[j]
else:
break
print(arr)
print("최종")
print(arr)
| UTF-8 | Python | false | false | 282 | py | 261 | 삽입정렬.py | 252 | 0.468254 | 0.40873 | 0 | 11 | 21.909091 | 43 |
JoannaPia/Just-ask- | 17,171,279,270,465 | e0011be665b61506fe425bd8d07c4308884569e7 | e6a8470a3bf4e9c770aa2810ca3353a5a106ea62 | /questions_data.py | faae7bc8d959e44e660ecc7052fe8d0f8f6c188d | [] | no_license | https://github.com/JoannaPia/Just-ask- | 6613ef3b1d344925104de15b8f99aa8ea50f71a5 | 20781a043a8d2a95526262bf32e028e6ecd0a752 | refs/heads/main | "2023-03-27T07:24:34.343894" | "2021-03-27T11:10:02" | "2021-03-27T11:10:02" | 352,051,376 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from psycopg2.extras import RealDictCursor, DictCursor
import database_common
@database_common.connection_handler
def get_all_questions(cursor: RealDictCursor) -> dict:
query = """
SELECT id, submission_time, view_number, vote_number, title
FROM question
ORDER BY submission_time DESC
"""
cursor.execute(query)
questions = cursor.fetchall()
return questions
@database_common.connection_handler
def add_question(cursor: RealDictCursor, sub, view_n, vote_n, title, mess, image, email):
query_max_id = """
SELECT MAX(id) FROM question
"""
cursor.execute(query_max_id)
new_id = cursor.fetchone()
    nid = new_id['max'] or 0  # MAX(id) is NULL on an empty table
    query = "INSERT INTO question " \
            "VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"
    cursor.execute(query, (nid+1, sub, view_n, vote_n, title, mess, image, email))
return nid+1
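# A sturdier alternative (sketch) would let PostgreSQL assign the id via
# "INSERT INTO question (...) VALUES (...) RETURNING id"; the MAX(id)+1
# scheme above is kept to preserve this module's existing behaviour.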
@database_common.connection_handler
def get_question(cursor: RealDictCursor, question_id: int):
query = """
SELECT *
From question
WHERE id=%(question_id)s
"""
param = {'question_id': str(question_id)}
cursor.execute(query, param)
return cursor.fetchone()
@database_common.connection_handler
def get_question_id(cursor: RealDictCursor, question_id):
query = """
SELECT id
From question
WHERE id = %(question_id)s;
"""
param = {'question_id': question_id}
cursor.execute(query, param)
result = cursor.fetchone()
return result['id']
@database_common.connection_handler
def save_edit_question(cursor: RealDictCursor, question_id, message, title):
command = """
UPDATE question
SET message = (%(message)s), title = (%(title)s)
WHERE id=%(question_id)s
"""
param = {
'message': str(message),
'title': str(title),
'question_id': str(question_id)
}
cursor.execute(command, param)
return None
@database_common.connection_handler
def vote_up_question(cursor: RealDictCursor, item_id):
query = """
UPDATE question
SET vote_number = vote_number + 1
WHERE id=%(id)s
"""
param = {'id': item_id}
cursor.execute(query, param)
return None
@database_common.connection_handler
def vote_down_question(cursor: RealDictCursor, item_id):
query = """
UPDATE question
SET vote_number = vote_number - 1
WHERE id=%(id)s
"""
param = {'id': item_id}
cursor.execute(query, param)
return None
@database_common.connection_handler
def delete_question(cursor: RealDictCursor, question_id):
command1 = """
DELETE
FROM comment
WHERE question_id=%(id)s
"""
command2 = """
DELETE
FROM comment_q
WHERE question_id=%(id)s
"""
command3 = """
DELETE
FROM answer
WHERE question_id=%(id)s
"""
command4 = """
DELETE
FROM question
WHERE id=%(id)s
"""
param = {"id": str(question_id)}
cursor.execute(command1, param)
cursor.execute(command2, param)
cursor.execute(command3, param)
cursor.execute(command4, param)
return None
@database_common.connection_handler
def get_five_questions(cursor: RealDictCursor) -> dict:
query = """
SELECT id, submission_time, view_number, vote_number, title
FROM question
ORDER by submission_time DESC
LIMIT 5
"""
cursor.execute(query)
questions = cursor.fetchall()
return questions
@database_common.connection_handler
def get_user_from_question(cursor: RealDictCursor, question_id):
query = """
SELECT user_id
From question
WHERE id = %(question_id)s;
"""
param = {'question_id': question_id}
cursor.execute(query, param)
result = cursor.fetchone()
return result['user_id'] | UTF-8 | Python | false | false | 4,043 | py | 18 | questions_data.py | 6 | 0.593866 | 0.590403 | 0 | 149 | 26.14094 | 117 |
nasutionam/Things_In_Python | 14,164,802,150,015 | 94c4347e2a33fdeb878ad1361660dd349d1be685 | 5d37baf1f73fa7ac406afa079c31e5603b3771ad | /wa.py | a19428a269a281b377bc818c0dc5eac3c8ff5999 | [] | no_license | https://github.com/nasutionam/Things_In_Python | 4ee87bf6af55260892596194f2d27acc4bcb6406 | ae01ce6cb42b7cccd737e1186897c4705c5353de | refs/heads/master | "2022-11-11T19:12:47.016516" | "2020-06-18T17:46:54" | "2020-06-18T17:46:54" | 272,763,922 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pywhatkit
pywhatkit.sendwhatmsg("+6283","test",00,36) | UTF-8 | Python | false | false | 61 | py | 7 | wa.py | 5 | 0.770492 | 0.639344 | 0 | 3 | 19.666667 | 43 |
arcadigonzalez-uoc/M2.951-Practica2 | 19,121,194,411,727 | db1934d57515d58af5c9b591d082cbc9ea85d3c9 | 546eda2426325e42afad5fddb3872b0eda4346c4 | /normalize/etl.py | 982e7c98875c5e8ec1bd99f56ff905ea9da7fe9c | [
"MIT"
] | permissive | https://github.com/arcadigonzalez-uoc/M2.951-Practica2 | 95ad5d1ba3154ba22dff858633d60e5756b56d59 | 6993251cb257c87377660ada0fad352ed05b9170 | refs/heads/master | "2020-03-29T02:20:45.558921" | "2019-08-20T14:23:08" | "2019-08-20T14:23:08" | 149,431,777 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from normalize.connection import cursor, client
from normalize.custom_sites import monster, indeed, reed
i = 0
for item in cursor:
if i % 100 == 0: print(i)
i += 1
if 'Spider' in item:
try:
if item['Spider'] == 'Monster':
dct = monster.Monster(item['Body'], item['Url']).to_dict()
elif item['Spider'] == 'Indeed':
dct = indeed.Indeed(item['Body'], item['Url']).to_dict()
elif item['Spider'] == 'Reed':
dct = reed.Reed(item['Body'], item['Url']).to_dict()
else:
dct = None
        except Exception:  # skip documents the site parsers cannot handle
dct = None
if dct:
client.jobCrawl.cleanJobs.insert_one(dct)
| UTF-8 | Python | false | false | 716 | py | 38 | etl.py | 24 | 0.50838 | 0.5 | 0 | 22 | 31.5 | 74 |
Angeliz/text-index | 10,428,180,596,433 | 7e66cc29871fd1f4e44c199f65581c772d108fee | 92687a5b70d771139ab79b91989b99d5ef666a45 | /corpus_to_bunch.py | 62dd12d7ad7fba0999ee7d439973f15b9da43e35 | [] | no_license | https://github.com/Angeliz/text-index | 8a1227a5779ca3edf10824c696c1d99494cb23b8 | cce1685669601ea40f0f723e6be638fb5c14485e | refs/heads/master | "2020-04-04T11:28:12.636946" | "2018-12-27T08:48:11" | "2018-12-27T08:48:11" | 155,892,018 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # encoding=utf-8
import pickle
from sklearn.datasets import base
from utils import read_file, listdir_nohidden
from config_local import seg_path, bunch_path, experiment_seg_path, experiment_bunch_path
def corpus_to_bunch(bunch_path, seg_path):
'''
    :param bunch_path: path where the Bunch will be stored
    :param seg_path: path to the word-segmented corpus
'''
seg_class_list = listdir_nohidden(seg_path)
bunch = base.Bunch(target_name=[], label=[], filenames=[], contents=[])
bunch.target_name.extend(seg_class_list)
for seg_class_dir in bunch.target_name:
seg_class_path = seg_path + "/" + seg_class_dir + "/"
seg_file_list = listdir_nohidden(seg_class_path)
for seg_file in seg_file_list:
seg_full_path = seg_class_path + seg_file
bunch.label.append(seg_class_dir)
bunch.filenames.append(seg_file)
bunch.contents.append(read_file(seg_full_path))
with open(bunch_path, "wb") as file_obj:
pickle.dump(bunch, file_obj)
print("===================*****====================")
print("corpus_to_bunch end")
print("===================*****====================")
def experiment_corpus_to_bunch(bunch_path, seg_path):
'''
    :param bunch_path: path where the Bunch will be stored
    :param seg_path: path to the word-segmented corpus
'''
bunch = base.Bunch(target_name=[], label=[], filenames=[], contents=[])
seg_file_list = listdir_nohidden(seg_path)
for seg_file in seg_file_list:
seg_full_path = seg_path + "/" + seg_file
bunch.filenames.append(seg_file)
bunch.contents.append(read_file(seg_full_path))
with open(bunch_path, "wb") as file_obj:
pickle.dump(bunch, file_obj)
print("===================*****====================")
print("experiment_corpus_to_bunch end")
print("===================*****====================")
if __name__ == "__main__":
corpus_to_bunch(bunch_path, seg_path)
experiment_corpus_to_bunch(experiment_bunch_path, experiment_seg_path)
| UTF-8 | Python | false | false | 2,020 | py | 9 | corpus_to_bunch.py | 7 | 0.579615 | 0.579108 | 0 | 61 | 31.327869 | 89 |
mrfrozen97/frozenSpider-A-library-for-complete-machine-learning-algorithms-implementation- | 1,468,878,844,927 | c3518f49b65b154dea865bd85dd55a7e7c132018 | e8d00a3cec4ab65edc3bb7ba0902d162b1637f44 | /frozenSpider/spiderAlgorithmLR.py | db511f0638aa47e5c5697aabcbd2ad172c973fb0 | [
"MIT"
] | permissive | https://github.com/mrfrozen97/frozenSpider-A-library-for-complete-machine-learning-algorithms-implementation- | ce2513db523066794d339cacccae31e6cfb5ff41 | 1d1fee8c4553f7d0c4c5339cef624e02eeb40650 | refs/heads/main | "2023-07-02T06:51:44.189733" | "2021-08-02T11:24:57" | "2021-08-02T11:24:57" | 328,441,861 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Implementation of introductory ML algorithms, with visualisation as an ancillary
Author - mr frozen (or just frozen)
Github - github.com/mrfrozen97
Mail - mrfrozenpeak@gmail.com
This file has a class that performs the linear regression algorithm on the input data and can also visualise it.
The packages used here are numpy(for efficient calculations) and matplotlib(for data visualisation)
Classes:
Linear Regression- It carries out regression model
plot model- It carries out data visualisation
Equation of best fit line y = bx + a
In linear regression we want to minimise the squared error which is ( y(best_fit) - y(original) )^2
y(o) -> y coordinate of original line
x(o) -> x coordinate of original line
y(b) -> y coordinate of best fit line
x(b) -> x coordinate of best fit line
x(mean) -> mean of all x(o) values
y(mean) -> mean of all y(o) values
#Mathematics.........................................................................................................................
Let F = ∑ (y(o) - y(b))^2
We also know that y(b) = b*x(b) + a
So, F = ∑ (y(o) - b*x(b) - a)^2
Minimising error.....
a and b are our unknowns, so to find them we partially differentiate F w.r.t. each of them
∂F/∂a = 0 -(1)
∂F/∂b = 0 -(2)
Let u = y(o) - b*x(b) - a, so that F = ∑ u^2

∂F/∂a = ∂F/∂u * ∂u/∂a      -(3)
∂F/∂b = ∂F/∂u * ∂u/∂b      -(4)

∂F/∂u = 2u
∂u/∂a = -1
From 3 and 1,
0 = -2*∑(y(o) - b*x(b) - a)
0 = ∑y(o) - b*∑x(b) - n*a
n*a = ∑y(o) - b*∑x(b)
a = (y(mean) - b*x(mean)) -(5)
Now,
∂u/∂b = -x(b)

From 4 and 2,
0 = -2*∑( (y(o) - b*x(b) - a) * x(b) )
0 = ∑(y(o)x(b)) - b*∑(x(b)^2) - a*∑x(b)
Substituting a from 5 and dividing by n,
(y(o)x(b))(mean) - b*(x(b)^2)(mean) - (y(mean) - b*x(mean))*x(mean) = 0
Solving this
b = ( (y(o)x(o))(mean) - y(mean)*x(mean) ) / ( (x(o)^2)(mean) - x(mean)^2 )            ###############.....IMP
a = y(mean) - b*x(mean) ###############.....IMP
squared mean error is defined as
SME = 1 - ( ∑(y(b)-y(o))^2 / ∑(y(b)-y(mean))^2 )       ###############.....IMP
#End of math. Maybe, Never skip maths................................................................
Attributes:
............................
____________________________
b -> indicates slope of best fit line
a -> indicates x intercept of the best fit line
x -> input x coordinates
y -> input y coordinates
x_mean -> mean of x values
y_mean -> mean of y values
squared_mean_error -> squared error to check accuracy
showGrid -> Boolean which indicates whether the grid should be shown or not
sizeX -> Horizontal size of graph in inches
sizeY -> Vertical size of graph in inches
labelx_count -> number of labels on x axis
labely_count -> number of labels on y axis
title_color -> color of title of plot
label_default -> Boolean which tells if the label is default or changed by user
xlabel_color -> color of x label
ylabel_color -> color of y label
title -> title text
x_label -> x label text
y_label -> y label text
xlabel_size -> x_label font size of text
ylabel_size -> y_label font size of text
title_size -> title font size of text
model -> this is the model object i.e. linear regression object in case we need any parent class variables
unknown_points_size-> this specifies the size of unknown points if any on the graph
unknown_points_color-> this specifies the color of unknown points if any on the graph
unknown_points_alpha-> this specifies the transparency of unknown points if any on the graph
#####.....IMP.......####
color_dict -> Dictionary that contains a wide range of colors. The keys are the names of the colors
, the values are the hex codes of those colors. You can get this dict using the get_color_dict() function.
You can print this dict i.e. print(object.get_color_dict()) to know which colors are available.
You are most welcomed to improve this code:
You can pull request for this code at
github.com/mrfrozen97/ (In spider algorithms repo)
or
Email - mrfrozenpeak@gmail.com
"""
import numpy as np
import matplotlib.pyplot as plt
import math
from frozenSpider import spiderAlgorithmResources as res
"""
Class that performs linear regression
It takes in the x and y coordinates of the data
It then calculates the value of b and a using numpy array functions
It returns values of a and b
It also returns values of squared mean error in squared_error function
"""
class Linear_regression:
def __init__(self):
self.a = 0.0
self.b = 0.0
self.squared_mean_error = 0.0
self.x_mean = 0
self.y_mean = 0
self.size = 1
self.x = []
self.y = []
self.calculated_y = []
self.calculated_x = []
    #Function to calculate and return a, b values....................................
def bestFit(self, x_coordinates, y_coordinates):
size_coordinates = np.size(x_coordinates)
x_coordinates_mean = np.sum(x_coordinates)/size_coordinates
y_coordinates_mean = np.sum(y_coordinates)/size_coordinates
self.x = x_coordinates
self.y = y_coordinates
self.size = size_coordinates
self.x_mean = x_coordinates_mean
self.y_mean = y_coordinates_mean
bestFint_numerator = (np.sum(np.multiply(x_coordinates, y_coordinates))/size_coordinates) - (x_coordinates_mean*y_coordinates_mean)
bestFint_denominator = (np.sum(np.multiply(x_coordinates,x_coordinates))/size_coordinates) - (x_coordinates_mean**2)
self.b = bestFint_numerator/bestFint_denominator
self.a = y_coordinates_mean - (self.b*x_coordinates_mean)
return self.a, self.b
    def bestFit_logrithemic(self, x_coordinates, y_coordinates, base=math.e):
        # assumption: the intent is to linearise an exponential trend by
        # log-transforming the y values before the ordinary least-squares fit
        y_coordinates = [math.log(y, base) for y in y_coordinates]
size_coordinates = np.size(x_coordinates)
x_coordinates_mean = np.sum(x_coordinates)/size_coordinates
y_coordinates_mean = np.sum(y_coordinates)/size_coordinates
self.x = x_coordinates
self.y = y_coordinates
self.size = size_coordinates
self.x_mean = x_coordinates_mean
self.y_mean = y_coordinates_mean
bestFint_numerator = (np.sum(np.multiply(x_coordinates, y_coordinates))/size_coordinates) - (x_coordinates_mean*y_coordinates_mean)
bestFint_denominator = (np.sum(np.multiply(x_coordinates,x_coordinates))/size_coordinates) - (x_coordinates_mean**2)
self.b = bestFint_numerator/bestFint_denominator
self.a = y_coordinates_mean - (self.b*x_coordinates_mean)
return self.a, self.b
#function that calculates and returns squarred error which can be attributed to the accuracy og our best fit line............
def squared_error(self):
mean_y_array = [self.y_mean for x in range(self.size)]
bestFit_y = [(self.b*x + self.a) for x in self.x]
# print(mean_y_array)
# print(bestFit_y)
squared_error_denominator = np.sum(np.square(np.subtract(bestFit_y, mean_y_array)))
squared_error_numerator = np.sum(np.square(np.subtract(bestFit_y, self.y)))
self.squared_mean_error = 1 - (squared_error_numerator/squared_error_denominator)
return self.squared_mean_error
def find_unknowns(self, x):
x = list(x)
self.calculated_x = x
for i in x:
self.calculated_y.append(self.b * i + self.a)
return self.calculated_y
"""
This is child class of the linear regression model class
It basically plots the result obtained by performing linear regression vs the original data
The parameters such as title, x/y labels , their colors, their size, plot colors, x/y label values, grid, etc can be set
There are function to set these values as well as to set the size of graph, to save it, etc.
All the default values of the above variables are already set so you can directly plot entire graph with just one line of code......
"""
class plot_model(Linear_regression):
#All the default value are already set but they can all be reset.........................
def __init__(self, model):
self.showGrid = True
self.sizeX = 10
self.labelx_count = 10
self.labely_count = 10
self.title_color = "#FF0000"
self.xlabel_color = "#663399"
self.ylabel_color = "#663399"
self.label_default = True
self.sizeY = 6
self.title = ""
self.x_label = ""
self.y_label = ""
self.xlabel_size = 15
self.ylabel_size = 15
self.title_size = 18
self.model = model
self.color_dict = res.Resources.get_color_dict()
self.unknown_points_size = 20
self.unknown_points_color = "Orange"
self.unknown_points_alpha = 0.8
#Function to get dictionary to access the different options avaliable for the colors.........................
def get_color_dict(self):
return self.color_dict
#function to set vlues for title, x/y label and values of labels in case.........................
def set_marker_properties(self,unknown_points_alpha=0.8, unknown_points_color = "Orange", unknown_points_size = 20, xlabel_count=17, label_default=True, ylabel_count=12, title_size=15, xlabel_size=10, ylabel_size=10, title="Linear Regression", x_label="x coordinates", y_label="y coordinates", title_color="#FF0000", xlabel_color="#663399", ylabel_color="#663399"):
self.title_size = title_size
self.xlabel_size = xlabel_size
self.ylabel_size = ylabel_size
self.x_label = x_label
self.y_label = y_label
self.title = title
self.xlabel_color = xlabel_color
self.ylabel_color = ylabel_color
self.title_color = title_color
self.labelx_count =xlabel_count
self.labely_count =ylabel_count
self.label_default = label_default
self.unknown_points_size = unknown_points_size
self.unknown_points_color = unknown_points_color
self.unknown_points_alpha = unknown_points_alpha
#Sets the display size which will be equal to size of image if it is being svaed.........................
def set_display_size(self, sizeX, sizeY):
self.sizeX = sizeX
self.sizeY = sizeY
#function to create x,y values according to the best fit line equation................................
def create_best_fit_line(self):
y_coords_line = []
x_coords_line = list(self.model.x)
for i in self.model.x:
y_coords_line.append(self.model.b*i + self.model.a)
#print(self.model.calculated_x)
for i in self.model.calculated_x:
x_coords_line.append(i)
y_coords_line.append(self.model.b*i + self.model.a)
#print(len(self.model.x) - len(self.model.y))
return x_coords_line, y_coords_line
#This function creates f\default ticks for x and y axis ....
def generate_ticks(self, label_x, label_y):
if self.label_default or len(label_x)==0 or len(label_y)==0:
min_y = min(self.model.y)
min_x = min(self.model.x)
            max_y = max(max(self.model.y), max(self.model.calculated_y, default=min_y))
            max_y += int(max_y*0.3)
            max_x = max(max(self.model.x), max(self.model.calculated_x, default=min_x))
            max_x += int(max_x * 0.2)
# dif_y = max(1, int(len(self.model.y)/self.labely_count))
dif_y = max(1, int((max_y-min_y) / self.labely_count))
#dif_x = max(1, int(len(self.model.x)/self.labelx_count))
dif_x = max(1, int((max_x-min_x) / self.labelx_count))
x_labels_default = []
y_labels_default = []
while min_x<=max_x:
x_labels_default.append(min_x)
min_x+=dif_x
while min_y<max_y:
y_labels_default.append(min_y)
min_y += dif_y
# print(x_labels_default)
# print(y_labels_default)
plt.xticks(x_labels_default)
plt.yticks(y_labels_default)
else:
plt.xticks(label_x)
plt.yticks(label_y)
#main fuction thats plots the graph and saved it if path provided.........................
def plot_model(self,label="Points",alpha=0.6, point_size=25,label_x=[], label_y=[],line_color='Purple',unknown_points_label="Unknown points",line_label='Best Fit line', point_color='DeepSkyBlue', save_fig_path='dont'):
#These are the coordinates to plot the line which is inclusion of both known and calculated points
x_bestFitLine_coords, y_bestFitLine_coords = self.create_best_fit_line()
plt.plot(x_bestFitLine_coords, y_bestFitLine_coords, color=self.color_dict[line_color], label=line_label) #plot the best fit line
plt.scatter(self.model.x, self.model.y, zorder=3, label=label, s=point_size, color=self.color_dict[point_color], alpha=alpha) #plot all the points
plt.scatter(self.model.calculated_x, self.model.calculated_y, color=self.color_dict[self.unknown_points_color], label=unknown_points_label, zorder = 4, alpha=self.unknown_points_alpha, s=self.unknown_points_size)
plt.title(self.title,
fontdict=res.Resources.title_dict,
color=self.title_color)
plt.xlabel(self.x_label,
fontdict=res.Resources.label_dict,
color=self.xlabel_color)
plt.ylabel(self.y_label,
fontdict=res.Resources.label_dict,
color=self.ylabel_color)
self.generate_ticks(label_x, label_y)
plt.legend(loc = 'upper left')
if self.showGrid:
plt.grid(color='#cfd8dc', zorder=0)
figure = plt.gcf()
figure.set_size_inches(self.sizeX, self.sizeY)
if not(save_fig_path=='dont') :
plt.savefig("./" + save_fig_path)#, bbox_inches='tight')
plt.show()
#Sample code to work with this file of the library
"""
x = [-22, 17, 22, 28, 35]
y = [-9.3, 5, 8, 12, 14]
x1 = [13, 47]
le1 = Linear_regression()
a,b =le1.bestFit(x, y)
le1.find_unknowns(x1)
print("Slope = " + str(b))
print("x intercept = " + str(a))
print("Squared error = "+ str(le1.squared_error()))
plot2 = plot_model(le1)
plot2.set_marker_properties(unknown_points_size=80, unknown_points_alpha=0.6, unknown_points_color='Red')
plot2.plot_model(save_fig_path="plot2")
"""
| UTF-8 | Python | false | false | 15,373 | py | 20 | spiderAlgorithmLR.py | 18 | 0.578134 | 0.567332 | 0 | 488 | 29.30123 | 369 |
dyingg/rk311_infowise-pub- | 11,742,440,624,462 | 2587a225e6cabb5f66558b5823b873d1ffc9f560 | e3c6c3e035430c8523aefbdb6f295444951b395c | /src/main/scraper-module/AI_retrain.py | 733795f7297c901e9f4e6bf722ba0785469241c0 | [] | no_license | https://github.com/dyingg/rk311_infowise-pub- | acbae670f04727dfd35c40dbb40f241ac62b418b | 48a43c9a75dc7bc191abb602b2ba92e1cac6787a | refs/heads/master | "2023-07-05T14:34:07.503763" | "2021-08-26T08:29:14" | "2021-08-26T08:29:14" | 284,060,242 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import keras
from keras.models import Sequential
from keras.layers import Dense
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import pickle, os
import numpy as np
from Naked.toolshed.shell import muterun_js
import sys
import requests, random
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # must be set before TensorFlow loads to take effect (ideally before the keras import above, too)
import tensorflow as tf
def save_model(filename, model):
pickle.dump(model, open(filename, 'wb'))
def load_model(filename):
return pickle.load(open(filename, 'rb'))
def get_score(ip):
url = "https://ipqualityscore.com/api/json/ip/F5FEG8NGn0stGDfizWPf7ylxu3iNqnX4/" + ip
response = requests.get(url).json()
# print(response)
return response['fraud_score']
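# NOTE: get_score() hits the ipqualityscore.com REST API (key embedded in the
# URL above); retrain() below uses its fraud_score as the training label and
# simply skips an ip when the call fails.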
def get_inputs(path, file_name, ip):
os.chdir(path)
file = os.path.join(path, file_name)
x = file_name + ' ' + ip
output = muterun_js(x)
s = output.stdout
s = str(s).strip()[2:-3]
print(s)
return s
def retrain(ip_list):
model_folder = "../ai-server/models"
model_names = os.listdir(model_folder)
models = []
for m in model_names:
model_path = os.path.join(model_folder, m)
model = load_model(model_path)
models.append(model)
    # resolve to an absolute path once so the os.chdir inside get_inputs
    # keeps working across loop iterations
    inp_folder = os.path.abspath("../whois")
file_name = "getInspectorScores.js"
x_train, y_train = [], []
random.shuffle(ip_list)
for ip in ip_list[:3]:
inputs = get_inputs(inp_folder, file_name, ip).split()
x = list(map(float, inputs))
# print(x)
try:
y = int(get_score(ip))
        except Exception:  # the scoring API call can fail or rate-limit; skip this ip
continue
y = np.array(y)
x_train.append(x)
y_train.append(y)
x_train, y_train = np.array(x_train), np.array(y_train)
# print(x_train.shape, y_train.shape)
for model in models:
        model.fit(x_train, y_train, batch_size=10, epochs=30)  # 'epochs' is the current Keras spelling of this argument
print("Model Trained!") | UTF-8 | Python | false | false | 1,959 | py | 114 | AI_retrain.py | 75 | 0.630934 | 0.623788 | 0 | 86 | 21.790698 | 89 |
linklab/aiclass | 11,373,073,405,860 | 9eff9999ccecd071b291daddd8d38503b3aaf958 | bfdc632d1921c5a2f0877e44f1394f67d4ef6d7a | /1731095004_SiHoRyu/HW2/functions.py | fffb6e342a6db23b90dd52e366efa5b694a7ce99 | [] | no_license | https://github.com/linklab/aiclass | 8f638d5ff0d76ba5cc631c93c2b55ed06e2456e5 | 4655fceee6ff301f47e954d56b1dbacb3ccae8ac | refs/heads/master | "2021-01-19T23:39:45.530750" | "2017-11-28T07:09:58" | "2017-11-28T07:09:58" | 83,782,465 | 4 | 14 | null | false | "2017-04-04T10:06:18" | "2017-03-03T09:38:59" | "2017-03-31T02:42:08" | "2017-04-04T09:17:47" | 16,161 | 2 | 13 | 2 | Jupyter Notebook | null | null | import numpy as np
import math
import matplotlib.pyplot as plt
from scipy.stats import truncnorm
from numba import jit, float64, uint8, void, cuda
def get_truncated_normal(shape, mean=0, sd=1, low=0, upp=10):
x = truncnorm(a=(low - mean) / sd, b=(upp - mean) / sd, loc=mean, scale=sd)
num_elements = 1
for dim in shape:
num_elements *= dim
x = x.rvs(num_elements)
x = x.reshape(shape)
return x
def sigmoid(x, is_numba):
if is_numba:
return _sigmoid(x)
else:
return 1 / (1 + np.exp(-x))
@jit(nopython=True)
def _sigmoid(x):
return 1 / (1 + np.exp(-x))
def squared_error(output_value, target_value, is_numba):
if is_numba:
return _squared_error(output_value, target_value)
else:
return 0.5 * np.power(output_value - target_value, 2.0)
@jit(nopython=True)
def _squared_error(output_value, target_value):
return 0.5 * np.power(output_value - target_value, 2.0)
def softmax(x, is_numba):
if is_numba:
return _softmax(x)
else:
if x.ndim == 2:
x = x.T
x = x - x.max()
y = np.exp(x) / np.sum(np.exp(x), axis=0)
return y.T
x = x - x.max()
return np.exp(x) / np.sum(np.exp(x))
@jit(nopython=True)
def _softmax(x):
if x.ndim == 2:
x = x.T
x = x - x.max()
y = np.exp(x) / np.sum(np.exp(x), axis=0)
return y.T
x = x - x.max()
return np.exp(x) / np.sum(np.exp(x))
def cross_entropy_error(y, t, is_numba):
if y.ndim == 1 and t.ndim == 1:
t = t.reshape(1, t.size)
y = y.reshape(1, y.size)
y[y == 0] = 1e-15
batch_size = y.shape[0]
if is_numba:
return _cross_entropy_error(y, t, batch_size)
else:
return -np.sum(t * np.log(y)) / batch_size
@jit(nopython=True)
def _cross_entropy_error(y, t, batch_size):
return -np.sum(t * np.log(y)) / batch_size
def accuracy(forward_final_output, target):
y = np.argmax(forward_final_output, axis=1)
if target.ndim != 1:
target = np.argmax(target, axis=1)
accuracy = np.sum(y == target) / float(forward_final_output.shape[0])
return accuracy
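# accuracy() accepts either one-hot or index-encoded targets; predictions are
# taken as the argmax of the network's final output along axis 1.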
if __name__ == "__main__":
# x1 = get_truncated_normal(shape=(1, 10000), mean=2, sd=1, low=1, upp=10)
# x2 = get_truncated_normal(shape=(1, 10000), mean=5.5, sd=1, low=1, upp=10)
# x3 = get_truncated_normal(shape=(1, 10000), mean=8, sd=1, low=1, upp=10)
#
# fig, ax = plt.subplots(3, sharex=True)
# ax[0].hist(x1.flatten())
# ax[1].hist(x2.flatten())
# ax[2].hist(x3.flatten())
# plt.show()
a = np.array([1.0, 2.0, 3.0])
print(sigmoid(a, is_numba=True))
print()
b = np.array([1.0, 0.0, 1.0])
c = np.array([0.0, 1.0, 0.0])
print(squared_error(b, c, is_numba=True))
print()
print(cross_entropy_error(b, c, is_numba=True))
print()
d = np.array([[1.0, 2.0, 3.0], [3.0, 2.0, 1.0]])
print(softmax(d, is_numba=True))
print()
q = np.array([[3.3, 1.2, 9.4], [7.1, 2.2, 3.3], [1.9, 9.2, 2.3]])
t = np.array([[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    print(accuracy(q, t))
q = np.array([[3.3, 1.2, 9.4], [7.1, 2.2, 3.3], [1.9, 9.2, 2.3]])
t = np.array([[1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    print(accuracy(q, t)) | UTF-8 | Python | false | false | 3,357 | py | 116 | functions.py | 72 | 0.546619 | 0.491808 | 0 | 130 | 24.830769 | 80 |
shreyakapadia10/Bike-Rental-System | 18,528,488,929,752 | a06c4bf89ec78a02e221b11ac498ea544c5b59d3 | eda5d1c94b9c8e33ba01692a0d946fc0cade0190 | /BikeRentalSystem/BikeUsers/forms.py | 77c846497921e3ca60bfbc13606e1a5ccfcbe91f | [] | no_license | https://github.com/shreyakapadia10/Bike-Rental-System | 9fca03be0b2465f4552b75533ed6e8ccdc16fbda | 0356d5f66b4d50599633d57a7e2fc592567a5c9c | refs/heads/master | "2023-06-10T19:55:59.338879" | "2021-07-02T06:58:44" | "2021-07-02T06:58:44" | 373,848,485 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django import forms
from .models import *
from django.contrib.auth.forms import UserCreationForm, UserChangeForm, AuthenticationForm, PasswordChangeForm
'''User Registration Form'''
class CustomerCreationForm(UserCreationForm):
class Meta:
model = Customer
fields = ['role','first_name', 'last_name', 'username', 'contact', 'address', 'pincode', 'email', 'proof', 'state', 'city']
def __init__(self, *args, **kwargs):
super(CustomerCreationForm, self).__init__(*args, **kwargs)
for visible in self.visible_fields():
visible.field.widget.attrs['class'] = 'form-control'
'''User Login Form'''
class CustomerLoginForm(AuthenticationForm):
username = forms.CharField(help_text='Enter your username', required=True)
class Meta:
model = Customer
fields = ['username', 'password']
def __init__(self, *args, **kwargs):
super(CustomerLoginForm, self).__init__(*args, **kwargs)
for visible in self.visible_fields():
visible.field.widget.attrs['class'] = 'form-control'
'''Customer Change Form'''
class CustomerChangeForm(UserChangeForm):
class Meta:
model = Customer
exclude = ('status', )
'''Select City Form'''
class CityForm(forms.ModelForm):
CHOICES = (
("", "---------"),
)
name = forms.ChoiceField(required=True, choices=CHOICES, label="City", widget=forms.Select(attrs={'class': 'form-control'}))
states = State.objects.all()
state = forms.ModelChoiceField(queryset=states, widget=forms.Select(attrs={'class': 'form-control'}))
class Meta:
model = City
fields = ['state', 'name']
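    # NOTE: `name` starts with only a placeholder choice; the actual city
    # options are presumably populated client-side once a state is selected.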
'''Customer Update Form'''
class CustomerUpdateForm(UserChangeForm):
class Meta:
model = Customer
fields = ['first_name', 'last_name', 'username', 'contact', 'address', 'pincode', 'email', 'proof', 'state', 'city']
def __init__(self, *args, **kwargs):
super(CustomerUpdateForm, self).__init__(*args, **kwargs)
for visible in self.visible_fields():
visible.field.widget.attrs['class'] = 'form-control'
class PasswordUpdateForm(PasswordChangeForm):
class Meta:
model = Customer
def __init__(self, user, *args, **kwargs):
super(PasswordUpdateForm, self).__init__(user, *args, **kwargs)
for visible in self.visible_fields():
visible.field.widget.attrs['class'] = 'form-control' | UTF-8 | Python | false | false | 2,442 | py | 52 | forms.py | 24 | 0.635545 | 0.635545 | 0 | 69 | 34.405797 | 131 |
StarStudio/StarSSO | 16,604,343,572,568 | 9077e15d13464861059ddcb8de6d57d1ebce2e56 | 7decdbc9b7bab2d0b4d571a8f658d9beebaa619d | /StarMember/lan_agent/submit.py | e88587e907ed10189adc2bc48a5cb2aa5b99d1f0 | [] | no_license | https://github.com/StarStudio/StarSSO | 3a299630e7fc5d6a2e7957adece8b6796c192cd2 | 4402e890eaf02e57b57ad04596490e0e8759a814 | refs/heads/master | "2020-04-03T22:26:57.801834" | "2018-12-30T05:14:55" | "2018-12-30T05:14:55" | 155,602,807 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from StarMember.views import SignAPIView, param_error, api_succeed
from StarMember.aspect import post_data_type_checker, post_data_key_checker
from StarMember.utils import get_request_params
from flask import request, current_app
from .network import Network, InvalidNetworkIDError
class LANDeviceSubmit(SignAPIView):
method = ['POST']
def post(self, nid):
params = get_request_params()
key_checker = post_data_key_checker('devices')
ok, err_msg = key_checker(params)
if not ok:
return param_error(err_msg)
devices = params['devices']
if not isinstance(devices, list):
return param_error('Arg devices has wrong type.')
try:
net = Network(nid)
net.UpdateDevices(devices)
except (InvalidNetworkIDError, ValueError) as e:
return param_error(str(e))
return api_succeed()
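# Expects a request body carrying a 'devices' parameter that holds a list of
# device records; anything else is rejected with param_error before
# Network.UpdateDevices is attempted.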
| UTF-8 | Python | false | false | 941 | py | 60 | submit.py | 48 | 0.650372 | 0.650372 | 0 | 27 | 33.851852 | 75 |
WoolinChoi/WoolinChoi.github.io | 3,882,650,455,075 | 9d8c879f5cb737da5096a0417feb2e9f05fff023 | a9590fdcf3ed0b3776e7d0edf5be9d13bbace255 | /_posts/jupyter/cWebConn/3_beautifulsoup_class/Ex01_element.py | 1029f9e5f0144e1cb99ce818e6c739b516febca9 | [
"MIT"
] | permissive | https://github.com/WoolinChoi/WoolinChoi.github.io | 59aa4ea685dd38f2588fcac662bb44688b4c7d59 | a0f9c8ecc63443acaae61d744eecec6c943d3a26 | refs/heads/master | "2022-10-24T16:57:06.830741" | "2020-04-15T10:21:54" | "2020-04-15T10:21:54" | 182,653,421 | 0 | 1 | NOASSERTION | false | "2022-10-06T10:44:00" | "2019-04-22T08:09:09" | "2020-04-15T10:21:57" | "2022-10-06T10:43:57" | 75,838 | 0 | 0 | 3 | Jupyter Notebook | false | false | """
The bs4 library: makes HTML code fetched from the web easy to work with in Python
    [Note] Ways to fetch HTML code from the web
        - the requests module
        - urllib's request module
    BeautifulSoup module
        - find()
        - find_all()
    [Note] Parser types
        - lxml : written in C, so it is fast
        - html5lib : written in Python, slower than lxml
        - html.parser (*): choose based on your Python version
"""
from bs4 import BeautifulSoup
html = """
<html><body>
<h1>스크레이핑 연습</h1>
<p>웹페이지 분석하자</p>
<p>데이타 정제하기</p>
</body></html>
"""
# 1. Parse the data
soup = BeautifulSoup(html, "html.parser")
# 2. Access the element you want
h1 = soup.html.body.h1
print(h1)
print(h1.text) # use .text or .string when you only want the string content
print(h1.string)
# 3. Extract the contents of the p elements
p = soup.find_all("p")
print(p)
for i in p: # the result is a list, so use a for loop to print each item
print(i.text)
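# A CSS-selector alternative (sketch):
# for tag in soup.select("body p"):
#     print(tag.get_text())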
| UTF-8 | Python | false | false | 1,172 | py | 453 | Ex01_element.py | 89 | 0.541262 | 0.525485 | 0 | 40 | 19.6 | 57 |
ilyakonstantinov95/DjBlog | 14,285,061,231,926 | 93264ebec8e665233eb75c2148ac066085e57ed8 | e2890e6ef04220a09d150aa922376db39bb7853e | /emplist/migrations/0001_initial.py | 693d522e4c0b73b7c98ebae63dbb1ccbeeb932f7 | [] | no_license | https://github.com/ilyakonstantinov95/DjBlog | da243ecbb8c49f6a2a7c42a644791f68d413942b | 3ab34bed10d7a0f58d19dfee90e836daf7bf6ff7 | refs/heads/master | "2017-12-06T10:14:59.004524" | "2017-01-26T08:24:15" | "2017-01-26T08:24:15" | 80,039,147 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-23 10:13
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Employee',
fields=[
('id_emp', models.AutoField(primary_key=True, serialize=False, unique=True, verbose_name='Идентификатор')),
('first_name', models.CharField(db_index=True, max_length=25, verbose_name='Имя')),
('second_name', models.CharField(db_index=True, max_length=25, verbose_name='Фамилия')),
('third_name', models.CharField(db_index=True, max_length=25, verbose_name='Отчество')),
('birthday', models.DateField(db_index=True, verbose_name='Дата рождения')),
('date_start', models.DateField(auto_now_add=True, db_index=True, verbose_name='Дата принятия')),
('date_end', models.DateField(blank=True, null=True, verbose_name='Дата увольнения')),
],
options={
'verbose_name': 'Сотрудник',
'verbose_name_plural': 'Сотрудники',
'ordering': ['first_name', 'second_name', 'third_name', 'birthday', 'sex', 'status', 'post', '-date_start', 'date_end'],
},
),
migrations.CreateModel(
name='Post',
fields=[
('id_post', models.AutoField(primary_key=True, serialize=False, unique=True, verbose_name='Идентификатор')),
('name', models.CharField(db_index=True, max_length=50, unique=True, verbose_name='Должность')),
],
options={
'verbose_name': 'Должность',
'verbose_name_plural': 'Должности',
'ordering': ['name'],
},
),
migrations.CreateModel(
name='Sex',
fields=[
('id_sex', models.AutoField(primary_key=True, serialize=False, unique=True, verbose_name='Идентификатор')),
('type', models.CharField(db_index=True, max_length=7, verbose_name='Пол')),
],
options={
'verbose_name': 'Пол',
'verbose_name_plural': 'Пол',
'ordering': ['type'],
},
),
migrations.CreateModel(
name='Status',
fields=[
('id_status', models.AutoField(primary_key=True, serialize=False, unique=True, verbose_name='Идентификатор')),
('status', models.CharField(db_index=True, max_length=30, unique=True, verbose_name='Статус')),
],
options={
'verbose_name': 'Статус',
'verbose_name_plural': 'Статусы',
'ordering': ['status'],
},
),
migrations.AlterUniqueTogether(
name='status',
unique_together=set([('id_status', 'status')]),
),
migrations.AlterUniqueTogether(
name='sex',
unique_together=set([('id_sex', 'type')]),
),
migrations.AlterUniqueTogether(
name='post',
unique_together=set([('id_post', 'name')]),
),
migrations.AddField(
model_name='employee',
name='post',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='emplist.Post', verbose_name='Должность'),
),
migrations.AddField(
model_name='employee',
name='sex',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='emplist.Sex', verbose_name='Пол'),
),
migrations.AddField(
model_name='employee',
name='status',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='emplist.Status', verbose_name='Статус'),
),
migrations.AlterUniqueTogether(
name='employee',
unique_together=set([('first_name', 'second_name', 'third_name', 'birthday', 'sex')]),
),
]
| UTF-8 | Python | false | false | 4,460 | py | 17 | 0001_initial.py | 11 | 0.54108 | 0.534507 | 0 | 101 | 41.178218 | 150 |
Yun-Jongwon/TIL | 11,605,001,651,736 | b3f65b69e00c8a9f6f48a8c3daa3b050ff4a4345 | 1346ea1f255d3586442c8fc1afc0405794206e26 | /알고리즘/day15/picnic.py | a0675c0839036358fd7ba4ab78d78e7ec1e146dd | [] | no_license | https://github.com/Yun-Jongwon/TIL | 737b634b6e75723ac0043cda9c4f9acbc2a24686 | a3fc624ec340643cdbf98974bf6e6144eb06a42f | refs/heads/master | "2020-04-12T00:41:03.985080" | "2019-05-01T07:55:25" | "2019-05-01T07:55:25" | 162,208,477 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def dfs(couple):
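    # Count the ways to pair all n students such that every pair is friends:
    # pick the first unpaired student, try each unpaired friend, and recurse.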
global m
global count
if sum(visited)==len(visited):
count+=1
return
for V in range(n):
if visited[V]==0:
visited[V]=1
v=V
break
for w in range(n):
if total_map[v][w]==1 and visited[w]==0:
visited[w]=1
dfs(couple+1)
visited[w]=0
visited[V]=0
T=int(input())
for t in range(T):
n,m=map(int,input().split())
visited=[0]*n
data=list(map(int,input().split()))
total_map=[[0]*n for i in range(n)]
count=0
for M in range(m):
a=data[2*M]
b=data[2*M+1]
total_map[a][b]=1
total_map[b][a]=1
dfs(0)
print(count)
# print(total_map)
| UTF-8 | Python | false | false | 739 | py | 220 | picnic.py | 189 | 0.477673 | 0.453315 | 0 | 34 | 20.588235 | 48 |
RussellSk/grokking_algorithms | 13,185,549,612,182 | f57a5ab516edeb1b8dc6d04c6ffab4f028a08c66 | dfa7aaaefb5ccff7fcc016ac1740d52cfbe20caf | /Chapter4/Exercise4.1.py | 7749abba16a976aa7f3b90cd4796ee5356d9c9d6 | [] | no_license | https://github.com/RussellSk/grokking_algorithms | 97ecd387b614b8b7f413c24467b97e2c704a1af8 | 88a2b9c5f21d06d1cba3526c26b68b56dc40e399 | refs/heads/master | "2021-04-14T04:59:44.421724" | "2020-04-16T11:44:01" | "2020-04-16T11:44:01" | 249,208,424 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Exercises 4.1
# Write out the code for the earlier sum function
def sum(arr):
if not arr:
return 0
else:
return arr.pop() + sum(arr)
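# NOTE: this recursive sum shadows the built-in sum() and arr.pop() empties
# the caller's list; pass a copy (e.g. sum(list(arr))) if the original is
# still needed afterwards.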
if __name__ == "__main__":
print(sum([2, 4, 6]))
| UTF-8 | Python | false | false | 216 | py | 9 | Exercise4.1.py | 9 | 0.541667 | 0.513889 | 0 | 13 | 15.615385 | 49 |
tuananh1007/Picasso | 12,850,542,195,905 | 2d4d18f85a1bc1ccc056b60d74d5fc204c2b33e7 | 2e11bf9a4499a962bacd89a742d9dc0ed7108747 | /tf_ops/mesh/unpooling/tf_mesh_unpool3d.py | 7b3864db5d3f04053014653a6149dabb63a57a43 | [] | no_license | https://github.com/tuananh1007/Picasso | df914332a02bf7940d28d5bae80be1601d59cb0c | 5009dd2312ad88635748788696ecc953d5e55116 | refs/heads/main | "2023-03-30T02:52:51.600705" | "2021-03-30T11:44:30" | "2021-03-30T11:44:30" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tensorflow as tf
import sys, os
base_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(base_dir)
unpool3d_module = tf.load_op_library(os.path.join(base_dir, 'tf_unpool3d_so.so'))
def mesh_interpolate(input, vt_replace, vt_map):
'''
During the decimation, we record the vertex clusters in vt_map.
In unpooling within decoders, we interpolate features for vertices in the
before-decimation mesh from vertex features of its successive decimated mesh.
Here we perform average interpolation to get feature, which means that
the unpooled feature of each output vertex is 1/Nc of that from its input vertex.
Nc is the related vertex cluster size in decimation.
'''
return unpool3d_module.mesh_interpolate(input, vt_replace, vt_map)
@tf.RegisterGradient("MeshInterpolate")
def _mesh_interpolate_grad(op, grad_output):
input = op.inputs[0]
vt_replace = op.inputs[1]
vt_map = op.inputs[2]
grad_input = unpool3d_module.mesh_interpolate_grad(input, grad_output, vt_replace, vt_map)
return [grad_input, None, None]
| UTF-8 | Python | false | false | 1,109 | py | 28 | tf_mesh_unpool3d.py | 19 | 0.714157 | 0.706943 | 0 | 25 | 43.28 | 94 |
thiago-allue/portfolio | 11,089,605,580,602 | 697ff03e063255a0200112804cf3ef495e02b7bf | baf3996414315ffb60470c40c7ad797bf4e6897f | /17_boilerplates/prototypes-master/examples/src/elastic/src/old/guide.py | aaaf2aeadce59a30109e14b385347719f1ae7efd | [
"MIT"
] | permissive | https://github.com/thiago-allue/portfolio | 8fbbecca7ce232567aebe97c19944f444508b7f4 | 0acd8253dc7c5150fef9b2d46eead3db83ca42de | refs/heads/main | "2023-03-15T22:10:21.109707" | "2022-09-14T17:04:35" | "2022-09-14T17:04:35" | 207,919,073 | 0 | 0 | null | false | "2019-11-13T18:18:23" | "2019-09-11T22:40:46" | "2019-09-14T03:15:26" | "2019-11-13T18:18:21" | 180,926 | 0 | 0 | 0 | Python | false | false | import requests
r = requests.get(
"http://api.tvmaze.com/singlesearch/shows?q=big-bang-theory&embed=episodes"
)
import json
jd = json.loads(r.content)
print(jd["_embedded"]["episodes"][0])
import re
ldocs = []
for jo in jd["_embedded"]["episodes"][0:200]:
d = {}
d["id"] = jo["id"]
d["season"] = jo["season"]
d["episode"] = jo["number"]
d["name"] = jo["name"]
d["summary"] = re.sub("<[^<]+?>", "", jo["summary"])
ldocs.append(d)
from elasticsearch import Elasticsearch
es = Elasticsearch([{"host": "localhost", "port": 9200}])
import json
# iterate through documents indexing them
for doc in ldocs:
es.index(index="tvshows", doc_type="bigbang", id=doc["id"], body=json.dumps(doc))
es.get(index="tvshows", doc_type="bigbang", id=2915)
es.search(
index="tvshows",
doc_type="bigbang",
body={"query": {"match": {"summary": "rivalry"}}},
)
es.search(
index="tvshows", doc_type="bigbang", body={"query": {"fuzzy": {"summary": "rival"}}}
)
es.indices.delete(index="bigbang", ignore=[400, 404])
| UTF-8 | Python | false | false | 1,053 | py | 991 | guide.py | 679 | 0.616334 | 0.598291 | 0 | 47 | 21.404255 | 88 |
duncandc/mb2_python | 274,877,916,344 | 296a16ac9b6b20e78694fa5bdeab81caf614593b | 8e4da43aa0a8af1167482c9cbc1ac3ff4410e95e | /groupcat.py | 79b4d4cb8be334b007ce94c98765f85df446de7d | [] | no_license | https://github.com/duncandc/mb2_python | ef8d93548926368a8d230f2d89ed2232659daae3 | 812f9e7c41083e66c77b9ce5d453b25e286685ea | refs/heads/master | "2020-06-18T10:27:08.322401" | "2019-08-02T20:04:07" | "2019-08-02T20:04:07" | 196,270,866 | 2 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
File I/O related to the MBII (sub-)halo catalogs
"""
from __future__ import print_function, division
from mb2_python.utils import packarray
from mb2_python.data import subdtype, groupdtype
import numpy as np
__all__=['gcPath', 'shPath']
__author__=['Duncan Campbell']
def gcPath(basePath, snapNum):
"""
Return absolute path to a group catalog.
"""
gcPath = basePath + '/subhalos'
gcPath += '/' + str(snapNum).zfill(3) + '/'
filePath = gcPath + 'grouphalotab.raw'
return filePath
def shcPath(basePath, snapNum):
"""
Return absolute path to a subhalo catalog.
"""
shcPath = basePath + '/subhalos'
shcPath += '/' + str(snapNum).zfill(3) + '/'
filePath = shcPath + 'subhalotab.raw'
return filePath
def readshc(basePath, snapNum):
"""
Read the basic subhalo catelog.
"""
subhalofile = shcPath(basePath, snapNum)
return np.memmap(subhalofile, mode='r', dtype=subdtype)
def readgc(basePath, snapNum):
"""
read the basic group catelog.
"""
groupfile = gcPath(basePath, snapNum)
return np.memmap(groupfile, mode='r', dtype=groupdtype) | UTF-8 | Python | false | false | 1,141 | py | 13 | groupcat.py | 10 | 0.646801 | 0.643295 | 0 | 49 | 22.306122 | 59 |
joaogomesufal/scrapping_articles | 11,965,778,902,839 | 90534afcdea2f681a4b95e128c9c47c8a6bf645b | 95ce37521e81309df5faaf18bd38117db9fa8278 | /main.py | f518cd1d4cc3f8a403560226b24c0f6b9e591d18 | [] | no_license | https://github.com/joaogomesufal/scrapping_articles | 58bf2d2f4ccc800627063c1ab9ac9413bcc638c3 | bf6715062127330c6a89a575b273dd88034dfda5 | refs/heads/master | "2020-04-24T07:33:53.263967" | "2019-02-26T12:16:15" | "2019-02-26T12:16:15" | 171,802,698 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from _classes.ScrappingArticle import ScrappingArticle
scrapping = ScrappingArticle('https://scholar.google.com.br/scholar', 'data mining', 2015, 2019, 2, 4)
print("Lista de URLs:")
url_list = scrapping.get_url_list()
print(url_list)
print("Lista de Arquivos:")
file_list = scrapping.get_file_list(url_list)
print(file_list)
print("Download de Arquivos:")
scrapping.download_files(file_list)
| UTF-8 | Python | false | false | 397 | py | 5 | main.py | 3 | 0.753149 | 0.72796 | 0 | 14 | 27.357143 | 102 |
zhoutian0930ru/StockPredictionApplication | 15,728,170,260,006 | 410bf801ede64d99e54b2a8c14b72a6f67b72711 | 2e00911d07417094f9fd4015be7bf7c94ead0c7f | /venv1/lib/python3.6/site-packages/intriniorealtime/client.py | 282faa2e415e3872e02432dc466844d44501c05b | [] | no_license | https://github.com/zhoutian0930ru/StockPredictionApplication | 005c2b0519bf0b1c8fef26357f818726effe223d | d0c67f4c2cf4a033dee9d412dc30f10dab0a55be | refs/heads/master | "2020-05-04T03:37:38.274093" | "2019-07-28T14:15:24" | "2019-07-28T14:15:24" | 178,950,193 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
import base64
import requests
import threading
import websocket
import json
import logging
import queue
SELF_HEAL_TIME = 1
HEARTBEAT_TIME = 3
IEX = "iex"
QUODD = "quodd"
PROVIDERS = [IEX, QUODD]
MAX_QUEUE_SIZE = 10000
class IntrinioRealtimeClient:
def __init__(self, options):
if options is None:
raise ValueError("Options parameter is required")
self.options = options
self.username = options['username']
self.password = options['password']
self.provider = options['provider']
if 'channels' in options:
self.channels = set(options['channels'])
else:
self.channels = set()
if 'logger' in options:
self.logger = options['logger']
else:
log_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
log_handler = logging.StreamHandler()
log_handler.setFormatter(log_formatter)
self.logger = logging.getLogger('intrinio_realtime')
if 'debug' in options and options['debug'] == True:
self.logger.setLevel(logging.DEBUG)
else:
self.logger.setLevel(logging.INFO)
self.logger.addHandler(log_handler)
if 'max_queue_size' in options:
self.quotes = queue.Queue(maxsize=options['max_queue_size'])
else:
self.quotes = queue.Queue(maxsize=MAX_QUEUE_SIZE)
if not self.username:
raise ValueError("Parameter 'username' must be specified")
if not self.password:
raise ValueError("Parameter 'password' must be specified")
if 'on_quote' in options:
if not callable(options['on_quote']):
raise ValueError("Parameter 'on_quote' must be a function")
else:
self.on_quote = options['on_quote']
else:
self.on_quote = None
if self.provider not in PROVIDERS:
raise ValueError(f"Parameter 'provider' is invalid, use one of {PROVIDERS}")
self.ready = False
self.token = None
self.ws = None
self.quote_receiver = None
self.quote_handler = None
self.joined_channels = set()
self.last_queue_warning_time = 0
QuoteHandler(self).start()
Heartbeat(self).start()
def auth_url(self):
if self.provider == IEX:
return "https://realtime.intrinio.com/auth"
elif self.provider == QUODD:
return "https://api.intrinio.com/token?type=QUODD"
def websocket_url(self):
if self.provider == IEX:
return "wss://realtime.intrinio.com/socket/websocket?vsn=1.0.0&token=" + self.token
elif self.provider == QUODD:
return "wss://www5.quodd.com/websocket/webStreamer/intrinio/" + self.token
def connect(self):
self.logger.info("Connecting...")
self.ready = False
self.joined_channels = set()
if self.ws:
self.ws.close()
time.sleep(1)
try:
self.refresh_token()
self.refresh_websocket()
except Exception as e:
self.logger.error(f"Cannot connect: {e}")
return self.self_heal()
def disconnect(self):
self.ready = False
self.joined_channels = set()
if self.ws:
self.ws.close()
time.sleep(1)
def keep_alive(self):
while True:
pass
def refresh_token(self):
response = requests.get(self.auth_url(), auth=(self.username, self.password))
if response.status_code != 200:
raise RuntimeError("Auth failed")
self.token = response.text
self.logger.info("Authentication successful!")
def refresh_websocket(self):
self.quote_receiver = QuoteReceiver(self)
self.quote_receiver.start()
def self_heal(self):
time.sleep(SELF_HEAL_TIME)
self.connect()
def on_connect(self):
self.ready = True
self.refresh_channels()
def on_queue_full(self):
if time.time() - self.last_queue_warning_time > 1:
self.logger.error("Quote queue is full! Dropped some new quotes")
self.last_queue_warning_time = time.time()
def join(self, channels):
if isinstance(channels, str):
channels = [channels]
self.channels = self.channels | set(channels)
self.refresh_channels()
def leave(self, channels):
if isinstance(channels, str):
channels = [channels]
self.channels = self.channels - set(channels)
self.refresh_channels()
def leave_all(self):
self.channels = set()
self.refresh_channels()
def refresh_channels(self):
if self.ready != True:
return
# Join new channels
new_channels = self.channels - self.joined_channels
self.logger.debug(f"New channels: {new_channels}")
for channel in new_channels:
msg = self.join_message(channel)
self.ws.send(json.dumps(msg))
self.logger.info(f"Joined channel {channel}")
# Leave old channels
old_channels = self.joined_channels - self.channels
self.logger.debug(f"Old channels: {old_channels}")
for channel in old_channels:
msg = self.leave_message(channel)
self.ws.send(json.dumps(msg))
self.logger.info(f"Left channel {channel}")
self.joined_channels = self.channels.copy()
self.logger.debug(f"Current channels: {self.joined_channels}")
def join_message(self, channel):
if self.provider == IEX:
return {
'topic': self.parse_iex_topic(channel),
'event': 'phx_join',
'payload': {},
'ref': None
}
elif self.provider == QUODD:
return {
'event': 'subscribe',
'data': {
'ticker': channel,
'action': 'subscribe'
}
}
def leave_message(self, channel):
if self.provider == IEX:
return {
'topic': self.parse_iex_topic(channel),
'event': 'phx_leave',
'payload': {},
'ref': None
}
elif self.provider == QUODD:
return {
'event': 'unsubscribe',
'data': {
'ticker': channel,
'action': 'unsubscribe'
}
}
def parse_iex_topic(self, channel):
if channel == "$lobby":
return "iex:lobby"
elif channel == "$lobby_last_price":
return "iex:lobby:last_price"
else:
return f"iex:securities:{channel}"
class QuoteReceiver(threading.Thread):
def __init__(self, client):
threading.Thread.__init__(self, args=(), kwargs=None)
self.daemon = True
self.client = client
self.enabled = True
def run(self):
self.client.ws = websocket.WebSocketApp(
self.client.websocket_url(),
on_open = self.on_open,
on_close = self.on_close,
on_message = self.on_message,
on_error = self.on_error
)
self.client.logger.debug("QuoteReceiver ready")
self.client.ws.run_forever()
self.client.logger.debug("QuoteReceiver exiting")
def on_open(self, ws):
self.client.logger.info("Websocket opened!")
if self.client.provider == IEX:
self.client.on_connect()
def on_close(self, ws):
self.client.logger.info("Websocket closed!")
def on_error(self, ws, error):
self.client.logger.error(f"Websocket ERROR: {error}")
self.client.self_heal()
def on_message(self, ws, message):
message = json.loads(message)
self.client.logger.debug(f"Received message: {message}")
quote = None
if self.client.provider == IEX:
if message['event'] == "quote":
quote = message['payload']
elif self.client.provider == QUODD:
if message['event'] == 'info' and message['data']['message'] == 'Connected':
self.client.on_connect()
if message['event'] == 'quote' or message['event'] == 'trade':
quote = message['data']
if quote:
try:
self.client.quotes.put_nowait(quote)
except queue.Full:
self.client.on_queue_full()
class QuoteHandler(threading.Thread):
def __init__(self, client):
threading.Thread.__init__(self, args=(), kwargs=None)
self.daemon = True
self.client = client
def run(self):
self.client.logger.debug("QuoteHandler ready")
while True:
item = self.client.quotes.get()
backlog_len = self.client.quotes.qsize()
if callable(self.client.on_quote):
try:
self.client.on_quote(item, backlog_len)
except Exception as e:
self.client.logger.error(e)
class Heartbeat(threading.Thread):
def __init__(self, client):
threading.Thread.__init__(self, args=(), kwargs=None)
self.daemon = True
self.client = client
def run(self):
self.client.logger.debug("Heartbeat ready")
while True:
time.sleep(HEARTBEAT_TIME)
if self.client.ready and self.client.ws:
msg = None
if self.client.provider == IEX:
msg = {'topic': 'phoenix', 'event': 'heartbeat', 'payload': {}, 'ref': None}
elif self.client.provider == QUODD:
msg = {'event': 'heartbeat', 'data': {'action': 'heartbeat', 'ticker': int(time.time()*1000)}}
if msg:
self.client.logger.debug(msg)
self.client.ws.send(json.dumps(msg))
self.client.logger.debug("Heartbeat!")
| UTF-8 | Python | false | false | 10,537 | py | 22 | client.py | 14 | 0.530701 | 0.528424 | 0 | 318 | 32.13522 | 114 |
jiewu-stanford/leetcode | 14,250,701,521,205 | 6f8feacc8be74cd5382cebdf629582765db1b75d | e90a772733e73e45b4cdbb5f240ef3b4a9e71de1 | /443. String Compression.py | 7b3c99181fa20c5abbb7b9dbcb358cb76b759b88 | [] | no_license | https://github.com/jiewu-stanford/leetcode | 102829fcbcace17909e4de49c01c3d705b6e6e3a | cbd47f713d3307f900daf55c8f27301c70542fc4 | refs/heads/master | "2022-05-28T18:25:00.885047" | "2022-05-18T05:16:22" | "2022-05-18T05:16:22" | 214,486,622 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Title : 443. String Compression
Problem : https://leetcode.com/problems/string-compression/
'''
'''
two-pointer strategy, one read pointer (i) + one write pointer (j)
Reference: https://leetcode.com/problems/string-compression/discuss/92568/Python-Two-Pointers-O(n)-time-O(1)-space
'''
class Solution:
def compress(self, chars: List[str]) -> int:
i = j = 0
while i < len(chars):
c, freq = chars[i], 0
while i < len(chars) and chars[i] == c:
i, freq = i+1, freq+1
chars[j], j = c, j+1
if freq > 1:
for digit in str(freq):
chars[j], j = digit, j+1
return j | UTF-8 | Python | false | false | 692 | py | 371 | 443. String Compression.py | 370 | 0.544798 | 0.521676 | 0 | 20 | 33.65 | 114 |
knowledgetranslation/citation-dedupe | 8,203,387,555,402 | beaf6b2776fc050213f9f6ccb5aa7d9558d3c6c2 | 59623891733df19652c97f45b221ea5282f74c77 | /parser.py | efda106c18c0cd72826d3ec70b3d1d00242d666b | [] | no_license | https://github.com/knowledgetranslation/citation-dedupe | b9c4784c7909749b50907f3a51f3d12a5738bea1 | 7c102bda9bfe0b920830421557c85c887abdb74a | refs/heads/master | "2020-04-10T14:56:52.701882" | "2015-06-10T19:09:05" | "2015-06-10T19:09:05" | 32,325,476 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3.4
# -*- coding: utf-8 -*-
import os, sys
import mysql.connector
from lxml import etree, objectify
import logging
import json
# import lxml.usedoctest
# import xml.etree.cElementTree as ET
# FILES VARS
PATH = os.path.abspath('.') +'/'
XML_FILE = PATH + 'lite_sample4.xml' # 'CS Lit Search Comp - all [12530].xml' # 'lite_sample.xml'
SCHEMA_FILE = PATH + 'xml.xsd' # 'xml_doll_smart.txt' #
PARAMS_FILE = PATH + 'xml_parameters.json'
MYSQL_CONFIG = PATH + 'db.cnf'
class parser(object):
# noinspection PyPep8Naming,PyPep8Naming,PyPep8Naming
def __init__(self, inFile=XML_FILE, tabName='', paramsFile=PARAMS_FILE):
self.tableName = tabName
if not self.getParameters(paramsFile):
logging.warning('WARNING! Please, choose other Parameters file against %s' % paramsFile)
self.resetExport()
# sys.exit(0)
quit
# return None
# print( self.tableName)
self.recCount = 0
self.xmlFile = inFile
self.con = self.connectDB(MYSQL_CONFIG)
self.cursor = self.con.cursor(buffered = False)
self.cursor.execute("SET net_write_timeout = 3600")
# noinspection PyPep8Naming,PyPep8Naming
def startParse(self):
xml = self.loadXml(self.xmlFile, SCHEMA_FILE)
if xml is not None:
logging.info('Creating Data table')
self.createDataTable() # Create Data table
logging.info('Start parsing')
isParsed = self.parseXml(xml) # Parsing XML and load into Data table
if isParsed:
logging.info('Data uploaded into DB')
self.closeAllConnections()
return True
else:
logging.warning('Error! Data wasn\'t uploaded into DB')
self.closeAllConnections()
return False
else:
logging.warning('Cannot read XML data from %s' % XML_FILE)
self.resetParser()
return False
# noinspection PyPep8Naming,PyPep8Naming,PyPep8Naming
@staticmethod
def loadXml(inFile, schemaFile = SCHEMA_FILE):
# schema = etree.XMLSchema(file=schemaFile)
# parser = objectify.makeparser(schema=schema)
# if True:# validateXml(inFile, parser):
try:
# doctree = objectify.parse(inFile, parser=parser)
doctree = objectify.parse(inFile)
except Exception as e: #XMLSyntaxError:
# else:
return None
try:
root = doctree.getroot()
return root
except Exception as e: #ValueError:
return None
# noinspection PyPep8Naming,PyPep8Naming,PyPep8Naming
def parseXml(self, xmlObject):
# data = {}
columnMapInverted = {self.mapColumnToXml[k] : k for k in self.mapColumnToXml}
for r in xmlObject.records.record:
d = dict((k,'') for k in self.dataElements) # Create dictionary for
for e in r.getchildren():
if e.tag in self.dataElements:
if e.text is not None:
d[e.tag] = e.text
else:
try:
# d[e.tag] = str(e.getchildren()[0]).encode('unicode-escape')
d[e.tag] = e.findtext('*').encode('unicode-escape')
except:
d[e.tag] = ''
elif e.tag in self.groupedElements.values(): # Check if element is Parent for Group
values_list = []
for s in e.getchildren():
if s.tag in [self.groupedElements[v] for v in self.listElements]: # Check if element within Group should be in a List
for aa in s.getiterator(self.listElements): # Select the elements which should be in a List
values_list.append(aa.findtext('*')) # Get first text in tag 'style' element
d[aa.tag] = self.lineSeparator.join(values_list) # Set Column value = List converted into string with line separator
elif s.tag in self.dataElements:
# if s.hasattr
try:
d[s.tag] = s.findtext('*').encode('unicode-escape')
# d[s.tag] = str(s.getchildren()[0]).encode('unicode-escape')
except:
d[s.tag] = ''
self.loadDataIntoDb(d, r)
self.recCount += 1
return True
# noinspection PyPep8Naming,PyPep8Naming
def loadDataIntoDb(self, dataObject, xml):
# q_columns = ''
# q_empty = ''
# Generate Columns list for insert into table
q_columns = ', '.join(['%s' % self.mapColumnToXml.get(k, k) for k in dataObject.keys()])
# Generate '%s' list for parametric insert into table
q_empty = ', '.join(['%s' for k in dataObject.keys()])
values = list(dataObject.values())
if self.saveOriginalXml:
q_columns += ', %s' % self.originalXml
q_empty += ', %s'
values.append(etree.tostring(xml))
q = "insert into %s (%s) values(%s)" % (self.tableName, q_columns, q_empty) # Prepare sql query
self.cursor.execute (q, values) # Run sql query
self.con.commit()
# noinspection PyPep8Naming,PyPep8Naming,PyPep8Naming
def createDataTable(self):
# tabFields = ''
# Generate Columns list for new Table
tabFields = ', '.join(['%s %s' % (field, 'INTEGER' if field in self.integerColumns else 'MEDIUMTEXT') for field in self.columns]) #fields.split(', ')])
q = "DROP TABLE IF EXISTS %s" % self.tableName
self.cursor.execute(q)
q = "CREATE TABLE %s (%s) CHARACTER SET utf8 COLLATE %s" % (self.tableName, tabFields, self.tableCollate)
self.cursor.execute(q)
# noinspection PyPep8Naming,PyPep8Naming
def getParameters(self, parametersFile):
""" Define the Performance parameters and Database source table. """
if os.path.exists(parametersFile):
logging.info('reading data structure from %s' % parametersFile)
with open(parametersFile) as df :
data = json.load(df)
params = data['source_db']
self.tableCollate = params['collate']
if self.tableName == '': # added for web API v1
self.tableName = params['tab_name']
self.tablePK = params['tab_PK']
self.columns = params['tab_columns'] # ENDNOTE_COLUMNS
self.originalXml = params['original_xml_column']
self.saveOriginalXml = data['parser']['save_original_xml']
self.groupedElements = data['grouped_elements']
self.mapColumnToXml = data['map_column_to_xml']
self.listElements = data['list_elements']
self.dataElements = data['data_elements']
self.integerColumns = data['integer_columns']
self.lineSeparator = data['line_separator']
return True
else:
logging.warning('WARNING! Could not read Parameters from %s' % parametersFile)
return False
# noinspection PyPep8Naming,PyPep8Naming
@staticmethod
def connectDB(dbOptionsFile):
""" You need to fill option file `db.cnf` with your mysql database information """
return mysql.connector.connect(option_files = dbOptionsFile)
# noinspection PyPep8Naming
def closeAllConnections(self):
if hasattr(self, 'cursor'):
self.cursor.close()
if hasattr(self, 'con'):
self.con.close()
# noinspection PyPep8Naming
def resetParser(self):
self.recCount = 0
self.closeAllConnections
# noinspection PyPep8Naming
def start(xmlFile = XML_FILE):
global parser
log_level = logging.WARNING
initLogging(log_level)
print('Parsering in progress...')
parser = parser(xmlFile) # initiate analyser
parser.startParse()
print('%s records were uploaded' % parser.recCount)
def reset():
parser.resetParser()
# noinspection PyPep8Naming
def initLogging(log_level = logging.WARNING):
logging.getLogger().setLevel(log_level)
# noinspection PyPep8Naming,PyPep8Naming
def validateXml(xmlFile, parser):
# schema = etree.XMLSchema(file=schemaFile)
# parser = objectify.makeparser(schema=schema)
try:
with open(xmlFile, 'r') as f:
etree.fromstring(f.read(), parser)
logging.info('File validation was successful.')
return True
except:
logging.warning('WARNING! File %s validation was fail.' % xmlFile) #parametersFile)
return False
# return objectify.parse(xmlFile, parser=parser)
if __name__ == '__main__':
start('lite_sample4.xml') #'CS Lit Search Comp - all [12530].xml') | UTF-8 | Python | false | false | 9,306 | py | 23 | parser.py | 9 | 0.564689 | 0.558994 | 0 | 245 | 36.987755 | 159 |
jdleo/Leetcode-Solutions | 609,885,367,083 | 8b087c2d7ca5930b71315c2e106d1ad80f00bfca | d62d21ea827d5d352515afb07623160ef48c0343 | /solutions/1748/main.py | b3eab5ea55f47155a96762d9843906f491ef0677 | [] | no_license | https://github.com/jdleo/Leetcode-Solutions | e53e77404e358bfa303bb0f873ed9cfe6cb575c0 | cf35874b4ca7fdcdc3e0e7beb0a255d8e98d2385 | refs/heads/main | "2023-03-04T12:26:04.093126" | "2021-02-20T05:40:55" | "2021-02-20T05:40:55" | 326,497,152 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
def sumOfUnique(self, nums: list[int]) -> int:
# array to hold count for each num 1 <= nums[i] <= 100
counts = [0] * 101
# fill counts
for num in nums: counts[num] += 1
# result (sum of uniques)
res = 0
# go thru counts
for i in range(len(counts)):
# if this is a unique number, add number to res
if counts[i] == 1: res += i
return res | UTF-8 | Python | false | false | 451 | py | 136 | main.py | 135 | 0.518847 | 0.494457 | 0 | 13 | 33.769231 | 62 |
kaituoxu/PyTorch-ASR-AM | 13,125,420,090,203 | 670260cef3b9474a8776f4c99c460a65b58127ae | 475a7a542514167d27e22a51003ac83bb3fd933f | /kaldi-python/scripts/compute-global-cmvn-stats.py | f0362cc152a45353a5aeeb95370e147e503d9f0b | [
"Apache-2.0"
] | permissive | https://github.com/kaituoxu/PyTorch-ASR-AM | ee13061bbfd5e57aba4f60f6e0093855d022656c | 2ea24e9f49b89741979e517cbb6c517f79122c6b | refs/heads/master | "2021-03-27T14:04:45.185189" | "2017-11-30T08:27:36" | "2017-11-30T08:27:36" | 104,466,718 | 4 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
'''
Created on Aug 6, 2014
@author: chorows
'''
import os
import sys
import logging
import argparse
import numpy as np
import kaldi_io
if __name__ == '__main__':
print >>sys.stderr, os.path.basename(sys.argv[0]), " ".join(sys.argv[1:])
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description='Accumulate global stats for feature normalization: mean and std')
parser.add_argument('in_rxfilename')
parser.add_argument('out_wxfilename')
args = parser.parse_args()
sum = None
sum_sq = None
n = 0
with kaldi_io.SequentialBaseFloatMatrixReader(args.in_rxfilename) as reader:
for name,feats in reader:
nframes, nfeats = feats.shape
n += nframes
if sum is None:
sum = np.zeros((nfeats,))
sum_sq = np.zeros((nfeats,))
sum += feats.sum(0)
sum_sq += (feats*feats).sum(0)
mean = np.asarray(sum/n, dtype=kaldi_io.KALDI_BASE_FLOAT())
std = np.asarray(np.sqrt(sum_sq/n - mean**2),
dtype=kaldi_io.KALDI_BASE_FLOAT())
with kaldi_io.BaseFloatVectorWriter(args.out_wxfilename) as w:
w['mean'] = mean
w['std'] = std
| UTF-8 | Python | false | false | 1,286 | py | 27 | compute-global-cmvn-stats.py | 21 | 0.588647 | 0.580093 | 0 | 48 | 25.791667 | 115 |
xelaxela13/stock | 11,390,253,314,229 | bcb441ad040031a8625079686930dbe0dfa0e118 | b3452d0dc1650ac75a551ce93e345d79df01c309 | /set_env_vars.py | 3feb571a066300989f21ddee3242adfa2b7fbd44 | [
"MIT"
] | permissive | https://github.com/xelaxela13/stock | 149bf10e93217d7efece76b3ac5ac2498c0ddca7 | ef7df50194be340e7faff915e9de4e3b1ade4eca | refs/heads/master | "2022-08-13T16:19:09.551752" | "2020-02-07T17:55:17" | "2020-02-07T17:55:17" | 169,874,047 | 1 | 0 | MIT | false | "2022-04-22T22:25:21" | "2019-02-09T14:17:25" | "2020-04-04T10:55:08" | "2022-04-22T22:25:18" | 10,479 | 1 | 0 | 1 | CSS | false | false | import random
import string
import os.path
def run():
config = {}
while True:
try:
config['SETTINGS'] = {
'SECRET_KEY': random_string(),
'ALLOWED_HOSTS': '*',
'DEBUG': True,
'IPSTACK_ACCESS_KEY': '0e3e331a2e84afc272c53c97982cc67c',
'GMAIL_PASSWORD': '',
'GMAIL_USER': '',
'MEMCACHED_HOST': 'memcached',
'MEMCACHED_PORT': '11211'
}
config['DB'] = {
'name': 'postgres',
'USER': 'postgres',
'HOST': 'db',
'PORT': '5432'
}
config['common'] = {
'PROJECT_ROOT': '/home/user/stock',
'IMAGE': 'xelaxela13/stock:latest'
}
break
except ValueError:
continue
file = '.env'
if os.path.isfile(file):
print('File {} already exist, cannot rewrite it. '.format(file))
return
try:
with open(file, 'w') as f:
for title, conf in config.items():
f.writelines('[' + str(title).upper() + ']\n')
for key, value in conf.items():
f.writelines('\t' + str(key).upper() + '=' + str(value) + '\n')
print('Config file was created success')
return
except Exception as err:
if os.path.isfile(file):
os.remove(file)
print(err)
return
def random_string():
return "".join(
[random.SystemRandom().choice("{}{}{}".format(string.ascii_letters, string.digits, string.punctuation))
for _ in range(50)])
if __name__ == '__main__':
run()
| UTF-8 | Python | false | false | 1,736 | py | 70 | set_env_vars.py | 39 | 0.456221 | 0.437212 | 0 | 61 | 27.459016 | 111 |
oscar503sv/basicos_python | 42,949,694,097 | 9fa414230d69fde6199564caacce53427599d5c5 | 2cf50a2ff667e6fb686de55baf0e1329b386c777 | /01-ejercicios/triangulo.py | 4675a323640099e85b9fe5f6c11f6f9f09af15e8 | [] | no_license | https://github.com/oscar503sv/basicos_python | d41c5baaa425e18c47bb8693301e4020033a3c92 | dc1ebf240d099ef2de92e238fcd095ab29decc9e | refs/heads/master | "2022-06-30T02:26:59.080959" | "2020-05-14T05:51:56" | "2020-05-14T05:51:56" | 263,519,570 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Dado de los valores ingresados por el usuario (base, altura) calcular y mostrar en pantalla el área de un triángulo.
print("**********CALCULO DE AREA DE UN TRIANGULO**********")
print("Ingresa la base del triángulo:")
base = float(input())
print ("Ingresa la altura del triángulo:")
altura = float(input())
area = (base*altura)/2
print("Área:",area)
| UTF-8 | Python | false | false | 359 | py | 60 | triangulo.py | 58 | 0.689266 | 0.686441 | 0 | 11 | 31.181818 | 117 |
JokerWDL/PyAnomaly | 3,023,657,007,622 | 681750dbf489a6a32e9ef1d6f64d493cc252b272 | f6c69a7f7f1bbae5fd5473dfaac5ef5fad840d58 | /lib/datatools/build/__init__.py | 2dcefc70c84d3f4061e0e716788bdf5dca8ba63f | [
"Apache-2.0"
] | permissive | https://github.com/JokerWDL/PyAnomaly | 8c5ca4ca705a1251c70ff1f36c908c8f6f75e7d8 | cf93437e5d7ae87fa916141cf4b5cc2e929b8199 | refs/heads/master | "2022-11-05T11:31:42.345422" | "2020-06-22T17:21:20" | "2020-06-22T17:21:20" | 274,295,638 | 1 | 0 | Apache-2.0 | true | "2020-06-23T03:04:32" | "2020-06-23T03:04:31" | "2020-06-23T03:04:30" | "2020-06-22T17:22:06" | 5,820 | 0 | 0 | 0 | null | false | false | from .. import dataclass # trigger the register in the dataclass package
| UTF-8 | Python | false | false | 74 | py | 80 | __init__.py | 67 | 0.783784 | 0.783784 | 0 | 1 | 72 | 72 |
nilswiersma/dfaplayground | 5,523,327,977,312 | a35e82a2a68b5261aef5bdc9e7bd7bc9cce200e5 | bd3aa6c6847f67597642ce8c9c7e8f9d1cd7580f | /classic.py | ea897e27b2bb9e5c22e774053afeff006439919c | [] | no_license | https://github.com/nilswiersma/dfaplayground | 4c223620f5457e7cd75d0ab21cd5107be7e80f9b | f87baf2c3c1a9144a6d9d756409614c41e642317 | refs/heads/master | "2022-07-19T18:48:18.560203" | "2020-05-13T14:44:28" | "2020-05-13T14:44:28" | 257,861,193 | 5 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from submod.aes.aes import AES, matrix2bytes
from submod.JeanGrey.phoenixAES import phoenixAES
import random, os
import faultmodels
intermediates = [
# 'input',
# 'ark0',
# 'sb1',
# 'sr1',
# 'mc1',
# 'ark1',
# 'sb2',
# 'sr2',
# 'mc2',
# 'ark2',
# 'sb3',
# 'sr3',
# 'mc3',
# 'ark3',
# 'sb4',
# 'sr4',
# 'mc4',
# 'ark4',
# 'sb5',
# 'sr5',
# 'mc5',
# 'ark5',
# 'sb6',
# 'sr6',
# 'mc6',
# 'ark6',
# 'sb7',
# 'sr7',
'mc7',
'ark7',
'sb8',
'sr8',
'mc8',
'ark8',
'sb9',
'sr9',
# 'mc9',
# 'ark9',
# 'sb10',
# 'sr10',
# 'ark10',
]
key = b'SiDeChaNneLMarVl'
message = b'sUpErSEcREtmESsG'
ctx = AES(bytes(key))
ciphertext = ctx.encrypt_block(bytes(message))
iks = [b''.join(map(lambda x: bytes(x), ik)) for ik in ctx._key_matrices]
print(f'key : {key.hex()}')
print(f'message : {message.hex()}')
print(f'ciphertext : {ciphertext.hex()}')
ctr = 0
for ik in iks:
print(f'ik{ctr:02} : {ik.hex()}')
ctr += 1
print('--encrypt--')
for intermediate in intermediates:
print(f'--{intermediate}--')
faulted = []
for _ in range(10):
faulted.append(os.urandom(16))
for _ in range(200):
faulted.append(ctx.encrypt_block(bytes(message), glitch_at=intermediate, glitch=faultmodels.single_bit_flip))
# faulted.append(ctx.encrypt_block(bytes(message), glitch_at=intermediate, glitch=single_byte_corruption))
# faulted.append(ctx.encrypt_block(bytes(message), glitch_at=intermediate, glitch=single_col_corruption))
# faulted.append(ctx.encrypt_block(bytes(message), glitch_at=intermediate, glitch=single_byte_multi_bit_flip))
# faulted.append(ctx.encrypt_block(bytes(message), glitch_at=intermediate, glitch=double_byte_multi_bit_flip))
# faulted.append(ctx.encrypt_block(bytes(message), glitch_at=intermediate, glitch=triple_byte_multi_bit_flip))
# print(f'faulted : {faulted[-1].hex()}')
random.shuffle(faulted)
# roundkey, idx, candidates = phoenixAES.crack_bytes(faulted, ciphertext, verbose=0, encrypt=True)
# print(f"roundkey : {''.join(['%02x' % x if x is not None else '..' for x in roundkey])} ({idx}, {intermediate})")
roundkey, idx, candidates = phoenixAES.crack_bytes(faulted, ciphertext, verbose=0, encrypt=True)
print(f"roundkey : {''.join(['%02x' % x if x is not None else '..' for x in roundkey])} ({idx}, {intermediate})")
if None in roundkey:
# roundkey, idx, candidates = phoenixAES.crack_bytes(
# phoenixAES.convert_r8faults_bytes(faulted, ciphertext), ciphertext, verbose=0, encrypt=True)
# print(f"roundkey : {''.join(['%02x' % x if x is not None else '..' for x in roundkey])} ({idx}, {intermediate})")
roundkey, idx, candidates = phoenixAES.crack_bytes(
phoenixAES.convert_r8faults_bytes(faulted, ciphertext), ciphertext, verbose=0, encrypt=True)
print(f"roundkey : {''.join(['%02x' % x if x is not None else '..' for x in roundkey])} ({idx}, {intermediate})")
# print(candidates)
# print('--encrypt--')
# for intermediate in intermediates:
# print(f'--{intermediate}--')
# output = []
# candidates=[[], [], [], []]
# recovered=[False, False, False, False]
# key=[None]*16
# prev=''
# for ctr in range(int(1e6)):
# r = random.randint(0,255)
# if r == 0:
# output.append(os.urandom(16))
# elif r == 1:
# output.append(ctx.encrypt_block(bytes(message), glitch_at=intermediate, glitch=single_bit_flip))
# else:
# output.append(ciphertext)
# phoenixAES.crack_bytes_rolling(output[-1], ciphertext, candidates, recovered, key, verbose=0, encrypt=True)
# new=''.join(['%02x' % x if x is not None else '..' for x in key])
# if prev != new:
# print(new, ctr)
# prev = new
# # print(recovered)
# if not False in recovered:
# break
# print(ctr)
# print(iks[-1].hex(), iks[-1].hex() == ''.join(['%02x' % x if x is not None else '..' for x in key]))
# roundkey, idx, candidates = phoenixAES.crack_bytes(output, ciphertext, verbose=0, encrypt=True)
# print(f"roundkey : {''.join(['%02x' % x if x is not None else '..' for x in roundkey])} ({idx}, {intermediate})")
# print('--decrypt--')
# for intermediate in intermediates:
# faulted = []
# for _ in range(50):
# faulted.append(ctx.decrypt_block(bytes(ciphertext), glitch_at=intermediate))
# # print(f'faulted : {faulted[-1].hex()}')
# recovered, idx = phoenixAES.crack_bytes(faulted, message, verbose=0, encrypt=False)
# if recovered:
# print(f'recovered : {recovered.hex()} ({idx}, {intermediate})')
faulted = [
bytes.fromhex('ab73e91362fc2db70d99cce0aa2deecf'),
bytes.fromhex('abf73f13ff062db79a99cc3faa2d1eca'),
bytes.fromhex('abf7745fff5330b769cbcc3f882d1e3f'),
bytes.fromhex('ab06eb13e8742db74a99ccbdaa2db4e9'),
bytes.fromhex('abdbe9ef62fc87b70dbccc9c5e2d49cf'),
bytes.fromhex('abcee9139dfc2db70d99ccadaa2d0bcf'),
bytes.fromhex('abf74741ff2e0fb76e55cc3f772d1eab'),
bytes.fromhex('abf79613ff0a2db73899cc3faa2d1ecb'),
bytes.fromhex('e1f7d613ffa82dc46c993d3faa8f1ec4'),
bytes.fromhex('5af72013ff732dc41899013faa781ee0'),
bytes.fromhex('abf7e970fffc1ab70d8ccc3f9f2d1ecf'),
bytes.fromhex('ab18e923eefcb4b70db3cc81fa2d5fcf'),
bytes.fromhex('8b36e9131afc2d970d99cf36aa8f49cf'),
bytes.fromhex('ab4334130af12db78499ccd3aa2de639'),
bytes.fromhex('aef7e913fffc2d8f0d99643faabe1ecf'),
bytes.fromhex('abf7e15effbf55b730c8cc3f772d1e45'),
bytes.fromhex('ab6ae9133afc2db70d99cc85aa2d52cf'),
bytes.fromhex('417ce91322fc2d820d9965d8aa372ecf'),
bytes.fromhex('b2f7e97efffc15a50dfac03f32561ecf'),
bytes.fromhex('c7dbe91389fc2dc60d99cdcdaa8cd7cf'),
bytes.fromhex('ab62e913d4fc2db70d99ccc8aa2de8cf'),
bytes.fromhex('abf7e994fffc67b70d41cc3ff22d1ecf'),
bytes.fromhex('ab9ee9b234fc70b70df2cc40862d9ecf'),
bytes.fromhex('abf76713ffca2db70899cc3faa2d1ee7'),
bytes.fromhex('c7f7e9cefffc12f00dffd83f19661ecf'),
bytes.fromhex('ab657a133b192db7e499cc51aa2dc7e6'),
bytes.fromhex('72f7e941fffc037a0d08373fb5591ecf'),
bytes.fromhex('5af7e954fffca6c40d61013f90781ecf'),
bytes.fromhex('ab52771305912db71099ccbfaa2dfa16'),
bytes.fromhex('9da6e913c8fc2d180d99148caad236cf'),
bytes.fromhex('da2fe913f0fc2ddb0d99f8e1aaa2fecf'),
bytes.fromhex('abf72013ffdc2db78e99cc3faa2d1e84'),
bytes.fromhex('21b2e91319fc2d440d994d8baa1852cf'),
bytes.fromhex('00f7f713ff392d8acd992d3faa5d1e26'),
bytes.fromhex('ab3de9136afc2db70d99cc0faa2d7ccf'),
bytes.fromhex('8cf7f413ffc82ddbf399e03faa071e9d'),
bytes.fromhex('abfb341381372db74799cce7aa2d2e74'),
bytes.fromhex('11f79713ffdb2d8bbc99623faad51e05'),
bytes.fromhex('abcec31309e42db72899ccd1aa2d2ebe'),
bytes.fromhex('abae3f135aca2db7c299ccd3aa2d95ee'),
bytes.fromhex('213be91325fc2d440d994d24aa18a0cf'),
bytes.fromhex('ab949713a9612db78d99cceaaa2d2c7c'),
bytes.fromhex('abf7e963fffc5cb70dd9cc3f8c2d1ecf'),
bytes.fromhex('fa1ce913edfc2dfb0d9956b6aa45a9cf'),
bytes.fromhex('8bd4e91332fc2da50d99ace8aa1bdfcf'),
bytes.fromhex('daa6e913f1fc2d6c0d994de8aa8e1ccf'),
bytes.fromhex('ab282713d37d2db7ab99cc73aa2dee6d'),
bytes.fromhex('ab6096135a592db7aa99cc84aa2dee12'),
bytes.fromhex('0ef7e95efffca0d20d55593f264f1ecf'),
bytes.fromhex('daf73813ff282d445199d83faa0e1e6e'),
bytes.fromhex('abfbbb1381f92db71a99cce7aa2d2e75'),
bytes.fromhex('4620e9139dfc2dd80d9985e5aa3c54cf'),
bytes.fromhex('abf765a5ffce57b73f60cc3f822d1e4a'),
bytes.fromhex('ab9ee913f7fc2db70d99ccceaa2d45cf'),
bytes.fromhex('c8a5e9136cfc2d8e0d99472faace0fcf'),
bytes.fromhex('46ece91343fc2d170d9990a6aacc00cf'),
bytes.fromhex('abf738d3ff5f45b794bccc3f742d1e47'),
bytes.fromhex('6ff7e913fffc2d940d993c3faa0a1ecf'),
bytes.fromhex('9bf7e9a9fffcb6be0d26793f73c41ecf'),
bytes.fromhex('2dcde9135dfc2d330d999873aaa0f8cf'),
bytes.fromhex('007fe913a6fc2d8a0d992d04aa5d40cf'),
bytes.fromhex('abf7e9d1fffc88b70db6cc3f142d1ecf'),
bytes.fromhex('11f7e9f2fffcfca20dde7a3fd8e31ecf'),
bytes.fromhex('abf7f913ff322db7b299cc3faa2d1ec9'),
bytes.fromhex('abc9e9411dfca0b70dc8ccc0e02dc2cf'),
bytes.fromhex('cd7be913f8fc2dae0d9970d0aafef6cf'),
bytes.fromhex('abf72a93ff2ed4b7499dcc3f0f2d1efc'),
bytes.fromhex('abffe91333fc2db70d99cc12aa2de3cf'),
bytes.fromhex('abf797c6ff3868b73903cc3f0d2d1eb3'),
bytes.fromhex('93f7e9bffffcb6ef0dfac73f36ac1ecf'),
bytes.fromhex('5df78813ff742dc4a799cf3faaa51ed3'),
bytes.fromhex('abf75e11ff50b1b73660cc3f6b2d1e66'),
bytes.fromhex('abf7674eff0b4cb7af94cc3faf2d1eea'),
bytes.fromhex('abf7e954fffc78b70d14cc3fd62d1ecf'),
bytes.fromhex('93f7e613ff962dfa0099783faa051e32'),
bytes.fromhex('83f7d613ffa82dc76c99be3faa421ec4'),
bytes.fromhex('79f7e9d3fffc45860dbc203f74491ecf'),
bytes.fromhex('abf731a1ff4aa6b76c43cc3fd12d1ebf'),
bytes.fromhex('abf7cd64ff8480b70a71cc3f3f2d1ee6'),
bytes.fromhex('abf7e940fffcc3b70d4ccc3fd02d1ecf'),
bytes.fromhex('ab2f1513200b2db7bc99ccc5aa2d6e7c'),
bytes.fromhex('21f7e913fffc2d290d99383faa9f1ecf'),
bytes.fromhex('abf77a86ffd9c7b7c139cc3fde2d1e23'),
bytes.fromhex('abc7e913dffc2db70d99cc9caa2dd7cf'),
bytes.fromhex('34f7e923fffcf7ef0d72473f4d471ecf'),
bytes.fromhex('ab6fe91305fc2db70d99cc13aa2d3dcf'),
bytes.fromhex('abc5e93eb7fc45b70d33cc21a42dcacf'),
bytes.fromhex('abf7778bff91ceb71054cc3f892d1e16'),
bytes.fromhex('abf71013ff062db7b799cc3faa2d1ecc'),
bytes.fromhex('abf78c13ff912db70d99cc3faa2d1e81'),
bytes.fromhex('34f7e941fffc03fb0d08633fb5ce1ecf'),
bytes.fromhex('ab8de913f3fc2db70d99cc61aa2d94cf'),
bytes.fromhex('ab57e9b79cfc70b70d73cc6c202de3cf'),
bytes.fromhex('b0f7e945fffcc0a60d84f23f52e01ecf'),
bytes.fromhex('abf71792ff4213b7eb36cc3f2c2d1e58'),
bytes.fromhex('abc4e9ed32fcfbb70d97cc2b492dddcf'),
bytes.fromhex('abf7e919fffc26b70d04cc3ff52d1ecf'),
bytes.fromhex('b7dbe91362fc2d2c0d99879caa2f49cf'),
bytes.fromhex('2cf7e947fffc215b0dc8873f39a91ecf'),
bytes.fromhex('abf7f72eff19a1b70828cc3f732d1e23'),
bytes.fromhex('e3f7e913fffc2d1c0d99c83faa0f1ecf'),
bytes.fromhex('abf7df47ff8ff7b79081cc3fce2d1ec4'),
bytes.fromhex('ab93e9138ffc2db70d99ccb6aa2d88cf'),
bytes.fromhex('9bf7e9a9fffca1020de5ec3f79491ecf'),
bytes.fromhex('abf7061eff4507b77ececc3f172d1ef9'),
bytes.fromhex('abf79813ff0a2db72599cc3faa2d1e2f'),
]
# print('--encrypt--')
# for intermediate in ['sb9']:
# print(f'--{intermediate}--')
# # faulted = []
# # # # for _ in range(10000):
# # # # faulted.append(os.urandom(16))
# # for _ in range(500):
# # faulted.append(ctx.encrypt_block(bytes(message), glitch_at=intermediate, glitch=double_byte_multi_bit_flip))
# # random.shuffle(faulted)
# # for f in faulted:
# # print(f"bytes.fromhex('{f.hex()}'),")
# roundkey, idx, candidates = phoenixAES.crack_bytes(faulted[:200], ciphertext, verbose=0, encrypt=True)
# print(f"roundkey : {''.join(['%02x' % x if x is not None else '..' for x in roundkey])} ({idx}, {intermediate})")
# # print(candidates)
# with open('tracefile', 'w') as t:
# print(ciphertext.hex(), file=t)
# for f in faulted:
# print(f.hex(), file=t)
# cracker = phoenixAES.ByteCracker(ciphertext, encrypt=True, verbose=0)
# for faulty in faulted:
# roundkey, idx, candidates = cracker.crack_bytes(faulty)
# print(f"roundkey : {''.join(['%02x' % x if x is not None else '..' for x in roundkey])} ({idx})") | UTF-8 | Python | false | false | 12,095 | py | 8 | classic.py | 6 | 0.674742 | 0.5284 | 0 | 274 | 42.149635 | 125 |
yuhong270991/dp | 11,751,030,522,333 | 3efcd53825acf6d3c987f1cd60afca8d3f5da20f | e70b5dab798c183eeb3b861ad96ca654ddafacfc | /Python/KNN/knn.py | 2d197256ec4982988dd127decd5e3cdc2a9c5439 | [] | no_license | https://github.com/yuhong270991/dp | 3054ddaca1bdf466647e5945f698c68da68435a1 | b9107cb2aca129dec78f07d3e5da01764f020baf | refs/heads/master | "2022-04-08T19:37:43.616086" | "2020-03-05T04:59:50" | "2020-03-05T04:59:50" | 149,475,644 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import math
import numpy as np
from operator import itemgetter
#normalizes a given dataset X (where X is a n-D array. The last column is not normalized)
def dataNorm(X):
num_col = X.shape[-1]
last_column = X[:, num_col-1]
X_norm = (X - X.min(axis = 0)) / (X.ptp(axis = 0))
X_norm[:, num_col-1] = last_column
return X_norm
#split normalized dataset into training and test set, PercentTrain is the % of data for training
def splitTT(X_norm, PercentTrain):
#shuffle the rows
np.random.shuffle(X_norm)
#find where to split
sI = int(PercentTrain * len(X_norm))
#training and test split (train set: everything until splitindex, test set: from split index to end)
return X_norm[:sI], X_norm[sI:]
def splitCV(X_norm, K):
#shuffle and split the n-D array into a list of K n-D arrays
np.random.shuffle(X_norm)
return np.array_split(X_norm, K)
#generalizes euclidean/manhattan and minkowski into a single function
def minkow_dist(A, B, length, order):
if(order == 2):
return eucl_dist(A, B, length)
else:
distance = 0.0
num_attribs = length - 1 #dont include rings
for i in range(num_attribs):
distance += pow(abs(A[i] - B[i]), order)
return (distance ** 1.0/order)
#finds the euclidean distance between 2 equal length tuples A and B given the number of elements in each tuple
def eucl_dist(A, B, length):
distance = 0.0
num_attribs = length - 1 #dont include rings
for i in range(num_attribs):
distance += pow((A[i] - B[i]), 2)
return math.sqrt(distance)
def getKNN(testData, dataset, num_cols, K, order):
distance_list = []
#pair distance and data in a list and sort based on smallest distance, only add last col(rings)
for i in range(len(dataset)):
dist = minkow_dist(testData, dataset[i], num_cols, order)
distance_list.append((dist, dataset[i][-1]))
distance_list = sorted(distance_list, key=itemgetter(0))
#get the first K neighbours after sorting
neighbours = []
for i in range(K):
neighbours.append(distance_list[i][1])
return neighbours
def predict(neighbours, K):
#compute the count for each unique label in neighbours
instances, counts = np.unique(neighbours, return_counts = True)
countlist = []
for i in range(len(counts)):
countlist.append((counts[i], instances[i]))
#reverse so highest count will be first value in dict
countlist = sorted(countlist, key = itemgetter(0), reverse = True)
#take the first value as the result
return countlist[0][1]
def KNN(X_train, X_test, K):
N = len(X_test)
num_col = X_test.shape[-1]
correct = 0.0
for i in range(N):
neighbours = getKNN(X_test[i], X_train, num_col, K, 2)
result = predict(neighbours, K)
# if number of rings are correct
if result == X_test[i][-1]:
correct += 1.0
#return percentage of correct as accuracy
return (correct / float(N)) * 100.0
def KNNManhattan(X_train, X_test, K):
N = len(X_test)
num_col = X_test.shape[-1]
correct = 0.0
for i in range(N):
#order is 1 for manhattan dist
neighbours = getKNN(X_test[i], X_train, num_col, K, 1)
result = predict(neighbours, K)
# if number of rings are correct
if result == X_test[i][-1]:
correct += 1.0
#return percentage of correct as accuracy
return (correct / float(N)) * 100.0
def KNNMinkow(X_train, X_test, K):
N = len(X_test)
num_col = X_test.shape[-1]
correct = 0.0
for i in range(N):
#order is 3 for minkowski dist
neighbours = getKNN(X_test[i], X_train, num_col, K, 3)
result = predict(neighbours, K)
# if number of rings are correct
if result == X_test[i][-1]:
correct += 1.0
#return percentage of correct as accuracy
return (correct / float(N)) * 100.0
| UTF-8 | Python | false | false | 3,939 | py | 187 | knn.py | 163 | 0.629601 | 0.615131 | 0 | 109 | 35.119266 | 110 |
ska-telescope/skampi | 17,952,963,334,781 | f9778352cfe683055d4a79ad22df02b8ecd4daa6 | 0de83c64ff184ce999910782886851606e0b0634 | /tests/resources/models/mvp_model/states.py | b4d389d18f48c16485d71e08a32496f3985a00a1 | [
"BSD-3-Clause"
] | permissive | https://github.com/ska-telescope/skampi | 1ce33740389a0a7489bd58383fab47d5557dc6fc | 3ee1e8df0d6612fda6822702561eae89dd7b9cdb | refs/heads/master | "2023-08-31T05:23:25.038516" | "2023-08-29T08:48:56" | "2023-08-29T08:48:56" | 176,765,819 | 1 | 2 | BSD-3-Clause | false | "2023-02-11T00:47:56" | "2019-03-20T15:40:38" | "2022-11-03T12:16:30" | "2023-02-11T00:47:56" | 12,961 | 5 | 1 | 6 | Python | false | false | """
module containing values for interpreting enumerated values (e.g. ObsState)
"""
import enum
class ObsState(enum.IntEnum):
"""Representation of int ObsState as an Enum."""
EMPTY = 0
RESOURCING = 1
IDLE = 2
CONFIGURING = 3
READY = 4
SCANNING = 5
ABORTING = 6
ABORTED = 7
RESETTING = 8
FAULT = 9
RESTARTING = 10
| UTF-8 | Python | false | false | 365 | py | 225 | states.py | 95 | 0.619178 | 0.586301 | 0 | 21 | 16.380952 | 75 |
zhangbc07/Project | 9,801,115,409,361 | 41cfa9b8ea2ca2de1a61b6361dfa8922cc68759f | c0859588f13a3ad9729bac5907dfd42b83a48a08 | /Mp_loan/Mp_loan.py | 0f579d81b1f23bd688379df8619f9311af71354c | [] | no_license | https://github.com/zhangbc07/Project | 25b9e7744fe2f0089bd92e1ab7252939bb47e1f7 | 68d7b411350e9fb04fd788fda38bbfa7865f1d24 | refs/heads/master | "2022-07-23T19:04:19.914211" | "2019-11-14T11:11:55" | "2019-11-14T11:11:55" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# coding: utf-8
#数据下载:https://mirror.shileizcc.com/Other/LoanStats3a.csv
# In[1]:
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
loans_2007=pd.read_csv('./LoanStats3a.csv',skiprows=1)
#清除不必要的列
half_count=len(loans_2007)/2
loans_2007=loans_2007.dropna(thresh=half_count,axis=1)
loans_2007=loans_2007.drop(['desc','url'],axis=1)
loans_2007.to_csv('./loans_2007.csv',index=False)
# In[2]:
loans_2007=pd.read_csv('./loans_2007.csv')
# In[3]:
loans_2007.head()
# In[4]:
#清除无用数据
loans_2007=loans_2007.drop(['id','member_id','funded_amnt','funded_amnt_inv','grade','sub_grade','emp_title','issue_d'],axis=1)
loans_2007 = loans_2007.drop(['zip_code', 'out_prncp', 'out_prncp_inv', 'total_pymnt','total_pymnt_inv', 'total_rec_prncp'], axis=1)
loans_2007 = loans_2007.drop(['total_rec_int', 'total_rec_late_fee','recoveries', 'collection_recovery_fee', 'last_pymnt_d', 'last_pymnt_amnt'], axis=1)
# In[5]:
print(loans_2007.iloc[0])
print(loans_2007.shape[1])
# In[6]:
print(loans_2007['loan_status'].value_counts())
# In[7]:
loans_2007=loans_2007[(loans_2007['loan_status']=='Fully Paid') | (loans_2007['loan_status']=='Charged Off')]
status_replace={
'loan_status':{
'Fully Paid':1,
'Charged Off':0,
}
}
loans_2007=loans_2007.replace(status_replace)
# In[8]:
loans_2007.head()
# In[9]:
#去除空数据
orig_columns=loans_2007.columns
drop_columns=[]
for col in orig_columns:
col_series=loans_2007[col].dropna().unique( )
if len(col_series)==1:
drop_columns.append(col)
loans_2007=loans_2007.drop(drop_columns,axis=1)
# In[10]:
loans_2007.to_csv('./filtered_loans_2007.csv',index=False)
# In[11]:
#统计缺失值
loans=pd.read_csv('./filtered_loans_2007.csv')
null_counts=loans.isnull().sum()
# In[12]:
null_counts
# In[13]:
#去掉缺失值较多的数据
loans=loans.drop('pub_rec_bankruptcies',axis=1)
loans=loans.dropna(axis=0)
# In[14]:
loans.dtypes.value_counts()
# In[15]:
#对object数据进行格式转换
mapping_dict={
'emp_length':{
'10+ years':10,
'9 years':9,
'8 years':8,
'7 years':7,
'6 years':6,
'5 years':5,
'4 years':4,
'3 years':3,
'2 years':2,
'1 year':1,
'n/a':0
}
}
loans=loans.drop(['last_credit_pull_d','earliest_cr_line','addr_state','title'],axis=1)
loans['int_rate']=loans['int_rate'].str.rstrip('%').astype('float')
loans['revol_util']=loans['revol_util'].str.rstrip('%').astype('float')
loans=loans.replace(mapping_dict)
# In[16]:
cat_columns=['home_ownership','verification_status','emp_length','purpose','term']
dummy_df=pd.get_dummies(loans[cat_columns])
loans=pd.concat([loans,dummy_df],axis=1)
loans=loans.drop(cat_columns,axis=1)
loans=loans.drop('pymnt_plan',axis=1)
loans.to_csv('./cleaned_loans2007.csv',index=False)
# In[17]:
loans=pd.read_csv('./cleaned_loans2007.csv')
# In[18]:
loans.info()
# In[19]:
from sklearn.linear_model import LogisticRegression
lr=LogisticRegression()
cols = loans.columns
train_cols = cols.drop('loan_status')
#得到特征
features = loans[train_cols]
#得到标签
target = loans['loan_status']
lr.fit(features, target)
predictions = lr.predict(features)
# In[20]:
#第一次测试——使用逻辑回归测试
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_predict,KFold
lr = LogisticRegression()
kf = KFold()
predictions = cross_val_predict(lr, features, target, cv=kf)
predictions = pd.Series(predictions)
#定义指标
#True positives
tp_filter = (predictions == 1) & (loans['loan_status']== 1)
tp = len(predictions[tp_filter])
#False positives
fp_filter = (predictions == 1) & (loans['loan_status']== 0)
fp = len(predictions[fp_filter])
#True negatives
tn_filter = (predictions == 0) & (loans['loan_status']== 0)
tn = len(predictions[tn_filter])
#False negatives
fn_filter = (predictions == 0) & (loans['loan_status'] == 1)
fn = len(predictions[fn_filter])
# Rates
tpr = tp / float((tp + fn))
fpr = fp / float((fp + tn))
print(tpr)
print(fpr)
# In[22]:
#第二次测试——添加权重项,平衡数据的权重和
lr=LogisticRegression(class_weight='balanced')
kf=KFold()
predictions=cross_val_predict(lr,features,target,cv=kf)
predictions=pd.Series(predictions)
#True positives
tp_filter = (predictions == 1) & (loans['loan_status']== 1)
tp = len(predictions[tp_filter])
#False positives
fp_filter = (predictions == 1) & (loans['loan_status']== 0)
fp = len(predictions[fp_filter])
#True negatives
tn_filter = (predictions == 0) & (loans['loan_status']== 0)
tn = len(predictions[tn_filter])
#False negatives
fn_filter = (predictions == 0) & (loans['loan_status'] == 1)
fn = len(predictions[fn_filter])
# Rates
tpr = tp / float((tp + fn))
fpr = fp / float((fp + tn))
print(tpr)
print(fpr)
# In[23]:
#第三次测试——自定义权重项
penalty={
0:5,
1:1
}
lr=LogisticRegression(class_weight=penalty)
kf=KFold()
predictions=cross_val_predict(lr,features,target,cv=kf)
predictions=pd.Series(predictions)
#True positives
tp_filter = (predictions == 1) & (loans['loan_status']== 1)
tp = len(predictions[tp_filter])
#False positives
fp_filter = (predictions == 1) & (loans['loan_status']== 0)
fp = len(predictions[fp_filter])
#True negatives
tn_filter = (predictions == 0) & (loans['loan_status']== 0)
tn = len(predictions[tn_filter])
#False negatives
fn_filter = (predictions == 0) & (loans['loan_status'] == 1)
fn = len(predictions[fn_filter])
# Rates
tpr = tp / float((tp + fn))
fpr = fp / float((fp + tn))
print(tpr)
print(fpr)
# In[24]:
#第四次测试——随机森林
from sklearn.ensemble import RandomForestClassifier
rf=RandomForestClassifier(n_estimators=10,class_weight='balanced',random_state=1)
kf=KFold()
predictions=cross_val_predict(lr,features,target,cv=kf)
predictions=pd.Series(predictions)
#True positives
tp_filter = (predictions == 1) & (loans['loan_status']== 1)
tp = len(predictions[tp_filter])
#False positives
fp_filter = (predictions == 1) & (loans['loan_status']== 0)
fp = len(predictions[fp_filter])
#True negatives
tn_filter = (predictions == 0) & (loans['loan_status']== 0)
tn = len(predictions[tn_filter])
#False negatives
fn_filter = (predictions == 0) & (loans['loan_status'] == 1)
fn = len(predictions[fn_filter])
# Rates
tpr = tp / float((tp + fn))
fpr = fp / float((fp + tn))
print(tpr)
print(fpr)
| UTF-8 | Python | false | false | 6,518 | py | 8 | Mp_loan.py | 6 | 0.66672 | 0.624522 | 0 | 304 | 19.644737 | 152 |
Lackadaisica1/pre-college-dump | 3,762,391,394,374 | 6cc9757a4f47d73b6cea60188074f73dbe2ea7f8 | f72ff978288a570ea5d0263c830913c67f47b622 | /starting-out-with-python/chapter-11/2 - Car Class.py | f229f15a09b8bd02f33c9824a6e5f2fd134869f8 | [] | no_license | https://github.com/Lackadaisica1/pre-college-dump | 2feceb37ded8e092f77f275c6de6ddc03ceb54fc | d99fa2e11124bff6d2f313c03f4561298fd832d9 | refs/heads/master | "2020-05-07T10:01:50.633289" | "2019-06-12T01:35:14" | "2019-06-12T01:35:14" | 180,401,902 | 0 | 0 | null | false | "2019-04-09T16:00:37" | "2019-04-09T15:53:15" | "2019-04-09T15:57:24" | "2019-04-09T16:00:37" | 0 | 0 | 0 | 0 | Python | false | false | # A program which creates a Car class, and then creates a couple of functions for
# that object
class Car:
def __init__(self, year_model, make, speed):
self.__year_model = year_model
self.__make = make
self.__speed = 0
def set_make(self, make):
self.__make = self
def set_model_year(self, year_model):
self.__year_model = year_model
def accelerate(self, speed):
self.__speed += 5
def brake(self, speed):
self.__speed -= 5
def get_speed(self):
return self.__speed
model = 1997
make = 'Toyota'
speed = 0
car = Car(model, make, speed)
for i in range(5):
car.accelerate(speed)
print("Here is the car's speed in miles per hour:", car.get_speed())
for i in range(5):
car.brake(speed)
print("Here is the car's speed in miles per hour:", car.get_speed())
| UTF-8 | Python | false | false | 886 | py | 70 | 2 - Car Class.py | 57 | 0.586907 | 0.575621 | 0 | 28 | 29.428571 | 81 |
JANMAY007/python_practice | 5,909,875,000,938 | 1449c49afebdbc8bb38b2a15c8aae62433ea073f | b6170903be7a09372f92bf8eb22bac8af200f1ef | /Python language practice/code jam solutions/codejam2.py | a7c8a13a0b5ab3258c7db84c1eb1892791c756ae | [] | no_license | https://github.com/JANMAY007/python_practice | 17b1fc3e4416ff2093ef25b759c41d062cf8195f | f087a1d4404a135dc6ae3bc79b7da52357f302d1 | refs/heads/master | "2022-04-24T12:10:05.074731" | "2020-04-28T12:15:47" | "2020-04-28T12:15:47" | 259,626,954 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
T = int(input())
for o in range(1,T+1):
N = int(input())
a = [[int(q) for q in input().split()] for p in range(0,N)]
result = []
x = np.trace(a)
def func(a):
count1=0
for k in range(N):
d = set(a[k])
if len(d) < N:
count1+=1
return(count1)
result = list(map(list, zip(*a)))#transposing a
print ('%s #%d: %d %d %d' % ('Case',o,x,func(a),func(result))) | UTF-8 | Python | false | false | 433 | py | 77 | codejam2.py | 76 | 0.505774 | 0.487298 | 0 | 16 | 25.1875 | 64 |
zopepy/leetcode | 6,708,738,925,650 | 2cbb7a251b0a13266f85ffa14a9564b700620c15 | 0d4ec25fb2819de88a801452f176500ccc269724 | /remove_k_digits.py | e01ac81dfa6f86a3bf70982113d95878c5953047 | [] | no_license | https://github.com/zopepy/leetcode | 7f4213764a6a079f58402892bd0ede0514e06fcf | 3bfee704adb1d94efc8e531b732cf06c4f8aef0f | refs/heads/master | "2022-01-09T16:13:09.399620" | "2019-05-29T20:00:11" | "2019-05-29T20:00:11" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
def removeKdigits(self, num, k):
"""
:type num: str
:type k: int
:rtype: str
"""
def remove_digit(num):
numlist = list(str(num))
if not numlist or not num:
return "0"
l = len(numlist)
found = False
for i in range(0, l-1):
if numlist[i] > numlist[i+1]:
found = True
break
if not found:
max_val = max(numlist, key=lambda x: int(x))
for j,v in enumerate(numlist):
if v == max_val:
break
del numlist[j]
if not numlist:
return "0"
return "".join(numlist)
else:
del numlist[i]
if not numlist:
return "0"
return "".join(numlist)
for i in range(0, k):
num = int(remove_digit(num))
if not num:
break
return num
s = Solution()
print(s.removeKdigits(9, 1))
| UTF-8 | Python | false | false | 994 | py | 799 | remove_k_digits.py | 798 | 0.454728 | 0.445674 | 0 | 42 | 22.619048 | 54 |
Nimitz14/HR_temp_prediction | 2,310,692,421,184 | 6827b9b7d177e7dca03f7a6d874e6240a3262781 | 10817baf530eed2442040eed8d68a4b2b698d4a1 | /predict_temp.py | 833e734b525c301110dce00df74d67c53b2ea625 | [] | no_license | https://github.com/Nimitz14/HR_temp_prediction | 4e94e2384e5fda0e370172c2262019376623f7d9 | da400ff6dd7231b403813238691e4f14fa7ed93b | refs/heads/master | "2017-12-01T08:52:18.139494" | "2016-08-09T16:55:56" | "2016-08-09T16:55:56" | 63,858,731 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import division
import numpy as np
import fileinput
'''
See main() at the bottom for top level overview (or read the README).
'''
def get_from_stdin():
    '''
    Assumes data is being input via stdin. Replaces month names with number. Then creates a by month date-format.
    Clean data is then created in the format (?, [month, max_temp, min_temp]) = 2D.
    Data with missing values has format (?, [month, known_temp, is_known_temp_max]) = 2D.
    Returns them both in ndarray.
    '''
    # Read in from stdin.
    data_clean = []
    data_missing = []
    ct = 0
    for line in fileinput.input():
        if ct == 0 or ct == 1:  # To skip first 2 lines
            ct += 1
            continue
        if "Missing" in line:
            data_missing.append([c for c in line.strip().split()])
        else:
            data_clean.append([c for c in line.strip().split()])

    # Replace month names with a number string.
    month2num_dict = {'January': '1', 'February': '2', 'March': '3', 'April': '4', 'May': '5', 'June': '6', 'July': '7',
                      'August': '8', 'September': '9', 'October': '10', 'November': '11', 'December': '12'}
    data_clean = np.array(data_clean)
    for i, c in enumerate(data_clean[:, 1]):
        data_clean[i, 1] = month2num_dict[c]
    data_missing = np.array(data_missing)
    for i, c in enumerate(data_missing[:, 1]):
        data_missing[i, 1] = month2num_dict[c]

    # Handle clean data.
    data_clean = data_clean.astype(np.float32)
    start_year = np.min(data_clean[:, 0])
    data_clean = np.c_[(data_clean[:, 0] - start_year)*12 + data_clean[:, 1], data_clean[:, 2], data_clean[:, 3]]

    # Handling data with missing values.
    data_missing_float = []
    for line in data_missing:
        tmp_array = []
        tmp_date = 0
        is_max = None
        for i, c in enumerate(line):
            if i == 0:
                tmp_date = (float(c) - start_year)*12
            elif i == 1:
                tmp_date += float(c)
                tmp_array.append(tmp_date)
            elif "Missing" in c:
                if i == 2:
                    is_max = 1
                elif i == 3:
                    is_max = 0
            else:
                tmp_array.append(float(c))
        tmp_array.append(is_max)
        data_missing_float.append(tmp_array)
    return data_clean, np.asarray(data_missing_float)

def normal_equation_fit(x, y):
    '''
    Normal equation formula with offset, linear and sinus term.
    '''
    # Model: y ~ p0 + p1*x + p2*sin(pi*(x - phase)/6), i.e. the sinus term
    # has a 12-month period.
    mat_A = np.c_[np.ones_like(x), x, custom_sin(x)]
    params = np.linalg.inv(mat_A.T.dot(mat_A)).dot(mat_A.T).dot(y.T)
    return params


def hypothesis(x, params, phase_sh=4):
    return params[0] + params[1]*x + params[2]*custom_sin(x, phase_sh)


def custom_sin(x, phase_sh=4):
    # 2/12 = 1/6.
    return np.sin(np.pi*(x-phase_sh)/6)

def predict_temps(data_missing, params_max_t, params_min_t, averages, phase_sh):
    '''
    For data with missing terms calculate a prediction and adjust by the difference between the known value
    and the average value of that month.
    To know which average to use, takes the month number modulo 12.
    '''
    data_by_month = (data_missing[:, 0]-1) % 12
    for i, line in enumerate(data_missing):
        if line[-1] == 1:
            sin_pred = hypothesis(line[0], params_max_t, phase_sh)
            final_pred = sin_pred + (line[1] - averages[data_by_month[i], 1])
            print(final_pred)
        elif line[-1] == 0:
            sin_pred = hypothesis(line[0], params_min_t, phase_sh)
            final_pred = sin_pred + (line[1] - averages[data_by_month[i], 0])
            print(final_pred)

def calc_averages(data):
    '''
    Calculates average max/min temperature of the 12 months in a year.
    '''
    averages = []  # shape: (month,[max,min])
    data_by_month = (data[:, 0]-1) % 12
    for i in xrange(0, 12):
        averages.append([])
        bool_mask = (data_by_month == i)
        averages[i].append(np.mean(data[bool_mask][:, 1]))
        averages[i].append(np.mean(data[bool_mask][:, 2]))
    return np.asarray(averages)

def calc_optimal_phase(data, params_max):
    '''
    Autocorrelation implementation to find the best phase shift.
    Cuts the two waveforms to 40 samples because in that bound the data waveform is well behaved.
    Shifts hypothesis waveform and each time uses the minimum of the two waveforms to find 'area' underneath both.
    Is assumed result would be same whether using max or min waveforms (max used).
    '''
    data_wave = data[:40, 1]
    autocorr_func = []
    for phase in np.arange(3, 5, 0.005):
        hypo_wave = hypothesis(data[:40, 0], params_max, phase)
        autocorr_func.append([phase, np.sum(np.min(np.c_[data_wave, hypo_wave], axis=1))])
    autocorr_func = np.asarray(autocorr_func)
    return autocorr_func[np.argmax(autocorr_func[:, 1]), 0]

def main():
    data, data_missing = get_from_stdin()
    averages = calc_averages(data)
    params_max_t = normal_equation_fit(data[:, 0], data[:, 1])
    params_min_t = normal_equation_fit(data[:, 0], data[:, 2])
    best_phase_shift = calc_optimal_phase(data, params_max_t)
    # Outputs to stdout.
    predict_temps(data_missing, params_max_t, params_min_t, averages, best_phase_shift)


main()
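# Usage sketch (assumption: HackerRank-style temperature data on stdin, with
# two header lines followed by whitespace-separated rows; this is Python 2):
#   python predict_temp.py < input.txt > predictions.txt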
| UTF-8 | Python | false | false | 4,753 | py | 2 | predict_temp.py | 1 | 0.654955 | 0.632653 | 0 | 158 | 29.056962 | 117 |
shashank231/Programming-quiz | 18,253,611,034,339 | f6274f2a277a07f62c2a8ab8bbd794bc3501f339 | 4b0d68860631717f2e0321a8f02bf9ccc13b1432 | /namo/migrations/0003_subject_card_pic.py | 8990b2f10a0a56a8ce9541818b374aedb9e5e1bf | [] | no_license | https://github.com/shashank231/Programming-quiz | 0adc8f55f5badd136a92385fce90407a6e7898f1 | 630ab458d447cfee16b900a43efe83585cb306b5 | refs/heads/master | "2022-11-07T18:11:00.081969" | "2020-06-22T13:19:54" | "2020-06-22T13:19:54" | 274,135,865 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.0.6 on 2020-06-13 10:47
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = [
        ('namo', '0002_subject_info'),
    ]

    operations = [
        migrations.AddField(
            model_name='subject',
            name='card_pic',
            field=models.ImageField(blank=True, null=True, upload_to=''),
        ),
    ]
| UTF-8 | Python | false | false | 401 | py | 21 | 0003_subject_card_pic.py | 15 | 0.581047 | 0.533666 | 0 | 18 | 21.277778 | 73 |
keepalive555/export | 7,559,142,484,736 | d8c6f6f615266c3c0ac1dccdb5e033443a85a3f6 | 2ce198c323a3a09ae79f565d6cacb49258a32b5d | /crawlers/util/cache.py | 1ba3e3bd9f4ff76c616a9f90588014441e19deb5 | [] | no_license | https://github.com/keepalive555/export | 030a6f61f358d7a29ce902f261a9769652182866 | 2d12198ae96c995b723488e5525c89fb32698d07 | refs/heads/master | "2017-12-17T06:31:09.896102" | "2017-11-13T15:00:06" | "2017-11-13T15:00:06" | 77,551,081 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import os
import shelve
from core.metaclass import Final
class CacheFile(object):
    """Cache file object.

    Args:
        cache_dir (str): Directory the file lives in.
        cache_file_name (str): File name, without extension.

    Returns:
        None
    """

    def __init__(self, cache_dir, cache_file_name):
        self._cache_dir = cache_dir
        self._cache_file_name = cache_file_name
        self._cache = shelve.open(
            os.path.join(cache_dir, cache_file_name),
            flag='c', protocol=2, writeback=True,
        )

    def flush(self):
        """Flush the data to the cache file."""
        self._cache.sync()

    def __getattr__(self, attr):
        """Support member access via '.'."""
        return self._cache[attr]

    def __setattr__(self, attr, value):
        """Support setting member values via '.'."""
        if attr.startswith('_'):
            super(CacheFile, self).__setattr__(attr, value)
        else:
            self._cache[attr] = value

    def __setitem__(self, k, v):
        """Delegate to shelve."""
        self._cache[k] = v

    def __getitem__(self, k):
        """Delegate to shelve."""
        return self._cache.get(k)

    def __del__(self):
        """Close the cache file."""
        self._cache.close()


class Cache(object):

    __metaclass__ = Final

    def __init__(self, cache_dir="./"):
        cache_dir = os.path.join(os.path.abspath(cache_dir), '.cache')
        if not os.path.exists(cache_dir):
            os.mkdir(cache_dir, 0o744)  # octal mode; the decimal literal 744 would set bogus permissions
        self._cache_dir = cache_dir
        self._object_cache = dict()

    def __getattr__(self, attr):
        """Access the matching app cache via attribute.

        Args:
            attr (str): Attribute name, usually a crawler app name, e.g. itjuzi.

        Returns:
            CacheFile
        """
        if attr in self._object_cache:
            return self._object_cache[attr]
        else:
            cache = CacheFile(self._cache_dir, attr)
            self._object_cache[attr] = cache
            return cache

    def clear(self, cache=None, ignore_error=False):
        """Clear the cache.

        Args:
            cache (str): Name of the cache to clear, e.g. itjuzi; clears everything by default.
            ignore_error (bool): Whether to ignore exceptions.

        Returns:
            None

        Raises:
            IOError: The cache to delete does not exist.
        """
        if cache is None:
            for _, v in self._object_cache.items():
                del v
            __import__('shutil').rmtree(self._cache_dir)
        else:
            try:
                v = self._object_cache.get(cache)
                if v is not None:
                    del v
                os.remove(
                    os.path.join(self._cache_dir, '%s.db' % cache))
            except IOError:
                if ignore_error is not True:
                    raise

    def __del__(self):
        """Destructor: close all cache files."""
        for _, v in self._object_cache.items():
            del v


if __name__ == "__main__":
    pass
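    # Minimal usage sketch (assumption: run from a writable working directory):
    #   cache = Cache()
    #   cache.itjuzi['last_page'] = 3   # persisted under ./.cache/
    #   cache.itjuzi.flush()
    #   cache.clear('itjuzi', ignore_error=True)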
| UTF-8 | Python | false | false | 3,082 | py | 17 | cache.py | 13 | 0.484263 | 0.482475 | 0 | 117 | 22.897436 | 70 |
ulti72/Credit_Saison | 12,146,167,530,086 | 285b1572fac9547f4550743d2c40aa9ab2c9a947 | 4d29c074c432495b7013eaab34a29a7ecf45ec00 | /flaskapi/__init__.py | 9a239a9000286e173550c67a9fee36b9ad4f7d81 | [] | no_license | https://github.com/ulti72/Credit_Saison | b4fe8c81fda12980a2508644da9d8d6745dac429 | 28e46318c1392306905d159af7504d395a541954 | refs/heads/master | "2023-05-12T17:07:45.342125" | "2021-05-29T16:22:51" | "2021-05-29T16:22:51" | 371,811,076 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask_sqlalchemy import SQLAlchemy
from flask import Flask
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
db = SQLAlchemy(app)
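
# The routes import stays at the bottom on purpose: flaskapi.routes imports
# app and db from this module, so importing it any earlier would create a
# circular import.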
from flaskapi import routes | UTF-8 | Python | false | false | 265 | py | 6 | __init__.py | 4 | 0.728302 | 0.724528 | 0 | 11 | 22.272727 | 62 |
adrianoff/ml_coursera | 4,492,535,802,548 | 5730af8344b25dfd4f89f405bfebb2dd66e160e0 | ddebe66cb75c86b580418947b823c2543aa318cb | /final_projects/sentiment_analysis/week6/parser.py | 8ff38c1c1d9a65029c6cb3e4c8bf75ae39ee9f82 | [] | no_license | https://github.com/adrianoff/ml_coursera | 5dae7ab8ceb98cf9a774f27c34597fe226dfacf5 | cfdea088cc3417c6852b6266c66a01e08ede7674 | refs/heads/master | "2021-01-19T11:18:21.392267" | "2018-09-17T15:20:18" | "2018-09-17T15:20:18" | 87,952,932 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import requests
import bs4 as bs4
from tqdm import tqdm
import sys
reload(sys)
sys.setdefaultencoding('utf8')
def split_uppercase(string):
    x = ''
    k = 0
    for c in string:
        if k == 0:
            x += c
        elif c.isupper() and not string[k-1].isupper():
            x += ' %s' % c
        else:
            x += c
        k += 1
    return x.strip()


site = 'https://technopoint.ru'
base_url = 'https://technopoint.ru/catalog/17a75b8e16404e77/smartfony'

pages = []
for p in range(1, 33):
    pages.append(base_url + '/?p=' + str(p) + '&i=1&stock=0&order=4')

products_link = []
print "Get Pages:\n"
for page in tqdm(pages):
    req = requests.get(page)
    parser = bs4.BeautifulSoup(req.text, 'lxml')
    links = parser.findAll('a', attrs={'data-role': 'product-cart-link'})
    for link in links:
        products_link.append(site + link['href'] + 'opinion/')

print "\n\nGet Pages With Comments:\n"
pages_with_comments = []
for link in tqdm(products_link):
    req = requests.get(link)
    parser = bs4.BeautifulSoup(req.text, 'lxml')
    ul_pagination = parser.find('ul', {"class": "pagination"})
    comments_pager_last_link = None
    if ul_pagination is not None:
        comments_pager_last_link = ul_pagination.find('li', {"class": "last"}).find('a')
    comments_pages_count = 1
    if comments_pager_last_link is not None:
        comments_pages_count = int(comments_pager_last_link["data-page"]) + 1
    for page_with_comments in range(1, comments_pages_count + 1):
        pages_with_comments.append(link + str(page_with_comments) + '/')

i = 1
print "\n\nProcess Pages With Comments:\n"
for page_with_comments in tqdm(pages_with_comments):
    req = requests.get(page_with_comments)
    parser = bs4.BeautifulSoup(req.text, 'lxml')
    opinion_items = parser.find('div', {"class": "opinion-container"}).findAll('div', {"class": "opinion-item"})
    for opinion_item in opinion_items:
        description = str(opinion_item.find('div', {"class": "descriptions"})).replace("\n", ' ')
        description = split_uppercase(unicode(description))
        grade = opinion_item["data-grade"]
        comment_file = open('./data/' + str(i) + '.txt', 'w')
        comment_file.write(page_with_comments + "\n" + grade + "\n" + description)
        comment_file.close()
        i += 1

print "\n"
print "Total comments: " + str(i)
| UTF-8 | Python | false | false | 2,392 | py | 150 | parser.py | 39 | 0.61413 | 0.599498 | 0 | 80 | 28.9 | 112 |
sdck139/ck-leetcode | 19,292,993,114,820 | c3f4cad4846ecc5907166c8d61586d81549b918d | 88cb3315ecb89adc981c80a52cdedff8ac6189f3 | /537-ComplexNumberMultiplication.py | 90f4f8c9868c8d0d4f40a2f1b1648db681e7b59b | [] | no_license | https://github.com/sdck139/ck-leetcode | b61ef40be0948dfd694097160939fbee5f1936b3 | f43574dbb6a872e1c96d367f8948ed8c42b4fde1 | refs/heads/master | "2021-01-20T00:24:11.542989" | "2017-11-01T08:56:47" | "2017-11-01T08:56:47" | 89,125,640 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution(object):
    def complexNumberMultiply(self, a, b):
        """
        :type a: str
        :type b: str
        :rtype: str
        """
        a1, a2 = self.strToInt(a)
        b1, b2 = self.strToInt(b)
        return str(a1*b1 - a2*b2) + "+" + str(a1*b2 + a2*b1) + "i"

    def strToInt(self, a):
        strs = a.split("+")
        return int(strs[0]), int(strs[1][:-1])
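
# Quick check (hypothetical usage; LeetCode 537's sample case):
#   Solution().complexNumberMultiply("1+1i", "1+1i")  ->  "0+2i"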
| UTF-8 | Python | false | false | 388 | py | 60 | 537-ComplexNumberMultiplication.py | 60 | 0.476804 | 0.438144 | 0 | 13 | 28.846154 | 64 |
tdnvl/PyCharm | 7,971,459,303,251 | 20d654ff9390e40df30f8a3f72e677533b15b11c | 6c77e86454e83f8676fb06740c82eff56f2672eb | /Lessons Ray/2018-01-01-make-bricks.py | 94e9aaa2c98738f2f3c86e2786f36326110e17fb | [] | no_license | https://github.com/tdnvl/PyCharm | 7898c473c53853ccc94c42773efcd1fb734c2019 | 2d7ce8d707170fddd641c93fb7d4378beaeffa70 | refs/heads/master | "2021-01-20T03:29:12.507434" | "2018-05-15T21:08:14" | "2018-05-15T21:08:14" | 89,544,974 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def make_bricks(small, big, goal):
    a = goal / 5
    # if a >= 1 we can use at least one big brick
    # if a < 1 we cannot use a big brick
    b = goal % 5
    # if b == 0 the goal is a multiple of 5
    # if b != 0 we need to check if we have enough small bricks
    # to reach the goal
    c = int(a)
    d = c*big + b*small
    if b == 0:
        if big >= a:
            return True
        elif big < a and ((a - big) * 5) <= small:
            return True
        elif big < a and (a - big)*5 > small:
            return False
        elif goal > small:
            return False
        elif goal <= small:
            return True
    elif b != 0:
        if big < a and small >= b:
            return True
        elif big < a and small < b:
            return False
        elif small == 0 and b == 0 and big >= a:
            return True
        elif small == 0 and b == 0 and big < a:
            return False
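
# For comparison, the compact solution usually given for this exercise is:
#   return small + 5 * big >= goal and small >= goal % 5
# (kept as a comment so the step-by-step version above stays as written)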
| UTF-8 | Python | false | false | 893 | py | 53 | 2018-01-01-make-bricks.py | 50 | 0.494961 | 0.478163 | 0 | 31 | 27.806452 | 63 |
upendar245/python | 8,701,603,785,179 | e41215278aac9d4c9e1602ff4584f24982669131 | 4540c5e07a4a439eb8f5f1998be0d4ee1c9291a7 | /recursion.py | f636695099787ca54bb7ad4a0b87605229fd612c | [
"Apache-2.0"
] | permissive | https://github.com/upendar245/python | 60dad510d2f64041340849617f87a5795e58cec7 | ed4d5488b44d385f2d0f75e8558a5b7fc4563d38 | refs/heads/master | "2021-01-25T09:21:05.951513" | "2020-11-08T07:35:21" | "2020-11-08T07:35:21" | 93,823,129 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def sample(x):
    if x == 0 or x == 1:
        return 1
    else:
        return sample(x - 1) + sample(x - 2)
print sample(6)
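# sample() follows the Fibonacci recurrence with sample(0) == sample(1) == 1,
# so the line above prints 13.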
| UTF-8 | Python | false | false | 130 | py | 12 | recursion.py | 11 | 0.484615 | 0.438462 | 0 | 7 | 17.571429 | 44 |
zy20091082/trimesh | 9,912,784,522,715 | 1be0294e2b9ccc7cf4b2da535d6550adf92a6b27 | dfed9f12ec0893c5893789cdd91c8e2c3f254f9e | /trimesh/version.py | 6915b25eb17e34bd367b7d46af0244670c476608 | [
"MIT"
] | permissive | https://github.com/zy20091082/trimesh | 689e26fbd9232ebba1f56ad34fc22babafb3be03 | c0a421771ec77ffe4e29731ed3ba340e1c800644 | refs/heads/master | "2020-03-25T06:11:30.155310" | "2018-08-03T17:05:23" | "2018-08-03T17:05:23" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __version__ = '2.31.50'
| UTF-8 | Python | false | false | 24 | py | 1 | version.py | 1 | 0.5 | 0.291667 | 0 | 1 | 23 | 23 |
Ludovic86/compta | 3,779,571,223,961 | b14bf27604384f4356b3ac4123b164de7b686720 | e93cd698baaa26c9fb10511c354ce40b5d4e854f | /migrations/versions/63634acbf78c_debits_table.py | df769b88afd2a715e88dbc6ab6cb6090b8cc147e | [] | no_license | https://github.com/Ludovic86/compta | e6776cddfc02aa141c52e93c28c70dd496df0830 | 0856a467e59803dae612b321fd54cea1cec5a153 | refs/heads/master | "2023-03-22T22:40:34.802475" | "2021-03-21T14:47:43" | "2021-03-21T14:47:43" | 347,668,966 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """debits table
Revision ID: 63634acbf78c
Revises:
Create Date: 2021-03-14 16:01:18.145628
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '63634acbf78c'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('debit',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('date', sa.Date(), nullable=True),
    sa.Column('amount', sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_debit_amount'), 'debit', ['amount'], unique=False)
    op.create_index(op.f('ix_debit_date'), 'debit', ['date'], unique=False)
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_debit_date'), table_name='debit')
    op.drop_index(op.f('ix_debit_amount'), table_name='debit')
    op.drop_table('debit')
    # ### end Alembic commands ###
| UTF-8 | Python | false | false | 1,042 | py | 11 | 63634acbf78c_debits_table.py | 7 | 0.653551 | 0.620921 | 0 | 37 | 27.162162 | 79 |
xdxie/WordArt | 5,712,306,512,725 | 5ff2625c43f38e83811a5a6cf6249eec1b27a353 | 40b42ccf2b6959d6fce74509201781be96f04475 | /mmocr/models/textrecog/recognizer/__init__.py | 818df4996df8a4c1555ebda43e8bcce4f2acb697 | [
"Apache-2.0"
] | permissive | https://github.com/xdxie/WordArt | 2f1414d8e4edaa89333353d0b28e5096e1f87263 | 89bf8a218881b250d0ead7a0287526c69586c92a | refs/heads/main | "2023-05-23T02:04:22.185386" | "2023-03-06T11:51:43" | "2023-03-06T11:51:43" | 515,485,694 | 106 | 12 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
from .abinet import ABINet
from .base import BaseRecognizer
from .crnn import CRNNNet
from .encode_decode_recognizer import EncodeDecodeRecognizer
from .master import MASTER
from .nrtr import NRTR
from .robust_scanner import RobustScanner
from .sar import SARNet
from .satrn import SATRN
from .seg_recognizer import SegRecognizer
from .corner_transformer import CornerTransformer
__all__ = [
    'BaseRecognizer', 'EncodeDecodeRecognizer', 'CRNNNet', 'SARNet', 'NRTR',
    'SegRecognizer', 'RobustScanner', 'SATRN', 'ABINet', 'MASTER', 'CornerTransformer'
]
| UTF-8 | Python | false | false | 607 | py | 198 | __init__.py | 149 | 0.777595 | 0.777595 | 0 | 17 | 34.705882 | 86 |
LsoaresA/Projetos-Django | 13,348,758,356,448 | 9d7143da9986a10488a92b5825efe6f100eaf73e | 7155b83b4bffa5fb6efb1a2317692f263e444b6b | /LsoaresA/cadastro/urls.py | 5b8ca533681e0c7fe5f3a120d74afcf0648d9a3e | [] | no_license | https://github.com/LsoaresA/Projetos-Django | bf5c31507c0acc52f6e4a6e2d5b3a61eab51536a | 684dff09b31bdfce575563541f47ec588a29a5ad | refs/heads/master | "2021-09-23T12:05:54.457549" | "2018-09-22T13:22:46" | "2018-09-22T13:22:46" | 108,995,746 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls import url, include
from django.contrib import admin
from cadastro.views import cadastro
urlpatterns = [
    url(r'^cadastro/$', cadastro, name='cadastro'),
    url(r'^admin/', admin.site.urls),
]
| UTF-8 | Python | false | false | 217 | py | 18 | urls.py | 12 | 0.728111 | 0.728111 | 0 | 8 | 26.125 | 48 |
DawnsonLi/Senorita | 11,605,001,676,018 | 168d4364e3aa212bd92af1a280fd76ead26b3a98 | 50ee3f186a424e5801f6cbdeb053f43865c87898 | /common/__init__.py | 95dbc1c1ad6b1d49d57f8c5f9e22ad367000c779 | [] | no_license | https://github.com/DawnsonLi/Senorita | 6303d9c626e3f98dbb811c0fd670a7493ba9637d | e09a0b28e21f51aade02072d1fbe3974278f803b | refs/heads/master | "2020-05-07T16:50:18.559534" | "2019-04-11T06:55:03" | "2019-04-11T06:55:03" | 180,701,275 | 10 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | __all__ = ['TSlist','visio'] | UTF-8 | Python | false | false | 28 | py | 12 | __init__.py | 9 | 0.5 | 0.5 | 0 | 1 | 28 | 28 |
miltonleal/MAC0110_Introduction_Computer_Science_IME_USP | 5,841,155,567,076 | 374cef3109836a48b79d790952a6721d5083e74d | 08e052c0c2ee4ad0cd4980fbc8a692c407118659 | /Ex. do Paca/Aula 14/P_14.6.py | c58608177356f9c8a407f14209866e2358c8c550 | [] | no_license | https://github.com/miltonleal/MAC0110_Introduction_Computer_Science_IME_USP | 6fad182f7fbb3e83403080800074bf57456cb0b5 | b47936ce66e715dba79dff44779a750d795192a0 | refs/heads/master | "2023-03-04T11:06:48.092980" | "2021-02-17T14:47:46" | "2021-02-17T14:47:46" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # P14.6) Function harmonico(epsilon): receives a number epsilon > 0, very small (example: 0.00001), and computes the sum of all terms greater than or equal to epsilon.
# That is, compute 1 + 1/2 + 1/3 + ... + 1/k such that 1/i ≥ epsilon for i = 1, 2, ..., k and 1/(k+1) < epsilon.
def harmonico(epsilon):
    h, k = 0, 1  # initialize the sum
    # Add up every term greater than or equal to epsilon
    while 1 / k >= epsilon:
        h = h + 1 / k
        k = k + 1
    return h


print(harmonico(0.001))
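# harmonico(0.001) adds the first 1000 terms (1/k >= 0.001 holds up to k = 1000),
# so it returns the partial harmonic sum H_1000, roughly 7.485.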
| UTF-8 | Python | false | false | 502 | py | 206 | P_14.6.py | 204 | 0.60161 | 0.541247 | 0 | 15 | 31.933333 | 160 |
sholehanshori/PWDK_Modul_2 | 11,012,296,155,443 | 051f396085d6a3d21770b50386a5f8219ba171f1 | 356eefcb3be20771c459d98df87dc66a9451ddff | /Day 27/27.py | b2ba4c3bed3bb6287317d9f5ee7424c28a884ec6 | [] | no_license | https://github.com/sholehanshori/PWDK_Modul_2 | 4df9bb5c01046c3c63d5609f9677d47d04c5ef30 | c12b8b8efa86d8158f18277ed0d43595adba4cc3 | refs/heads/master | "2020-11-25T07:43:59.174589" | "2019-12-18T05:00:20" | "2019-12-18T05:00:20" | 228,561,329 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # ------------------------ Jumat, 29 November 2019 -----------------------------------
import numpy as np

x = np.arange(1, 10).reshape(3, -1)
# print(x)

# ----- Swapping -----
# print(x[:, [1,0,2]])  # reorder the columns by index group
# print(x[:, [0,0,0]])  # replace every column with the first column of each row
# # (x[ [row , column] ]) --- representation of the part above
# print(x[[1,0,2], :])
# print(x[[0,0,0], :])

# ----- Transpose -----
# a b
# c d = a c e
# e f b d f
a = np.array([[1,2],[3,4],[5,6]])
# print(a)
# print(a.transpose())

# ---------------------------------------------
aa = np.loadtxt('0.csv', skiprows=1, delimiter=',')  # cannot read 'string' values, only 'int' and 'float'
print(aa)
print(type(aa))

ab = np.loadtxt('0.csv', skiprows=1, delimiter=',', unpack=True)  # 'unpack' groups the data by column
print(ab)

id, usia = np.loadtxt('0.csv', skiprows=1, delimiter=',', unpack=True)
print(id)  # splits the columns directly, giving 2 separate arrays
print(usia)

# -- Recombine the 2 separated columns
ac = list(map(lambda a, b: [a, b], id, usia))
print(ac)

np.savetxt('1.csv', usia, fmt='%d', header='usia', comments='')
# '%d' writes plain integer digits
# comments='' because, by default, the header 'usia' would get a '#' prefixed to it
# -- Write .csv files from the data above, in the matching format
np.savetxt('2.csv', aa, fmt='%d', header='id,usia', comments='', delimiter=',')
np.savetxt('3.csv', ac, fmt='%d', header='id,usia', comments='', delimiter=',')
# delimiter=',' puts a ',' separator between the columns | UTF-8 | Python | false | false | 1,628 | py | 60 | 27.py | 30 | 0.578624 | 0.554054 | 0 | 47 | 33.659574 | 117 |
renjieliu/leetcode | 11,476,152,656,146 | 50751a40383da59f291201b98ccb9246202cb209 | 8f48d12b88048e424ebb0d72ca6dfab5cf12ae0f | /1001_1499/1401.py | f711e0a92e454794764c80652551afcfedbcdf41 | [] | no_license | https://github.com/renjieliu/leetcode | e1caf13c18a8107ed9252588b339fb76bcb1b246 | 4668b64fcb9320b6c316d8608fc61911ce43b6c7 | refs/heads/master | "2023-03-18T18:16:06.187741" | "2023-03-14T20:31:59" | "2023-03-14T20:31:59" | 128,823,819 | 7 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
    def checkOverlap(self, radius: int, x_center: int, y_center: int, x1: int, y1: int, x2: int, y2: int) -> bool:
        def distance(A, B):
            return ((A[0] - B[0]) ** 2 + (A[1] - B[1]) ** 2) ** 0.5

        circle_up = [x_center, y_center + radius]
        circle_down = [x_center, y_center - radius]
        circle_left = [x_center - radius, y_center]
        circle_right = [x_center + radius, y_center]
        circle_center = [x_center, y_center]
        square_upperLeft = [x1, y2]
        square_bottomLeft = [x1, y1]
        square_upperRight = [x2, y2]
        square_bottomRight = [x2, y1]
        # print(circle_up, x1, x2, y1, y2)
        # print(circle_down, x1, x2, y1, y2)
        # print(circle_left, x1, x2, y1, y2)
        # print(circle_right, x1, x2, y1, y2)
        if (distance(square_upperLeft, circle_center) ** 2 <= radius ** 2  # square corner within circle
                or distance(square_bottomLeft, circle_center) ** 2 <= radius ** 2
                or distance(square_upperRight, circle_center) ** 2 <= radius ** 2
                or distance(square_bottomRight, circle_center) ** 2 <= radius ** 2
                or (y1 <= circle_down[1] <= y2 and x1 <= circle_down[0] <= x2)  # circle has part in the square
                or (y1 <= circle_up[1] <= y2 and x1 <= circle_up[0] <= x2)
                or (y1 <= circle_right[1] <= y2 and x1 <= circle_right[0] <= x2)
                or (y1 <= circle_left[1] <= y2 and x1 <= circle_left[0] <= x2)
                # square across the circle: both edges are within the circle, but the 4 corners are not
                or (circle_left[0] <= x1 <= circle_right[0] and circle_left[0] <= x2 <= circle_right[0] and y2 >=
                    circle_up[1] and y1 <= circle_down[1])
                or (circle_down[1] <= y1 <= circle_up[1] and circle_down[1] <= y2 <= circle_up[1] and x2 >=
                    circle_right[0] and x1 <= circle_left[0])
                ):
            return True
        return False | UTF-8 | Python | false | false | 1,999 | py | 1,627 | 1401.py | 1,620 | 0.526763 | 0.482241 | 0 | 35 | 56.142857 | 114 |
ncareol/ncharts | 1,374,389,547,787 | d8d36ef21f54c330ed953f1b8d6fa67651d3b7ad | d91c5b489a8d6b6a27455e743050b8b5cca42a3f | /datavis/settings/production.py | 2515c380165ace85d92da332af8a565bc1922aa0 | [
"BSD-2-Clause"
] | permissive | https://github.com/ncareol/ncharts | 3cdf1f8ea6c509810f9ac51ac1a9d8e37de006e8 | 07c97b6ae234ff74f89d0c6d1902764e2773a268 | refs/heads/develop | "2023-08-03T04:39:20.786171" | "2023-06-12T22:38:46" | "2023-06-12T22:38:46" | 56,725,944 | 1 | 0 | null | false | "2016-10-19T21:39:05" | "2016-04-20T22:48:54" | "2016-10-04T18:12:32" | "2016-10-19T21:38:34" | 15,042 | 0 | 0 | 15 | C++ | null | null | #
# datavis.settings.production
# Django production settings
from .default import *
DEBUG = False
os.environ.setdefault("VAR_DIR", "/var")
VAR_DIR = os.environ.get('VAR_DIR')
DEFAULT_LOG_DIR = LOG_DIR
LOG_DIR = os.path.join(VAR_DIR, 'log/django')
PROD_LOG_LEVEL = 'WARNING'
VAR_RUN_DIR = os.path.join(VAR_DIR, 'run/django')
VAR_LIB_DIR = os.path.join(VAR_DIR, 'lib/django')
# Update path to database if sqlite is used
if 'sqlite' in DATABASES['default']['ENGINE']:
    DATABASES['default']['NAME'] = os.path.join(VAR_LIB_DIR, 'db.sqlite3')

SECRET_KEY = os.environ.get('EOL_DATAVIS_SECRET_KEY')
if SECRET_KEY is None:
    raise ValueError('EOL_DATAVIS_SECRET_KEY environment variable must be set when running with datavis.settings.production')
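# e.g. in the service environment (assumed deployment step):
#   export EOL_DATAVIS_SECRET_KEY='<long random string>'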
#
# django may generate log messages such as:
# Invalid HTTP_HOST header: 'www.baidu.com'.
# You may need to add 'www.baidu.com' to ALLOWED_HOSTS.
#
# However, don't follow that advice to add those external host
# names to ALLOWED_HOSTS!
# Hacked sites may have a link to this site, but as I understand it,
# the redirect may contain an HTTP packet with an altered HTTP_HOST
# and SERVER_NAME, hoping that a dumb server, thinking HTTP_HOST
# is itself, will use it in its own redirects and <script> statemtents.
# The latter could result in an import of hacker code on a
# client's browser. Setting ALLOWED_HOSTS to the various names for datavis will
# result in packets being ignored if they contain other than the following:
#
ALLOWED_HOSTS = ['datavis', 'datavis.eol.ucar.edu', 'datavis-dev.eol.ucar.edu', 'localhost', '128.117.82.210']
# People who should receive emails of ERRORs
ADMINS = (
    ('Gordon Maclean', 'gordondmaclean@gmail.com'),
    ('Gary Granger', 'granger@ucar.edu'),
)

CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': 'unix:' + os.path.join(VAR_RUN_DIR, 'django_memcached.sock'),
        # 'LOCATION': '127.0.0.1:11211',
        'TIMEOUT': 300,  # 300 seconds is the default
    }
}
CACHE_MIDDLEWARE_ALIAS = 'default'
CACHE_MIDDLEWARE_SECONDS = 300
CACHE_MIDDLEWARE_KEY_PREFIX = ''
if LOG_DIR != DEFAULT_LOG_DIR:
    #
    # iterate over LOGGING['handlers'] and update filenames w/ new LOG_DIR
    # and set level to PROD_LOG_LEVEL, except for mail_admins handler
    #
    for key, value in LOGGING['handlers'].items():
        if 'filename' in value:
            value['filename'] = value['filename'].replace(DEFAULT_LOG_DIR, LOG_DIR)
        if key != 'mail_admins' and key != 'requests' and 'level' in value:
            value['level'] = PROD_LOG_LEVEL

    for key, value in LOGGING['loggers'].items():
        if key != 'ncharts.views.requests' and 'level' in value:
            value['level'] = PROD_LOG_LEVEL
| UTF-8 | Python | false | false | 2,777 | py | 53 | production.py | 22 | 0.684192 | 0.672668 | 0 | 80 | 33.7125 | 125 |
libero/search | 15,771,119,931,223 | dbbd685dede20783c7f2f56feeb8a84cbda02e9d | e4e9a68742294d7cb5fc07ee6b5bb642f6c3fbd7 | /search/settings.py | 47534e2d8ba8b9a90498898d8d9da4bfdaa5b51d | [
"MIT"
] | permissive | https://github.com/libero/search | b411e869c0856fc61c1dd76c3c47ad057d258fbe | f13c7fe2aa5f3cd1e2f62234995788bed7147b91 | refs/heads/master | "2020-04-19T11:17:58.625630" | "2019-07-09T13:22:39" | "2019-07-09T13:22:39" | 168,163,286 | 0 | 3 | MIT | false | "2019-11-21T10:06:11" | "2019-01-29T13:55:30" | "2019-07-09T13:22:42" | "2019-11-21T05:25:03" | 68 | 0 | 3 | 0 | Python | false | false | """
Search application settings.
"""
from typing import List
GATEWAY_URL: str = ''
ELASTICSEARCH_HOSTS: List[str] = []
CONTENT_SERVICES_TO_INDEX: List[str] = []
| UTF-8 | Python | false | false | 164 | py | 40 | settings.py | 24 | 0.682927 | 0.682927 | 0 | 10 | 15.4 | 41 |
maxdrohde/snomed_map | 5,257,039,984,350 | 5ff16e9c16775042d21f7670a474e1acab72d6d9 | d908ab0266fe111c7fa8428fde454d09b873237d | /searcher.py | 65bc81b0a15cd24b2665f7cad70b067c7460dfd3 | [
"MIT"
] | permissive | https://github.com/maxdrohde/snomed_map | dbaa8aed6f61905446c55cb71564cd176b50d5ff | 2ecf24d02ea5e920b503549f6ee872ab748a6e28 | refs/heads/master | "2020-03-29T21:05:04.902966" | "2018-10-24T14:43:54" | "2018-10-24T14:43:54" | 150,348,724 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from helper_tools import sanatize, remove_stopwords, make_ngrams, make_skipgrams, flatten
from search import search, term_to_codes
import pandas as pd
def get_codes(matches):
    # helper function to match SNOMED synonyms to their code and FSN
    lis = []
    for item in matches:
        term = item[0]
        score = item[1]
        gram = item[2]
        codes = term_to_codes(term)
        for code in codes:
            lis.append([code, score, gram])
    return [list(flatten(x)) for x in lis]


class Searcher:
    def __init__(self, text):
        self.text = sanatize(text)
        self.original_text = text
        self.ngrams = make_ngrams(self.text)
        self.skipgrams = make_skipgrams(self.text)
        self.all_grams = list(set(self.ngrams + self.skipgrams))  # Ngrams + Skipgrams

    def __str__(self):
        return f"Original text is: {self.original_text} \nSanatized text is: {self.text}"

    def __repr__(self):
        return f"Original text is: {self.original_text} \nSanatized text is: {self.text}"

    def get_best_match(self, alpha=0.8, all_results=False):
        # Matches the full text to SNOMED CT as a single string
        match = search(self.text, alpha=alpha, all_results=all_results)
        return match

    def match_ngrams(self, alpha=0.8):
        matches = [[search(gram, alpha=alpha), gram] for gram in self.all_grams]
        matches = [x for x in matches if x[0]]  # remove empty lists
        matches = [list(flatten(x)) for x in matches]
        matches = sorted(matches, key=lambda x: x[1], reverse=True)  # sort by score
        matches = get_codes(matches)  # add the FSN and code columns
        df = pd.DataFrame(matches, columns=['Matched Term', 'Fully Specified Name', 'Code', 'Score', 'Ngram'])
        df = df[['Ngram', 'Matched Term', 'Fully Specified Name', 'Code', 'Score']]  # reorder
        # drop duplicates (only highest scores kept since list is sorted by score)
        df = df.drop_duplicates(subset='Matched Term')
        df = df.drop_duplicates(subset='Fully Specified Name')
        return df
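
# Minimal usage sketch (hypothetical input text; search, term_to_codes and the
# helper_tools functions come from this repo's own modules):
#   s = Searcher("chronic kidney disease stage 3")
#   print(s.get_best_match())
#   print(s.match_ngrams(alpha=0.8).head())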
| UTF-8 | Python | false | false | 2,074 | py | 12 | searcher.py | 8 | 0.633558 | 0.629219 | 0 | 49 | 41.326531 | 110 |
Richard549/Parallel-Flame-Graph | 10,703,058,538,353 | 77727a73934deaad797ecaa3e89e8996b2daa25c | 444c823a911b70609539e489c164d7084718a325 | /src/PfgPlotting.py | 2245f00f78b2d849f161dbcdae7bf39367e64e83 | [
"MIT"
] | permissive | https://github.com/Richard549/Parallel-Flame-Graph | db06f1f1ea325af8f34dde8770f9f9aab7e65b34 | 03c75b2dc55cb673c46b20fc6e84a5a4292ce8ea | refs/heads/master | "2023-02-04T21:42:10.044056" | "2020-10-19T11:25:27" | "2020-10-19T11:25:27" | 291,295,777 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.colors as mcolors
from matplotlib import cm
from matplotlib.transforms import Bbox
import random
import logging
from enum import Enum, auto
from PfgUtil import sizeof_fmt
from PfgTree import ColourMode
class HeightDisplayOption(Enum):
    CONSTANT = auto()
    CPU_TIME = auto()
    PARALLELISM_INEFFICIENCY = auto()
""""
This class handles defines the callback to display hover-over text about a tree node
"""
class PFGHoverText:
    def __init__(self, ax):
        self.hover_text = ax.text(1, 1, "", bbox=dict(facecolor='white', alpha=0.7), fontsize=6, zorder=100)
        self.cidmotion = ax.figure.canvas.mpl_connect('motion_notify_event', self.on_plot_hover)

    def on_plot_hover(self, event):
        if event.inaxes is not None:
            ax = event.inaxes
            text = ""
            for obj in ax.findobj():
                # Searching which data member corresponds to current mouse position
                if obj.contains(event)[0]:
                    if None not in (event.xdata, event.ydata):
                        if obj.get_gid() is not None:
                            text += obj.get_gid() + "\n"

            if ax.highlighted_rectangle is not None:
                ax.highlighted_rectangle.remove()
                ax.highlighted_rectangle = None

            if text == "":
                self.hover_text.set_text("")
                ax.figure.canvas.draw()
            else:
                self.hover_text.set_text(text)
                self.hover_text.set_position((event.xdata, event.ydata))

                # also highlight the rectangle of the parent!
                # dodgy parsing for now
                parent_identifier = text.split("Parent=[")[1].split("\n")[0].split(":")[-1].split("]")[0]
                if parent_identifier in ax.rectangles:
                    rectangle_coords = ax.rectangles[parent_identifier]
                    # draw a highlighted rectangle
                    rect = patches.Rectangle(
                        (rectangle_coords[0], rectangle_coords[2]),
                        rectangle_coords[1],
                        rectangle_coords[3],
                        linewidth=4,
                        edgecolor=(0.0, 0.0, 0.0, 1.0),
                        facecolor=(0.0, 0.0, 0.0, 0.0),
                        gid=None,
                        zorder=100,
                        clip_on=False)
                    ax.add_patch(rect)
                    ax.highlighted_rectangle = rect

                ax.figure.canvas.draw()
"""
This class implements the text on each tree node
The text position and size changes when the user refines the axis limits or resizes the window
"""
class PFGBarText:
def __init__(self, ax, x1, x2, y1, y2, text):
self.ax = ax
# Calculate the per character width (in data-coords)
test_box = ax.text(5, 5, 'a', fontsize='small')
bbox = test_box.get_window_extent(renderer=ax.figure.canvas.get_renderer())
bbox = Bbox(ax.transData.inverted().transform(bbox))
self.per_char_data_width = bbox.width
self.per_char_data_height = bbox.height
test_box.remove()
self.bar_x1 = x1
self.bar_x2 = x2
self.bar_y1 = y1
self.bar_y2 = y2
self.text = text
xlims = ax.get_xlim()
ylims = ax.get_ylim()
# determine the initial position of the text
x, y, processed_text = self.compute_text_position_info(ax, xlims, ylims, text)
text_box = ax.text(x, y, processed_text, zorder=20, fontsize='small')
self.cid = ax.callbacks.connect('resize_event', self)
self.cid2 = ax.callbacks.connect('xlim_changed', self)
self.cid3 = ax.callbacks.connect('ylim_changed', self)
self.text_box = text_box
def get_per_char_data_width(self):
return self.per_char_data_width
def compute_text_position_info(self, ax, xlims, ylims, text):
text_position_x = 0
text_position_y = 0
processed_text = ""
# check if the bar actually displayed, or if only part of the bar is displayed
visible_bar_x1 = None
visible_bar_x2 = None
visible_bar_y1 = None
visible_bar_y2 = None
# check x axis:
if self.bar_x1 >= xlims[0] and self.bar_x1 <= xlims[1]:
# the x start is inside
visible_bar_x1 = self.bar_x1
if self.bar_x2 <= xlims[1] and self.bar_x2 >= xlims[0]:
# the x end is inside
visible_bar_x2 = self.bar_x2
if self.bar_x1 >= xlims[0] and self.bar_x2 <= xlims[1]:
visible_bar_x1 = self.bar_x1
visible_bar_x2 = self.bar_x2
if self.bar_x1 <= xlims[0] and self.bar_x2 >= xlims[1]:
visible_bar_x1 = xlims[0]
visible_bar_x2 = xlims[1]
if visible_bar_x1 is not None and visible_bar_x2 is None:
# This means it is truncated on the right hand side
visible_bar_x2 = xlims[1]
if visible_bar_x2 is not None and visible_bar_x1 is None:
# This means it is truncated on the left hand side
visible_bar_x1 = xlims[0]
# check y axis:
if self.bar_y1 >= ylims[0] and self.bar_y1 <= ylims[1]:
# the y start is inside
visible_bar_y1 = self.bar_y1
if self.bar_y2 <= ylims[1] and self.bar_y2 >= ylims[0]:
# the y end is inside
visible_bar_y2 = self.bar_y2
if self.bar_y1 >= ylims[0] and self.bar_y2 <= ylims[1]:
visible_bar_y1 = self.bar_y1
visible_bar_y2 = self.bar_y2
if self.bar_y1 <= ylims[0] and self.bar_y2 >= ylims[1]:
visible_bar_y1 = ylims[0]
visible_bar_y2 = ylims[1]
if visible_bar_y1 is not None and visible_bar_y2 is None:
# This means it is truncated on the top
visible_bar_y2 = ylims[1]
if visible_bar_y2 is not None and visible_bar_y1 is None:
# This means it is truncated on the bottom
visible_bar_y1 = ylims[0]
if visible_bar_x1 is None or visible_bar_y1 is None:
# then the bar is not visible
return text_position_y, text_position_y, processed_text
# How many characters can the visible bar handle?
width = float(visible_bar_x2) - visible_bar_x1
height = float(visible_bar_y2) - visible_bar_y1
num_characters_available = (width / self.per_char_data_width)
padding_characters = 2
num_characters_support = num_characters_available - padding_characters
#logging.debug("Num characters support: %s", num_characters_support)
#logging.debug("Height: %s", height)
#logging.debug("Per char data width: %s", self.per_char_data_width)
# Can we support the number of characters required?
# Setting a minimum of 2 actual symbol characters
if num_characters_support < 2.0 or height < self.per_char_data_height:
return text_position_y, text_position_y, processed_text
# Check if we need to truncate the text
if num_characters_support < len(text):
processed_text = text[:int(num_characters_support-2)] + ".."
else:
processed_text = text[:int(num_characters_support)]
text_position_y = visible_bar_y1 + (height/2.0) - (0.5*self.per_char_data_height)
free_character_spaces = float(num_characters_available) - len(processed_text)
text_position_x = visible_bar_x1 + (((free_character_spaces+1)/2.0))*self.per_char_data_width
return text_position_x, text_position_y, processed_text
def __call__(self, event):
# function is called on each scale change or window resized
# recalculate the width of each character in data coordinates
test_box = self.ax.text(5, 5, 'a', fontsize='small')
bbox = test_box.get_window_extent(renderer=self.ax.figure.canvas.get_renderer())
bbox = Bbox(self.ax.transData.inverted().transform(bbox))
self.per_char_data_width = bbox.width
self.per_char_data_height = bbox.height
test_box.remove()
xlim = self.ax.get_xlim()
ylim = self.ax.get_ylim()
x, y, processed_text = self.compute_text_position_info(self.ax, xlim, ylim, self.text)
if processed_text == "":
self.text_box.set_text(processed_text)
return
self.text_box.set_position((x, y))
self.text_box.set_text(processed_text)
self.text_box.figure.canvas.draw()
"""
If width is wallclock, height may be relative to CPU-time, or parallel inefficiency, etc
"""
def calculate_node_height(
node,
width_to_interval_ratio,
height_option,
reference_duration,
reference_height,
reference_height_value,
maximum_parallelism=None
):
heights = []
reference_width = width_to_interval_ratio * reference_duration
reference_area = reference_width * reference_height
for part in node.node_partitions:
if height_option == HeightDisplayOption.CONSTANT:
heights.append(reference_height)
elif height_option == HeightDisplayOption.CPU_TIME:
part_width = width_to_interval_ratio * part.wallclock_duration
part_height = ((part.cpu_time / reference_height_value) * reference_area) / part_width
heights.append(part_height)
elif height_option == HeightDisplayOption.PARALLELISM_INEFFICIENCY:
total_cpu_cycles_lost = 0
for parallelism, interval in part.parallelism_intervals.items():
optimal_cpu_cycles = ((parallelism)/maximum_parallelism) * interval
total_cpu_cycles_lost += (interval - optimal_cpu_cycles)
logging.debug("Node [%s] total_cpu_cycles_lost %s from parallelism intervals %s",
",".join([part.name for part in node.node_partitions]),
sizeof_fmt(total_cpu_cycles_lost),
",".join([str(list(str(part.parallelism_intervals.values()))) for part in node.node_partitions])
)
part_width = width_to_interval_ratio * part.wallclock_duration
part_height = ((total_cpu_cycles_lost / reference_height_value) * reference_area) / part_width
if part_height == 0.0:
part_height = 0.0001 * reference_height # just so that we can zoom in if we want to see what the thing is
heights.append(part_height)
else:
logging.error("Height display option %s not supported.", height_option)
raise NotImplementedError()
return heights

def plot_pfg_node(
        ax,
        node,
        x0,
        y0,
        height_option,
        reference_duration,
        reference_height_value,
        reference_height,
        width_to_interval_ratio,
        parent_name,
        colours,
        colour_values,
        cpus,
        node_colour_mapping,
        colour_mode
        ):

    total_wallclock_duration = max(node.wallclock_durations)
    total_node_width = width_to_interval_ratio * total_wallclock_duration

    heights = calculate_node_height(node, width_to_interval_ratio, height_option, reference_duration, reference_height, reference_height_value, len(cpus))
    total_node_height = max(heights)

    edgecolour = (0.0, 0.0, 0.0, 1.0)

    # Key to use to determine the colour of the rectangle
    colour_identifier = "None"
    if colour_mode == ColourMode.BY_PARENT:
        if node.original_parent_node is not None:
            colour_identifier = str(hex(id(node.original_parent_node)))
    elif colour_mode == ColourMode.BY_CPU:
        colour_identifier = ",".join([str(cpu) for cpu in node.cpus])
    else:
        logging.error("Colour mode not supported.")
        raise NotImplementedError()

    # To enable hover-over to find the correct parent, record the unique ids for the rectangles
    node_identifier = str(hex(id(node)))
    if node.original_parent_node is None:
        parent_identifier = "None"
    else:
        parent_identifier = str(hex(id(node.original_parent_node)))

    info_text = "Total duration: " + sizeof_fmt(total_wallclock_duration) + "\n"
    info_text += "".join([part.name + ": " + sizeof_fmt(part.wallclock_duration) + "\n" for part in node.node_partitions]) + "\n"
    info_text += "Parent=[" + str(parent_name) + ":" + str(parent_identifier) + "]\n"

    wallclock_durations_by_cpu = node.get_per_cpu_wallclock_durations()
    for cpu, duration in wallclock_durations_by_cpu.items():
        info_text += str(cpu) + ": " + str(sizeof_fmt(duration)) + "\n"

    # Invisible rectangle just for the hover-over text
    rect = patches.Rectangle(
        (x0, y0),
        total_node_width,
        total_node_height,
        linewidth=0,
        edgecolor=(0.0, 0.0, 0.0, 0.0),
        facecolor=(0.0, 0.0, 0.0, 0.0),
        gid=info_text,
        zorder=0)
    ax.add_patch(rect)

    if ax.rectangles is None:
        ax.rectangles = {node_identifier: [x0, total_node_width, y0, total_node_height]}
    else:
        ax.rectangles[node_identifier] = [x0, total_node_width, y0, total_node_height]

    for part_idx, part in enumerate(node.node_partitions):
        part_width = width_to_interval_ratio * part.wallclock_duration
        part_height = heights[part_idx]

        #facecolour = (1.0,1.0,1.0,1.0)
        facecolour = colours(colour_values[node_colour_mapping[colour_identifier]])

        logging.trace("Plotting %s on cpus %s with width %f at x=%f,y=%f", part.name, node.cpus, part_width, x0, y0)

        rect = patches.Rectangle(
            (x0, y0),
            part_width,
            part_height,
            linewidth=1,
            edgecolor=edgecolour,
            facecolor=facecolour,
            gid=None,
            zorder=10)
        ax.add_patch(rect)

        text = PFGBarText(ax, x0, x0+part_width, y0, y0+part_height, part.name)

        x0 += part_width

    return total_node_width, total_node_height

def plot_pfg_tree(tree,
        min_timestamp,
        max_timestamp,
        cpus,
        height_option,
        output_file=None
        ):

    if len(tree.root_nodes) == 0:
        logging.warn("There are no root nodes in the tree.")
        return

    colours = cm.get_cmap("Reds")

    # There should be a colour for each 'original parent'
    node_colour_mapping = tree.assign_colour_indexes_to_nodes(tree.root_nodes)
    maximum_colour = 0.65
    #minimum_colour = 0.05;
    minimum_colour = 0.00
    colour_step = (maximum_colour-minimum_colour)/len(node_colour_mapping)
    colour_values = [(i+1)*colour_step + minimum_colour for i in range(len(node_colour_mapping))]
    random.shuffle(colour_values)

    fig = plt.figure()
    fig.set_size_inches(14, 8)
    ax = fig.add_subplot(111)
    ax.rectangles = {}
    ax.highlighted_rectangle = None

    top_level_width = 100.0
    maximum_x = 0
    maximum_y = 0

    # Calculate the wallclock interval to width ratio
    total_top_level_wallclock_duration = 0.0
    for node in tree.root_nodes:
        max_wallclock_duration = max(node.wallclock_durations)
        total_top_level_wallclock_duration += max_wallclock_duration

    width_to_interval_ratio = top_level_width / total_top_level_wallclock_duration

    # The reference duration (i.e. width), actual height, and height-value, allow other nodes to calculate their actual heights using their height values
    reference_height = 10.0
    reference_height_value = tree.root_nodes[0].node_partitions[0].cpu_time
    reference_duration = tree.root_nodes[0].wallclock_durations[0]
    if height_option == HeightDisplayOption.PARALLELISM_INEFFICIENCY:
        total_cpu_cycles_lost = 0
        for parallelism, interval in tree.root_nodes[0].node_partitions[0].parallelism_intervals.items():
            optimal_cpu_cycles = ((parallelism)/len(cpus)) * interval
            total_cpu_cycles_lost += (interval - optimal_cpu_cycles)
        reference_height_value = total_cpu_cycles_lost

    # The root nodes are considered siblings
    sibling_node_sets = [tree.root_nodes]

    # Processes each set of siblings, using the alignments given by their parent
    while len(sibling_node_sets) > 0:
        next_sibling_node_sets = []
        for sibling_node_set in sibling_node_sets:
            accumulated_sibling_width = 0.0
            for sibling_idx, node in enumerate(sibling_node_set):
                # What is my x position?
                base_x_position = 0.0
                if node.ancestor_alignment_node is not None:
                    base_x_position = node.ancestor_alignment_node.start_x
                x_position = base_x_position + accumulated_sibling_width

                # What is my y position?
                y_position = 0.0
                parent_name = "None"
                if node.parent_node is not None:
                    y_position = node.parent_node.start_y + node.parent_node.height
                if node.original_parent_node is not None:
                    parent_name = " and ".join(["("+part.name+")" for part in node.original_parent_node.node_partitions])

                # Plot the node
                width, height = plot_pfg_node(ax,
                    node,
                    x_position,
                    y_position,
                    height_option,
                    reference_duration,
                    reference_height_value,
                    reference_height,
                    width_to_interval_ratio,
                    parent_name,
                    colours,
                    colour_values,
                    cpus,
                    node_colour_mapping,
                    tree.colour_mode
                    )

                # write the positions of this node for my children/siblings
                node.start_x = x_position
                node.start_y = y_position
                node.width = width
                node.height = height

                accumulated_sibling_width += width

                if x_position + width > maximum_x:
                    maximum_x = x_position + width
                if y_position + height > maximum_y:
                    maximum_y = y_position + height

                # get child nodes for the next plotting pass
                next_sibling_node_sets.append(node.child_nodes)

        # finished plotting the current sets of siblings
        # set the next ones to plot
        sibling_node_sets = next_sibling_node_sets

    # Now display
    ax.set_facecolor((0.9, 0.9, 0.9))
    ax.set_xlim([0, maximum_x])
    ax.set_ylim([0, maximum_y*1.25])

    # Create the hover-over
    hover_text = PFGHoverText(ax)

    wallclock_duration = sizeof_fmt(max_timestamp - min_timestamp)

    ax.set_title("OpenMP Parallel FlameGraph")
    ax.set_yticks([])
    ax.set_xticks([0, top_level_width])
    ax.set_xticklabels((str(sizeof_fmt(0)), str(sizeof_fmt(max_timestamp - min_timestamp))))

    if output_file is None:
        logging.info("Displaying interactive plot.")
        plt.show()
    else:
        logging.info("Saving plot to %s.", output_file)
        fig.savefig(output_file, format="png", dpi=400, bbox_inches="tight")
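
# Minimal driver sketch (hypothetical: a populated PFG tree and trace bounds
# come from the rest of this repo, e.g. PfgTree):
#   plot_pfg_tree(tree, min_timestamp, max_timestamp, cpus=[0, 1, 2, 3],
#                 height_option=HeightDisplayOption.CPU_TIME,
#                 output_file="pfg.png")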
| UTF-8 | Python | false | false | 16,530 | py | 7 | PfgPlotting.py | 5 | 0.69395 | 0.677314 | 0 | 536 | 29.828358 | 151 |
ArchiDg/OSISoft-Summer-2018 | 2,482,491,141,151 | 06c54eb811a0f2aca37cf9e13950d7e4989441c9 | 15fa6a6e4c5fdfe0d8e64f61350321c51bf1f7cb | /mqtttopi/Receiver/mqtt_receiver.py | ff388ccb265db9bf64b97d124595a1483435678a | [] | no_license | https://github.com/ArchiDg/OSISoft-Summer-2018 | 41e204aa8575ab5312a753f51a44723b701b97f0 | 5dd4f5e2e9a931f4cda46da80c60b97e19db264b | refs/heads/master | "2020-03-19T12:19:29.969860" | "2018-06-07T20:43:58" | "2018-06-07T20:43:58" | 136,510,832 | 1 | 0 | null | false | "2018-06-07T20:43:59" | "2018-06-07T17:35:37" | "2018-06-07T20:30:43" | "2018-06-07T20:43:59" | 0 | 0 | 0 | 0 | Python | false | null | """
Creates an MQTT client and connects it to the MQTT server.
Triggers the on_connect callback after connecting to the MQTT server.
Triggers the on_message callback upon receiving a message from the MQTT server.
"""
import paho.mqtt.client as mqtt
import datetime
import json
import logging
### MAKE CSV FILE
### HOW TO ADD CONFIG FILE
from mqtttopi.Sender.omf_sender import SendOmfMessageToEndpoint \
    as send_omf_message_to_endpoint
from mqtttopi.mqtt_to_omf.asset_data_message import CreateAssetMessage \
    as create_asset
from mqtttopi.mqtt_to_omf.asset_link_message import CreateAssetLinkMessage \
    as asset_link
from mqtttopi.mqtt_to_omf.container_link_message import CreateContainerLinkMessage \
    as container_link
from mqtttopi.mqtt_to_omf.container_message import CreateContainerMessage \
    as container_message
from mqtttopi.mqtt_to_omf.create_data_value_for_container import CreateDataValueForContainer \
    as create_data_values

class MqttClient(object):
    # Constructor
    def __init__(self, client_name, ip_address, port, topic):
        """
        Creates mqtt client\n
        :param client_name: unique name\n
        :param ip_address: string\n
        :param port: int\n
        :param topic: follows MQTT topic format\n
        """
        self.connected = False
        self.topic = topic
        self.client = mqtt.Client(client_name, clean_session=False)
        self.client.on_connect = self.on_connect
        self.establishConnection(ip_address, port)
        self.client.on_message = self.on_message

    # on_connect callback
    def on_connect(self, client, userdata, flags, rc):
        """
        Function: on_connect
        :param client: MQTT client instance
        :param userdata: private user data
        :param flags: response flags sent by MQTT server
        :param rc: result code
        """
        if rc == 0:
            self.connected = True
        else:
            print("Connection failed")
            self.client.loop_stop()
        print("on_connect:Connected with flags:{0},result code:{1}".format(flags,
                                                                           rc))

    # magic method!
    def __enter__(self):
        """
        Start the mqtt client loop and return self, allows implementing objects
        which can be used easily with the 'with' statement.
        :return: self
        """
        self.client.loop_start()
        return self

    # magic method!
    def __exit__(self, type, value, tb):
        """
        Automatically closes the connection once the corresponding 'with'
        statement goes out of scope
        :param type: type
        :param value: value
        :param tb: tb
        """
        self.client.loop_stop()
        self.client.disconnect()

    # Connection helper
    def establishConnection(self, address, port):
        try:
            self.client.connect(address, port)
        except Exception:
            # connect() raises socket-level errors, so catch broadly and
            # re-raise as this module's own exception type
            raise _MqttException("Establishing Connection Failed")

    # on_message callback state (global statements in the class body create
    # module-level sets, shared across instances)
    global topics_set
    topics_set = set()
    global asset_set
    asset_set = set()
    def on_message(self, client, userdata, message):
        """
        This is the on_message callback
        :param client: mqtt client
        :param userdata: user data
        :param message: mqtt message
        """
        global messageTopic
        global messagePayload
        global dataValue
        global timestamp
        global topicElement
        global on_message_time  # latency analysis
        global data_value_send_time  # latency analysis
        try:
            # test for unicode decode error
            decodeTest = str(message.payload.decode("utf-8"))
            on_message_time = datetime.datetime.now()  # latency analysis
            messageTopic = message.topic
            topicElementsList = messageTopic.split("/")
            messagePayload = json.loads(message.payload)
            # returns json object from string
            dataValue = messagePayload['Value']
            timestamp = (messagePayload['Timestamp'])  # gives the string
            #timestamp = str(datetime.datetime.now())
            status = (messagePayload['Status'])
            description = (messagePayload['Description'])
            logging.info("ON_MESSAGE, messageTopic: {0},payload: {1},"
                         " at time: {2}\n".format(messageTopic,
                                                  json.dumps(messagePayload, indent=4),
                                                  str(datetime.datetime.now())))
            print("ON_MESSAGE, messageTopic: {0}, payload: {1},"
                  " at time: {2}\n".format(messageTopic,
                                           json.dumps(messagePayload, indent=4),
                                           str(datetime.datetime.now())))
            # json.dumps takes an object and produces a string
            # Checks if the message topic is already in the set. In case it is there,
            # directly sends data value to the container. If it is not there, creates
            # container message, asset, link between assets, link between container
            # and assets, then sends data value to the container
            if messageTopic not in topics_set:
                topics_set.add(messageTopic)
                # Create and send container message
                send_omf_message_to_endpoint("container",
                                             container_message.create_container_message(messageTopic))
                print("CONTAINER message created and sent to end point,at time: {0}".
                      format(str(datetime.datetime.now())))

                ### Creates drop down AF hierarchy ###
                # Create and send Data Messages (Static Data & Link Data)
                for i in range(len(topicElementsList)):
                    if (i == 0):
                        topicElement = topicElementsList[i]  # asset name
                        assetIndex = topicElementsList[i]
                        # Ensure unique index for each asset
                        sourceIndex = "_ROOT"  # needed for link message
                        targetIndex = assetIndex  # needed for link message
                    else:
                        topicElement = topicElementsList[i]
                        prevAssetIndex = assetIndex
                        assetIndex = prevAssetIndex + "/" + topicElementsList[i]
                        sourceIndex = prevAssetIndex  # needed for link message
                        targetIndex = assetIndex  # needed for link message
                    # Make sure each asset is created only once
                    if assetIndex not in asset_set:
                        asset_set.add(assetIndex)
                        # Create and Send assets
                        send_omf_message_to_endpoint("data", create_asset.create_asset_data_message
                                                     (assetIndex, topicElement))
                        # logging.info("DATA message(asset): sent to define assets,
                        # at time: {0}".format(str(datetime.datetime.now())))
                        # Create and send link between assets
                        send_omf_message_to_endpoint("data", asset_link.create_asset_link_message
                                                     (sourceIndex, targetIndex))
                        # logging.info("DATA message(Link assets): to define link between assets,
                        # at time: {0}".format(str(datetime.datetime.now())))
                    else:
                        print("asset and link message already in end point ")

                # Create and send link between container and assets
                # uses last asset element as source index
                send_omf_message_to_endpoint("data", container_link.create_container_link_message
                                             (messageTopic, messageTopic))
                # logging.info("DATA message (Link container to asset): to link
                # container to assets,at time: {0}".format(str(datetime.datetime.now())))
                # Send data to containers
                send_omf_message_to_endpoint("data", create_data_values.create_data_values_for_second_dynamic_type
                                             (messageTopic, dataValue, timestamp, status, description))
                data_value_send_time = datetime.datetime.now()  # latency analysis
                logging.info("Time interval: {}".format(data_value_send_time - on_message_time))
                logging.info("DATA message (DATA VALUE): Values sent to containers")
            else:
                logging.info("Message Topic already in endpoint as CONTAINER:"
                             "{0}".format(messageTopic))
                send_omf_message_to_endpoint("data", create_data_values.create_data_values_for_second_dynamic_type
                                             (messageTopic, dataValue, timestamp, status, description))
                data_value_send_time = datetime.datetime.now()  # latency analysis
                logging.info("DATA message (DATA VALUE): Values sent to end point to containers\n\n")
                logging.info("Time interval: {}".format(data_value_send_time - on_message_time))
        except:
            client.disconnect()
            client.loop_stop()
            print("Exception happened!")

# private class mqtt exception
class _MqttException(Exception):
    def __init__(self, message):
        if message is not None:
            self.message = message
        else:
            self.message = ""

    def __str__(self):
        return self.message | UTF-8 | Python | false | false | 9,536 | py | 12 | mqtt_receiver.py | 10 | 0.572462 | 0.570575 | 0 | 223 | 41.766816 | 114 |
RBuractaon/animated-octo-nemesis | 8,057,358,657,700 | 4f8f95561106157d65bba82efcb4c183ba299fea | d6e77317a8a7fadbb1ec4a153911dabf465b245d | /SSH Keystrokes/training/reverse_time.py | 747aa752c615ba97b79c7a30237fab24e0d45efe | [
"Apache-2.0"
] | permissive | https://github.com/RBuractaon/animated-octo-nemesis | 58891f510100def2f455ba531e0107246034b757 | 658f9fd87caffabbfadb246232df4746ba3474f5 | refs/heads/master | "2021-01-21T02:28:17.651069" | "2015-08-20T00:22:59" | "2015-08-20T00:22:59" | 26,342,626 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# reverse_time.py
# Syntax: reverse_time.py <user sessions file> <output file>
# normalizes the time of each user session. T = Time - Max(Time) for each user.
# would like to see the ?,"e","x","i","t","\n" sequence
__author__ = 'rburactaon'
import sys
import datetime
import numpy as np
# Check command line arguments
if len(sys.argv) != 3:
print "Syntax:\treverse_time.py <user sessions file> <output file>"
sys.exit(2)
else:
usrsession = sys.argv[1]
output = sys.argv[2]
print "Input File:\t", usrsession
print "Output File:\t", output
fout = open(output, 'w')
fout.write("source\tsource_port\tsource:port\tdtg\ttime\trev_time\trev_index\tdelta\tusername\n")
n = 0
lastsession = None
last_src = None
last_prt = None
last_usr = None
t_hist = list()
with open(usrsession, 'r') as fin:
fin.readline() # Skip Header
for line in fin:
n += 1
(src, prt, srcprt, t, usr) = line.strip().split(',')
if lastsession is None:
t_hist.append(float(t))
elif srcprt == lastsession:
t_hist.append(float(t))
else:
TVEC = np.array(t_hist)
TVEC.sort()
RevTimes = TVEC-max(TVEC)
RevTimes = RevTimes * (-1)
DeltaT = np.ediff1d(TVEC, to_begin=999999999)
NDX = range(0, len(RevTimes))
for ii in range(0, len(TVEC)):
dtg = datetime.datetime.fromtimestamp(TVEC[ii]).strftime('%Y-%m-%d %H:%M:%S')
fout.write(last_src + '\t' + last_prt + '\t' + lastsession + '\t' + dtg + '\t' + str(TVEC[ii]) +
'\t' + str(RevTimes[ii]) + '\t' + str(NDX[-ii]) + '\t' + str(DeltaT[ii]) + '\t' +
last_usr + '\n')
t_hist = list()
t_hist.append(float(t))
last_usr = usr
last_src = src
last_prt = prt
lastsession = srcprt
# Print last line
TVEC = np.array(t_hist)
TVEC.sort()
RevTimes = TVEC-max(TVEC)
RevTimes = RevTimes * (-1)
DeltaT = np.ediff1d(TVEC, to_begin=999999999)
NDX = range(0, len(RevTimes))
for ii in range(0, len(TVEC)):
dtg = datetime.datetime.fromtimestamp(TVEC[ii]).strftime('%Y-%m-%d %H:%M:%S')
fout.write(last_src + '\t' + last_prt + '\t' + lastsession + '\t' + dtg + '\t' + str(TVEC[ii]) +
'\t' + str(RevTimes[ii]) + '\t' + str(NDX[-ii]) + '\t' + str(DeltaT[ii]) + '\t' +
last_usr + '\n')
fin.close()
fout.close() | UTF-8 | Python | false | false | 2,551 | py | 20 | reverse_time.py | 12 | 0.543316 | 0.53038 | 0 | 75 | 33.026667 | 112 |
LarsKue/CompPhys20 | 12,051,678,240,932 | 076df8ae0110989ae0520a0777295853b0ad7c61 | 8afc4eb3ebd8b58232a79dcb8708291689270951 | /Uebung_2/Lars/main.py | b031d12ffa06d82638cb1508916cf982927cf6e1 | [] | no_license | https://github.com/LarsKue/CompPhys20 | fbc523fbf2d17ad8fc5d5d001fa4c2076df1bee5 | 1defa433dd4f5edaf4f181c0428206834f281139 | refs/heads/master | "2022-11-09T03:50:37.547404" | "2020-06-26T14:02:49" | "2020-06-26T14:02:49" | 257,870,307 | 0 | 0 | null | false | "2020-04-30T21:08:20" | "2020-04-22T10:39:26" | "2020-04-22T10:39:32" | "2020-04-30T21:08:19" | 7 | 0 | 0 | 0 | null | false | false | import sys
import os
import numpy as np
import math
from matplotlib import animation, pyplot as plt
from vec3 import Vec3
from particle import Particle
from n_particle_system import NParticleSystem
import random # for random particle generation
"""
Submission for Uebung 2 for Computational Physics 2020
Group Members:
Daniel Kreuzberger
Lars Kuehmichel
David Weinand
"""
"""
How to use:
Simply run this file as-is or scroll down to the main function at the bottom of this file
to make parameter changes. All results are already on git, so you can play around with this as you like without
fear of overwriting anything.
Homework comments:
Clearly the integration error is dependent on the time resolution.
This is especially visible for the circular orbit, where the
eccentricity only stays very close to 1 when the time step is
smaller than or equal to 0.01 (explicit euler)
or 0.1 (kdk leapfrog)
The integration method also makes a big difference here, the system is much more stable
with kdk leapfrog since it has a higher time resolution in the velocity and uses
an implicit-ish method to calculate the position.
You can clearly see a line in the log-log-plots for the energy error,
showing how important it is to select a proper time step. For the leapfrog
integrator, reducing the timestep by 3 orders of magnitude yielded an energy error
roughly 10 orders of magnitude smaller.
You can view the plots yourself by looking at the .png files in this folder. We only made energy plots
for the circular orbits, since any other kind of motion does not make much sense to analyze this way.
However, we plotted the squared eccentricity (which is linearly proportional to the energy error)
for all tests we performed, so if need be you can view those in the videos (more on that later).
The results are consistent with what you would expect, for small time steps and better integration methods,
the eccentricity stays very close to 1 (alternating in a sinusodial fashion), meaning the orbit stays very much
circular. For larger time steps, especially with the explicit euler method, the eccentricity sways a lot more,
meaning the error in energy is high and the orbit no longer approximately circular, which is also what you can see
in the animations.
We provided you with a bunch of cool videos and animations for all the tests we performed.
Most notable are
videos_kdk_leapfrog/h=0.100s.v0.mp4
and
videos_explicit_euler/h=0.100s.v0.mp4
where you can see how much more stable leapfrog is even with a larger time step.
We also performed some tests using implicit euler integration, you can see those in their respective videos
All the videos are named after their time step and initial velocity parameters.
They are also all "real-time" meaning those with large time steps have a smaller number of frames per second,
but this shows you exactly when and where a data point is recorded.
"""
def plot_animation(positions, sq_eccentricities, time_step, linetrace=True, video_filename="particle_animation.mp4",
show=True):
n_particles = len(positions)
if n_particles == 0:
return
n_steps = len(positions[0])
fig = plt.figure(figsize=(16, 8))
ax1 = fig.add_subplot(1, 2, 1)
ax1.set_xlim((-3, 3))
ax1.set_ylim((-3, 3))
ax1.set_xlabel("x")
ax1.set_ylabel("y")
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_xlim((-0.1, time_step * n_steps + 0.1))
# this needs to be set manually, unfortunately. Use the static plot in the end to check which values to set this to
ax2.set_ylim((0, 10)) # lower and higher velocity
# ax2.set_ylim((0.989, 1.011)) # circular orbit
ax2.set_xlabel("t")
ax2.set_ylabel("$e^2$")
dots, = ax1.plot([], [], lw=0, marker="o")
eccs = [ax2.plot([], [])[0] for _ in range(n_particles)]
# for linetracing
lines = [ax1.plot([], [])[0] for _ in range(n_particles)]
patches = lines + [dots] + eccs
def init():
dots.set_data([], [])
for ecc in eccs:
ecc.set_data([], [])
if linetrace:
for line in lines:
line.set_data([], [])
return patches
def animate(j):
print("\ranimation progress: frame {:d} / {:d} ({:.2f}%)".format(j, n_steps, 100 * j / n_steps), end="")
x = [particle[j].x for particle in positions]
y = [particle[j].y for particle in positions]
dots.set_data(x, y)
for k in range(len(sq_eccentricities)):
eccs[k].set_data([k * time_step for k in range(j)], [sq_eccentricities[k][:j]])
if linetrace:
for k in range(len(positions)):
particle = positions[k]
line = lines[k]
line.set_data([p.x for p in particle[:j + 1]], [p.y for p in particle[:j + 1]])
return patches
ax1.grid()
ax1.set_title("Particle Motion")
ax2.grid()
ax2.set_title("Eccentricities")
anim1 = animation.FuncAnimation(fig, animate, init_func=init, frames=n_steps, interval=1000 * time_step,
repeat_delay=2000, blit=True)
print()
# comment this out if you don't want to overwrite videos
anim1.save(video_filename, fps=1 / time_step, extra_args=["-vcodec", "libx264"])
if show:
fig.show()
else:
plt.close(fig)
def run_simulation(n_steps, h, M, R, v, video_filename, show=True):
# # create the particles
p1 = Particle(Vec3(R, 0.0, 0.0), Vec3(0.0, -v, 0.0), M)
p2 = Particle(Vec3(-R, 0.0, 0.0), Vec3(0.0, v, 0.0), M)
# can calculate systems with more than 2 bodies too
# p3 = Particle(Vec3(0.0, 0.0, 0.0), Vec3(0.0, 0.0, 0.0), 5
# particles = [Particle(Vec3(random.normalvariate(0.0, R), random.normalvariate(0.0, R), 0.0),
# Vec3(random.normalvariate(0.0, v), random.normalvariate(0.0, v), 0.0), M) for _ in range(200)]
n_body_system = NParticleSystem([p1, p2])
# n_body_system = NParticlesSimulation([p1, p2, p3])
# n_body_system = NParticlesSimulation(particles)
# list of positions of the particles at different timesteps
# dimensions are number of particles, number of time steps
positions = [[particle.position] for particle in n_body_system.particles]
ecc1 = []
ecc2 = []
t = np.linspace(0, n_steps * h, n_steps)
for current_t in t:
print("\rcalculation progress: t = {:.2f} / {:.2f} ({:.2f}%)".format(current_t, t[-1], 100 * current_t / t[-1]),
end="")
# update the positions
# n_body_system.step_explicit_euler(h)
# tolerance may need manual tuning depending on time step
# n_body_system.step_implicit_euler(h, sq_tolerance=1e-3)
# n_body_system.step_kdk_leapfrog(h)
n_body_system.step_rk4(h)
# record the eccentricities for plotting later
ecc1.append(n_body_system.particles[0].eccentricity(n_body_system.particles[1]))
ecc2.append(n_body_system.particles[1].eccentricity(n_body_system.particles[0]))
# get the positions for plotting
for i in range(len(positions)):
positions[i].append(n_body_system.particles[i].position)
last_t = current_t
print()
# plot an animation, this may take a long time, so comment this in if you want fancy animated results
plot_animation(positions, [[e.abs_sq() for e in ecc1], [e.abs_sq() for e in ecc2]], h, linetrace=True,
video_filename=video_filename, show=show)
# statically plot the eccentricities
# this is useful if you set show to True and want to examine a single simulation
fig = plt.figure(figsize=(10, 7))
plt.plot(t, [p.abs_sq() for p in ecc1], label="Particle 1")
plt.plot(t, [p.abs_sq() for p in ecc2], label="Particle 2")
plt.xlabel("t")
plt.ylabel("e^2")
plt.title("Squared Eccentricities")
plt.legend(loc="lower left")
plt.savefig("e_sq.png")
"""
In our example, the error in energy simplifies to
delta E = 4 * |e^2 - 1|
where e is the eccentricity.
Two orbits are complete after 8*pi seconds have passed.
"""
# idx = int(8 * math.pi / h)
#
# # ecc1 and ecc2 are roughly equal for all simulations
# delta_E = 4 * abs(ecc1[idx].abs_sq() - 1)
delta_E = 0
if show:
plt.show()
else:
plt.close(fig)
return delta_E
def main(argv: list) -> int:
# simulation parameters
ns = [50, 500, 5000, 50000]
hs = [1, 0.1, 0.01, 0.001]
# adjust this for different velocities of the particles
Particle.G = 1
M = 1
R = 1.0
# analytically, this velocity makes for a circular orbit of both particles around each other
v0 = math.sqrt(M * Particle.G / (4 * R))
# use these for animation generation
# vs = [v0, v0 * math.sqrt(2), v0 / 3]
# v_names = ["v0", "sqrt(2)v0", "v0 div 3"]
# use these to generate Energy-only plots
vs = [v0]
v_names = ["v0"]
directory = "videos/"
if not os.path.exists(directory):
os.mkdir(directory)
else:
directory = input("Directory 'videos/' already exists. Please input the name of a replacement videos path or press Enter to overwrite.")
if not directory:
directory = "videos/"
if not directory.endswith("/"):
directory += "/"
delta_energy = []
for n_steps, h in zip(ns, hs):
for v, v_name in zip(vs, v_names):
# generate a bunch of animations for the above simulation parameters and save them in the directory videos/
video_filename = directory + "h={:.3f}s.".format(h) + v_name + ".mp4"
current_delta_energy = run_simulation(n_steps, h, M, R, v, video_filename, show=False)
delta_energy.append(current_delta_energy)
plt.figure(figsize=(8, 8))
plt.plot(hs, delta_energy, lw=0, marker="o")
plt.xlabel("h")
plt.ylabel(r"$\Delta E$")
plt.xscale("log")
plt.yscale("log")
plt.title("Energy Error vs Time Resolution")
plt.savefig("deltaE.png")
plt.show()
return 0
if __name__ == "__main__":
main(sys.argv)
| UTF-8 | Python | false | false | 10,111 | py | 31 | main.py | 26 | 0.649194 | 0.624271 | 0 | 278 | 35.370504 | 144 |
FyzHsn/data_structures_and_algorithms | 11,218,454,592,645 | 065ec36d918e2c00f3b6f6f161e41771727e69aa | 08aaeb9ac669eb6de341c3d74b58d1c5b588aace | /algorithms/sort_stack.py | 6654a86819d1dcbe42177b93324ec8a6f3924f91 | [] | no_license | https://github.com/FyzHsn/data_structures_and_algorithms | 7e2daae36cf8a3977a15253dea4e7435f632c020 | 64bbc7edd0b53cfebda8081badff7a481aea0c87 | refs/heads/master | "2022-10-29T22:50:19.334804" | "2020-06-18T02:13:31" | "2020-06-18T02:13:31" | 269,997,062 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from collections import deque
def insert_at_bottom(s, element):
if len(s) == 0 or element >= s[-1]:
s.append(element)
else:
top = s.pop()
insert_at_bottom(s, element)
s.append(top)
return
def sort_stack(s):
"""Time Complexity: O(n^2), Auxilary Space Complexity: O(n)"""
if len(s) == 0:
return
top = s.pop()
sort_stack(s)
insert_at_bottom(s, top)
return s
if __name__ == "__main__":
test_cases = [[1, 5, 2, 6],
[7, 3, 4, 1],
[1]]
for input in test_cases:
stack = deque()
for item in input:
stack.append(item)
print(sort_stack(stack))
| UTF-8 | Python | false | false | 717 | py | 28 | sort_stack.py | 26 | 0.483961 | 0.46583 | 0 | 33 | 20.121212 | 66 |
JoeSteven/v2rayPro-speed | 18,648,748,027,814 | cdaacf7ca6a13b2913f653592efa5235b6734296 | 0554bcc743485341a284992088005142d3324771 | /v2ray/v2ray_ping.py | 0b084c950d974ca6e5c33b74ecc08d89e4a409f3 | [] | no_license | https://github.com/JoeSteven/v2rayPro-speed | c082cb744a57b7e36f5f05ce46af50f909561bf6 | e009c6a7c55eefaa9533690b5082a5b20ec89737 | refs/heads/master | "2020-04-01T00:56:03.661802" | "2018-10-12T09:56:58" | "2018-10-12T09:56:58" | 152,719,282 | 2 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# !/usr/bin/env python3
# 登陆授权
# 获取服务器列表
# 转换json{
# "SVIP":"svip-kt.v2cloud.xyz",
# "HKT1":"hk-hkt.v2cloud.xyz"}
# 测速
# alias vpn='python3 /Users/joey/PycharmProjects/vpn/v2ray_ping.py [user_name] [password]'
import requests
import json
import os
import sys
json_file = os.getcwd() + "/vpn.json"
def auth(user_name, password):
url = 'https://v2rayapi.com/client/api.php?s=user.auth'
response = requests.post(url, data={'username': user_name,
'password': password, 'getToken': '1'}, timeout=15)
data = json.loads(response.text)
print(response.text)
token = data['data']
print(token)
return token
def get_vpn_list(token):
url = 'https://v2rayapi.com/client/api.php?s=whmcs.hosting&token=' + token
response = requests.get(url, timeout=15)
data = json.loads(response.text)
nodes = data['data'][0]['node']
print(nodes)
dict = {}
tsl_list = []
for node in nodes:
print(node)
name = node['name'] # .encode('latin1').decode('unicode_escape')
server = node['server']
print(name)
dict[name] = server
if node["tls"] == 1:
tsl_list.append(name)
print(dict)
with open(json_file, 'w') as f:
json.dump(dict, f)
return json_file, tsl_list
def ping_vpn(vpn_json):
os.system("mping -p " + vpn_json)
os.system("rm " + json_file)
def main():
user_name = sys.argv[1]
password = sys.argv[2]
print(user_name + " " + password)
token = auth(user_name, password)
if token is None:
print("获取授权失败")
return
vpn_json, tsl_list = get_vpn_list(token)
# vpn_json = get_vpn_list('d9ccc738-4e2e-49d3-aad0-3622ed176691')
print(vpn_json)
if vpn_json is None:
print("获取vpn列表失败")
return
ping_vpn(vpn_json)
print("支持 tsl 的服务器:")
print(tsl_list)
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 2,020 | py | 5 | v2ray_ping.py | 3 | 0.586578 | 0.566598 | 0 | 80 | 23.4 | 91 |
merdarking/Automation-Fruit-Catalog | 14,912,126,490,099 | d58dc388de36bf94468d08cdc1c898b6e271ce62 | 5d29e9243bab3ed40280f71ad4cc678d8e3f1439 | /run_edited.py | 6b4232f07b3bffa1115fa0c34320e4a7095deba0 | [] | no_license | https://github.com/merdarking/Automation-Fruit-Catalog | dde200c223cfdb7bcaca6392b2a01139e8dd917d | 15fd0111d32b763279360394c5905b2fbaaa7d25 | refs/heads/main | "2023-03-26T04:35:16.318359" | "2021-03-25T02:28:28" | "2021-03-25T02:28:28" | 351,283,311 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! /usr/bin/python3
import os
import sys
import datetime # NEW
import reports # NEW
'''Get the description file name list in the directory'''
def get_file_list():
# Go into the path in the descriptions
path = os.path.join(os.path.dirname(sys.argv[0]),"supplier-data")
os.chdir(path)
# Get the file names in the descriptions directory
descriptions = os.listdir("descriptions")
descriptions = [desc_file for desc_file in descriptions if '.txt' in desc_file]
return tuple(descriptions)
'''Obtain and save it into the dictionary to be dumped into JSON'''
def get_file_data():
# Get the file name
descriptions = get_file_list()
key = ('name', 'weight', 'description', 'image_name')
desc_data = []
# Get the file data information to be processed locally
for desc in descriptions:
image_name = desc.replace('.jpeg','.txt')
path = os.path.join(os.path.dirname(sys.argv[0]),"supplier-data","descriptions",desc)
tmp_dict = {}
# Open the data
with open(path,'r') as opened:
item = [content.strip() for content in opened]
# Remove any empty line
if '' in item:
item.remove('')
opened.close()
# Throw away the lbs
item[1] = int(item[1].strip(' lbs'))
# Convert into dictionary
tmp_dict[key[0]], tmp_dict[key[1]], tmp_dict[key[2]], tmp_dict[key[3]] = item[0], item[1], item[2], image_name
desc_data.append(tmp_dict)
return desc_data
# START HERE IS NEW
'''Combine name and weight on each individual item'''
def process_body(items):
text = "name: {}<br/>weight: {} lbs".format(items['name'], str(items['weight']))
return text
def main():
decription_data = get_file_data()
# START HERE IS NEW
'''Content of the pdf file'''
date = datetime.datetime.today().strftime('%B %e, %Y')
attachment = os.path.join("..","processed.pdf")
title = "Processed Updated on {}".format(date)
'''Process the body'''
overall_body = []
for item in decription_data:
item_body = process_body(item)
overall_body.append(item_body)
paragraph = '<br/><br/>'.join(overall_body)
print(reports.generate_report(attachment, title, paragraph))
if __name__ == '__main__':
main() | UTF-8 | Python | false | false | 2,344 | py | 8 | run_edited.py | 7 | 0.609215 | 0.604096 | 0 | 76 | 29.855263 | 118 |
hanlingzhi/TF-TestBySpider | 3,805,341,071,414 | c9836e116321e248e4074824527cdf4de9aa8e97 | 7a3f3e8e2360459afd0b03c3da080a4c1ff50549 | /test_spider/util/enum.py | 2f9ea1068c5e7dc445fb9c239b0c44badf5dfd76 | [] | no_license | https://github.com/hanlingzhi/TF-TestBySpider | aae2f82174142c0a6b75d31c2e7f63c9f3ad88b1 | 09d3207ec2033ec7699d53d2a29293faa0ddb54b | refs/heads/master | "2020-12-28T02:38:22.477555" | "2020-03-15T13:25:53" | "2020-03-15T13:25:53" | 238,154,601 | 5 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from enum import Enum, unique
@unique
class CrawlType(Enum):
scrapy = 'scrapy' # normal
selenium = 'selenium'
puppeeter = 'puppeeter'
| UTF-8 | Python | false | false | 150 | py | 19 | enum.py | 17 | 0.666667 | 0.666667 | 0 | 10 | 14 | 30 |
porcofly/face_segmentation | 6,012,954,234,298 | 6dd0b08f76f39d3f7f451dbb9f1ad4b4d4368580 | 966dfbf3db28e0815e86455a68816c59a75ae782 | /face_segment_contour.py | 103662ef240beff9cf9b5ec0b0e884fc794661d2 | [] | no_license | https://github.com/porcofly/face_segmentation | 6bdadcd670cee0c27d43dd82f2dceb319e6b72d8 | 05aad68a971a1218e525b7f4e99969b030eb88a6 | refs/heads/master | "2020-03-23T16:38:12.019116" | "2018-07-21T01:10:58" | "2018-07-21T01:13:50" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import argparse
from scipy.spatial import ConvexHull
from skimage import draw
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from util import read_list, load_landmarks, show_result, CRF
def main(args):
image_paths = read_list(args.image_list)
for path in image_paths:
# landmarks_file should have the same prefix as image_file
landmarks_file = path[:-3] + 'txt'
im = Image.open(path)
width, height = im.size
landmarks = load_landmarks(landmarks_file)
landmarks[:,1] = height - landmarks[:,1]
# select contour points
#contour_points = get_contour_side(landmarks)
# generate a contour curve with contour points
hull = ConvexHull(landmarks)
# draw landmarks
lm = np.array(im)
for i in range(landmarks.shape[0]):
rr, cc = draw.circle(height-landmarks[i,1].astype('int32'), landmarks[i,0].astype('int32'), 5)
lm[rr, cc, :] = np.array((255, 0, 0))
# create mask
mask = np.zeros((height, width))
rr, cc = draw.polygon(height-landmarks[hull.vertices,1], landmarks[hull.vertices,0], mask.shape)
mask[rr,cc] = 1
path = path[:-1] if path[:-1] == '/' else path
image_name = path[path.rindex('/')+1:-4] + '_contour.png'
show_result(lm, mask, np.tile((mask!=0)[:,:,np.newaxis], (1,1,3)) * im, save=True, filename='images/'+image_name)
# add CRF
prob = np.concatenate(((1-mask)[np.newaxis,:,:]*0.9 +
mask[np.newaxis, :, :]*0.1,
mask[np.newaxis, :, :]*0.9 +
(1-mask)[np.newaxis, :, :]*0.1), axis=0)
map = CRF(prob, np.array(im))
#show_result(im, map, np.tile((map!=0)[:,:,np.newaxis], (1,1,3)) * im)
if __name__=="__main__":
parser = argparse.ArgumentParser(description=
'Face segmentation with landmarks.')
parser.add_argument('--image_list',
default='input/list.txt',
type=str, help='path to image file')
args = parser.parse_args()
main(args)
| UTF-8 | Python | false | false | 2,207 | py | 4 | face_segment_contour.py | 3 | 0.557771 | 0.53874 | 0 | 55 | 39.036364 | 121 |
joelwking/aci-cyber-ingest | 12,970,801,243,412 | 7a344d031384f2d05fe3280d9f97a656ce680fd4 | eaeea2df868b2315e989005a0e64c1d7438416ad | /Service/atomic_counters.py | 8828bb975bbd1543c8c01246f4d35aa6205dd725 | [
"MIT"
] | permissive | https://github.com/joelwking/aci-cyber-ingest | 5e202bc88213a8c17c57c770425f60db1a42c839 | a5e8f42ef1f6aeb284741319bd68254cee25beed | refs/heads/master | "2020-12-24T05:41:32.291661" | "2016-08-24T14:32:43" | "2016-08-24T14:32:43" | 65,848,094 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
"""
Usage:
atomic_counters.py
use CTRL + c to exit!
This module monitors the atomic counters configured on an ACI fabric and generates a security incident
in Phantom (phantom.us) when the matches on a couter exceeds a threshold.
Copyright (c) 2016 World Wide Technology, Inc.
All rights reserved.
author: joel.king@wwt.com
Requirements:
This module references two additional WWT developed modules, PhantomIngest and AnsibleACI.
These imported modules facilitate communication with the REST APIs for Phantom and APIC.
Both modules are published on my GitHub account, https://github.com/joelwking
Revision history:
27 July 2016 | 1.0 - initial relese
3 Aug 2016 | 1.1 - main logic complete
10 Aug 2016 | 1.2 - basic functionality complete
22 Aug 2016 | 1.3 - modifications for running on APIC rather than VM Flint
23 Aug 2016 | 1.4 - additional mods from testing on simulator
"""
# SYSTEM IMPORTS
import sys
import time
import json
import signal
import requests
# LOCAL IMPORTS
import AnsibleACI as aci
import PhantomIngest as ingest
# CONSTANTS
SLEEP_RETRY = 10
SLEEP_NORMAL = 60
# GLOBALS
counter_array = []
# CLASSES
class Counter(object):
"This object maintains the state of the various counters we are watching"
def __init__(self):
" "
self.dn = None
self.attributes = {}
self.epoch_time = 0
def populate_fields(self, **kwargs):
" "
for key, value in kwargs.items():
try:
self.key = value
except KeyError:
print "KeyError populating instance variable %s" % key
return False
return True
# MAIN LOGIC
def main(params):
" "
set_signals()
apic = get_controller_object(params)
phantom = ingest.PhantomIngest(params["phantom"]["host"], params["phantom"]["token"])
while True:
apic.aaaLogin()
if not apic.is_connected():
print "Failed to authenticate with APIC ... retrying"
time.sleep(SLEEP_RETRY)
continue
for item in get_what_to_watch():
try:
query_atomic_counters(apic, phantom, **item)
except AssertionError as e:
print "AssertionError " + e
except TypeError:
print "Failure to load JSON data"
except KeyError as e:
print "Error accessing key " + e
apic.aaaLogout()
idle_time()
return
def query_atomic_counters(apic, phantom, **kwargs):
"for the APIC specified, issue a class query and get the counter specified"
apic.setgeneric_URL("%s://%s/api/node/class/" + kwargs["class"] + ".json")
ret_code = apic.genericGET()
assert (ret_code is requests.codes.ok), "Failure to communicate: %s" % ret_code
content = json.loads(apic.get_content())
print "\n%s %s, searching %s, for %s, found %s" % (time.asctime(), apic.controllername, kwargs["class"], kwargs["counter"], content["totalCount"])
for managed_object in content["imdata"]:
if new_mo(managed_object, kwargs):
update_object_list(managed_object, kwargs)
continue
else:
if over_threshold(kwargs["class"], managed_object):
create_artifact(phantom, '28', kwargs["class"], managed_object) # Note, 28 is hard coded *******************
return
def new_mo(managed_object, arguments):
"check if this is a new atomic counter"
# print "\nnew_mo = %s \narguments = %s" % (managed_object, arguments)
for item in counter_array:
if managed_object[arguments["class"]]["attributes"]["dn"] == item.dn:
print "%s located mo: %s" % (time.asctime(), item.dn[0:80])
return False
return True
def update_object_list(managed_object, arguments):
" add the object to the list"
moc = Counter()
try:
moc.attributes = managed_object[arguments["class"]]["attributes"]
moc.dn = managed_object[arguments["class"]]["attributes"]["dn"]
moc.epoch_time = int(time.time())
except KeyError:
print "KeyError exception in update_object_list"
else:
counter_array.append(moc)
print "%s added object: %s" % (time.asctime(), moc.dn[0:80])
return
def over_threshold(aci_class, managed_object):
"Is this a reportable incident?"
#
# You need to update the object with the new values
# In addition to doing the checks.
#
if int(managed_object[aci_class]["attributes"]["totTxP"]) > 0:
return True
if int(managed_object[aci_class]["attributes"]["totRxP"]) > 0:
return True
return False
def create_artifact(phantom, container_id, aci_class, managed_object):
"Create an artifact in the Phantom container specified"
# Populate the meta data with the key, value pairs, stripping off the class and attributes
meta_data = managed_object[aci_class]["attributes"]
# Populate the CEF fields
cef = {"sourceAddress": meta_data["src"],
"destinationAddress": meta_data["dst"],
"transportProtocol": meta_data["filtEnt"],
"endTime": meta_data["ts"],
"message": meta_data["dn"],
"out": meta_data["totTxP"],
"in": meta_data["totRxP"]
}
art_i_fact = {"name": aci_class, "source_data_identifier": meta_data["ts"]}
try:
artifact_id = phantom.add_artifact(container_id, cef, meta_data, **art_i_fact)
except AssertionError as e:
print "Any HTTP return code other than OK %s %s" % (e, phantom.content)
except Exception as e:
print "Typically the phantom host did not respond, a connection error %s %s" % (e, phantom.content)
print "%s added artifact: %s" % (time.asctime(), artifact_id)
return
def idle_time():
"be quiet for a bit, and report occasionally we are alive"
print "%s idle..." % time.asctime()
time.sleep(SLEEP_NORMAL)
def what_todo_about_nothing():
"Having not implemented this function in a meaningful way, advise the calling routine."
raise NotImplementedError
def get_controller_object(params):
"Initalize the APIC controller object, and return it."
apic = aci.Connection()
apic.setUsername(params["aci"]["username"])
apic.setPassword(params["aci"]["password"])
apic.setcontrollerIP(params["aci"]["host"])
return apic
def set_signals():
"Set the signals used to interrupt the program"
signal.signal(signal.SIGINT, sig_handler) # Enable Interrupt handler
signal.signal(signal.SIGTERM, sig_handler) # Enable TERM handler
return
def sig_handler(signum, frame):
"Handle signal interrupts."
print '%s interrupt %s caught, exiting.' % (time.asctime(), signum)
sys.exit()
def usage():
"Print out the module documentation"
print __doc__
sys.exit()
def get_meta_data_keys():
"***STUB ROUTINE*** Populate artifact with theses keys"
return ("dst",
"src",
"ts",
"filtEnt",
"seqNo",
"dn",
"totRxP",
"totTxP")
def get_what_to_watch():
"***STUB ROUTINE*** Query these classes and look at the counter, create incident if threshold is exceeded."
return ({"class": "dbgEpgToIpRslt", "counter": "totTxP", "threshold": None},
{"class": "dbgIpToEpgRslt", "counter": "totTxP", "threshold": None})
def get_credentials():
"***STUB ROUTINE*** to Return parameters for this run"
try:
import atomic_counters_constants
except ImportError:
usage()
sys.exit()
return atomic_counters_constants.params
if __name__ == '__main__':
debug = False
main(get_credentials()) | UTF-8 | Python | false | false | 7,927 | py | 5 | atomic_counters.py | 3 | 0.615618 | 0.608048 | 0 | 257 | 29.848249 | 150 |
Zacharias030/ProGraML | 2,980,707,316,705 | e98ff4bb1660e75bd57a6bc701e63a119f304253 | 458ea9901d3fe05c03327c8ec74441814578dbf9 | /deeplearning/ml4pl/models/lstm/graph_lstm_test.py | 2b38a266e5f0532bb430ad96e5164673c3377fb9 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | https://github.com/Zacharias030/ProGraML | 945e6c615afc6950ed7d7c747f3fa03182fab379 | cd99d2c5362acd0b24ee224492bb3e8c4d4736fb | refs/heads/master | "2020-11-28T06:42:53.521864" | "2020-07-22T09:29:42" | "2020-07-22T09:29:42" | 229,731,521 | 0 | 1 | NOASSERTION | true | "2020-08-10T12:45:04" | "2019-12-23T10:40:53" | "2020-07-22T09:30:00" | "2020-08-10T12:45:04" | 78,780 | 0 | 1 | 0 | Jupyter Notebook | false | false | # Copyright 2019 the ProGraML authors.
#
# Contact Chris Cummins <chrisc.101@gmail.com>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for //deeplearning/ml4pl/models/lstm:graph_lstm."""
import random
import string
from typing import List
from datasets.opencl.device_mapping import opencl_device_mapping_dataset
from deeplearning.ml4pl.graphs.labelled import graph_tuple_database
from deeplearning.ml4pl.graphs.labelled.devmap import make_devmap_dataset
from deeplearning.ml4pl.ir import ir_database
from deeplearning.ml4pl.models import batch_iterator as batch_iterator_lib
from deeplearning.ml4pl.models import epoch
from deeplearning.ml4pl.models import log_database
from deeplearning.ml4pl.models import logger as logging
from deeplearning.ml4pl.models.lstm import graph_lstm
from deeplearning.ml4pl.testing import random_graph_tuple_database_generator
from deeplearning.ml4pl.testing import testing_databases
from labm8.py import test
FLAGS = test.FLAGS
# For testing models, always use --strict_graph_segmentation.
FLAGS.strict_graph_segmentation = True
# For testing the LSTM we can use a reduced size model.
FLAGS.lang_model_hidden_size = 8
FLAGS.heuristic_model_hidden_size = 4
# No test coverage for hot code.
MODULE_UNDER_TEST = None
###############################################################################
# Utility functions.
###############################################################################
def CreateRandomString(min_length: int = 1, max_length: int = 1024) -> str:
"""Generate a random string."""
return "".join(
random.choice(string.ascii_lowercase)
for _ in range(random.randint(min_length, max_length))
)
###############################################################################
# Fixtures.
###############################################################################
@test.Fixture(
scope="session",
params=testing_databases.GetDatabaseUrls(),
namer=testing_databases.DatabaseUrlNamer("log_db"),
)
def log_db(request) -> log_database.Database:
"""A test fixture which yields an empty log database."""
yield from testing_databases.YieldDatabase(
log_database.Database, request.param
)
@test.Fixture(scope="session")
def logger(log_db: log_database.Database) -> logging.Logger:
"""A test fixture which yields a logger."""
with logging.Logger(log_db, max_buffer_length=128) as logger:
yield logger
@test.Fixture(scope="session", params=(2, 104), namer=lambda x: f"graph_y:{x}")
def graph_y_dimensionality(request) -> int:
"""A test fixture which enumerates graph label dimensionalities."""
return request.param
@test.Fixture(
scope="session", params=list(epoch.Type), namer=lambda x: x.name.lower()
)
def epoch_type(request) -> epoch.Type:
"""A test fixture which enumerates epoch types."""
return request.param
@test.Fixture(scope="session")
def opencl_relpaths() -> List[str]:
opencl_df = make_devmap_dataset.MakeGpuDataFrame(
opencl_device_mapping_dataset.OpenClDeviceMappingsDataset().df,
"amd_tahiti_7970",
)
return list(set(opencl_df.relpath.values))
@test.Fixture(scope="session")
def ir_db(opencl_relpaths: List[str]) -> ir_database.Database:
"""A test fixture which yields an IR database with 256 OpenCL entries."""
with testing_databases.DatabaseContext(
ir_database.Database, testing_databases.GetDatabaseUrls()[0]
) as db:
rows = []
# Create IRs using OpenCL relpaths.
for i, relpath in enumerate(opencl_relpaths):
ir = ir_database.IntermediateRepresentation.CreateFromText(
source="pact17_opencl_devmap",
relpath=relpath,
source_language=ir_database.SourceLanguage.OPENCL,
type=ir_database.IrType.LLVM_6_0,
cflags="",
text=CreateRandomString(),
)
ir.id = i + 1
rows.append(ir)
with db.Session(commit=True) as session:
session.add_all(rows)
yield db
@test.Fixture(scope="session")
def graph_db(
opencl_relpaths: List[str], graph_y_dimensionality: int,
) -> graph_tuple_database.Database:
"""A test fixture which yields a graph database with 256 OpenCL IR entries."""
with testing_databases.DatabaseContext(
graph_tuple_database.Database, testing_databases.GetDatabaseUrls()[0]
) as db:
random_graph_tuple_database_generator.PopulateWithTestSet(
db,
len(opencl_relpaths),
node_x_dimensionality=2,
node_y_dimensionality=0,
graph_x_dimensionality=2,
graph_y_dimensionality=graph_y_dimensionality,
split_count=3,
)
yield db
###############################################################################
# Tests.
###############################################################################
def test_load_restore_model_from_checkpoint_smoke_test(
logger: logging.Logger,
graph_db: graph_tuple_database.Database,
ir_db: ir_database.Database,
):
"""Test creating and restoring model from checkpoint."""
# Create and initialize a model.
model = graph_lstm.GraphLstm(
logger, graph_db, ir_db=ir_db, batch_size=32, padded_sequence_length=10,
)
model.Initialize()
# Create a checkpoint from the model.
checkpoint_ref = model.SaveCheckpoint()
# Reset the model state to the checkpoint.
model.RestoreFrom(checkpoint_ref)
# Run a test epoch to make sure the restored model works.
batch_iterator = batch_iterator_lib.MakeBatchIterator(
model=model,
graph_db=graph_db,
splits={epoch.Type.TRAIN: [0], epoch.Type.VAL: [1], epoch.Type.TEST: [2],},
epoch_type=epoch.Type.TEST,
)
model(
epoch_type=epoch.Type.TEST, batch_iterator=batch_iterator, logger=logger,
)
# Create a new model instance and restore its state from the checkpoint.
new_model = graph_lstm.GraphLstm(
logger, graph_db, ir_db=ir_db, batch_size=32, padded_sequence_length=10,
)
new_model.RestoreFrom(checkpoint_ref)
# Check that the new model works.
batch_iterator = batch_iterator_lib.MakeBatchIterator(
model=new_model,
graph_db=graph_db,
splits={epoch.Type.TRAIN: [0], epoch.Type.VAL: [1], epoch.Type.TEST: [2],},
epoch_type=epoch.Type.TEST,
)
new_model(
epoch_type=epoch.Type.TEST, batch_iterator=batch_iterator, logger=logger,
)
def test_classifier_call(
epoch_type: epoch.Type,
logger: logging.Logger,
graph_db: graph_tuple_database.Database,
ir_db: ir_database.Database,
):
"""Test running a graph classifier."""
model = graph_lstm.GraphLstm(
logger, graph_db, ir_db=ir_db, batch_size=8, padded_sequence_length=100,
)
model.Initialize()
batch_iterator = batch_iterator_lib.MakeBatchIterator(
model=model,
graph_db=graph_db,
splits={epoch.Type.TRAIN: [0], epoch.Type.VAL: [1], epoch.Type.TEST: [2],},
epoch_type=epoch_type,
)
results = model(
epoch_type=epoch_type, batch_iterator=batch_iterator, logger=logger,
)
assert isinstance(results, epoch.Results)
assert results.batch_count
# We only get loss for training.
if epoch_type == epoch.Type.TRAIN:
assert results.has_loss
else:
assert not results.has_loss
if __name__ == "__main__":
test.Main()
| UTF-8 | Python | false | false | 7,621 | py | 231 | graph_lstm_test.py | 174 | 0.676289 | 0.665923 | 0 | 238 | 31.021008 | 80 |
FihlaTV/backstage | 841,813,596,413 | c9850a7dba35c260919bf4e6db37454466371766 | 31a9016e3b0438989241246912a35e335f1cc448 | /backstage/users/models.py | 3af14c577b8046cf74aa6743b405a5233f2db7e0 | [] | no_license | https://github.com/FihlaTV/backstage | db74f1d2b58c3ae286fe42b87b5d19bbc34e008e | 6ab42aea4d89c634a5d2525693d94fd4315a9654 | refs/heads/master | "2020-04-03T02:45:44.934883" | "2013-09-20T11:01:39" | "2013-09-20T11:01:39" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding: UTF-8
'''
Created: 2013-06-20
Author: weixin
Email: weixindlut@gmail.com
Desc:
'''
from django.db import models
from backstage.backend.utilities import get_uuid
class UserInfo(models.Model):
"""
"""
userid = models.CharField(unique=True, blank=False, max_length=50,
verbose_name="id", primary_key=True,
default=get_uuid)
email = models.EmailField(max_length=50, blank=True)
username = models.CharField(blank=False, max_length=100, unique=True)
homepage_url = models.URLField(max_length=200, blank=True)
location = models.CharField(blank=True, max_length=50)
irc_nickname = models.CharField(blank=True, max_length=50)
class Meta:
verbose_name = "user info"
verbose_name_plural = "user info"
def __unicode__(self):
return self.username
| UTF-8 | Python | false | false | 873 | py | 22 | models.py | 19 | 0.644903 | 0.618557 | 0 | 30 | 28.1 | 73 |
thepavangollapalli/nlp_jskp | 17,755,394,807,112 | b45254bc14a2a0b1a2ca73d194db1240a9e7a881 | e3a683431bcf28f29585f26ebe596b6855bdb143 | /question_generator/questions.py | 37e2140dfa8608eba59f1d0e8bd8a6c837267feb | [] | no_license | https://github.com/thepavangollapalli/nlp_jskp | 6be6766d19d9128d280b231d857443f12339e5bc | bb42d6dfd83794af4dfde9d45be5c4661a395178 | refs/heads/master | "2020-03-29T17:37:37.958431" | "2018-11-29T20:52:56" | "2018-11-29T20:52:56" | 150,173,967 | 0 | 0 | null | false | "2023-02-14T23:19:51" | "2018-09-24T22:01:39" | "2018-11-29T20:52:58" | "2018-11-29T20:52:57" | 50 | 0 | 0 | 0 | Python | false | false | #!/usr/bin/env python -W ignore::DeprecationWarning
#Mute depreciation warning from scikit-learn
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
import nltk
import sys
import copy
from nltk.tag.stanford import StanfordNERTagger, StanfordPOSTagger
stanford_ner_jar = '/home/coreNLP/stanford-ner-2018-10-16/stanford-ner.jar'
stanford_ner_model = '/home/coreNLP/stanford-ner-2018-10-16/classifiers/english.all.3class.distsim.crf.ser.gz'
stanford_pos_jar = '/home/coreNLP/stanford-postagger-full-2018-10-16/stanford-postagger.jar'
stanford_pos_model = '/home/coreNLP/stanford-postagger-full-2018-10-16/models/english-bidirectional-distsim.tagger'
stanford_when_model = '/home/coreNLP/stanford-ner-2018-10-16/classifiers/english.muc.7class.distsim.crf.ser.gz'
ner_tagger = StanfordNERTagger(stanford_ner_model, stanford_ner_jar, encoding="utf8")
pos_tagger = StanfordPOSTagger(stanford_pos_model, stanford_pos_jar, encoding="utf8")
when_tagger = StanfordNERTagger(stanford_when_model, stanford_ner_jar, encoding="utf8")
#class to store all sentences
class Sentences:
def __init__(self, passage):
self.sentences = nltk.tokenize.sent_tokenize(passage)
self.tokenized = self.tokenize(self.sentences)
self.ner = self.ner(self.tokenized)
self.pos = self.pos(self.tokenized)
self.whenTags = self.whenTags(self.tokenized)
self.overall_questions = self.get_overall_questions()
def tokenize(self, sentences):
tokens = list()
for s in self.sentences:
tokens.append(nltk.word_tokenize(s))
return tokens
def ner(self, tokens):
nerTags = ner_tagger.tag_sents(tokens)
return nerTags
def pos(self, tokens):
pos_tags = list()
for s in tokens:
pos_tags.append(nltk.pos_tag(s))
return pos_tags
def whenTags(self, tokens):
whenTags = when_tagger.tag_sents(tokens)
return whenTags
def get_overall_questions(self):
result = list()
for i in range(0, len(self.sentences)):
sentence = Questions(self, i)
for question in sentence.sentence_questions:
result.append(question)
return result
class Questions:
def __init__(self, s, n):
self.currSent = s.sentences[n]
self.tokenized = s.tokenized[n]
self.ner = s.ner[n]
self.pos = s.pos[n]
self.whenTags = s.whenTags[n]
self.len = len(self.tokenized)
self.proN = self.proN(self.ner)
self.sentence_questions = self.get_questions()
def __repr__(self):
return str(self.ner)
def create_question(self, q):
if(len(q) == 0):
return q
return " ".join(q) + "?"
def proN(self, ner_tags):
pn = dict()
for (w, t) in ner_tags:
if t == "PERSON":
if(w not in pn):
pn[w] = 1
else:
pn[w] += 1
maxWord = ""
maxCount = 0
for k in pn:
if(maxCount < pn[k]):
maxCount = pn[k]
maxWord = k
return maxWord
def get_questions(self):
return [self.what(), self.who(), self.when(), self.yesNo(), self.where(), self.why()]
def what(self):
noun_tags = {'NN', 'NNP', 'NNS', 'PRP'}
verb_tags = {'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ', 'DT', 'MD'}
q = list()
found = False
for i in range(1, len(self.tokenized) - 1):
if found:
if self.pos[i][1] in noun_tags and self.tokenized[i-1] == 'and':
q.pop()
break
q.append(self.tokenized[i])
if not found and self.pos[i][1] in verb_tags and self.ner[i-1][1] != 'PERSON' and self.pos[i-1][1] in noun_tags:
q.append("What")
q.append(self.tokenized[i])
found = True
return self.create_question(q)
def who(self):
verb_tags = {'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ', 'MD'}
ner_subjects = {'PERSON', 'ORGANIZATION'}
q = list()
found = False
for i in range(1, self.len - 1):
if found:
if self.pos[i][1] in verb_tags and self.ner[i-1][1] in ner_subjects:
q.pop()
break
q.append(self.tokenized[i])
if not found and self.pos[i][1] in verb_tags:
q.append("Who")
q.append(self.tokenized[i])
found = True
return self.create_question(q)
def when(self):
q = list()
found = False
time = {'today', 'tomorrow', 'yesterday'}
otherTime = {"by", "after", "before", "during", "when", "while", 'on', 'in', 'last'}
lastTime = {"after", "before", "during", "when", "while"}
for i in range(1, self.len ):
if not found:
if ((self.ner[i-1][1] == 'PERSON' and i < self.len - 1) and
((self.pos[i][1] == 'MD' and self.pos[i+1][1] == 'VB') or
(self.pos[i][1] == 'VBD' and self.pos[i+1][1] == 'VBG'))):
q.append("When")
q.append(self.tokenized[i])
q.append(self.tokenized[i-1])
found = True
else:
if (self.ner[i-1][1] == 'PERSON' and i < self.len and
(self.pos[i][1] in ['VBZ', 'VBD'])):
if self.pos[i][1] == 'VBZ':
verb = "will"
else:
verb = "did"
new = self.tokenized[i]
q.append("When")
q.append(verb)
q.append(self.tokenized[i-1])
q.append(new)
found = True
else:
if (self.whenTags[i][1] in ['DATE', 'TIME'] or self.tokenized[i]
in time):
if self.tokenized[i-1] in otherTime:
q.pop()
return self.create_question(q)
elif (self.tokenized[i] in lastTime):
return self.create_question(q)
elif i != self.len - 1:
q.append(self.tokenized[i])
return self.create_question(q)
else:
return self.create_question(q)
def yesNo(self):
posTag = copy.deepcopy(self.pos)
flip = False
indication = {"is", "are", "does", "were", "can", "were", "will",
"has", "had", "have", "could", "would", "should"}
verbs = list()
q = list()
for i in range(len(posTag)):
word = posTag[i][0]
tag = posTag[i][1]
if word in indication:
verbs.append(i)
if len(verbs) == 0:
posTag.insert(0, ('did', 'VBD'))
flip = True
else:
posTag.insert(0, posTag.pop(verbs[0]))
q.append(posTag[0][0].title())
for i in range(len(posTag)-1):
w = posTag[i+1][0]
wnew = ""
if i == 0:
if posTag[1][1] == "PRP":
w = self.proN
elif posTag[1][1] == "NNP":
w = w.lower()
if flip:
try:
wnew = en.verb.present(w)
except:
wnew = w
else:
wnew = w
q.append(wnew)
q.pop()
return self.create_question(q)
def where(self):
nerTag = copy.deepcopy(self.ner)
sentence = self.tokenized
posTag = copy.deepcopy(self.pos)
q = list()
verbs = {"VBD", "VBZ"}
nouns = {"NN", "NNP", "PRP"}
wh = {"at", "in"}
sub = False
loc = False
subI = 0
locI = 0
vals = dict()
for w in range(len(sentence)):
if(loc):
if (posTag[w][1] in verbs):
vals["verb"] = (sentence[w], w)
locI = w
break
if(sub):
if (posTag[w][1] in verbs):
vals["verb"] = (sentence[w], w)
if (nerTag[w][1] == "LOCATION" and sentence[w-1] in wh):
subI = w - 1
if(not sub and not loc):
if(nerTag[w][1] == "LOCATION"):
loc = True
elif(posTag[w][1] in nouns):
sub = True
vals["sub"] = (sentence[w], w)
if subI == 0 and locI == 0:
return q
elif sub:
w1 = vals["verb"][1]
bc1 = ''.join(str(e) for e in sentence[w1+1:subI])
bc1 = bc1.replace(",", " ")
w0 = vals["sub"][1]
bc0 = ''.join(str(e) for e in sentence[w0-1:w1])
bc0 = bc0.replace(",", " ")
if (vals["verb"][0] in {"is", "was"}):
q.append("Where")
q.append(vals["verb"][0])
q.append(bc0)
q.append(bc1)
elif(nerTag[vals["verb"][1]][1] == "VBD"):
q.append("Where did")
q.append(vals["sub"][0])
q.append(en.verb.present(vals["verb"][0]))
q.append(bc1)
else:
q.append("Where does")
q.append(vals["sub"][0])
q.append(en.verb.present(vals["verb"][0]))
q.append(bc1)
elif loc:
w = vals["verb"][1]
bc = ''.join(str(e) + " " for e in sentence[locI:])
bc = bc.replace(".", "")
bc = bc.replace("!", "")
q.append("Where")
q.append(bc)
return self.create_question(q)
def why(self):
n = self.ner
p = self.pos
vI = None
sI = None
sentence = self.tokenized
nouns = {"NN", "NNP", "PRP", "NNS"}
verbs = {"VBD", "VBZ", "VBP"}
vs = False
ss = False
done = 0
nerTag = copy.deepcopy(n)
posTag = copy.deepcopy(p)
dt = None
q = list()
if("because" in sentence or "since" in sentence):
for w in range(self.len):
if sentence[w] in {"because", "since"}:
done = w
if posTag[w][1] in nouns and not ss:
sI = w
ss = True
if(w-1 >= 0):
if(posTag[w-1][1] == "DT"):
dt = sentence[w-1]
else:
dt = None
if posTag[w][1] in verbs and not vs:
vs = True
vI = w
if sI != None and vI!=None:
bc = ''.join(str(e) + " " for e in sentence[vI+1:done])
bc = bc.replace(".", "")
bc = bc.replace("!", "")
q.append("Why")
q.append(sentence[vI])
if(dt != None):
q.append(dt)
q.append(sentence[sI])
q.append(bc)
return self.create_question(q)
def clean(result):
clean = list()
for q in result:
if(q != [] and q!= None and q!=""):
clean.append(q)
return clean
if __name__ == '__main__':
txtFile = sys.argv[1]
n = int(sys.argv[2])
with open(txtFile, 'r', encoding="latin1", errors='surrogateescape') as f:
content = f.read()
f.close()
sentences = Sentences(content)
counter = 0
result = sentences.overall_questions
result = clean(result)
while(counter<n):
if(result == []):
print("")
counter += 1
else:
print(result[counter%len(result)])
counter += 1
| UTF-8 | Python | false | false | 12,040 | py | 7 | questions.py | 4 | 0.460714 | 0.447924 | 0 | 344 | 33.997093 | 124 |
L-Ezray/zpp | 15,247,133,928,781 | 88316af5cd1fa839a4af2495a77f97c47be4ca52 | cb2a7ef60faa649f2c6b97958135d4e182bcdb5e | /18.py | 311ff0446999c175abaa6a4f334f93036f87ce7f | [] | no_license | https://github.com/L-Ezray/zpp | d857df7588bf57924d2f3685c9fbedc93e892df6 | ed715129907aa1f9c473cceb5df4c376f6699b3e | refs/heads/master | "2020-03-22T19:02:03.440262" | "2018-07-17T09:07:48" | "2018-07-17T09:07:48" | 140,499,358 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | 帐号 = '456456456'
密码 = '123123'
money = 100
a = input('请输入帐号:')
b = input('请输入密码:')
if (a==帐号)and(b==密码):
print('开始取钱')
c = input('请输入取钱金额:')
if float(c)<=100:
print('您本次取钱金额为%s,'%c,'您的余额为',100-float(c))
else:
print('没钱取毛线')
else:
print('非法账户')
| UTF-8 | Python | false | false | 383 | py | 32 | 18.py | 32 | 0.530249 | 0.44484 | 0 | 14 | 19 | 51 |
Z19900505/Python.learn | 8,495,445,321,753 | 0dbfddd2806c942a6541697d302b6ffd46dfd847 | c748b5a96187f4eb8ff13c67efdb862b3269d1af | /module/svgwrite/practice/Symbol.example.py | 3d89924dd89479aa87296e676d50fb9f3652d22a | [] | no_license | https://github.com/Z19900505/Python.learn | 29cc75148de867d179788e7d677f4171476a084f | 03144dcaa575d29ffde14a30169e291ac833b9f4 | refs/heads/master | "2020-12-02T08:40:31.192813" | "2016-12-02T08:45:19" | "2016-12-02T08:45:19" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# coding: utf-8
import svgwrite
dwg = svgwrite.Drawing("Symbol.example.svg", (900, 900), debug=True)
symbol = dwg.symbol(id='symbol')
dwg.defs.add(symbol)
symbol.viewbox(0, 0, 10, 10)
symbol.add(dwg.circle((5,5), r=5))
dwg.add(dwg.use(symbol, insert=(100, 100), size=(10, 10), fill='green'))
dwg.add(dwg.use(symbol, insert=(200, 200), size=(20, 20), fill='red'))
dwg.add(dwg.use(symbol, insert=(300, 300), size=(30, 30), fill='blue'))
dwg.add(dwg.use(symbol, insert=(400, 400), size=(40, 40), fill='pink'))
dwg.add(dwg.use(symbol, insert=(500, 500), size=(50, 50), fill='yellow'))
dwg.add(dwg.use(symbol, insert=(600, 600), size=(60, 60), fill='brown'))
dwg.add(dwg.use(symbol, insert=(700, 700), size=(70, 70), fill='gray'))
dwg.save() | UTF-8 | Python | false | false | 763 | py | 364 | Symbol.example.py | 98 | 0.656619 | 0.542595 | 0 | 20 | 37.2 | 73 |
skandix/Dramhack | 12,008,728,598,892 | 2b1409eb7983c8ea0b477752ba4a0c2c1d1f5fb8 | 51d0fe411f4bf8a9bcc9b0bf4ed2a74cb31dfaa8 | /modules/dhsc/fetch.py | c22a94417b2550fefb19f0e7e886ef17f64cb882 | [] | no_license | https://github.com/skandix/Dramhack | 69a1298415a2d3c01fb705d06a383c963dc8fae4 | 8ed970a49f5d00468d12d3c2890da8e021234415 | refs/heads/master | "2017-12-01T21:45:30.297777" | "2017-06-16T23:23:34" | "2017-06-16T23:23:34" | 61,155,771 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
import datetime
import sys
#for handeling shit scandinavian characters
reload(sys)
sys.setdefaultencoding('utf-8')
def fetchAPIData(endpoint, api="https://api.crew.dreamhack.se/1"):
compiledURL = api+endpoint
return requests.get(compiledURL, verify=False).json()
class schedule_fetcher(object):
# Class variabless
_result = []
_return_string = ""
# Init program
def __init__(self):
self._result = fetchAPIData('/schedule/get/')
if (len(self._result) <= 0):
raise Exception(self, "Dude ingen paa den lista")
# Get only mainstage
self._result = self._result[0]['events']
# Sort them
self._sort_them()
# Fix time
self._convert_time()
# Append difference
self._add_diff()
# Return the results
def gief(self):
# Check first if already parsed
if(len(self._return_string) != 0):
return self._return_string
for record in self._result:
#print record
self._return_string += str('\n\nName: {:s}\nStart: {:}\nStop: {:}\nDuration: {:}\nText: {:s}\nContact: {:s} \nID: {:s} ').format(record['head'], record['start'], record['stop'], record['Diff'], "N/A", "N/A", record['id'])
#head = record['head']
#start = record['start']
#stop = record['stop']
#diff = record['Diff']
#text = record['text']
#contact = record['contact']
#ids = record['id']
return self._return_string
# Sort them based on rules
def _sort_them(self):
self._result = sorted(self._result, key=lambda result: result['start'])
# Convert to human readable time format
def _convert_time(self):
for record in self._result:
record['start'] = datetime.datetime.fromtimestamp(float(record['start'])).strftime('%Y-%m-%d %H:%M:%S')
record['stop'] = datetime.datetime.fromtimestamp(float(record['stop'])).strftime('%Y-%m-%d %H:%M:%S')
# Append difference
def _add_diff(self):
for record in self._result:
start = datetime.datetime.strptime(record['start'], "%Y-%m-%d %H:%M:%S")
stop = datetime.datetime.strptime(record['stop'], "%Y-%m-%d %H:%M:%S")
record['Diff'] = stop - start
# Print ut
def _vis_dem(self):
for line in self._result:
return line
# Return a json file
def _get_them_json(self):
return self._return_string
stage = schedule_fetcher().gief()
| UTF-8 | Python | false | false | 2,618 | py | 9 | fetch.py | 3 | 0.559587 | 0.557678 | 0 | 87 | 29.091954 | 233 |
storm2513/Task-manager | 9,174,050,154,555 | 12e458b23b5ed0418e0b06e0c295f098d0a0b0b9 | 638ef8d1570533e65f4520447862f2de2bf74694 | /task-manager/library/tests/storage_test.py | e8a0855ca3c7bbdae44ecfc3e02893a4f06bc21d | [] | no_license | https://github.com/storm2513/Task-manager | a325b5beb82407a613da1abbfe0f23ccd60d3152 | 8ea1f92fe56b9b331ade1163521e99b3fb524863 | refs/heads/master | "2020-03-26T00:15:21.365312" | "2018-08-10T16:58:10" | "2018-08-10T16:58:10" | 144,312,524 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
import datetime
from tmlib.storage.storage_models import (
Task,
UsersReadTasks,
UsersWriteTasks,
Category,
TaskPlan,
Notification,
DatabaseConnector)
from tmlib.storage.category_storage import CategoryStorage
from tmlib.storage.notification_storage import NotificationStorage
from tmlib.storage.task_storage import TaskStorage
from tmlib.storage.task_plan_storage import TaskPlanStorage
from tmlib.models.task import Status
from tmlib.models.notification import Status as NotificationStatus
from tests.factories import (
    CategoryFactory,
    TaskFactory,
    TaskPlanFactory,
    NotificationFactory)
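# These tests exercise the peewee-backed storage layer end to end. The
# storage classes share a simple CRUD shape, used throughout this file
# roughly as:
#
#     storage = TaskStorage(':memory:')
#     task = storage.create(task)      # returns the model with its new id
#     task = storage.get_by_id(task.id)
#     storage.delete_by_id(task.id)
#
# Each test case runs against a fresh in-memory SQLite database: tables are
# created in setUp() and dropped again in tearDown(), keeping tests isolated.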
class StorageTest(unittest.TestCase):
def setUp(self):
self.database = ':memory:'
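        # ':memory:' gives each test run a throwaway SQLite database. This
        # assumes the storage objects below share a single connection, since
        # separate connections to ':memory:' would each see their own schema.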
self.category_storage = CategoryStorage(self.database)
self.task_storage = TaskStorage(self.database)
self.task_plan_storage = TaskPlanStorage(self.database)
self.notification_storage = NotificationStorage(self.database)
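        # The factories build plain, unsaved model objects; persistence is
        # exercised only through the storage classes under test.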
self.category = CategoryFactory(user_id=10)
self.task = TaskFactory()
self.task_plan = TaskPlanFactory()
self.notification = NotificationFactory()
DatabaseConnector(self.database).create_tables()
def tearDown(self):
DatabaseConnector(self.database).drop_tables()
# CategoryStorage tests
def test_creates_category(self):
before_categories_count = Category.select().count()
self.category_storage.create(self.category)
after_categories_count = Category.select().count()
self.assertEqual(before_categories_count + 1, after_categories_count)
def test_creates_category_and_gets_it_by_id(self):
category_with_id = self.category_storage.create(self.category)
category_from_db = self.category_storage.get_by_id(category_with_id.id)
self.assertEqual(category_with_id.id, category_from_db.id)
self.assertEqual(category_with_id.name, category_from_db.name)
self.assertEqual(category_with_id.user_id, category_from_db.user_id)
def test_deletes_category_by_id(self):
category_id = self.category_storage.create(self.category).id
self.category_storage.delete_by_id(category_id)
self.assertEqual(
Category.select().where(
Category.id == category_id).count(), 0)
def test_updates_category(self):
category_with_id = self.category_storage.create(self.category)
category_with_id.name = "Movies to watch"
self.category_storage.update(category_with_id)
category_from_db = Category.get(Category.id == category_with_id.id)
self.assertEqual(category_from_db.name, "Movies to watch")
# TaskStorage tests
def test_creates_task(self):
before_tasks_count = Task.select().count()
self.task_storage.create(self.task)
after_tasks_count = Task.select().count()
self.assertEqual(before_tasks_count + 1, after_tasks_count)
def test_creates_task_and_gets_it_by_id(self):
task_with_id = self.task_storage.create(self.task)
task_from_db = self.task_storage.get_by_id(task_with_id.id)
self.assertEqual(task_with_id.id, task_from_db.id)
self.assertEqual(task_with_id.title, task_from_db.title)
self.assertEqual(task_with_id.user_id, task_from_db.user_id)
self.assertEqual(task_with_id.note, task_from_db.note)
def test_deletes_task_by_id(self):
task_id = self.task_storage.create(self.task).id
self.task_storage.delete_by_id(task_id)
self.assertEqual(Task.select().where(Task.id == task_id).count(), 0)
def test_updates_task(self):
task_with_id = self.task_storage.create(self.task)
task_with_id.title = "Do something great"
self.task_storage.update(task_with_id)
task_from_db = Task.get(Task.id == task_with_id.id)
self.assertEqual(task_from_db.title, "Do something great")
def test_returns_user_tasks(self):
first_task = TaskFactory()
second_task = TaskFactory()
user_id = 10
first_task.user_id = user_id
second_task.user_id = user_id
self.task_storage.create(first_task)
self.task_storage.create(second_task)
tasks = self.task_storage.user_tasks(user_id)
self.assertEqual(len(tasks), 2)
def test_returns_inner_tasks(self):
task_id = self.task_storage.create(self.task).id
inner_task = TaskFactory()
inner_task.parent_task_id = task_id
self.task_storage.create(inner_task)
inner_tasks = self.task_storage.inner(task_id)
self.assertEqual(len(inner_tasks), 1)
def test_returns_inner_tasks_recursive(self):
task_id = self.task_storage.create(self.task).id
inner_task = TaskFactory()
inner_task.parent_task_id = task_id
inner_task_id = self.task_storage.create(inner_task).id
second_level_inner_task = TaskFactory()
second_level_inner_task.parent_task_id = inner_task_id
self.task_storage.create(second_level_inner_task)
inner_tasks = self.task_storage.inner(task_id, True)
self.assertEqual(len(inner_tasks), 2)
def test_adds_user_for_read(self):
user_id = 10
task_id = self.task_storage.create(self.task).id
self.task_storage.add_user_for_read(user_id=user_id, task_id=task_id)
        # peewee compound conditions need the bitwise & operator; with Python's
        # `and`, only the last condition actually reaches the query.
        self.assertEqual(
            UsersReadTasks.select().where(
                (UsersReadTasks.task_id == task_id) &
                (UsersReadTasks.user_id == user_id)).count(),
            1)
def test_adds_user_for_write(self):
user_id = 10
task_id = self.task_storage.create(self.task).id
self.task_storage.add_user_for_write(user_id=user_id, task_id=task_id)
        self.assertEqual(
            UsersWriteTasks.select().where(
                (UsersWriteTasks.task_id == task_id) &
                (UsersWriteTasks.user_id == user_id)).count(),
            1)
def test_removes_user_for_read(self):
user_id = 10
task_id = self.task_storage.create(self.task).id
self.task_storage.add_user_for_read(user_id=user_id, task_id=task_id)
self.task_storage.remove_user_for_read(
user_id=user_id, task_id=task_id)
        self.assertEqual(
            UsersReadTasks.select().where(
                (UsersReadTasks.task_id == task_id) &
                (UsersReadTasks.user_id == user_id)).count(),
            0)
def test_removes_user_for_write(self):
user_id = 10
task_id = self.task_storage.create(self.task).id
self.task_storage.add_user_for_write(user_id=user_id, task_id=task_id)
self.task_storage.remove_user_for_write(
user_id=user_id, task_id=task_id)
        self.assertEqual(
            UsersWriteTasks.select().where(
                (UsersWriteTasks.task_id == task_id) &
                (UsersWriteTasks.user_id == user_id)).count(),
            0)
# TaskPlanStorage tests
def test_creates_task_plan(self):
before_plans_count = TaskPlan.select().count()
self.task_plan_storage.create(self.task_plan)
after_plans_count = TaskPlan.select().count()
self.assertEqual(before_plans_count + 1, after_plans_count)
def test_deletes_task_plan_by_id(self):
task_plan_id = self.task_plan_storage.create(self.task_plan).id
self.task_plan_storage.delete_by_id(task_plan_id)
self.assertEqual(TaskPlan.select().where(
TaskPlan.id == task_plan_id).count(), 0)
def test_updates_task_plan(self):
new_interval = 500
new_datetime = datetime.datetime.now()
task_plan_with_id = self.task_plan_storage.create(self.task_plan)
task_plan_with_id.interval = new_interval
task_plan_with_id.last_created_at = new_datetime
self.task_plan_storage.update(task_plan_with_id)
task_plan_from_db = TaskPlan.get(TaskPlan.id == task_plan_with_id.id)
self.assertEqual(task_plan_from_db.interval, new_interval)
self.assertEqual(task_plan_from_db.last_created_at, new_datetime)
def test_returns_all_user_plans(self):
user_id = 10
        self.task_plan.user_id = user_id
plans_count = 3
for i in range(plans_count):
self.task_plan_storage.create(self.task_plan)
plans = self.task_plan_storage.all_user_plans(user_id)
self.assertEqual(len(plans), plans_count)
def test_processes_task_plans(self):
user_id = 10
repeated_task = TaskFactory()
repeated_task.status = Status.TEMPLATE.value
repeated_task.user_id = user_id
task_id = self.task_storage.create(repeated_task).id
before_tasks_count = len(self.task_storage.user_tasks(user_id))
interval = 300
big_interval = interval * 10
last_created_at = datetime.datetime.now() - datetime.timedelta(seconds=interval + 5)
"""
repeated_task_plan after processing should create new task
repeated_task_plan_big_interval should not create new task because of bit interval
"""
repeated_task_plan = TaskPlan(
user_id=user_id,
task_id=task_id,
last_created_at=last_created_at,
interval=interval)
repeated_task_plan_big_interval = TaskPlan(
user_id=user_id,
task_id=task_id,
last_created_at=last_created_at,
interval=big_interval)
self.task_plan_storage.create(repeated_task_plan)
self.task_plan_storage.create(repeated_task_plan_big_interval)
self.task_plan_storage.process_plans(self.task_storage)
self.assertEqual(len(self.task_storage.user_tasks(user_id)),
before_tasks_count + 1)
# NotificationStorage tests
def test_creates_notification(self):
before_notifications_count = Notification.select().count()
self.notification_storage.create(self.notification)
after_notifications_count = Notification.select().count()
self.assertEqual(
before_notifications_count + 1,
after_notifications_count)
def test_deletes_notification_by_id(self):
notification_id = self.notification_storage.create(
self.notification).id
self.notification_storage.delete_by_id(notification_id)
self.assertEqual(Notification.select().where(
Notification.id == notification_id).count(), 0)
def test_updates_notification(self):
new_title = "Updated title"
notification_with_id = self.notification_storage.create(
self.notification)
notification_with_id.title = new_title
self.notification_storage.update(notification_with_id)
notification_from_db = Notification.get(
Notification.id == notification_with_id.id)
self.assertEqual(notification_from_db.title, new_title)
def test_returns_all_user_notifications(self):
user_id = 10
        self.notification.user_id = user_id
notifications_count = 3
for i in range(notifications_count):
self.notification_storage.create(self.notification)
notifications = self.notification_storage.all_user_notifications(
user_id)
self.assertEqual(len(notifications), notifications_count)
def test_returns_pending_notifications(self):
user_id = 10
        self.notification.user_id = user_id
self.notification.status = NotificationStatus.PENDING.value
notifications_count = 3
for i in range(notifications_count):
self.notification_storage.create(self.notification)
# create notification with other status
self.notification.status = NotificationStatus.CREATED.value
self.notification_storage.create(self.notification)
notifications = self.notification_storage.pending(user_id)
self.assertEqual(len(notifications), notifications_count)
def test_returns_created_notifications(self):
user_id = 10
        self.notification.user_id = user_id
self.notification.status = NotificationStatus.CREATED.value
notifications_count = 3
for i in range(notifications_count):
self.notification_storage.create(self.notification)
# create notification with other status
self.notification.status = NotificationStatus.PENDING.value
self.notification_storage.create(self.notification)
notifications = self.notification_storage.created(user_id)
self.assertEqual(len(notifications), notifications_count)
    def test_returns_shown_notifications(self):
        user_id = 10
        self.notification.user_id = user_id
        self.notification.status = NotificationStatus.SHOWN.value
notifications_count = 3
for i in range(notifications_count):
self.notification_storage.create(self.notification)
# create notification with other status
self.notification.status = NotificationStatus.CREATED.value
self.notification_storage.create(self.notification)
notifications = self.notification_storage.shown(user_id)
self.assertEqual(len(notifications), notifications_count)
def test_process_notification(self):
self.task.start_time = datetime.datetime.now()
task_id = self.task_storage.create(self.task).id
        relative_start_time = 300
        self.notification.status = NotificationStatus.CREATED.value
        self.notification.relative_start_time = relative_start_time
self.notification.task_id = task_id
notification_id = self.notification_storage.create(
self.notification).id
self.notification_storage.process_notifications()
processed_notification = self.notification_storage.get_by_id(
notification_id)
self.assertEqual(
processed_notification.status,
NotificationStatus.PENDING.value)
| UTF-8 | Python | false | false | 13,867 | py | 56 | storage_test.py | 42 | 0.663085 | 0.658037 | 0 | 319 | 42.470219 | 99 |
Buhua-Liu/Adversarial-examples--Attack-and-Defense | 8,229,157,351,789 | 5ce8a6e60dccc11a7ad5042a3da379795ba2c596 | 4cc7a958e2a7d79001fcb0be176efd52084756a8 | /main.py | 9d9c82e9324bd9a964210320c5f76ac93af1a208 | [
"MIT"
] | permissive | https://github.com/Buhua-Liu/Adversarial-examples--Attack-and-Defense | a8d387554d4a5429251e708c2c2f2865e6b80302 | 8b4ad8e0deea1e60d702806ea7b7a38cfb706e0d | refs/heads/master | "2020-08-19T20:10:52.521051" | "2020-01-08T03:14:20" | "2020-01-08T03:14:20" | 215,950,017 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tensorflow as tf
import numpy as np
import os
import time
from dknn import DkNNModel, plot_reliability_diagram
from attack.fgsm import FGSM
def main():
# Loading and pre-processing data
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
train_images = (train_images - 127.5) / 127.5
test_images = test_images.reshape(test_images.shape[0], 28, 28, 1).astype('float32')
test_images = (test_images - 127.5) / 127.5
# Use a holdout of the test data to simulate calibration data for the DkNN
nb_cali = 750
train_data = train_images
y_train = train_labels
cali_data = test_images[:nb_cali]
y_cali = test_labels[:nb_cali]
test_data = test_images[nb_cali:]
y_test = test_labels[nb_cali:]
#Define callable that returns a dictionary of all activations for a dataset
flatten = tf.keras.layers.Flatten()
def get_activations(data):
data_activations = {}
for layer in layers:
output = model.get_layer(layer).output
extractor = tf.keras.Model(model.input, output)
data_activations[layer] = flatten(extractor(data)).numpy()
return data_activations
# Load the trained model
model = tf.keras.models.load_model('mnist.h5')
    _, acc = model.evaluate(test_images, test_labels, verbose=0)
print('The accuracy of the trained model on test data is:', acc)
# Extract representations for the training and calibration data at each layer of interest to the DkNN.
layers = ['conv2d', 'conv2d_1', 'conv2d_2', 'dense']
neighbors = 75
nb_classes = 10
    number_bits = 17
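    # Added note: `neighbors` is k for the per-layer nearest-neighbor lookup;
    # `number_bits` presumably sizes the locality-sensitive hashes inside this
    # DkNNModel implementation (an assumption; see the dknn module).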
# Instantiate the DkNNModel
dknn_model = DkNNModel(
neighbors,
layers,
get_activations,
train_data,
y_train,
nb_classes,
        number_bits=number_bits
)
# Calibrate the credibility metric
dknn_model.calibrate(cali_data, y_cali)
# Generate adversarial examples
print('================================================================')
print('Generating adversarial examples with FGSM...')
start = time.time()
adv = FGSM(
model,
test_data,
        eps=0.5,
        norm=np.inf,
        clip_min=-1.,
        clip_max=1.,
)
end = time.time()
print('Generation completed! Time cost:', end-start, 's.')
# Test the original DNN and the corresponding DkNN
# on clean test data and adversarial test data
for data_in, fname in zip([test_data, adv], ['clean', 'adversarial']):
print('================================================================')
print('Testing the DNN and DkNN on {} data...'.format(fname))
start = time.time()
preds = model(data_in).numpy()
print('For DNN, accuracy on', fname, 'data:',
np.mean(np.argmax(preds, axis=1) == y_test))
dknn_preds = dknn_model.fprop_np(data_in)
print('For DkNN, accuracy on', fname, 'data:',
np.mean(np.argmax(dknn_preds, axis=1) == y_test))
if not os.path.exists('diagram'):
os.mkdir('diagram')
print('----------------------------------------------------------------')
print('Plotting dnn diagrams...')
plot_reliability_diagram(preds,
y_test, 'diagram/dnn_' + fname + '.png')
print('----------------------------------------------------------------')
print('Plotting dknn diagrams...')
plot_reliability_diagram(dknn_preds,
y_test, 'diagram/dknn_' + fname + '.png')
end = time.time()
print('Test on {} completed! Time cost: {}s'.format(fname, end-start))
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 3,879 | py | 3 | main.py | 2 | 0.559423 | 0.545501 | 0 | 102 | 37.029412 | 106 |
mrpal39/portflioWebapp | 19,447,611,931,096 | 5518e1772d6a46ee72e76d5cc5adf0af7bbd3139 | 3e05276c6562bbca2c46daec0bf30d765bb6c8d5 | /accounts/decorators.py | 3daa600bfca8acdda42de048d49f9bb8ede334e7 | [] | no_license | https://github.com/mrpal39/portflioWebapp | 762571a74979ddcd4abf90c8ab8684dcd2afa6fa | 898023c0b528557d4ab5ece6c48707f5e61ea296 | refs/heads/master | "2023-08-07T06:17:28.081727" | "2021-10-05T16:30:27" | "2021-10-05T16:30:27" | 414,057,726 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import user_passes_test
def jobseeker_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url='login'):
    actual_decorator = user_passes_test(
        lambda u: u.is_active and u.jobseeker,
        login_url=login_url,
        redirect_field_name=redirect_field_name
    )
    if function:
        return actual_decorator(function)
    return actual_decorator
def employer_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url='login'):
    actual_decorator = user_passes_test(
        lambda u: u.is_active and u.employer,
        login_url=login_url,
        redirect_field_name=redirect_field_name
    )
    if function:
        return actual_decorator(function)
return actual_decorator | UTF-8 | Python | false | false | 832 | py | 62 | decorators.py | 43 | 0.707933 | 0.707933 | 0 | 31 | 25.741935 | 97 |
ericsonj/mse_pds_tp_final | 13,443,247,654,240 | 6897c943d5284659257ca1f6acf48289607886dd | fe72e3476a0ced87470194efffbd72a94f71e861 | /predictor.py | 370fe7ea0411324b93d9bff81fc8b5b5928af1bc | [] | no_license | https://github.com/ericsonj/mse_pds_tp_final | 23ddf736f6c233a2c0aaabab6606ff83dc1e3125 | 02c388fa15bd7a5a3ac76cda5cd47de2fab1c31d | refs/heads/master | "2020-07-04T22:53:16.182098" | "2019-08-16T22:09:48" | "2019-08-16T22:09:48" | 202,448,431 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 14 18:25:51 2019
@author: ericson
"""
import numpy as np
import matplotlib.pyplot as plt
# Signal for the problem
N = 100
M = 4
fs = 1000
w0 = 3
phi0 = np.pi/4
amp_noise = 0.02
ts = 1/fs  # sampling period
tt = np.linspace(0, (N-1)*ts, N).flatten()
signal = np.sin(2*w0*np.pi*tt + phi0)
# Noise creation
noise = np.random.normal(0,amp_noise,np.size(signal))
signal_noise = signal + noise
## Apply the Wiener filter
# Build the M x (N-M+1) data matrix of delayed samples: each column holds
# M consecutive samples of the noisy signal, newest first
rows = M
columns = np.size(signal_noise) - M + 1
hmatrix = np.zeros((rows, columns))
for i in np.arange(columns):
    hmatrix[:, i] = signal_noise[np.arange(M + i - 1, i - 1, -1)]
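# --- Added sketch (not in the original file): one plausible way to finish the
# predictor. Fit an order-M one-step linear predictor by least squares on the
# data matrix built above and compare its output with the noisy signal.
targets = signal_noise[M:]      # the samples x[n] to be predicted
X = hmatrix[:, :-1]             # column j holds the M samples preceding x[M + j]
coeffs, _, _, _ = np.linalg.lstsq(X.T, targets, rcond=None)
predicted = X.T @ coeffs
plt.figure()
plt.plot(tt[M:], targets, label='noisy signal')
plt.plot(tt[M:], predicted, label='one-step linear prediction')
plt.xlabel('time [s]')
plt.legend()
plt.show()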
| UTF-8 | Python | false | false | 772 | py | 3 | predictor.py | 1 | 0.596628 | 0.54345 | 0 | 39 | 18.74359 | 76 |
rochapaulo/Wuohoy | 3,075,196,596,050 | 2865cc82bf49a2732488082452c63652d44fe41a | c120b8972f14fdfa77def0094ff42f170e54200c | /frank/command/vpncommand.py | 2f135c97bb440e83fd426f661fc48433f5af0d5a | [] | no_license | https://github.com/rochapaulo/Wuohoy | 34fc792642d8a093cea111890a14a1c6c9d5ad78 | 4cb8083832bf60cf64069f4c246b0f3b493dfc81 | refs/heads/master | "2015-09-24T12:36:42.763837" | "2015-07-06T14:24:36" | "2015-07-06T14:24:36" | 37,598,827 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import subprocess
import sys
# [1] vpnPassword
# [2] vpnUser
# [3] vpnAuthGroup
# [4] vpnHost
# sudo must wrap openconnect (which needs root for the tunnel device), not echo
command = 'echo ' + sys.argv[1] + ' | sudo openconnect --user=' + sys.argv[2] + ' --authgroup=' + sys.argv[3] + ' --passwd-on-stdin --no-cert-check --no-xmlpost ' + sys.argv[4]
print("executing: " + command.replace(sys.argv[1], '******'))  # avoid printing the password
subprocess.call(command, shell=True)
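# --- Added sketch (not part of the original script): passing the command as an
# argument list and feeding the password on stdin avoids shell interpolation of
# the arguments entirely. Requires Python 3.5+ for subprocess.run; the names
# below are illustrative.
def run_openconnect(password, user, authgroup, host):
    args = ['sudo', 'openconnect', '--user=' + user, '--authgroup=' + authgroup,
            '--passwd-on-stdin', '--no-cert-check', '--no-xmlpost', host]
    return subprocess.run(args, input=password + '\n', universal_newlines=True)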
| UTF-8 | Python | false | false | 364 | py | 16 | vpncommand.py | 10 | 0.648352 | 0.626374 | 0 | 14 | 25 | 176 |
elomario/tarantinobwah | 7,559,142,478,313 | 0bb288cfb1d7b7413ee78f92d799769b0dc78879 | 191a0b27a670ecd7f8473d25e5a2e7aabea19af2 | /projet_web/events/views.py | bda623bf2fb639462dd0dabb6a491968032004f4 | [] | no_license | https://github.com/elomario/tarantinobwah | a83697b23ce966baeb963bf82fec0165ccbe6513 | 327d1ef5d0d24c8494221d60548aba263cb0aa79 | refs/heads/master | "2021-01-10T19:50:36.067471" | "2014-06-01T09:42:38" | "2014-06-01T09:42:38" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from django.template import RequestContext, loader
from events.models import Event, Member
from django.contrib.auth.models import User
from ecomments.models import Ecomment
def index(request):
latest_events_list = Event.objects.order_by('-name')[:5]
template = loader.get_template('events/index.html')
context = RequestContext(request, {
'latest_events_list': latest_events_list,
})
return HttpResponse(template.render(context))
def detail(request, events_id):
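    # Handles four POST actions on an event page: 'participate' and 'abandon'
    # toggle the member's participation, 'submit' adds a comment, 'delete'
    # removes one; the detail page is then re-rendered.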
if request.method == 'POST':
if 'participate' in request.POST:
            user_id = request.POST['user']
            user = User.objects.get(id=user_id)
participant = Member.objects.get(user=user)
event = get_object_or_404(Event, pk=events_id)
event.participants.add(participant)
elif 'abandon' in request.POST:
            a_id = request.POST['a_user']
a_user = User.objects.get(id=a_id)
a_participant = Member.objects.get(user=a_user)
a_event = get_object_or_404(Event, pk=events_id)
a_event.participants.remove(a_participant)
elif 'submit' in request.POST:
            c_id = request.POST['c_user']
            c_user = User.objects.get(id=c_id)
c_member = Member.objects.get(user=c_user)
c_event = get_object_or_404(Event, pk=events_id)
ecomment = Ecomment(member_commenting=c_member, event_commenting_on=c_event, core_of_comment=request.POST['comment'])
ecomment.save()
elif 'delete' in request.POST:
            d_id = request.POST['c_id']
Ecomment.objects.get(pk=d_id).delete()
event = get_object_or_404(Event, pk=events_id)
    ecomments_list = Ecomment.objects.all()
template = loader.get_template('events/description.html')
context = RequestContext(request, {
'event': event,
'ecomments_list': ecomments_list,
})
return HttpResponse(template.render(context))
def results(request, projects_title):
return HttpResponse("You're looking at the results of event %s." % projects_title) | UTF-8 | Python | false | false | 2,020 | py | 27 | views.py | 23 | 0.700495 | 0.692574 | 0 | 53 | 36.150943 | 120 |
Akhtar-Zaman/Odoo-Practice | 18,519,899,018,229 | 3d2b51064216ee4f61c25ee9dffb4b600cee3fd3 | 3ab59a6e4589ad7f2c5d40199cf562c0b90a15c0 | /hospital_management/models/views/Doctors.py | da4f32f0743d6c5c62052e724972539806d48850 | [] | no_license | https://github.com/Akhtar-Zaman/Odoo-Practice | 517f294073f4975dc9186780e5cd72560e670aa1 | d99142ab84cf96829373798685116aadd81f1e5d | refs/heads/main | "2023-05-10T07:22:49.140858" | "2021-06-08T13:12:48" | "2021-06-08T13:12:48" | 370,238,931 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from odoo import models, fields, api, _
from datetime import date
class HospitalDoctors(models.Model):
_name = 'hospital.doctors'
_rec_name = 'doc_name'
doc_image = fields.Binary('Image', attachment=True)
doc_name = fields.Char('Name')
    # Char rather than Integer so leading zeros and '+' prefixes are preserved
    doc_mobile = fields.Char('Phone Number')
doc_email = fields.Char('Email')
doc_address = fields.Text('Address')
doc_patients = fields.One2many('hospital.patients', 'pat_doctor', string="Patients")
doc_category = fields.Many2one('hospital.doctor.category', string='Department')
def Todays_Doctor_Appointment(self):
return {
'name': "Today's Appointments",
'type': "ir.actions.act_window",
'res_model': "hospital.patients",
'view_mode': "tree,form",
#'domain': ['&', ('pat_doctor','=', self.id),('appointment_date','=',date.today())],
'domain': [('pat_doctor','=', self.id)],
}
def Find_patient_by_date(self):
return {
'name': "Find Patient",
'type': "ir.actions.act_window",
'res_model': "wizard.patients.filter",
'view_mode': "form",
'target': "new",
'context': {'default_doc_id': self.id}
}
| UTF-8 | Python | false | false | 1,291 | py | 47 | Doctors.py | 26 | 0.55151 | 0.549961 | 0 | 44 | 27.909091 | 96 |
schmidtbri/task-queue-ml-model-deployment | 17,188,459,124,830 | 0a5c09eb99016c46c93e4840f990417da36e521e | f01953b9dcb236084a1ec792f18302f166ca64a5 | /scripts/concurrent_test.py | 361109553b7215406f8d12179668c0c555ea4423 | [
"MIT"
] | permissive | https://github.com/schmidtbri/task-queue-ml-model-deployment | a0eed5ac7ad6c07a36d81622df062f75ecb6c4c1 | f6d7a944811bba467515c9118a0ace4058dfbb0a | refs/heads/master | "2021-06-22T02:40:37.030068" | "2021-04-21T13:00:26" | "2021-04-21T13:00:26" | 209,553,163 | 9 | 2 | MIT | false | "2021-04-21T13:00:27" | "2019-09-19T12:51:42" | "2020-12-14T02:14:24" | "2021-04-21T13:00:26" | 113 | 5 | 1 | 1 | Python | false | false | """A script to test the model task queue with a concurrent requests"""
import time
from concurrent.futures import ThreadPoolExecutor as Executor
from model_task_queue.celery import app
def request_task(data):
task = app.tasks["model_task_queue.ml_model_task.iris_model"]
result = task.delay(data=data)
# waiting for the task to complete
while result.ready() is not True:
time.sleep(1)
prediction = result.get(timeout=1)
return prediction
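# Note (added): the ready()/sleep loop above polls once per second; calling
# result.get() with a longer timeout would simply block instead, but polling
# keeps the task's pending state explicit in this test.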
def run_test():
data = [
{"sepal_length": 5.0, "sepal_width": 3.2, "petal_length": 1.2, "petal_width": 0.2},
{"sepal_length": 5.5, "sepal_width": 3.5, "petal_length": 1.3, "petal_width": 0.2},
{"sepal_length": 4.9, "sepal_width": 3.1, "petal_length": 1.5, "petal_width": 0.1},
{"sepal_length": 4.4, "sepal_width": 3.0, "petal_length": 1.3, "petal_width": 0.2}
]
with Executor(max_workers=4) as exe:
jobs = [exe.submit(request_task, d) for d in data]
results = [job.result() for job in jobs]
print("The tasks returned these predictions: {}".format(results))
if __name__ == '__main__':
run_test()
| UTF-8 | Python | false | false | 1,146 | py | 14 | concurrent_test.py | 9 | 0.623909 | 0.593368 | 0 | 34 | 32.705882 | 91 |
ZZRebas/python-100- | 1,597,727,841,928 | 9bae1cd92b50b8642e7ce03b4e2b9d0b9a21c642 | b98c6faf3eb68aacb6916af456367d69bda2385b | /question bank/Day_4.py | f6459cd9bd769ca444a4d8d3a03849bf12dc3695 | [] | no_license | https://github.com/ZZRebas/python-100- | 1a7e68a31312bb86724b44f30b30376030cc87f1 | 44c225f000f38cf0772336e7208d78151e39133d | refs/heads/main | "2023-06-19T16:21:37.053273" | "2021-07-01T07:08:18" | "2021-07-01T07:08:18" | 377,105,456 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
#Question 14
# > **_Write a program that accepts a sentence and calculates the number of upper case letters and lower case letters._**
# > **_Suppose the following input is supplied to the program:_**
# Hello world!
# > **_Then, the output should be:_**
# UPPER CASE 1
# LOWER CASE 9
# Method 1
import re
st=input()
print('UPPER CASE', len(re.findall('[A-Z]', st)))
print('LOWER CASE', len(re.findall('[a-z]', st)))
# Method 2
word = input()
upper,lower = 0,0
for i in word:
lower+=i.islower()
upper+=i.isupper()
print("UPPER CASE {0}\nLOWER CASE {1}".format(upper,lower))
'''
#Question 15
# > **_Write a program that computes the value of a+aa+aaa+aaaa with a given digit as the value of a._**
# > **_Suppose the following input is supplied to the program:_**
# 9
# > **_Then, the output should be:_**
# 11106
num=input()
print(eval('%s+%s%s+%s%s%s+%s%s%s%s'%(num,num,num,num,num,num,num,num,num,num)))
print(int(num)+int(num*2)+int(num*3)+int(num*4))
print(sum(int(num * i) for i in range(1,5)))
from functools import reduce
print(reduce(lambda x,y:int(x)+int(y),[num * i for i in range(1,5)]))
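# Worked check (added): for input 9, 9 + 99 + 999 + 9999 = 11106, matching the
# expected output above.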
| UTF-8 | Python | false | false | 1,102 | py | 25 | Day_4.py | 24 | 0.658135 | 0.635283 | 0 | 37 | 28.567568 | 119 |