repo_name
stringlengths 7
111
| __id__
int64 16.6k
19,705B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
151
| content_id
stringlengths 40
40
| detected_licenses
sequence | license_type
stringclasses 2
values | repo_url
stringlengths 26
130
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
unknown | revision_date
unknown | committer_date
unknown | github_id
int64 14.6k
687M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 12
values | gha_fork
bool 2
classes | gha_event_created_at
unknown | gha_created_at
unknown | gha_updated_at
unknown | gha_pushed_at
unknown | gha_size
int64 0
10.2M
⌀ | gha_stargazers_count
int32 0
178k
⌀ | gha_forks_count
int32 0
88.9k
⌀ | gha_open_issues_count
int32 0
2.72k
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 1
class | gha_disabled
bool 1
class | content
stringlengths 10
2.95M
| src_encoding
stringclasses 5
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 10
2.95M
| extension
stringclasses 19
values | num_repo_files
int64 1
202k
| filename
stringlengths 4
112
| num_lang_files
int64 1
202k
| alphanum_fraction
float64 0.26
0.89
| alpha_fraction
float64 0.2
0.89
| hex_fraction
float64 0
0.09
| num_lines
int32 1
93.6k
| avg_line_length
float64 4.57
103
| max_line_length
int64 7
931
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
wtsi-hgi/lurge | 3,212,635,571,213 | 97ee7de5eb7bea442889f724d652e74d3e4e05a6 | 53346281898358b0ae32ddd8b2dcc59e50c4a758 | /db/__init__.py | b3bc8b6ab4f89a4ef3633fa00ec2e3d745458c7e | [
"BSD-3-Clause"
] | permissive | https://github.com/wtsi-hgi/lurge | 56eca727c6a0becdb5161e3e746d4f1c177bc925 | 3dbffe7485603a44eed77120b501f45d96b16a31 | refs/heads/master | "2023-04-06T06:48:26.130001" | "2023-03-16T17:22:57" | "2023-03-16T17:22:57" | 210,387,428 | 1 | 0 | BSD-3-Clause | false | "2023-03-16T17:22:58" | "2019-09-23T15:17:29" | "2021-12-14T17:13:20" | "2023-03-16T17:22:57" | 640 | 0 | 0 | 2 | Python | false | false | import db.common
import db.warnings
import db_config
# Eagerly fetched at package import time: opens a SQL connection from the
# project's db_config and loads every historical usage record through it.
historical_usage = db.warnings.get_all_historical_usage_data(
    db.common.get_sql_connection(db_config))
| UTF-8 | Python | false | false | 161 | py | 31 | __init__.py | 26 | 0.782609 | 0.782609 | 0 | 6 | 25.833333 | 61 |
Hanlen520/EApitTest | 6,820,408,072,852 | fec94d49fab417b4d678a4351ab6c164d4f9d578 | c4fce5f3f05dcf23f45f35d8b325335091bcb7d1 | /common/oper_file.py | 7358f4647ddb505b3e429fc95589ece48c47a8cc | [] | no_license | https://github.com/Hanlen520/EApitTest | 291308ed909a074f12bb568a979ca331ac555679 | b014eb7b0739884865b3d7ce8b6b82d737ae2093 | refs/heads/master | "2020-04-03T10:36:19.076994" | "2018-10-28T09:28:38" | "2018-10-28T09:28:38" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# __author__ = "Link.Burrows"
# __date__ = "2018-05-29 11:47 PM"
"""封装操作文件的方法"""
import os
import time
import shutil
class OperFile:
    """Thin convenience wrapper around a couple of file operations."""

    def copy_file(self, src, dst):
        """Copy the contents of *src* into *dst* (metadata is not preserved)."""
        shutil.copyfile(src, dst)

    def trunc_file(self, filename):
        """Empty *filename*, leaving a zero-length file behind."""
        # Opening with mode 'w' already truncates; the explicit truncate()
        # call mirrors the original implementation.
        with open(filename, 'w') as handle:
            handle.truncate()
if __name__ == "__main__":
    # Demo: copy test.txt (next to this script) to a timestamped .log file,
    # then empty the original.
    pwd = os.path.dirname(os.path.abspath(__file__))
    filename = "test.txt"
    now = time.strftime("%Y-%m-%d %H_%M_%S")
    newfile = "test_" + now + ".log"
    # NOTE(review): paths are joined with a literal backslash, so this only
    # works on Windows — os.path.join would be portable. TODO confirm intent.
    src = pwd + '\\' + filename
    dst = pwd + '\\' + newfile
    op_file = OperFile()
    op_file.copy_file(src, dst)
    op_file.trunc_file(src)
| UTF-8 | Python | false | false | 741 | py | 13 | oper_file.py | 10 | 0.531825 | 0.513437 | 0 | 29 | 23.37931 | 52 |
Zar-rok/bubble_plot | 15,187,004,393,673 | ac3d15dcdfd30a9d94958ddf1be12ca6f882e887 | 30c691646fe9935c357b27606b158cbc84c42213 | /test_bubble_plot.py | b7e98ff85c81a66825f31bf1aac4863b35fb4db9 | [
"MIT"
] | permissive | https://github.com/Zar-rok/bubble_plot | 51f5b1011af3611708ad42409e0d22f2a02fd8c4 | 4669072dd820d22e5e9b06f220fe469cfe66db54 | refs/heads/master | "2023-02-22T22:27:35.269644" | "2021-01-25T20:52:43" | "2021-01-25T20:52:43" | 288,778,831 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from secrets import token_urlsafe
from unittest import TestCase
from unittest.mock import mock_open, patch
from bubble_plot import (
Bubble,
BubblePlot,
Config,
CSVWriter,
Facets,
LatexBubblePlotWriter,
Occurrence,
SplitXAxis,
build_and_save_plots,
compute_occurences_from,
compute_color_map,
)
class TestOccurence(TestCase):
    """Unit tests for the Occurrence counter."""

    def setUp(self):
        self.year = "2020"
        self.occurrence = Occurrence(self.year)

    def test_init(self):
        # A fresh Occurrence keeps the given year and starts counting at one.
        self.assertEqual(self.year, self.occurrence.year)
        self.assertEqual(1, self.occurrence.occurrence)

    def test_update(self):
        # The first update replaces the year and bumps the counter.
        self.occurrence.update("2019")
        self.assertEqual("2019", self.occurrence.year)
        self.assertEqual(2, self.occurrence.occurrence)
        # A later update keeps counting, but "2021" does not become the year.
        self.occurrence.update("2021")
        self.assertNotEqual("2021", self.occurrence.year)
        self.assertEqual(3, self.occurrence.occurrence)
class TestSplitXAxis(TestCase):
    """Unit tests for one half of the split x axis."""

    def setUp(self):
        self.x_axis = SplitXAxis("X", {})

    def test_update(self):
        # Updating with a matching entry must record the corresponding bubble.
        entry = {"X": "pouet", "Y": "hoho"}
        bubble = Bubble(label_x=entry["X"], label_y=entry["Y"])
        self.x_axis.update(entry, "2020", "Y")
        self.assertTrue(bubble in self.x_axis.bubbles)

    def test_update_unkown_facet(self):
        # An entry missing the x facet must raise a descriptive KeyError.
        entry = {"Z": "pouet", "Y": "hoho"}
        regex = "Unknown facet named: 'X' on the x axis"
        self.assertRaisesRegex(
            KeyError, regex, self.x_axis.update, entry, "2020", "Y"
        )
        # Same for an entry missing the y facet.
        entry = {"X": "pouet", "Z": "hoho"}
        regex = "Unknown facet named: 'Y' on the y axis"
        self.assertRaisesRegex(
            KeyError, regex, self.x_axis.update, entry, "2020", "Y"
        )
class TestBubblePlot(TestCase):
    """Unit tests for the BubblePlot container."""

    def setUp(self):
        facets = Facets("Y", "X_left", "X_right")
        self.plot = BubblePlot(facets)

    def test_init(self):
        # The facet names must be wired onto the y axis and both x half-axes.
        self.assertEqual("Y", self.plot.y_axis)
        self.assertEqual("X_left", self.plot.x_axis.left.facet)
        self.assertEqual("X_right", self.plot.x_axis.right.facet)

    def test_update(self):
        entry = {"X_left": "pouet", "X_right": "teuop", "Y": "hoho"}
        expected_left = Bubble(label_x="pouet", label_y="hoho")
        expected_right = Bubble(label_x="teuop", label_y="hoho")
        self.plot.update(entry, "2020")
        # Each half-axis must have recorded the bubble for its own facet.
        self.assertTrue(expected_left in self.plot.x_axis.left.bubbles)
        self.assertTrue(expected_right in self.plot.x_axis.right.bubbles)
class TestCSVWriter(TestCase):
    """Unit tests for CSVWriter: mapping computation and CSV serialization."""

    def setUp(self):
        self.output_dir = "output_dir"
        self.conf = Config(
            1,
            2,
            "year",
            ["iy", "ix", "nbr", "year", "y", "x"],
            "template.tex",
            self.output_dir,
            [],
        )
        self.years = {"2018", "2019", "2020"}
        self.entries = [
            {"Y": "0", "X_left": "1", "X_right": "2", "year": "2018"},
            {"Y": "3", "X_left": "4", "X_right": "4", "year": "2020"},
            {"Y": "6", "X_left": "7", "X_right": "7", "year": "2019"},
        ]
        # Expected mappings for the entries above (hand-computed fixtures).
        self.year_mapping = {"2018": 0, "2019": 500, "2020": 1000}
        self.x_left_mapping = {"1": -4, "4": -3, "7": -2}
        self.x_right_mapping = {"2": 2, "4": 3, "7": 4}
        self.y_mapping = {"0": 0, "3": 1, "6": 2}
        # Expected prepared rows: (y index, x index, occurrence, year score).
        self.data = [
            (0, -4, 1, 0),
            (1, -3, 1, 1000),
            (2, -2, 1, 500),
            (0, 2, 1, 0),
            (1, 3, 1, 1000),
            (2, 4, 1, 500),
        ]
        self.facets = Facets("Y", "X_left", "X_right")
        self.bubble_plot = BubblePlot(self.facets)
        for entry in self.entries:
            self.bubble_plot.update(entry, entry[self.conf.class_year])
        self.writer = CSVWriter(self.bubble_plot, self.years, self.conf)

    def test_compute_year_score_mapping(self):
        mapping = self.writer.compute_year_score_mapping()
        self.assertEqual(self.year_mapping, mapping)

    def test_compute_labels_indices_mapping(self):
        (
            x_left_mapping,
            x_right_mapping,
            y_mapping,
        ) = self.writer.compute_labels_indices_mapping()
        self.assertEqual(self.y_mapping, y_mapping)
        self.assertEqual(self.x_left_mapping, x_left_mapping)
        self.assertEqual(self.x_right_mapping, x_right_mapping)

    def test_prepared_bubbles_data(self):
        prepared_data = self.writer.prepared_bubbles_data(
            self.x_left_mapping,
            self.x_right_mapping,
            self.y_mapping,
            self.year_mapping,
        )
        self.assertEqual(self.data, prepared_data)

    def test_write(self):
        # Patch open() inside the bubble_plot module so no file is created.
        mock = mock_open()
        with patch("bubble_plot.open", mock):
            self.writer.write(
                self.data,
                list(self.x_left_mapping.keys())
                + list(self.x_right_mapping.keys()),
                list(self.y_mapping.keys()),
            )
        mock.assert_called_once_with(
            f"{self.output_dir}/{self.bubble_plot}.csv", "w"
        )
        # csv.writer emits \r\n line endings; header row first, then data.
        exps = [
            ("iy,ix,nbr,year,y,x\r\n"),
            ("0,-4,1,0,1,0\r\n"),
            ("1,-3,1,1000,4,3\r\n"),
            ("2,-2,1,500,7,6\r\n"),
            ("0,2,1,0,2,\r\n"),
            ("1,3,1,1000,4,\r\n"),
            ("2,4,1,500,7,\r\n"),
        ]
        handle = mock()
        for expected, (args, _) in zip(exps, handle.write.call_args_list):
            self.assertEqual(expected, args[0])
class TestLatexBubblePlotWriter(TestCase):
    """Unit tests for LatexBubblePlotWriter: CSV reading, color map, and
    template rendering."""

    def setUp(self):
        self.output_dir = "output_dir"
        self.conf = Config(
            1,
            2,
            "year",
            ["iy", "ix", "nbr", "year", "y", "x"],
            "template.tex",
            self.output_dir,
            [],
        )
        self.years = {"2018", "2019", "2020"}
        self.entries = [
            {"Y": "0", "X_left": "1", "X_right": "2", "year": "2018"},
            {"Y": "3", "X_left": "4", "X_right": "5", "year": "2020"},
            {"Y": "6", "X_left": "7", "X_right": "8", "year": "2019"},
        ]
        self.year_mapping = {"2018": 0, "2019": 500, "2020": 1000}
        self.x_mapping = {"1": -4, "4": -3, "7": -2, "2": 2, "5": 3, "8": 4}
        self.y_mapping = {"0": 0, "3": 1, "6": 2}
        # Rows as (y index, x index, occurrence, year score).
        self.data = (
            (0, -4, 1, 0),
            (1, -3, 1, 1000),
            (2, -2, 1, 500),
            (0, 2, 1, 0),
            (1, 3, 1, 1000),
            (2, 4, 1, 500),
        )
        self.facets = Facets("Y", "X_left", "X_right")
        self.bubble_plot = BubblePlot(self.facets)
        for entry in self.entries:
            self.bubble_plot.update(entry, entry[self.conf.class_year])
        # The writer reads the previously generated CSV on construction, so
        # feed it a fake file containing only the "ix" column.
        mock = mock_open(
            read_data="ix\n" + "\n".join(map(str, self.x_mapping.values()))
        )
        with patch("bubble_plot.open", mock):
            self.writer = LatexBubblePlotWriter(
                self.bubble_plot, self.years, self.conf
            )
        self.writer.x_indices = tuple(x for (_, x, _, _) in self.data)

    def test_init(self):
        # Construction must open the plot's CSV file for reading.
        mock = mock_open(
            read_data="ix\n" + "\n".join(map(str, self.x_mapping.values()))
        )
        with patch("bubble_plot.open", mock):
            LatexBubblePlotWriter(self.bubble_plot, self.years, self.conf)
        self.assertEqual(
            (f"{self.output_dir}/{self.bubble_plot}.csv", "r"),
            mock.call_args_list[0][0],
        )

    def test_year_color(self):
        # Every year must get a distinct color.
        years_len = len(self.years)
        year_color = compute_color_map(len(list(self.years)))
        self.assertEqual(years_len, len(set(year_color)))

    def test_prepare_values(self):
        # The rendered template values must expose exactly these keys,
        # in this order.
        expect = (
            "defineColorsYear",
            "setColorsYear",
            "xMin",
            "xMax",
            "yLabel",
            "meta",
            "xField",
            "xIndexField",
            "yField",
            "yIndexField",
            "yearField",
            "xLeftLabel",
            "xRightLabel",
            "CSVDataFile",
            "colorsYear",
        )
        year_color = {
            "2017": (0, 0, 1),
            "2018": (0, 1, 0),
            "2019": (1, 0, 0),
            "2020": (1, 1, 1),
        }
        self.assertEqual(
            expect, tuple(self.writer.prepare_values(year_color).keys())
        )

    def test_write(self):
        # Arbitrary placeholder values; only the open() calls are checked.
        template_values = {
            "defineColorsYear": [token_urlsafe(5) for _ in self.years],
            "setColorsYear": [token_urlsafe(5) for _ in self.years],
            "xMin": token_urlsafe(5),
            "xMax": token_urlsafe(5),
            "yLabel": token_urlsafe(5),
            "meta": token_urlsafe(5),
            "xField": token_urlsafe(5),
            "xIndexField": token_urlsafe(5),
            "yField": token_urlsafe(5),
            "yIndexField": token_urlsafe(5),
            "yearField": token_urlsafe(5),
            "xLeftLabel": token_urlsafe(5),
            "xRightLabel": token_urlsafe(5),
            "CSVDataFile": token_urlsafe(5),
            "colorsYear": [token_urlsafe(5) for _ in self.years],
        }
        mock = mock_open()
        with patch("bubble_plot.open", mock):
            self.writer.write(template_values)
        # First the LaTeX template is read, then the .tex output is written.
        self.assertEqual(
            (f"{self.conf.latex_template}", "r"), mock.call_args_list[0][0]
        )
        self.assertEqual(
            (f"{self.conf.output_dir}/{self.writer.plot}.tex", "w"),
            mock.call_args_list[1][0],
        )
class TestAPI(TestCase):
    """Integration-style tests for the module-level entry points."""

    def setUp(self):
        self.entries = [
            {"Y": "0", "X_left": "1", "X_right": "2", "year": "2018"},
            {"Y": "3", "X_left": "4", "X_right": "5", "year": "2020"},
            {"Y": "6", "X_left": "7", "X_right": "8", "year": "2019"},
        ]
        self.output_dir = "output_dir"
        self.conf = Config(
            1,
            2,
            "year",
            ["iy", "ix", "nbr", "year", "y", "x"],
            "template.tex",
            self.output_dir,
            [],
        )
        self.plot_plan = Facets("Y", "X_left", "X_right")

    def test_compute_occurences_from(self):
        plot, years = compute_occurences_from(
            self.entries, self.plot_plan, self.conf
        )
        # One bubble per entry on each half-axis, and every year collected.
        entries_len = len(self.entries)
        self.assertEqual(entries_len, len(plot.x_axis.left.bubbles))
        self.assertEqual(entries_len, len(plot.x_axis.right.bubbles))
        self.assertEqual({"2018", "2019", "2020"}, years)

    def test_build_and_save_plots(self):
        number_plot = 3
        # Fake CSV content ("ix" column) consumed whenever a plot is rebuilt.
        mock = mock_open(read_data="ix\n-1\n0\n1")
        with patch("bubble_plot.open", mock):
            build_and_save_plots(
                self.entries, [self.plot_plan] * number_plot, self.conf
            )
        # Each plot triggers four open() calls (CSV write/read, template
        # read, .tex write) — verify the total call count.
        self.assertEqual(number_plot * 4, len(mock.call_args_list))
| UTF-8 | Python | false | false | 10,840 | py | 9 | test_bubble_plot.py | 3 | 0.502214 | 0.466605 | 0 | 324 | 32.45679 | 78 |
gzgdouru/python | 12,438,225,306,629 | 5d0b8e4e60b889c159474cb456cf34c4470cf82c | cda789f0b106e3af6b6edf5896fcf3dafd39a2f2 | /module/paramiko/test3.py | b23b0b3842d9b646cb188eedf03b825731fb28b4 | [] | no_license | https://github.com/gzgdouru/python | ffe3ca3583dd505257aeb487909422e54cf4e270 | 469a18740a4b064215b6e3059bc3709008f61291 | refs/heads/master | "2018-09-20T07:40:01.303924" | "2018-08-23T03:17:29" | "2018-08-23T03:17:29" | 109,092,092 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #coding:utf8
import paramiko
# Upload a file over SFTP to a fixed host, with a (commented-out) example of
# downloading one back.
# NOTE(review): the host address and credentials are hard-coded below —
# secrets should not live in source code.
remoteHost = ("192.168.232.134", 22)
# Create the transport object for the remote host
trans = paramiko.Transport(remoteHost)
# Establish the connection
trans.connect(username="ouru", password="5201314ouru")
# Create an SFTP client bound to the connected transport channel
sftp = paramiko.SFTPClient.from_transport(trans)
# Upload the file
sftp.put(localpath='test1.py', remotepath='/usr/local/ouru/python/test1.py')
# Download a file
# sftp.get(remotepath, localpath)
trans.close() | UTF-8 | Python | false | false | 463 | py | 179 | test3.py | 173 | 0.753149 | 0.692695 | 0 | 19 | 19.947368 | 76 |
birdsarah/core-hq | 3,564,822,890,093 | d14ce5fc022bcbb74d0b40449bc3cd22694a2e73 | 5a3130323f9427efee178c3133a7ebcb0123f65e | /corehq/apps/commtrack/tests/test_sms_reporting.py | 0b7d8a4187a31d757b9cf0f9f05a945e27e25a39 | [] | no_license | https://github.com/birdsarah/core-hq | c9676efe7d37bde65da3b8fdf29c5c46ca9d6fec | 5318e8f850919d2bf5b45c3e5edb5a9627775aa7 | refs/heads/master | "2020-02-26T02:03:02.507694" | "2013-02-28T23:46:14" | "2013-02-28T23:46:14" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.utils.unittest.case import TestCase
from corehq.apps.commtrack.helpers import make_supply_point,\
make_supply_point_product
from corehq.apps.commtrack.tests.util import make_loc, \
bootstrap_domain, bootstrap_user, TEST_BACKEND
from corehq.apps.commtrack.models import Product
from corehq.apps.commtrack.sms import handle
from casexml.apps.case.models import CommCareCase
from corehq.apps.sms.backend import test
class StockReportTest(TestCase):
    """End-to-end test of SMS stock-on-hand reporting for a supply point."""

    def setUp(self):
        # Bootstrap an SMS backend, a fresh domain, a verified user, and one
        # location with its supply point.
        self.backend = test.bootstrap(TEST_BACKEND, to_console=True)
        self.domain = bootstrap_domain()
        self.user = bootstrap_user()
        self.verified_number = self.user.get_verified_number()
        self.loc = make_loc('loc1')
        self.sp = make_supply_point(self.domain.name, self.loc)
        self.products = Product.by_domain(self.domain.name)
        # The bootstrapped domain is expected to ship with three products.
        self.assertEqual(3, len(self.products))
        # Map each product code to its supply-point-product case.
        self.spps = {}
        for p in self.products:
            self.spps[p.code] = make_supply_point_product(self.sp, p._id)

    def tearDown(self):
        self.domain.delete() # domain delete cascades to everything else

    def testStockReport(self):
        amounts = {
            'pp': 10,
            'pq': 20,
            'pr': 30,
        }
        # soh loc1 pp 10 pq 20...
        handled = handle(self.verified_number, 'soh {loc} {report}'.format(
            loc='loc1',
            report=' '.join('%s %s' % (k, v) for k, v in amounts.items())
        ))
        self.assertTrue(handled)
        # Each product case must now carry the reported stock level.
        for code, amt in amounts.items():
            spp = CommCareCase.get(self.spps[code]._id)
            self.assertEqual(str(amt), spp.current_stock)
| UTF-8 | Python | false | false | 1,670 | py | 55 | test_sms_reporting.py | 41 | 0.634132 | 0.625749 | 0 | 43 | 37.837209 | 75 |
joab40/ChatterBot | 11,304,353,954,419 | 6c347b1227ab0a8e3ed302e4169d4ba364a54946 | 780320060ee6238f46b81b83718a25a38a96a970 | /chatterbot/adapters/logic/mixins.py | 0585ca928658b1b7aa352883ef628ddf593dc17a | [
"BSD-3-Clause"
] | permissive | https://github.com/joab40/ChatterBot | 828ed1ba0e9029dea8621d0227324bb4c8856559 | 3e5c8d5794a917420d58ede8b867041bbe1d5f6a | refs/heads/master | "2021-01-17T18:03:59.151074" | "2015-12-30T00:58:36" | "2015-12-30T00:58:36" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class KnownResponseMixin(object):
def get_statements_with_known_responses(self):
"""
Filter out all statements that are not in response to another statement.
A statement must exist which lists the closest matching statement in the
in_response_to field. Otherwise, the logic adapter may find a closest
matching statement that does not have a known response.
"""
if (not self.context) or (not self.context.storage):
return []
all_statements = self.context.storage.filter()
responses = set()
to_remove = list()
for statement in all_statements:
for response in statement.in_response_to:
responses.add(response.text)
for statement in all_statements:
if statement.text not in responses:
to_remove.append(statement)
for statement in to_remove:
all_statements.remove(statement)
return all_statements
class ResponseSelectionMixin(object):
    """Mixin that selects a response to the closest matching statement."""

    def process(self, input_statement):
        """
        Return a ``(confidence, response)`` pair for *input_statement*.

        The closest known match is found first; a response to that match is
        then chosen according to ``self.tie_breaking_method``.
        """
        # Select the closest match to the input statement
        confidence, closest_match = self.get(input_statement)

        # Save any updates made to the statement by the logic adapter
        self.context.storage.update(closest_match)

        # Get all statements that are in response to the closest match
        response_list = self.context.storage.filter(
            in_response_to__contains=closest_match.text
        )

        if response_list:
            if self.tie_breaking_method == "first_response":
                response = self.get_first_response(response_list)
            elif self.tie_breaking_method == "most_frequent_response":
                response = self.get_most_frequent_response(closest_match, response_list)
            else:
                response = self.get_random_response(response_list)
        else:
            # Bug fix: the rest of this method reaches storage through
            # ``self.context.storage``; the original ``self.storage`` here
            # would raise AttributeError whenever no response was found.
            response = self.context.storage.get_random()

        return confidence, response

    def get_most_frequent_response(self, input_statement, response_list):
        """
        Return the statement with the greatest number of occurrences.
        """
        # Initialize the matching response to the first response.
        # This will be returned in the case that no match can be found.
        matching_response = response_list[0]
        occurrence_count = 0

        for statement in response_list:
            count = statement.get_response_count(input_statement)

            # Keep the more common statement (>= keeps the later of two ties,
            # matching the original behavior).
            if count >= occurrence_count:
                matching_response = statement
                occurrence_count = count

        # Choose the most commonly occurring matching response
        return matching_response

    def get_first_response(self, response_list):
        """
        Return the first statement in the response list.
        """
        return response_list[0]

    def get_random_response(self, response_list):
        """
        Choose a random response from the selection.
        """
        from random import choice
        return choice(response_list)
| UTF-8 | Python | false | false | 3,131 | py | 4 | mixins.py | 4 | 0.628234 | 0.627276 | 0 | 89 | 34.168539 | 88 |
whyguu/style-transfer-tf | 5,738,076,348,383 | c4a1a0d34a0228aeab03905810233c455ebd63e4 | df503e95219ce7b742dd0b445e15eb76ebe83143 | /model.py | 873c4715437b19a6e96c268545e9197bddb3c240 | [] | no_license | https://github.com/whyguu/style-transfer-tf | 9b21d393931cb8d97c08522f34b2d964d9fb096f | 50c8ef6cb6149661ce503533ae9af0f94a4011a2 | refs/heads/master | "2020-03-15T21:09:25.777922" | "2019-05-08T08:19:46" | "2019-05-08T08:19:46" | 132,348,841 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tensorflow as tf
import numpy as np
import os
from skimage import io
from skimage.transform import resize
from collections import OrderedDict
class TransferModel(object):
    """Neural style transfer (Gatys et al.) built on a fixed VGG-19 graph.

    The optimization variable is the output image itself (``self.rlt``);
    VGG-19 weights are loaded from ``vgg19.npy`` and frozen as constants.
    Written against TensorFlow 1.x (``tf.Session``, ``tf.variable_scope``).
    """

    def __init__(self):
        # Pretrained VGG-19 weights: dict of layer name -> [kernel, bias].
        self.init_weight = np.load('./vgg19.npy', encoding='latin1').item()
        # ######################################
        # Layers (and per-layer weights) used for the content and style losses.
        self.content_layers = ['conv4_2']
        self.content_weights = [1.0]
        self.style_layers = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']
        self.style_weights = [0.2, 0.2, 0.2, 0.2, 0.2]
        # Ratio weighting content loss against style loss in calc_loss().
        self.content_style_loss_ratio = 1e-4
        # attention: the weights were trained in the data format bgr. so maybe we should follow this
        # but here, i do not use bgr. if the result is not good, you can change it to rgb
        self.mean_pixel = np.array([123.68, 116.779, 103.939], dtype=np.float32)  # rgb
        # Mean-centered images with a leading batch axis.
        # (assumes both images are HxWx3 RGB — TODO confirm.)
        self.content = np.expand_dims(io.imread('./images/content/build.jpeg') - self.mean_pixel, axis=0)
        self.style = np.expand_dims(io.imread('./images/style/rain_princess.jpg') - self.mean_pixel, axis=0)
        b, h, w, c = self.content.shape
        # The trainable result image, initialized with random noise; the
        # commented lines show alternative content/style initializations.
        self.rlt = tf.Variable((np.random.randn(b, h, w, c).astype(np.float32)),
                               trainable=True)
        # self.rlt = tf.Variable(self.content.copy(), trainable=True)
        # self.rlt = tf.Variable(self.style.copy(), trainable=True)
        assert len(self.style_layers) == len(self.style_weights)
        assert len(self.content_layers) == len(self.content_weights)

    def build_model(self):
        """Build the graph: precompute content/style targets, then wire the
        result image through VGG-19."""
        # content
        self.content_tensors_dict = self.vgg19_forward(self.content, store_objects=self.content_layers, scope='content')
        # style
        self.style_tensors_dict = self.vgg19_forward(self.style, store_objects=self.style_layers, scope='style')
        for key in self.style_tensors_dict.keys():
            self.style_tensors_dict[key] = self.gram_matrix(self.style_tensors_dict[key])
        # pre-compute: content/style activations are constants, so evaluate
        # them once and keep the numpy values.
        with tf.Session() as sess:
            # sess.run(tf.global_variables_initializer())
            for key in self.content_tensors_dict.keys():
                self.content_tensors_dict[key] = sess.run(self.content_tensors_dict[key],)
            for key in self.style_tensors_dict.keys():
                self.style_tensors_dict[key] = sess.run(self.style_tensors_dict[key],)
        # rlt
        self.rlt_tensor_dict = self.vgg19_forward(self.rlt, set(self.style_layers+self.content_layers), scope='result')

    def calc_loss(self):
        """Return ``(total, content, style)`` loss tensors for the result image."""
        # content_loss: squared-error against the precomputed content features.
        content_loss = 0
        for i, key in enumerate(self.content_tensors_dict.keys()):
            tp = tf.pow(x=self.rlt_tensor_dict[key]-self.content_tensors_dict[key], y=2)
            content_loss += self.content_weights[i] * tf.reduce_sum(tp) / 2
        # style loss: squared-error between Gram matrices, normalized per
        # layer by 1 / (4 * N^2 * C^2), N = H*W, C = channels.
        style_loss = 0
        for i, key in enumerate(self.style_layers):
            _, h, w, c = self.rlt_tensor_dict[key].get_shape()
            n = h.value * w.value
            c = c.value
            gram = self.gram_matrix(self.rlt_tensor_dict[key])
            tp = tf.pow(self.style_tensors_dict[key]-gram, 2)
            style_loss += (1.0 / tf.constant(4.0*n*n*c*c, dtype=tf.float32)) * self.style_weights[i] * tf.reduce_sum(tp)
        loss = self.content_style_loss_ratio * content_loss + style_loss
        return loss, content_loss, style_loss

    def get_result_img(self, sess):
        """Evaluate the result variable, add the mean back, and clip to uint8."""
        return np.squeeze(np.clip(sess.run(self.rlt)+self.mean_pixel, 0.0, 255).astype(np.uint8))
    # ######################################################################################
    @staticmethod
    def gram_matrix(tensor):
        """Return the (C, C) Gram matrix of a feature map, flattened over H*W."""
        flat = tf.reshape(tensor, shape=(-1, tensor.shape[-1]))
        gram = tf.matmul(tf.transpose(flat), flat)
        return gram
    # ######################################################################################
    def vgg19_forward(self, x, store_objects, scope):
        """Run ``x`` through VGG-19 (conv layers only, no FC head) and return
        an OrderedDict mapping each name in *store_objects* to its tensor."""
        with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
            self.conv1_1 = self.conv_layer(x, "conv1_1")
            self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2")
            self.pool1 = self.max_pool(self.conv1_2, 'pool1')

            self.conv2_1 = self.conv_layer(self.pool1, "conv2_1")
            self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2")
            self.pool2 = self.max_pool(self.conv2_2, 'pool2')

            self.conv3_1 = self.conv_layer(self.pool2, "conv3_1")
            self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2")
            self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3")
            self.conv3_4 = self.conv_layer(self.conv3_3, "conv3_4")
            self.pool3 = self.max_pool(self.conv3_4, 'pool3')

            self.conv4_1 = self.conv_layer(self.pool3, "conv4_1")
            self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2")
            print(self.conv4_2)
            self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3")
            self.conv4_4 = self.conv_layer(self.conv4_3, "conv4_4")
            self.pool4 = self.max_pool(self.conv4_4, 'pool4')

            self.conv5_1 = self.conv_layer(self.pool4, "conv5_1")
            self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2")
            self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3")
            self.conv5_4 = self.conv_layer(self.conv5_3, "conv5_4")
            self.pool5 = self.max_pool(self.conv5_4, 'pool5')
        # Fetch the requested intermediate tensors back by name from the graph.
        store = OrderedDict()
        for name in store_objects:
            store[name] = tf.get_default_graph().get_tensor_by_name(name=scope+'/'+name+':0')
        return store

    def conv_layer(self, x, name):
        """3x3 conv + bias + ReLU using the frozen pretrained weights for *name*."""
        # with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        #     filter = tf.get_variable(name='kernel', shape=self.init_weight[name][0].shape,
        #                              initializer=tf.constant_initializer(self.init_weight[name][0]), trainable=False)
        #     bias = tf.get_variable(name='bias', shape=self.init_weight[name][1].shape,
        #                            initializer=tf.constant_initializer(self.init_weight[name][1]), trainable=False)
        conv_filter = tf.constant(self.init_weight[name][0], dtype=tf.float32, name='kernel')
        conv_bias = tf.constant(self.init_weight[name][1], dtype=tf.float32, name='bias')
        conv = tf.nn.conv2d(input=x, filter=conv_filter, strides=[1, 1, 1, 1], padding='SAME')
        bias = tf.nn.bias_add(conv, conv_bias)
        return tf.nn.relu(bias, name=name)

    @staticmethod
    def max_pool(x, name):
        """2x2 max pooling with stride 2 and 'same' padding."""
        return tf.layers.max_pooling2d(x, pool_size=2, strides=2, padding='same', name=name)
if __name__ == '__main__':
    # Rescale an input image so its longest side becomes 512 px, then save
    # it where TransferModel expects the content image.
    path = '/Users/whyguu/Desktop/build.jpeg'
    # path = './images/style/rain_princess.jpg'
    image = io.imread(path)
    # skimage images are (rows, cols, channels), i.e. (height, width, _);
    # the original unpacked these as (w, h), which worked but was misleading.
    h, w, _ = image.shape
    sp = np.array([h, w]) / np.max([h, w]) * 512
    # Fix: the ``np.int`` alias was removed in NumPy 1.24; the builtin
    # ``int`` is the documented replacement and behaves identically here.
    sp = sp.astype(int)
    print(sp)
    image = resize(image, output_shape=sp)
    io.imsave('./images/content/build.jpg', image)
| UTF-8 | Python | false | false | 7,000 | py | 3 | model.py | 2 | 0.582429 | 0.552 | 0 | 145 | 47.275862 | 120 |
bomb1e/gotgit | 4,406,636,467,142 | 087740bd99d8795ae9833150f4306e817222b5b9 | 7192da38b6afd3c60f80ccbecb3040cf34369ce3 | /f44ec82e-d145-40ba-812f-e96c2ee46daa.py | 895750de77d5337106e1f1390dae329f4677ebb4 | [] | no_license | https://github.com/bomb1e/gotgit | 6699fa9e6adb0a79f22441df41a102b09b78c2ce | 3c14cb6d1d0c4cba6a1e24a9899462d2e4bee2ce | refs/heads/master | "2020-06-12T06:00:34.695425" | "2018-08-01T02:00:02" | "2018-08-01T02:00:02" | 75,600,461 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import print_function
def func57(arg307, arg308):
    # Machine-generated obfuscated code. Most locals below are dead:
    # var334 == arg308 - arg308 == 0, so the return value reduces to
    # arg308 ^ arg307. The intermediate arithmetic has no side effects
    # beyond the call to func59.
    def func58(arg309, arg310):
        # Nested helper; its own intermediates are dead and it simply
        # returns arg310 ^ arg309.
        var311 = arg308 ^ arg307
        var312 = ((arg307 + -873) + ((-811 - (1631696821 - arg309 - arg309)) ^ (arg309 & arg307))) & (arg309 & (arg307 + var311) | arg309 | 498016551 ^ (((arg308 ^ arg310) & arg308) + 179) & arg307 | arg307 - arg308) ^ arg310 | arg310
        var313 = (-636 & arg307 - arg310 ^ arg308 - arg308) | var312 ^ var312 ^ arg309 - arg309
        result = arg310 ^ arg309
        return result
    var314 = func58(arg307, arg308)
    var319 = func59(arg307, var314)
    var320 = (-1982215526 - -807 & 1004446833) - var319
    var321 = arg307 | arg308 + (arg307 | -833989564)
    var322 = var314 | var314 | arg308 + var314
    var323 = var320 - ((var320 + -607) & arg307)
    var324 = (214 | arg307 - arg308) | var319
    var325 = (912 | var320 ^ var319) + var324
    var326 = var321 + (102182695 ^ (var322 & arg307))
    var327 = ((var322 - var323) & var325) + -673
    if var327 < var326:
        var328 = var322 ^ var320
    else:
        var328 = var321 + ((var326 + var314) ^ 504)
    if var320 < var319:
        var329 = var326 & ((var314 & var323) - arg307)
    else:
        var329 = var327 | (var320 ^ arg308) & arg307
    var330 = var325 | var323 ^ (var319 - var324)
    var331 = (arg308 | -867 & var321) & var314
    if var320 < var325:
        var332 = (var320 | var327 & var314) & var319
    else:
        var332 = var327 ^ ((var321 & var314) ^ var322)
    var333 = (var331 - var320) & var319
    var334 = arg308 - arg308
    var335 = var324 | var333
    result = arg308 ^ (arg307 ^ var334)
    return result
def func59(arg315, arg316):
    """Three-step fold over *arg315*; *arg316* is accepted but never used."""
    acc = 0
    for step in range(3):
        if arg315 < step:
            # Adds arg315 - arg315, i.e. zero — kept for exact equivalence.
            acc += arg315 - arg315
        else:
            acc += (arg315 - acc) ^ step
    return acc
def func48(arg291, arg292):
    # Always returns 0 (arg291 - arg291); the calls are kept only for their
    # side effects on the var295 accumulator closure.
    # NOTE(review): func55 is not defined anywhere in this file as shown, so
    # calling func48 would raise NameError unless it exists elsewhere.
    var302 = var295(arg292, arg291)
    var306 = func55(arg291, var302)
    result = arg291 - arg291
    return result
def func51(arg296, arg297):
    # Machine-generated obfuscated arithmetic. func54() temporarily rebinds
    # the module-global `len` (see func52/func53) and returns -5 here.
    var298 = func54()
    var299 = (((972 | arg296) + arg297) | -1680772740) | 127
    var300 = 568 + ((var298 ^ -664) ^ -602)
    var301 = var299 + (var298 - var300 ^ var300 + ((var300 ^ (247 | (var300 & var298) + arg296 ^ var300 - (arg297 - (var299 ^ (((var299 ^ var298) ^ arg296) - var299))) - arg296 - -815145398 ^ var300) | 144563678) & -2120038069)) + -429
    result = var300 ^ var299 - (var299 + (var301 ^ 969 | (arg296 - 1480689798 | (var301 | var300 + var300)) | -319272990 - -61))
    return result
def func54():
    # func52 rebinds the module-global `len` to a lambda that always returns
    # -5, so `len(range(7))` below yields -5; func53 then deletes that global
    # again, restoring the builtin. Net effect: always returns -5.
    func52()
    result = len(range(7))
    func53()
    return result
def func53():
    # Remove the module-global `len` shadow installed by func52, so the
    # builtin len becomes visible again. Raises NameError if called when no
    # global `len` exists.
    global len
    del len
def func52():
    # Shadow the builtin len with a module-global lambda that ignores its
    # argument and always returns -5 (undone by func53).
    global len
    len = lambda x : -5
def func50():
    # Factory for a stateful accumulator: each call to the returned function
    # adds func51(a, b) to a running total (starting at -1) and returns it.
    closure = [-1]
    def func49(arg293, arg294):
        closure[0] += func51(arg293, arg294)
        return closure[0]
    func = func49
    return func
# Module-level accumulator closure, built once at import time.
var295 = func50()
def func38(arg224, arg225):
    # Machine-generated obfuscated arithmetic chaining several sibling
    # helpers; var289/var290 are dead, the result uses arg224, arg225,
    # var272, var283 and var259.
    var259 = func39(arg224, arg225)
    var272 = func43(var259, arg225)
    var283 = var275(var272, var259)
    var288 = func47(var259, var272)
    if arg224 < var288:
        var289 = var272 - var288 | arg225 - -547476101
    else:
        var289 = -1143016801 + arg225 & (var283 - var283) & var283
    if var283 < var288:
        var290 = (var259 + (var259 | 2069297978 & var288)) | (((var259 - arg225) & 250508161) & (-895459322 | 776))
    else:
        var290 = var259 - (var288 - var272)
    result = arg224 & (var283 - 841) - ((var272 + ((var272 - arg225 | var283 ^ var272) | -639562925) + var283) | arg225 | -575)
    return result
def func47(arg284, arg285):
    """Sum of ``(arg285 - i) ^ 9`` for i in [0, 28); *arg284* is ignored."""
    return sum((arg285 - step) ^ 9 for step in range(28))
def func46(arg276, arg277):
    # Machine-generated obfuscated arithmetic; var281 is dead, the result
    # combines var282, var278, var279, var280 and the arguments. Pure: no
    # side effects.
    var278 = (arg277 | arg277) - arg277 | -1083776126 & arg276 + -2
    var279 = -695 - var278 + arg276 - -699253752
    var280 = arg277 + (var278 & (arg276 - arg276 & (var278 ^ var279)) - var278 | arg277)
    var281 = arg277 | ((var278 - 624) | arg276 | (arg276 - (var280 - var280 - ((var280 - (1349410245 + arg277)) - var279 + var280 ^ var278 - (-1876188315 + (-477 | -122))) ^ (var280 ^ (var278 ^ var279)))) + var278) | var278 - arg277
    var282 = 397 ^ arg277
    result = (((var282 - var278 ^ arg276 + var280) + (213945079 & arg277)) | 907 ^ var282 + arg276) | var282 - var279 + -2130411587
    return result
def func45():
    # Factory for a stateful accumulator: each call to the returned function
    # adds func46(a, b) to a running total (starting at 8) and returns it.
    closure = [8]
    def func44(arg273, arg274):
        closure[0] += func46(arg273, arg274)
        return closure[0]
    func = func44
    return func
# Module-level accumulator closure, built once at import time.
var275 = func45()
def func43(arg260, arg261):
    # Machine-generated obfuscated arithmetic; var268, var270, var271 are
    # dead. Pure: no side effects.
    var262 = arg261 + 675 - arg261 - -299
    var263 = arg260 & ((arg261 & 144) | arg261)
    var264 = var263 - 411 | arg261
    var265 = arg261 | ((arg260 | var262) + -1826132226)
    var266 = var262 + ((var263 ^ 651) - -337)
    var267 = 939 - -1314785328 + -693 ^ 826
    if var262 < var266:
        var268 = arg261 & var265 - 216 & 1006727631
    else:
        var268 = var267 + arg260 | -630 & arg261
    var269 = var266 & (var264 - var266) ^ var265
    var270 = var262 ^ -2097090574 | (var267 + var262)
    var271 = ((var263 | -29) + arg260) - var269
    result = (arg260 & var266) ^ (1508536901 - (var269 | var269 + ((var262 + arg260) - var263) & var263) | arg260) | var265 + var262
    return result
def func39(arg226, arg227):
    # Machine-generated obfuscated arithmetic. `arg226 < arg226` is always
    # False, so var255 is always the constant else-branch (and is dead
    # anyway, as is var258). Calls the var230 accumulator for side effects.
    var254 = var230(arg227, arg226)
    if arg226 < arg226:
        var255 = (405 | (-441312064 ^ (arg226 & 965 - (-374608202 - -754 | var254 & arg227 & arg227 | (arg227 & (115 | (503 + var254) ^ -594 + var254 + arg226 | 868))) | var254 ^ var254 ^ -671021910) | 7)) | 955
    else:
        var255 = -11331823 - 925852681 - 600
    var256 = var254 + (var254 ^ -726724253) & -885760586
    var257 = var254 - arg226 - arg226
    var258 = arg227 & -901
    result = var256 - (var257 - arg226 ^ arg227) ^ 1421239658 + (var257 | (var256 ^ ((var254 - 1407795655 | 141028523) ^ var254)) ^ 173)
    return result
def func42(arg231, arg232):
    # Machine-generated obfuscated arithmetic; many of the locals below are
    # dead (var237, var240, var247, var248, var252, ...). Pure: no side
    # effects.
    var233 = arg232 + -1559395562 + arg231
    var234 = var233 & arg232 | var233 - var233
    var235 = var234 ^ var233
    var236 = -1991465324 + -843
    var237 = var233 - (var234 + -1334526750) & 188
    var238 = var235 - var233
    var239 = (var233 ^ var234) & arg232
    var240 = (var233 - var233) ^ var235 & arg232
    var241 = -800 + var236 + var238
    var242 = (var239 ^ var238) ^ -881
    var243 = var234 - arg231
    var244 = var241 & (var235 - var236 ^ var235)
    var245 = var235 & var238
    var246 = var239 & (var239 + var238 | -648390283)
    var247 = (var242 + var238) - -893 + var233
    var248 = var239 | arg231 ^ var245 - var242
    var249 = (var235 & var244 + -1290284415) + var241
    var250 = var249 - var235 + 606185235 - var236
    var251 = var235 ^ var239 + var248
    var252 = arg231 ^ var243
    var253 = var241 & (arg232 & var250)
    result = ((var244 + var253) + ((var253 ^ (var236 | var235 + var244 | var233)) | arg232) | (var233 | var239) | var246) - var251
    return result
def func41():
    """Build and return a stateful adder.

    The returned callable keeps a running total (seeded with -5) and, on
    each call, adds ``func42(x, y)`` to it and returns the new total.
    """
    total = [-5]

    def _step(x, y):
        total[0] += func42(x, y)
        return total[0]

    return _step
# Stateful mixer built once at module import; func39 feeds pairs through it.
var230 = func41()
def func25(arg185, arg186):
    """Benchmark kernel combining the inputs through stateful helpers.

    NOTE(review): ``var203``/``var210`` are module-level closures with
    internal running totals, and ``func35`` temporarily shadows the
    builtin ``len``; this function is deliberately call-history-dependent.
    """
    var200 = func26(arg186, arg185)
    var207 = var203(arg186, arg185)
    var213 = var210(var200, arg186)
    var214 = func35()
    var217 = class36()
    for var218 in range(30):
        var217.func37(var213, var218)  # no-op: func37 always returns 0
    var219 = (var207 + var207) - -965 - var213 - (var200 | arg186) + ((var200 - var214) ^ ((arg186 + (var207 ^ arg185)) & var213))
    var220 = (((var213 ^ var207) | (var219 | var214) ^ var219) + var214 - 494644027) + -205296995 + (var200 & ((arg186 & arg186) - (var214 + var207 + (arg185 - arg186) & -65260470 - -654) ^ var219) + var219)
    var221 = ((var214 | (var207 & 172 | (var220 | (var207 | arg185 & (var213 + (var200 + (var200 & var220))))))) - -700961881) | arg185
    var222 = (((var220 + var221 + var220 ^ var214) ^ -746401032 - var200) & (((((var200 & var214) | (674 | arg185) + arg185) ^ ((((arg186 - -963470642) | 895 - arg185) | arg186 ^ var219) - var219) | arg185) & var221) | 681)) & var207
    if var220 < var222:
        var223 = var222 & var221 & ((var214 | (var220 & var222 & var200 + var214) + var220 & var219) | var213 | -207995630 - ((arg185 | var222) - 126) ^ var221 | (var214 | arg186) & -959 + (var213 & var207) + arg185 - var207)
    else:
        var223 = var214 - arg186 | var219 & var207
    # Only var200, var214 and arg185 reach the returned expression.
    result = var200 | var214 & arg185
    return result
class class36(object):
    """Trivial collaborator driven by func25's inner loop."""

    def func37(self, arg215, arg216):
        """Ignore both arguments and return 0."""
        return 0
def func35():
    """Return ``len(range(24))`` while the builtin ``len`` is shadowed.

    ``func33()`` installs a module-global ``len`` that always returns -3,
    so this returns -3; ``func34()`` then deletes the shadow, restoring
    the builtin.  Deliberate global-lookup stressor for the benchmark.
    """
    func33()
    result = len(range(24))
    func34()
    return result
def func34():
    """Delete the module-global ``len`` shadow installed by func33,
    restoring the builtin ``len``."""
    global len
    del len
def func33():
    """Shadow the builtin ``len`` with a module-global returning -3.

    Deliberate global-namespace stressor for the benchmark; undone by
    ``func34``.
    """
    global len
    len = lambda x : -3
def func32(arg211, arg212):
    """Return ``arg211 - arg212``.

    The original spelled the minuend as ``arg211 | arg211``; ``x | x``
    is ``x`` for integers, so only the subtraction remains.
    """
    return arg211 - arg212
def func31():
    """Return a running-sum closure over func32, seeded with 0."""
    acc = [0]

    def _bump(x, y):
        acc[0] += func32(x, y)
        return acc[0]

    return _bump
# Running-total closure (seed 0) over func32; consumed by func25.
var210 = func31()
def func29(arg204, arg205):
    """Return ``arg204 - 658``.

    The first expression reproduces the original's discarded, side-effect
    free computation; it never affects the result.
    """
    _ = (arg205 - arg205) | arg204 & (803 - 1080001843)  # dead value
    return arg204 - 658
def func28():
    """Return a running-sum closure over func29, seeded with 6."""
    running = [6]

    def _advance(a, b):
        running[0] += func29(a, b)
        return running[0]

    return _advance
# Running-total closure (seed 6) over func29; consumed by func25.
var203 = func28()
def func26(arg187, arg188):
    """Pure integer mixer: branches on argument ordering, then folds the
    operands through fixed bitwise/arithmetic constants."""
    k1 = -1649403880 & arg187
    if arg187 < arg188:
        k2 = arg187 & (arg187 | (-485258042 ^ arg187))
    else:
        k2 = arg188 + 1805752178
    if arg187 < k1:
        k3 = -1376480026 ^ arg187
    else:
        k3 = k1 ^ 510497691 ^ 29997409 | k1
    k4 = arg188 ^ -1112073743 | 1861303238 + k1
    if k4 < k4:
        # Never taken: an int is never strictly less than itself.
        k5 = 764 | (arg188 & (-536319675 + 227))
    else:
        k5 = arg188 & k4 | arg187
    k6 = (arg187 - 405) | arg187 ^ k4
    k7 = 675 ^ (arg187 ^ (arg188 + 2122167817))
    k8 = k6 + 552
    k9 = (arg188 | arg188) + arg187
    k10 = ((k8 ^ k9) & k9) + k7
    k11 = k10 & k1
    # k2, k3 and k5 are computed but never read.
    return (-523 ^ k6 + arg187 - arg188) ^ (((k1 | arg187 + k10) + (k10 | k8 ^ 459)) - k11) + k1
def func11(arg95, arg96):
    """Benchmark kernel mixing the inputs through stateful helpers.

    NOTE(review): ``var99``/``var136`` are module-level closures with
    internal running totals and ``func24`` temporarily shadows the
    builtin ``len``, so this function is call-history-dependent.
    """
    var133 = var99(arg96, arg95)
    var144 = var136(var133, arg96)
    var164 = func21(arg95, var133)
    var165 = func24()
    if arg96 < var144:
        var166 = var164 ^ arg95
    else:
        var166 = (1684409856 | var164 | 657965376) - 974801810
    var167 = (arg96 & -1484339351) + 446 ^ var144
    var168 = (arg96 ^ (var165 | var144)) - var167
    var169 = arg96 | (var164 | var165) - 456
    var170 = -2002069724 ^ arg96 ^ var164
    var171 = ((604999453 - var144) ^ var168) ^ arg96
    if var167 < arg95:
        var172 = var170 - var167
    else:
        var172 = (var165 - var167 & arg95) ^ var169
    if var164 < var144:
        var173 = 462 + var164
    else:
        var173 = var133 & var164
    var174 = ((var169 & -543) | var168) + var168
    var175 = var171 + var171 + var165 & 132
    var176 = var169 ^ var168 & 1198056013
    if var144 < var133:
        var177 = arg96 & var171 + var176
    else:
        var177 = var171 + ((var175 + var168) | var133)
    var178 = arg95 ^ var174 - var171 | var176
    if var176 < var169:
        var179 = 275453620 - (var171 - var176) | var171
    else:
        var179 = var170 ^ 730
    var180 = var175 - (var169 | var176)
    var181 = (arg95 | arg96) ^ var144
    var182 = (var171 ^ arg95 & arg96) | -578836440
    var183 = var171 & var180
    var184 = var170 | var174 - var133 ^ var178
    # Only var176, var178, var180 and var182 reach the returned expression.
    result = var176 - (var178 - ((171 | (var182 ^ var180)) | var182))
    return result
def func24():
    """Return ``len(range(19))`` while ``len`` is shadowed by func22.

    ``func22()`` installs a module-global ``len`` that always returns -8,
    so this returns -8; ``func23()`` then deletes the shadow, restoring
    the builtin.
    """
    func22()
    result = len(range(19))
    func23()
    return result
def func23():
    """Delete the module-global ``len`` shadow installed by func22,
    restoring the builtin ``len``."""
    global len
    del len
def func22():
    """Shadow the builtin ``len`` with a module-global returning -8.

    Deliberate global-namespace stressor for the benchmark; undone by
    ``func23``.
    """
    global len
    len = lambda x : -8
def func21(arg145, arg146):
    """Return ``arg145 | ((arg145 + 283474408) & arg146)``.

    Behaviorally identical to the original: it derived sixteen further
    intermediates (var148..var163) from pure integer arithmetic and
    comparisons, then discarded them all — only var147 reached the
    returned expression ``arg145 | var147``.
    """
    masked = (arg145 + 283474408 & arg146) & arg146
    return arg145 | masked
def func20(arg137, arg138):
    """Pure integer mixer folded into the var136 running total.

    Deterministic function of its two arguments; every intermediate is
    plain add/subtract/bitwise arithmetic against fixed constants.
    """
    var139 = 249 & 772036111
    var140 = arg137 & -311
    var141 = (var140 | (var139 & var140) - (arg138 - 98 | var139)) - var140 - (var139 - arg138 - (-990 & arg137) ^ arg137) & (((var139 | arg138 - (-559605008 & var139)) + arg138) ^ arg137 - -944973917 ^ var140 + var140 ^ var139)
    var142 = (187 ^ 539) - ((-771347445 + ((arg137 | arg138) & (var139 | var140) | arg138 + arg138 - arg137 - 29 & arg137 - var141)) | var141 & (arg138 | var141 & (var139 + var140 - var139)) + -269013832) ^ var140 & var141
    var143 = (341 ^ arg138 | var142) - 66
    result = 991 | (var140 - arg138 ^ (var139 - var142 ^ 577467486 ^ -1763547513) - 2075859733 | (var143 ^ arg138))
    return result
def func19():
    """Return a running-sum closure over func20, seeded with -2."""
    box = [-2]

    def _accumulate(p, q):
        box[0] += func20(p, q)
        return box[0]

    return _accumulate
# Running-total closure (seed -2) over func20; consumed by func11.
var136 = func19()
def func14(arg100, arg101):
    """Kernel folded into the var99 running total; delegates to func15.

    Only var119, var122 and the constant var130 reach the returned
    expression; the remaining intermediates are computed and discarded.
    """
    var117 = func15(arg101, arg100)
    if arg100 < var117:
        var118 = -587 - -271
    else:
        var118 = arg100 - (arg101 | arg100 | arg100)
    var119 = arg101 | (arg100 & 162 + arg100)
    var120 = var117 ^ (arg101 + var119) - var117
    var121 = arg101 ^ var119 | var120 + var119
    var122 = arg101 - 393 & var120 | arg100
    var123 = var117 + 363 - 1135889793 ^ arg101
    var124 = 830068392 + var119
    var125 = var121 & ((var117 ^ var123) + arg101)
    var126 = (1923911739 - var121 | var123) + var125
    var127 = var120 & var121 - (var120 + var125)
    var128 = var120 | arg101 + var123 & var127
    var129 = ((var117 + var122) ^ var120) & var124
    var130 = -277633608 ^ -358
    if var119 < arg101:
        var131 = var125 ^ (var121 + var127) & -1062088177
    else:
        var131 = var126 & var128
    var132 = (var126 & var117 ^ var124) ^ var119
    result = var130 | (var119 | var122)
    return result
def func17(arg104, arg105):
    """Return ``((-622 | -1610886129 | -701) | arg105) & arg104``.

    Matches the original exactly: every other intermediate it computed
    (var106..var115) was pure integer arithmetic whose value was
    discarded before the return.
    """
    mask = (-622 | -1610886129 | -701) | arg105
    return mask & arg104
def func13():
    """Return a running-sum closure over func14, seeded with 8."""
    cell = [8]

    def _inc(u, v):
        cell[0] += func14(u, v)
        return cell[0]

    return _inc
# Running-total closure (seed 8) over func14; consumed by func11.
var99 = func13()
def func1(arg1, arg2):
    """Benchmark kernel: folds func2's generator sum with bitwise mixing.

    NOTE(review): func2 drives func3/func4, which in turn use the
    stateful var12 closure, so results depend on prior call history.
    """
    var84 = func2(arg1, arg2)
    var85 = arg1 - (337 & arg2 & arg1)
    var86 = arg1 | var84 | arg2
    var87 = var84 | var86
    if arg1 < var85:
        var88 = (arg1 ^ var87 + arg2) | arg2
    else:
        var88 = ((arg2 + var86) ^ var87) | var87
    var89 = (var84 - arg1) ^ -736 & arg1
    var90 = (var85 ^ (arg2 & var86)) ^ arg1
    var91 = var90 - 847 ^ var90 + var89
    var92 = arg1 ^ var86
    var93 = var91 ^ (arg2 & var92 + 1082947587)
    var94 = (var90 ^ var92) ^ (var85 ^ var86)
    result = var89 + arg1 & (var92 - var87) - ((var89 + var93) ^ 887) - arg2 - var86 & (934689839 & var90)
    return result
def func2(arg3, arg4):
    """Sum ``arg4 ^ (arg4 & v)`` over every value yielded by func3."""
    return sum(arg4 ^ arg4 & v for v in func3(arg3, arg4))
def func4(arg8, arg9):
    """Kernel called from func3's first yield; uses stateful helpers.

    NOTE(review): ``var12`` is a module-level closure with a running
    total, so this function's output depends on prior call history.
    """
    var39 = var12(arg9, arg8)
    var43 = func8(arg8, var39)
    var50 = func10(var43, var39)
    var51 = (arg8 & 655) + arg8 ^ arg8
    var52 = var51 & -559 - 427
    var53 = var51 | 297116718
    var54 = ((arg9 | var53) & arg9) - var50
    var55 = var51 ^ -1165718165 & 1141506666 - var51
    if var52 < arg8:
        var56 = var53 - var51 ^ var53 & arg8
    else:
        var56 = -555 | var39
    if var52 < var51:
        var57 = 1945604587 & var43 - var51
    else:
        var57 = (arg9 ^ var54 & var43) + -499
    var58 = var52 | (arg9 ^ var55 | var51)
    var59 = (var55 ^ 1228217408) & (arg8 + var43)
    var60 = 713174959 ^ arg9
    var61 = (var54 + var39 + var60) + var60
    if arg9 < var43:
        var62 = var51 + var54 & var54 | 172
    else:
        var62 = (arg8 - var51) ^ var59 ^ var60
    var63 = (-1053602568 + 173 + var43) - var53
    var64 = var53 & 494 - -1547967970 ^ var59
    var65 = var60 | var60 & var54 ^ var60
    var66 = ((var60 & var50) - var43) & var65
    var67 = var52 - (var60 & var52) & -70
    var68 = ((-836360499 & var64) + var61) - var55
    var69 = var66 - var59 | var51 & var39
    var70 = var68 ^ var61 + var51
    var71 = var39 - var61 & var39
    # Only var58, var39, var59, var54 and arg8 reach the returned expression.
    result = (var58 + (var39 | (var59 | var59) ^ var39 - var54) - -1444861019 & -754 | var59 ^ var39) ^ arg8 + arg8
    return result
def func10(arg44, arg45):
    """Pure integer mixer used by func4.

    Deterministic function of its two arguments; one data-dependent
    branch, otherwise fixed add/subtract/bitwise arithmetic.
    """
    var46 = 1163836719 & (311 - arg44) + (arg45 ^ 2090053623 & 240)
    var47 = -507 ^ 519608850 & -733
    if var47 < arg45:
        var48 = (((arg45 + var47 ^ ((arg44 - ((125 | ((var46 ^ -350430808 - arg45) | var47)) ^ -648 ^ ((arg45 - arg45) & 396) & var47)) | arg44) | (var46 & var46 + var46 | -153 & arg45)) | arg44) | arg44) + -644
    else:
        var48 = arg44 + (var46 - ((418 & var46 | (var46 | var46 ^ arg45 & (var47 + 807) & (arg45 ^ (arg44 - ((arg44 & var46 ^ 65) ^ 372113354))) + var46 | var47) ^ -722) & arg45) + 481 & arg44) | 234
    # var48 is computed but never read; only var46, var49, var47 and the
    # arguments reach the returned expression.
    var49 = 1436737036 | var46 ^ ((var46 & var47 ^ arg45 ^ -978809328) ^ var46) ^ -1159721905 | 2058759920 ^ (((arg45 ^ -529) + var47) + 891 - -177059904 | 803) + (var47 | -752793462)
    result = var46 | (((-171340431 & arg45 - 404 | var49 | (arg45 + 588 - var47 ^ arg45)) ^ var46) - arg44) + var49
    return result
def func7(arg13, arg14):
    """Return ``(arg13 | 1826370996) & arg13``.

    Behavior-identical rewrite: the original derived var16..var38 through
    pure integer arithmetic and comparisons, none of which reached the
    returned expression ``var15 & arg13``.  ``arg14`` only ever fed those
    discarded intermediates.
    """
    widened = arg13 | 1826370996
    return widened & arg13
def func6():
    """Return a running-sum closure over func7, seeded with 8."""
    store = [8]

    def _tick(a, b):
        store[0] += func7(a, b)
        return store[0]

    return _tick
# Running-total closure (seed 8) over func7; consumed by func4.
var12 = func6()
def func3(arg6, arg7):
    """Yield eleven derived integers; consumed by func2's sum.

    Only the first yield calls func4 (and, through it, the stateful
    ``var12`` closure); the rest are pure arithmetic on the arguments.
    """
    var72 = func4(10394234, arg6)
    yield var72
    var73 = 241 + (arg7 | (arg7 + 678))
    yield var73
    var74 = var73 ^ var73
    yield var74
    var75 = var73 & var73 | var73
    yield var75
    var76 = (412794784 + -547) ^ var74 - var74
    yield var76
    var77 = ((847 + -1892226770) | -669) - arg6
    yield var77
    var78 = var73 + var77
    yield var78
    var79 = 1678002186 - var77
    yield var79
    var80 = -147724287 - (arg6 ^ 962 ^ var75)
    yield var80
    var81 = arg7 & var77
    yield var81
    var82 = var80 ^ -1563974824
    yield var82
def func8(arg40, arg41):
    """Always return 7; both arguments are ignored.

    Reproduces the original's bounded recursion: eleven steps of
    ``rest -> 7 ^ (7 & rest)`` starting from 0, which alternates
    7, 0, 7, ... and lands on 7 when the counter reaches zero.
    """
    def _spin(count, rest):
        nxt = 7 ^ 7 & rest
        if count == 0:
            return nxt
        return _spin(count - 1, nxt)

    return _spin(10, 0)
def func15(arg102, arg103):
    """Fold func17 over a 10-step countdown; both parameters are ignored.

    Each step replaces the running value with ``func17(acc, 2)`` where
    ``acc`` is the countdown index; the value produced at index 0 is
    returned.
    """
    def _descend(acc, rest):
        nxt = func17(acc, 2)
        if acc == 0:
            return nxt
        return _descend(acc - 1, nxt)

    return _descend(10, 0)
def func55(arg303, arg304):
    """Always return 0; both parameters are ignored.

    The recursion computes ``2 & (rest & 6) & (9 - total)`` starting from
    ``rest == 0``, which is 0 at every step, so the shared total never
    moves and the final value is 0.
    """
    total = [0]

    def _walk(count, rest):
        step = 2 & (rest & 6) & (9 - total[0])
        total[0] += step
        if count == 0:
            return step
        return _walk(count - 1, step)

    return _walk(10, 0)
# Driver: each section prints generator metadata, then runs 25000
# iterations of one top-level kernel, printing every intermediate result.
# Each iteration rebinds x to 5 before the call, so the kernels always
# receive (5, i).  NOTE(review): func38, func48 and func57 are presumably
# defined earlier in this file — not visible in this excerpt; confirm.
if __name__ == "__main__":
  print('prog_size: 5')
  print('func_number: 11')
  print('arg_number: 95')
  for i in range(25000):
    x = 5
    x = func1(x, i)
    print(x, end='')
  print('prog_size: 5')
  print('func_number: 25')
  print('arg_number: 185')
  for i in range(25000):
    x = 5
    x = func11(x, i)
    print(x, end='')
  print('prog_size: 5')
  print('func_number: 38')
  print('arg_number: 224')
  for i in range(25000):
    x = 5
    x = func25(x, i)
    print(x, end='')
  print('prog_size: 5')
  print('func_number: 48')
  print('arg_number: 291')
  for i in range(25000):
    x = 5
    x = func38(x, i)
    print(x, end='')
  print('prog_size: 3')
  print('func_number: 57')
  print('arg_number: 307')
  for i in range(25000):
    x = 5
    x = func48(x, i)
    print(x, end='')
  print('prog_size: 5')
  print('func_number: 60')
  print('arg_number: 336')
  for i in range(25000):
    x = 5
    x = func57(x, i)
    print(x, end='')
inovei6un/SoftUni-Studies-1 | 8,607,114,473,775 | 805852bf75d0ae92a7277041bf82e10adc1c2cf7 | 4d892dc51e2dda0fcce246ac608fc4e0ce98c52b | /FirstStepsInPython/Fundamentals/Exercice/Text Processing/04. Caesar Cipher.py | 80c1cc2bfd0f29c9a4da32e7e51258aac3a78513 | [
"MIT"
] | permissive | https://github.com/inovei6un/SoftUni-Studies-1 | 510088ce65e2907c2755a15e427fd156909157f0 | 3837c2ea0cd782d3f79353e61945c08a53cd4a95 | refs/heads/main | "2023-08-14T16:44:15.823962" | "2021-10-03T17:30:48" | "2021-10-03T17:30:48" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | line_input1 = input()
result = ''
for ch in range(len(line_input1)):
result += chr(ord(line_input1[ch]) + 3)
print(result) | UTF-8 | Python | false | false | 129 | py | 349 | 04. Caesar Cipher.py | 345 | 0.635659 | 0.604651 | 0 | 8 | 15.25 | 43 |
benkev/alma | 5,806,795,787,536 | fea3ceb5b2d36d0cf3aaa7938e9b69d587df771c | deeba087efffc2b1645b45057813420698a3dee2 | /old/simulated_obs_alma_large_incell.py | 84e19b6d93c5838adc82e4918b004748a7f981bc | [] | no_license | https://github.com/benkev/alma | 0f772ca16150d853731a9a25b3ccc09edf0f7ed2 | 8f8e2a66a3fc051d5a2471ecadc9a30da6c0720b | refs/heads/main | "2023-03-29T09:38:14.074744" | "2021-03-30T22:29:46" | "2021-03-30T22:29:46" | 332,064,741 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #
# Use to generate a simulated ALMA image of the data set
# RoundSpottyDisk1.fits. Uses 2-hour continuous integrations.
# Noise assumes 10GHz BW, dual pol.
#
import os
projectn= 'alma_large_incell_0.02048'
skymodeln='/home/benkev/ALMA/RoundSpottyDisk2.fits'
simobserve(project=projectn, skymodel=skymodeln,
incell = '0.02048arcsec', incenter='200GHz', inwidth='1GHz',
setpointings = False ,
ptgfile = '/home/benkev/ALMA/Betelgeusedirection.txt',
integration = '300s',
obsmode = 'int',
antennalist = '/home/benkev/ALMA/alma.cycle7.10.cfg',
hourangle = 'transit',
totaltime = '7200s',
outframe = 'LSRK',
thermalnoise = '',
verbose=False)
modelname=projectn + '/' + projectn + '.alma.cycle7.10.ms'
noisymodelname=projectn + '/' + projectn + '.alma.cycle7.10_mynoise.ms'
os.system('cp -r ' + modelname + ' ' + noisymodelname)
sm.openfromms(noisymodelname)
sm.setnoise(mode='simplenoise', simplenoise='0.0001755Jy')
print('Adding noise...' )
sm.corrupt()
sm.done()
fitsout=projectn + '/' + projectn + '.alma.cycle7.10_mynoise.uvfits'
exportuvfits(vis=noisymodelname,fitsfile=fitsout,
datacolumn='data', field='',spw='',
antenna='',timerange='',writesyscal=False,
multisource=False, combinespw=True,
writestation=False,overwrite=False)
| UTF-8 | Python | false | false | 1,472 | py | 17 | simulated_obs_alma_large_incell.py | 13 | 0.618886 | 0.586277 | 0 | 42 | 32.952381 | 75 |
druids/developers-chamber | 9,775,345,603,873 | b19bd1205bd8e578e7d8a175607ccf094bcdc7f9 | 8b20e14613b9f9d430d66ba32fc4190edda27c49 | /developers_chamber/git_utils.py | 517105c1731429e6658728f18d5091905547bf37 | [
"MIT"
] | permissive | https://github.com/druids/developers-chamber | ff31ce933854f54396ab666ac9feec964fa262ec | 728657d2c9e5e06d9942cc74c544f29e64cea3a8 | refs/heads/master | "2023-08-17T08:33:53.007182" | "2023-06-26T15:46:07" | "2023-06-26T15:46:07" | 203,990,722 | 10 | 11 | MIT | false | "2023-09-06T18:10:47" | "2019-08-23T12:05:08" | "2023-08-19T10:19:44" | "2023-09-06T18:10:44" | 215 | 8 | 11 | 5 | Python | false | false | import re
import git
from click import BadParameter, UsageError
from git import GitCommandError
from .types import ReleaseType
from .version_utils import bump_version
DEPLOYMENT_COMMIT_PATTERN = r'^Deployment of "(?P<branch_name>.+)"$'
RELEASE_BRANCH_PATTERN = r'^(?P<release_type>(release|patch))-(?P<version>[0-9]+\.[0-9]+\.[0-9]+)$'
def create_release_branch(version, release_type, remote_name=None, branch_name=None):
repo = git.Repo('.')
g = repo.git
if branch_name:
g.checkout(branch_name)
if remote_name:
g.pull(remote_name, branch_name)
if release_type in {ReleaseType.minor, ReleaseType.major}:
release_branch_name = 'release-{}'.format(version)
elif release_type == ReleaseType.patch:
release_branch_name = 'patch-{}'.format(version)
else:
raise BadParameter('build is not allowed for release')
g.checkout(branch_name or 'HEAD', b=release_branch_name)
if remote_name:
g.push(remote_name, release_branch_name, force=True)
return release_branch_name
def create_branch(source_branch_name, new_branch_name):
try:
repo = git.Repo('.')
g = repo.git
g.checkout(source_branch_name, b=new_branch_name)
return new_branch_name
except GitCommandError:
raise UsageError('Branch "{}" already exist'.format(new_branch_name))
def create_deployment_branch(environment, remote_name=None, is_hot=False):
repo = git.Repo('.')
g = repo.git
source_branch_name = str(repo.head.reference)
deployment_branch_name = 'deploy-{}'.format(environment)
files_to_add = list(filter(None, (file for file in g.diff('--name-only', '--cached').split('\n'))))
if files_to_add:
g.stash('save')
if is_hot:
deployment_branch_name += '-hot'
try:
g.branch('-D', deployment_branch_name)
except GitCommandError:
# Branch not exits
pass
g.checkout('HEAD', b=deployment_branch_name)
g.commit('--allow-empty', message='Deployment of "{}"'.format(source_branch_name))
if remote_name:
g.push(remote_name, deployment_branch_name, force=True)
g.checkout(source_branch_name)
if files_to_add:
g.stash('apply')
g.add(files_to_add)
return deployment_branch_name
def checkout_to_release_branch(remote_name=None):
repo = git.Repo('.')
g = repo.git
match = re.match(DEPLOYMENT_COMMIT_PATTERN, repo.head.commit.message)
if not match:
raise UsageError('Invalid deployment branch commit')
branch_name = match.group('branch_name')
g.checkout(branch_name)
if remote_name:
g.pull(remote_name, branch_name)
return branch_name
def bump_version_from_release_branch(files=['version.json']):
repo = git.Repo('.')
g = repo.git
match = re.match(RELEASE_BRANCH_PATTERN, str(repo.head.reference))
if not match:
raise UsageError('Invalid release branch')
bump_version(match.group('version'), files)
return match.group('version')
def commit_version(version, files=['version.json'], remote_name=None):
repo = git.Repo('.')
g = repo.git
try:
g.add(files)
g.commit(m=str(version))
except GitCommandError as ex:
raise UsageError('Version files was not changed or another git error was raised: {}'.format(ex))
try:
g.tag(str(version))
except GitCommandError as ex:
raise UsageError('Tag {} already exists or another git error was raised: {}'.format(version, ex))
if remote_name:
g.push(remote_name, str(repo.head.reference))
g.push(remote_name, str(version))
def merge_release_branch(to_branch_name=None, remote_name=None):
repo = git.Repo('.')
g = repo.git
source_branch_name = str(repo.head.reference)
g.checkout(to_branch_name)
if remote_name:
g.pull(remote_name, to_branch_name)
# GitPython does not support merge --no-ff or what?
git_cmd = git.cmd.Git('.')
no_ff_commit = 'Merge branch "{}"'.format(source_branch_name)
git_cmd.execute(('git', 'merge', '--no-ff', '-m', no_ff_commit, str(source_branch_name)))
if remote_name:
g.push(remote_name, to_branch_name)
g.checkout(source_branch_name)
def get_current_branch_name():
repo = git.Repo('.')
return str(repo.head.reference)
def get_commit_hash(branch_name):
try:
repo = git.Repo('.')
return repo.heads[branch_name].object.hexsha
except IndexError:
raise UsageError('Invalid branch name: {}'.format(branch_name))
def get_current_issue_key():
branch_name = get_current_branch_name()
match = re.match('(?P<issue_key>.{3}-\d+).*', branch_name)
if match:
return match.group('issue_key')
else:
return None
| UTF-8 | Python | false | false | 4,784 | py | 37 | git_utils.py | 30 | 0.644231 | 0.642768 | 0 | 163 | 28.349693 | 105 |
nlake44/GAEBenchmark | 6,176,162,996,959 | 120a6535ea0cb7c82c160d3acd23d18261de6844 | af28787e10af63f400eca759b3c23d88aa4c9a00 | /wordcount/__init__.py | 44f4d7d232f5107ec1e48ab9d67cdfaeadb5038d | [] | no_license | https://github.com/nlake44/GAEBenchmark | 055d33b747ed4c88739200bdd7aab5568e3c1022 | 66f7a7886d216cb35cfaf0cdbd4690f0288a8af0 | refs/heads/master | "2021-01-01T18:54:50.622118" | "2011-11-24T00:31:20" | "2011-11-24T00:31:20" | 2,693,753 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import logging
from data.wordcount import WCDataSet
from data.wordcount import get_result_query
from data.wordcount import WCResults
from records import Record
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp import template
from google.appengine.api import users
from fantasm import fsm
from fantasm.action import FSMAction
from fantasm.action import DatastoreContinuationFSMAction
from wordcount.pl import WordCountPipelineLoop
from mapreduce import model
from mapreduce import control
def getKindString(num_entries):
if num_entries >= 1000000:
return "Words1M"
elif num_entries >= 100000:
return "Words100K"
elif num_entries >= 10000:
return "Words10K"
elif num_entries >= 1000:
return "Words1K"
elif num_entries >= 100:
return "Words100"
# Wordcount for mr, pipeline, and fsm
class WordCount(webapp.RequestHandler):
def get(self):
user = users.get_current_user()
if not user:
self.redirect(users.create_login_url(dest_url="/"))
return
q = WCDataSet.all()
q.order('-start')
results = q.fetch(1000)
datasets = [result for result in results]
datasets_len = len(datasets)
q = Record.all()
q.filter('benchmark =', "wordcount")
q.order('-start')
results = q.fetch(1000)
records = [result for result in results]
records_len = len(records)
self.response.out.write(template.render("templates/wordcount.html",
{"user": user.email(),
"datasets_len" : datasets_len,
"datasets" : datasets,
"records": records,
"records_len" : records_len}))
def post(self):
""" Generate data sets here """
if self.request.get("fsm_cleanup"):
if fsm_calculate_run_time():
self.redirect('/wc')
else:
self.response.out.write("Error calculating fsm/wordcount")
return
if self.request.get("compute"):
engine = self.request.get("engine")
dataset = self.request.get("dataset")
user = self.request.get('user')
data = WCDataSet.get_by_key_name(dataset)
record = Record(engine_type=engine,
dataset=dataset,
benchmark="wordcount",
num_entities=data.num_entries,
#shard_count=data.num_pipelines,
entries_per_pipe=data.entries_per_pipe,
user=user,
char_per_word=data.char_per_word,
state="Running")
if engine == "fsm":
record.put()
context = {}
context['user'] = str(user)
context['num_entries'] = int(data.num_entries)
fsm.startStateMachine('WordCount', [context])
self.redirect('/wc')
elif engine == "pipeline":
mypipeline = WordCountPipelineLoop(data.num_entries)
mypipeline.start()
record.pipeline_id = mypipeline.pipeline_id
record.put()
self.redirect('/wc')
#self.redirect(mypipeline.base_path + "/status?root=" + mypipeline.pipeline_id)
logging.info("wordcount job running")
elif engine == "mr":
# Why 1k each per shard or less? is this ideal?
shards = 10
if data.num_entries > 1000:
shards = data.num_entries/1000
kind = getKindString(data.num_entries)
mapreduce_id = control.start_map(
name="Wordcount with just mappers",
handler_spec="wordcount.mr.wordcount_mapper",
reader_spec="mapreduce.input_readers.DatastoreInputReader",
mapper_parameters={
"entity_kind": "data.wordcount."+kind,
"processing_rate": 500
},
mapreduce_parameters={model.MapreduceSpec.PARAM_DONE_CALLBACK:
'/wc/mr/callback'},
shard_count=shards,
queue_name="default",
)
record.mr_id = mapreduce_id
record.put()
self.redirect('/wc')
def fsm_calculate_run_time():
""" Fantasm does not give call backs when its done. Must figure it out
with another job using the last modified date on output entities
"""
# Get the last job which was run for wordcount/fsm
q = Record.all()
q.filter('engine_type =','fsm')
q.filter('benchmark =','wordcount')
q.order('-start')
results = q.fetch(1)
if len(results) == 0:
logging.error("Unable to find a record for fsm/wordcount")
return False
q = None
record = None
for ii in results:
if ii.state == "Done":
logging.error("Last FSM end time has already been calculated")
q = WCResults.all()
if not q:
logging.error("No query returned for WordCount results")
return False
record = ii
max_date = None
while True:
results = q.fetch(1000)
for ii in results:
date = ii.modifiedDate
if max_date == None or max_date < date:
max_date = date
if len(results) < 1000:
break;
record.state = "Done"
record.end = max_date
delta = (record.end - record.start)
record.total = float(delta.days * 86400 + delta.seconds) + float(delta.microseconds)/1000000
record.put()
return True
| UTF-8 | Python | false | false | 5,434 | py | 32 | __init__.py | 24 | 0.600478 | 0.58594 | 0 | 159 | 33.163522 | 94 |
Schnee-Cy/AntMark | 10,127,532,896,942 | 0be3b28d47053ec37420156299ebbb1b8d62ea35 | dbd6c2e7eb370a139120a707dbddcc7def985531 | /users/migrations/0013_message_msg_type.py | 7822c75613b4150660e8e3b2855555e1c64d400f | [] | no_license | https://github.com/Schnee-Cy/AntMark | e41c3d2438ed970f224c906cef50c06804f33f97 | 59afc724ccb27561b74140202ee114fdea651284 | refs/heads/master | "2020-05-09T10:18:00.818948" | "2019-05-15T03:34:07" | "2019-05-15T03:34:07" | 181,036,476 | 1 | 0 | null | true | "2019-05-14T03:25:09" | "2019-04-12T15:35:19" | "2019-05-08T07:59:44" | "2019-05-14T03:25:09" | 32,019 | 1 | 0 | 0 | Python | false | false | # Generated by Django 2.0.3 on 2019-04-23 22:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0012_auto_20190423_2215'),
]
operations = [
migrations.AddField(
model_name='message',
name='msg_type',
field=models.CharField(choices=[('M', 'massage'), ('S', 'stu_verify'), ('C', 'commodity_verify')], default='message', max_length=20),
),
]
| UTF-8 | Python | false | false | 480 | py | 67 | 0013_message_msg_type.py | 26 | 0.58125 | 0.5125 | 0 | 18 | 25.666667 | 145 |
ameet123/SchemaMatching | 19,052,474,932,908 | 5001fdb3743404107225930ff3f10c05210565da | 2db2fbf17c906617bd0e55632698b7c0c8c01c99 | /PrecisionRecall/Scoring.py | 0c23a9447e81a3a8c66323b73f93c7a21087c348 | [] | no_license | https://github.com/ameet123/SchemaMatching | b0daa0f78d49319e18f37f1668ee2126b1784255 | 72a77b3f95a4f2d539fdeccec09a0061dd9ecd76 | refs/heads/master | "2021-04-18T11:00:11.652187" | "2020-03-31T21:04:49" | "2020-03-31T21:04:49" | 249,537,339 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Precision and Recall
import pandas as pd
import csv
from collections import defaultdict
import operator
csv_file_1="C:/Users/Shruti Jadon/Desktop/DatabaseProject/PrecisionRecall/FinalSOM_edit_45.csv"
csv_file_2="C:/Users/Shruti Jadon/Desktop/DatabaseProject/PrecisionRecall/FinalSOM_edit_50.csv"
csv_file_3="C:/Users/Shruti Jadon/Desktop/DatabaseProject/PrecisionRecall/Manual_Mapping.csv"
def extract_file(csv_file):
df = pd.read_csv(csv_file) #you can also use df['column_name']
test_attribute= df['test_attribute'].tolist()
train_attribute=df['train_attribute'].tolist()
i=0
Matched_Dictionary={}
Matched_Dictionary[test_attribute[i]]=train_attribute[i]
for i in range(0,len(test_attribute)-1):
if(test_attribute[i]!=test_attribute[i+1]):
Matched_Dictionary[test_attribute[i+1]]=train_attribute[i+1]
return Matched_Dictionary
def extracted_files(csv_file):
df = pd.read_csv(csv_file) #you can also use df['column_name']
test_attribute= df['test_attribute'].tolist()
train_attribute=df['train_attribute'].tolist()
probability=df['probability']
temp_dictionary=defaultdict(list)
i=0
temp_dictionary[test_attribute[i]].append((probability[i],train_attribute[i]))
for i in range(0,len(test_attribute)-1):
if(test_attribute[i]==test_attribute[i+1]):
temp_dictionary[test_attribute[i]].append((probability[i],train_attribute[i]))
else:
temp_dictionary[test_attribute[i]].append((probability[i],train_attribute[i]))
dict_1=defaultdict(list)
for key in temp_dictionary.keys():
print temp_dictionary[key]
temp_dictionary[key].sort(key=lambda x: x[0], reverse=True)
print "after sorting"
print temp_dictionary[key]
print "new value"
for keys in temp_dictionary.keys():
#print keys +":::::"
x= temp_dictionary[keys]
if(len(x)>3):
for values in range(0,3):
dict_1[keys].append(x[values][1])
else:
for values in range(0,len(x)):
dict_1[keys].append(x[values][1])
#return dict_1, dict_2, dict_3
return dict_1
First=extracted_files(csv_file_1)
#print First
Second=extracted_files(csv_file_2)
Manual=extract_file(csv_file_3)
#precision calculation false positive and false negative
#false positive is something that shouldn't have been matched but is matching
#false negative is something that should have been null but is matched
# true positive is perfectly matched ones
#precision= tp/tp+fp
#recall= tp/tp+fn
#matching is false p when highest is not same as manual one.
#false negative if it is present in list
def F1score(evaluation_file,manual_file):
tp=0
fp=0
fn=0
max_len=len(evaluation_file.keys())
#print max_len
for e_key in evaluation_file.keys():
#print "evaulation of keys"
for m_key in manual_file.keys():
if(e_key==m_key):
if(manual_file[m_key]==evaluation_file[e_key][0]):
tp=tp+1
elif (manual_file[m_key]!=evaluation_file[e_key][0]):
fp=fp+1
else:
for values in evaluation_file[e_key]:
if(manual_file[m_key]==evaluation_file[e_key][values]):
print "yo"
fn=fn+1
precision=(tp*1.0)/(tp+fp)
recall=(tp*1.0)/(tp+fn)
F1_Score=(2*precision*recall)/(precision+recall)
second_score=tp*1.0/max_len
return precision, recall, F1_Score, second_score
print F1score(First,Manual)
print F1score(Second,Manual)
| UTF-8 | Python | false | false | 3,825 | py | 180 | Scoring.py | 23 | 0.613595 | 0.599477 | 0 | 98 | 36.234694 | 95 |
DMWillie/Leetcode | 1,769,526,539,840 | 2fe43f483bda931b082950445690d0c107b3c6b0 | 6f1703737438600cb91c89ee52c6e1409a78ae13 | /DecimalToBinary.py | c5908366a7b16937633eb3629c72e079930b4b2f | [] | no_license | https://github.com/DMWillie/Leetcode | 5393c243f10a58b6dd2e37d09042a116ed5a49f6 | 5e9ec62c160974c6048693d952e6a3a60a4bdf12 | refs/heads/master | "2020-04-26T07:40:27.888959" | "2019-03-02T03:52:06" | "2019-03-02T03:52:06" | 173,400,356 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding:utf-8 -*-
__author__='Willie'
"""
由于字符串str并没有相加,例如''+1这样调用是错误的,
也并没str(1)将整数变为字符串这样的函数
所以将整数变为字符串可以利用格式化这样的方式
"""
#十进制转换为二进制
def dtob(n):
    """Return the binary-digit string of a non-negative integer.

    Fixes two defects in the original:
    - `n = n / 2` becomes float division on Python 3; use floor division
      so the function works under both interpreters.
    - n == 0 incorrectly produced '1' (the trailing bit was appended
      unconditionally); it now returns '0'.
    """
    if n == 0:
        return '0'
    bits = []
    while n > 0:
        bits.append(n % 2)
        n = n // 2
    # Remainders come out least-significant first; reverse for printing.
    return ''.join('%d' % bit for bit in reversed(bits))
# Interactive driver (Python 2 syntax: raw_input and the print statement).
number=int(raw_input("请输入一个十进制数:"))
print '%d 对应的二进制数为: %s' %(number,dtob(number))
| UTF-8 | Python | false | false | 611 | py | 8 | DecimalToBinary.py | 7 | 0.573696 | 0.546485 | 0 | 23 | 18.130435 | 46 |
junweinehc/Python | 13,718,125,560,516 | d51320797309ca5cdde896aedf9d65c78cfbefa1 | 463e60e6af874c96bfa01f48156c648f63878614 | /Projects/email_sending.py | 0900c68d98babe78756c21770b8ea4825249dca1 | [] | no_license | https://github.com/junweinehc/Python | 642e9f773cca446d667124584dd38692af98e90f | 8390d0a3ae36a4f60df9fbf414c73e6c21a2e828 | refs/heads/main | "2023-04-20T11:10:17.821976" | "2021-05-24T06:29:08" | "2021-05-24T06:29:08" | 321,553,438 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | Python 3.7.4 (v3.7.4:e09359112e, Jul 8 2019, 14:54:52)
[Clang 6.0 (clang-600.0.57)] on darwin
Type "help", "copyright", "credits" or "license()" for more information.
>>> import smtplib
# NOTE(review): sender and recipient are left blank and must be filled in
# before this script can actually deliver mail.
sender_email = ""
rec_email = ""
# Password is read interactively; the str() wrapper around the prompt is a no-op.
password = input(str("Enter your password: "))
message = "HI m python"
# Connect to Gmail's SMTP submission port and upgrade the session to TLS
# before authenticating.
server = smtplib.SMTP('smtp.gmail.com',587)
server.starttls()
server.login(sender_email, password)
print("Correct logins")
server.sendmail(sender_email, rec_email, message)
print("Email has been sent to: ",rec_email)
# NOTE(review): the SMTP connection is never closed — server.quit() is missing.
| UTF-8 | Python | false | false | 514 | py | 5 | email_sending.py | 3 | 0.700389 | 0.63035 | 0 | 17 | 29.117647 | 72 |
gaurav0529/Python_Questions | 8,383,776,198,711 | c9a167fd7278dad134efa8a51a26a51d46e42860 | 3adfb9361759ed087053098039f3fb28066c2ed5 | /Loops and Conditional Statements/Ques14_loop&cond.py | 5b604a693e038808f05416505c2713c705fda4d1 | [] | no_license | https://github.com/gaurav0529/Python_Questions | 7f8b13e8cb9893b075b273fee44d934ad0b8938e | 9fd8140cc998d5e9bffd5c314e8f4140a47f2acf | refs/heads/master | "2020-06-12T18:30:24.076761" | "2019-07-04T08:33:01" | "2019-07-04T08:33:01" | 194,388,168 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | print("Question 14")
# Count how many characters of the entered string are letters and how many
# are digits, then report both tallies.
print("Enter String")
text = input()
length = len(text)
letters = 0
digits = 0
print(length)
# Single pass over the characters; a char can only match one of the two tests.
for ch in text:
    if ch.isalpha():
        letters += 1
    if ch.isdigit():
        digits += 1
print("Letters", letters)
print("Digits", digits)
| UTF-8 | Python | false | false | 236 | py | 53 | Ques14_loop&cond.py | 53 | 0.576271 | 0.550847 | 0 | 15 | 14.733333 | 25 |
shishanshanTest/sinazixuan | 5,918,464,966,931 | 9793371d382dd3f46d3dbd0d0244ee039b1f4a1c | f158797859809ea580632d28d3f8293ade52814f | /test/test_zixuan.py | 7e02b2bce0fcd00dc13a8199fb515d84a9e0ef08 | [] | no_license | https://github.com/shishanshanTest/sinazixuan | 49f153308b9c9159ee3f88cbd4ef75951a1aeb51 | 6853938fdc24c175323dae59beb4b94b1c4ea123 | refs/heads/master | "2020-05-20T01:55:53.543884" | "2019-05-07T03:17:10" | "2019-05-07T03:17:10" | 185,320,515 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # !/use/bin/env python
# -*-conding:utf-8-*-
#author:shanshan
import requests,unittest,random
from base.excel_json import *
from base.get_path import *
from base.read_excel import *
# class zixuan(unittest.TestCase):
# yincang_type = 'us'
#
# def setUp(self):
# pass
#
# def tearDown(self):
# pass
#
# def assert_code(self,r):
# self.assertEqual(r.status_code, 200)
# self.assertEqual(r.json()['result']['status']['code'], 0)
#
#
# def test_001_chakanzixuanfenzu(self):
# """查看自选分组接口"""
# r = requests.get(url=excel.get_url(1),
# params=read_zixuanJSON_data(1),
# headers=zixuan_header())
# self.assert_code(r)
#
# def test_002_chuangjianzixuanfenzu(self):
# """创建自选分组接口"""
# r = requests.get(url=excel.get_url(2),
# params=read_zixuanJSON_data(2),
# headers=zixuan_header())
# global pid, order
# pid = r.json()['result']['data']['pid']
# order = r.json()['result']['data']['order']
# print(pid)
# self.assert_code(r)
#
# def test_003_xiugaizixuanname(self):
# """更改自选分组名称接口"""
# params = read_zixuanJSON_data(3)
# params['pid'] = pid#动态参数pid
# r = requests.get(url=excel.get_url(3),
# params=params,
# headers=zixuan_header())
# self.assert_code(r)
#
# def test_004_tiaozhengzixuanfenzushuxun(self):
# """随即调整自选分组顺序接口"""
# jorder = ['all','cn','hk','us','ft','fund','wh',pid]#放在一个列表中
# random.shuffle(jorder)#使用random.shuffle生成随机的列表
# list_str = ','.join(jorder)#再将列表转化为字符串
# params = read_zixuanJSON_data(4)
# params['jorder'] = list_str
# r = requests.get(url=excel.get_url(4),
# params=params,
# headers=zixuan_header())
# self.assert_code(r)
#
#
#
# def test_005_yincangzixuanfenzu(self):
# """隐藏自选股分组接口"""
# params = read_zixuanJSON_data(5)
# #typelist_index = random.randint(0,5)
# #listype = ['cn','hk','us','ft','fund','wh']
# params['type'] =self.yincang_type
# r = requests.get(url=excel.get_url(5),
# params=params,
# headers=zixuan_header())
# self.assert_code(r)
#
# def test_006_yincanggongnengyanzheng(self):
# """验证隐藏功能是否成功"""
# r = requests.get(url=excel.get_url(1),
# params=read_zixuanJSON_data(1),
# headers=zixuan_header())
# for i in range(0,7):
# if r.json()['result']['data'][i]['type'] == self.yincang_type:
# self.assertEqual(r.json()['result']['data'][i]['status'],1)
#
#
# def test_007_shanchuzixuanfenzu(self):
# """删除自选分组接口"""
# params = read_zixuanJSON_data(6)
# params['pid'] = pid
# r = requests.get(url=excel.get_url(6),
# params=params,
# headers=zixuan_header())
# self.assert_code(r)
# if __name__ == '__main__':
# unittest.main(verbosity=2)
# Per the commented-out tests above, endpoint 2 creates a watchlist group;
# capture the pid the service assigns to it. excel, read_zixuanJSON_data and
# zixuan_header come from the star imports at the top of this file.
# NOTE(review): json is not imported directly here — presumably re-exported
# by one of the star imports; verify.
r = requests.get(url=excel.get_url(2),
                 params=read_zixuanJSON_data(2),
                 headers=zixuan_header())
pid = r.json()['result']['data']['pid']
print(json.dumps(r.json(),indent=4,ensure_ascii=False))
# Endpoint 3 renames the group just created: inject the dynamic pid into the
# request parameters.
params = read_zixuanJSON_data(3)
params['pid'] = pid# dynamic parameter: pid returned by the create call
r = requests.get(url=excel.get_url(3),
                 params=params,
                 headers=zixuan_header())
print(json.dumps(r.json(),indent=4,ensure_ascii=False))
| UTF-8 | Python | false | false | 3,932 | py | 6 | test_zixuan.py | 6 | 0.51473 | 0.500803 | 0 | 115 | 31.469565 | 77 |
faustind/advent_of_code_2020 | 5,385,889,005,076 | 549e5674bcbd223122b3f760f3fc8626ebfa8083 | d02b82284cd2f2ac9465ebe0b871c80b522ca4c5 | /day6/main.py | 69bcc781a04c831810a990c0a9a325e7476ad54b | [] | no_license | https://github.com/faustind/advent_of_code_2020 | a5a4fa461a467d0fec33959e3ed36d70a9de7e23 | 11876e61957caa9cb480365c34237ce46970d4c0 | refs/heads/main | "2023-02-23T16:43:05.204213" | "2021-01-31T23:27:50" | "2021-01-31T23:27:50" | 317,677,838 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import collections
def row():
    """Read one line from stdin; whitespace-only (or empty) lines become ''."""
    line = input()
    return "" if not line.strip() else line
def read_g():
    """Consume one blank-line-separated group from stdin.

    Returns:
        (member_count, Counter of answered questions for the group).

    Side effect: sets the module-level `finished` flag to True once EOF
    is reached.
    """
    global finished
    answers = collections.Counter()
    members = 0
    line = row()
    while line:
        members += 1          # one line per group member
        answers.update(line)  # each character is one answered question
        try:
            line = row()
        except EOFError:
            line = ""
            finished = True
    return members, answers
def anyone_count():
    """Print the total, over all groups, of distinct questions answered."""
    total = 0
    while not finished:
        _, answers = read_g()
        total += len(answers)
    print(total)
def everyone_count():
    """Print the total of questions answered by every member of each group."""
    total = 0
    while not finished:
        member_count, answers = read_g()
        # A question counts only when its tally equals the group size.
        total += sum(1 for tally in answers.values() if tally == member_count)
    print(total)
def main():
    """Run the everyone-tally; anyone_count is the alternative, kept commented out."""
    # anyone_count()
    everyone_count()
if __name__ == "__main__":
    # `finished` is the module-level EOF flag read by the count loops and
    # set by read_g.
    finished = False
    main()
| UTF-8 | Python | false | false | 798 | py | 15 | main.py | 13 | 0.484962 | 0.477444 | 0 | 50 | 14.96 | 60 |
stellating/Draw_InterestPlot | 747,324,309,549 | 83b49765062c0c60d7ab8de83eddccbe7ebff518 | b8e7e6427d46bfd05de4859d9710afefd28c0877 | /python_plt20190313/stella_plt.py | d4020f19aeddf111b90f3c62b186fc25af3a69d8 | [] | no_license | https://github.com/stellating/Draw_InterestPlot | eaf2732b948bd8ecca72684ea43354525337752f | 0843cdc8dc432fb02409a47e15017b393b14a91a | refs/heads/master | "2020-04-28T14:58:35.733469" | "2019-03-13T13:05:23" | "2019-03-13T13:05:23" | 175,355,384 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Font settings shared by the title, axis labels and tick text.
font1 = {'family': 'Times New Roman',
         'weight': 'heavy',
         'size': 20,
         }
plt.rc('font', **font1) # pass in the font dict as kwargs
# Read "name,x,y" triples from the CSV; per the axis labels below, x is the
# tracker's FPS and y its AUC score.
f=open('fpsforstella.csv','r')
line = f.readline()
listX = []
listY = []
listName = []
while line:
    a = line.split(',')
    name = a[0]
    X = float(a[1])
    Y = float(a[2])
    listName.append(name)
    listX.append(X) # collect the values into the lists
    listY.append(Y)
    line = f.readline()
f.close()
print(listX)
print(listY)
X_data = listX
Y_data = listY
#print(type(X_data))
N = len(X_data)
#colors = np.random.rand(N) # randomly generate color values in 0~1
#col=['c', 'b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
col=['c', 'b', 'g', 'r', 'c', 'm', 'y', 'k'] # edge color must not be white, or hollow markers become invisible
len_col = len(col)
#area = np.pi * (10 * np.random.rand(N))**2 # point radius range: 0~15
#markers_market=['.', ',', 'o', 'v', '^', '<', '>', '1', '2', '3', '4', '8', 's', 'p', 'P', '*', 'h', 'H', '+', 'x', 'X', 'D', 'd', '|', '_']
markers_market=['.', ',', 'o', 'v', '^', '<', '>', '8', 's', 'p', 'P', '*', 'h', 'H', 'D', 'd'] # keep only markers that have an outline
len_markers = len(markers_market)
#print(np.random.randint(len_markers))
#print(markers_market[0])
#markers_market[np.random.randint(len_markers)] #[0,len_markers)
# One hollow scatter point per tracker: c='' leaves the face unfilled and the
# marker/edge-color pairs are hand-picked so every tracker is distinguishable.
# for i in range(len(X_data)):
p1 = plt.scatter(X_data[0], Y_data[0], c='',s=200, alpha=1, marker=markers_market[1],edgecolors=col[0],linewidths=2, label='ACT')
p2 = plt.scatter(X_data[1], Y_data[1], c='',s=200, alpha=1, marker=markers_market[2],edgecolors=col[1],linewidths=2, label='MDNET')
p3 = plt.scatter(X_data[2], Y_data[2], c='',s=200, alpha=1, marker=markers_market[3],edgecolors=col[2],linewidths=2, label='ADNET')
p4 = plt.scatter(X_data[3], Y_data[3], c='',s=200, alpha=1, marker=markers_market[4],edgecolors=col[7],linewidths=2 , label='meta-sdnet')
p5 = plt.scatter(X_data[4], Y_data[4], c='',s=200, alpha=1, marker=markers_market[5],edgecolors=col[4],linewidths=2 , label='meta-crest')
p6 = plt.scatter(X_data[5], Y_data[5], c='',s=200, alpha=1, marker=markers_market[6],edgecolors=col[0],linewidths=2 , label='VITAL')
p7 = plt.scatter(X_data[6], Y_data[6], c='',s=200, alpha=1, marker=markers_market[7],edgecolors=col[6],linewidths=2 , label='siamFC')
p8 = plt.scatter(X_data[7], Y_data[7], c='',s=200, alpha=1, marker=markers_market[8],edgecolors=col[2],linewidths=2, label='MCCT')
p9 = plt.scatter(X_data[8], Y_data[8], c='',s=200, alpha=1, marker=markers_market[9],edgecolors=col[3],linewidths=2, label='CRAC-MDNet')
p10 = plt.scatter(X_data[9], Y_data[9], c='',s=200, alpha=1, marker=markers_market[10],edgecolors=col[1],linewidths=2 , label='SiamRPN')
p11 = plt.scatter(X_data[10], Y_data[10], c='',s=200, alpha=1, marker=markers_market[12],edgecolors=col[3],linewidths=2 , label='CRAC-Siam')
plt.legend(loc='lower right' ,ncol=3,fontsize=14) # legend laid out in 3 columns
#plt.scatter(X_data[i], Y_data[i], s=200, alpha=1, marker=markers_market[np.random.randint(len_markers)],edgecolors=col[np.random.randint(len_col)])
# Pad the axis limits a little beyond the data range.
maxX = max(X_data)
minX = min(X_data)
maxY = max(Y_data)
minY = min(Y_data)
plt.xlim(minX-0.2, maxX+10)
plt.ylim(minY-5, maxY+5)
plt.axis()
plt.grid(linestyle='--')
plt.title("AUC for OPE and FPS")
plt.xlabel("FPS")
plt.ylabel("AUC for OPE")
# Grab the current axes handle for the formatter/spine tweaks below.
ax=plt.gca();
# Symmetric-log x scale with explicit tick positions, but plain (non-
# scientific) tick labels on both axes.
plt.xscale('symlog')
x = (0,1,2,4,6,8,10,20,40,60)
plt.xticks(x)
ax.yaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
ax.xaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
# Tick label size/width, inward-pointing ticks on all four sides, and the
# Times New Roman font for every tick label.
plt.tick_params(labelsize=15,width=2,bottom=True,top=True,left=True,right=True,direction='in')
tick_labels = ax.get_xticklabels() + ax.get_yticklabels()
[label.set_fontname('Times New Roman') for label in tick_labels]
# Thicken all four axis spines to width 2.
ax.spines['bottom'].set_linewidth(2);
ax.spines['left'].set_linewidth(2);
ax.spines['right'].set_linewidth(2);
ax.spines['top'].set_linewidth(2);
# Trim the white margins so the saved figure is tight.
plt.subplots_adjust(top=0.9,bottom=0.1,left=0.1,right=1,hspace=0,wspace=0) # controls the white border of the saved image
plt.margins(0,0)
# Save the figure to disk, then display it.
plt.savefig("lalala3.png")
# show the plot (required)
plt.show()
| UTF-8 | Python | false | false | 4,694 | py | 4 | stella_plt.py | 1 | 0.624538 | 0.580869 | 0 | 129 | 31.550388 | 150 |
anlu1601/openAI_gym_Q_Learning | 9,835,475,108,361 | dd96cddcd519e68c3ec9b2d008469b2a08245916 | 242afaf93335669f324c5ba7040bf54ebf95b2df | /sim.py | 656ca17f7e341b3ba4b4487556bdc5a1ac3902bc | [] | no_license | https://github.com/anlu1601/openAI_gym_Q_Learning | 78705ef65fe0dec2c0b82c8b75501c2219da3d8e | 6b0a86b0a0b43baac18b6ee46518af2b3078fd02 | refs/heads/master | "2023-05-06T07:07:42.044520" | "2021-05-31T08:12:47" | "2021-05-31T08:12:47" | 372,430,133 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from random import seed
from random import random
# Fix the global PRNG seed once so repeated runs produce the same samples.
seed(1)
# 1 day ~ sin(0.02x + 5) * 10 + 30
import math
def sim_curve(x, phaseshift, amp):
    """Seasonal sine baseline: amplitude 10 around `amp`, period 365 in x.

    phaseshift is typically -/+ pi/2.
    """
    angular_freq = (2 * math.pi) / 365
    return math.sin(angular_freq * x + phaseshift) * 10 + amp
def sim(x, range_, phaseshift, amp):
    """Draw one uniform sample within +/- range_ of the seasonal curve at x."""
    baseline = sim_curve(x, phaseshift, amp)
    low = baseline - range_
    high = baseline + range_
    # Single random() draw scaled into [low, high).
    return low + random() * (high - low)
def sim_func_single(x, range_):
    """Draw one uniform sample from the interval [x - range_, x + range_)."""
    low = x - range_
    high = x + range_
    # Single random() draw scaled into the interval.
    return low + random() * (high - low)
damarkrisnandi/CP_With_Python3 | 1,778,116,462,969 | 734815481f70797bd98ebae506fc71b1ecf446a0 | 9e57349fdeff7e687a389a16c4e5a9929d4ec80f | /codeforces/210129_edu103/a.py | 43906bb3ac44f0f528f3fd9f9e4fce3947c725fa | [] | no_license | https://github.com/damarkrisnandi/CP_With_Python3 | 33f1e31ed27d20de9e3e3e1b26a63382f36bf1f3 | fbb4ddea8f68d97f07133c1569dfdb9cff748bef | refs/heads/master | "2023-04-12T20:17:16.911071" | "2021-04-20T10:18:59" | "2021-04-20T10:18:59" | 359,771,853 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from math import ceil
t = int(input())
while t>0:
t-=1
n,k = list(map(int, input().split(" ")))
if n > k:
mult = int(ceil(n/k))
k = k * mult
print(int(ceil(k/n))) | UTF-8 | Python | false | false | 195 | py | 34 | a.py | 34 | 0.487179 | 0.476923 | 0 | 10 | 18.6 | 44 |
gabriellaec/desoft-analise-exercicios | 8,186,207,673,782 | 864d33c26b2402d0705695c52e76bfd9382e3b66 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_185/ch32_2019_04_01_18_01_02_339841.py | 578b0bd9b3232c016af4d7283aa23a58dbc230c5 | [] | no_license | https://github.com/gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | "2023-01-31T17:19:42.050628" | "2020-12-16T05:21:31" | "2020-12-16T05:21:31" | 306,735,108 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | dúvida = input("Você tem dúvidas? (sim/não)")
# Keep prompting until the user answers "não" (no).
while dúvida != "não":
    # NOTE(review): this inner test is always true — the loop condition
    # already guarantees dúvida != "não".
    if dúvida != "não":
        print("Pratique mais")
    dúvida = input("Você tem dúvidas? (sim/não)")
print("Até a próxima")
| UTF-8 | Python | false | false | 219 | py | 35,359 | ch32_2019_04_01_18_01_02_339841.py | 35,352 | 0.585366 | 0.585366 | 0 | 6 | 32.5 | 53 |
eurialdo/EditordeImagem | 18,605,798,344,397 | fab3401ad0ce9b3fce78a5c3f6b6e81d8b792b13 | 1894e19eb44c4b17ef59dc8af55186c2c9f6e76f | /MudarTam_Imag.py | 2be4ae9e6e668ff25713bfa1991733d6af37f923 | [] | no_license | https://github.com/eurialdo/EditordeImagem | 826c81df254800d4bf438bf1986adcecac32e076 | cf6a3f2a2ed779b5a85867efc4286e2ef1c8d5c6 | refs/heads/master | "2020-09-21T20:32:23.013087" | "2019-11-29T20:36:35" | "2019-11-29T20:36:35" | 224,918,988 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import getopt
import sys
import logging
from filtro_Imag import ajustarImag
logger = logging.getLogger()
def init():
    """Parse the CLI options, apply the requested image edits and show the result.

    Options: -p <path> (required), --rotacao=<deg>, --redimencionar=<w,h>,
    --cor_filtro=<name>, --girar_cima, --girar_esquerda.

    Raises:
        ValueError: when no arguments are given or -p (image path) is missing.
    """
    args = sys.argv[1:]
    if len(args) == 0:
        logger.error("-p não pode estar vazio")
        raise ValueError("-p não pode estar vazio")
    logger.debug(f"run with params: {args}")
    # transform arguments from console
    opts, rem = getopt.getopt(args, "p:", ["rotacao=", "redimencionar=", "cor_filtro=", "girar_cima", "girar_esquerda"])
    rotate_angle = resize = color_filter = flip_top = flip_left = None
    path = None
    for opt, arg in opts:
        if opt == "-p":
            path = arg
        elif opt == "--rotacao":
            rotate_angle = int(arg)
        elif opt == "--redimencionar":
            resize = arg
        elif opt == "--cor_filtro":
            color_filter = arg
        # BUG FIX: getopt registers --girar_cima/--girar_esquerda, but the
        # original compared against --giro_cima/--giro_esquerda, so neither
        # flip could ever trigger.
        elif opt == "--girar_cima":
            flip_top = True
        # BUG FIX: a long option declared without '=' yields an empty-string
        # argument, so `flip_left = arg` was always falsy; store True instead.
        elif opt == "--girar_esquerda":
            flip_left = True
    if not path:
        raise ValueError("sem caminho")
    img = ajustarImag.get_img(path)
    if rotate_angle:
        img = ajustarImag.Rotacao(img, rotate_angle)
    if resize:
        # --redimencionar expects "width,height"
        w, h = map(int, resize.split(','))
        img = ajustarImag.redimensionar(img, w, h)
    if color_filter:
        img = ajustarImag.cor_filtro(img, color_filter)
    if flip_left:
        img = ajustarImag.VirarEsquerda(img)
    if flip_top:
        img = ajustarImag.girarCima(img)
    # Only display the result when assertions are enabled (not under -O).
    if __debug__:
        img.show()
# Run the CLI entry point only when executed directly (not on import).
if __name__ == "__main__":
    init()
| UTF-8 | Python | false | false | 1,530 | py | 5 | MudarTam_Imag.py | 5 | 0.561518 | 0.560209 | 0 | 65 | 22.492308 | 120 |
Haymaekr/PythonProjects | 206,158,460,005 | 843a1430bd0c7366152f504c53f7eca4ea4a9432 | 3b271425a1b180b8dadbd2c716f7cec03f0efd6e | /WebScraper/scraper.py | 1c626533a7387774b3c29a6f21f31c8afacce47e | [] | no_license | https://github.com/Haymaekr/PythonProjects | 8a8424eaa0219c5b1b542cf6c79f35b5ea6960d8 | bb940b497d62c0464238ee3d72f3f68df5dc579e | refs/heads/master | "2022-11-08T15:25:33.313902" | "2020-06-20T05:25:18" | "2020-06-20T05:25:18" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
from bs4 import BeautifulSoup
import pprint
# Fetch the Hacker News front page and parse it.
res = requests.get("https://news.ycombinator.com/")
soup = BeautifulSoup(res.text,"html.parser")
# Story title anchors and the per-story metadata row (score, author, ...).
links = (soup.select('.storylink'))
subtext = soup.select('.subtext')
# The "More" link at the bottom of the page points to page 2.
more = soup.select('.morelink')
more_link = more[0].get('href')
print(more_link)
# Fetch and parse page 2 as well.
res2 = requests.get("https://news.ycombinator.com/"+more_link)
soup2 = BeautifulSoup(res2.text,"html.parser")
links2 = (soup2.select('.storylink'))
subtext2 = soup2.select('.subtext')
# Combine both pages; the two lists stay index-aligned per story.
mega_links = links + links2
mega_subtext = subtext + subtext2
def sort_byvotes(hn_list):
    """Return the stories ordered from most to fewest votes (stable for ties)."""
    # Negating the integer key sorts descending while keeping the original
    # relative order of equal-vote entries, same as reverse=True would.
    return sorted(hn_list, key=lambda story: -story['votes'])
def custom_hn(links, subtext, min_points=100):
    """Pair story links with their scores and keep only the popular ones.

    Args:
        links: elements matched by '.storylink' (story title anchors).
        subtext: elements matched by '.subtext', index-aligned with links.
        min_points: minimum score a story needs to be kept. The default of
            100 matches the original hard-coded `points > 99` threshold.

    Returns:
        List of {'title', 'link', 'votes'} dicts sorted by votes
        (descending) via sort_byvotes.
    """
    hn = []
    for i, item in enumerate(links):
        title = item.getText()
        href = item.get("href", None)
        vote = subtext[i].select('.score')
        # Some rows (e.g. job postings) carry no score element at all.
        if vote:
            points = int(vote[0].getText().replace(' points', ''))
            if points >= min_points:
                hn.append({'title': title, 'link': href, 'votes': points})
    return sort_byvotes(hn)
pprint.pprint(custom_hn(mega_links,mega_subtext)) | UTF-8 | Python | false | false | 1,050 | py | 16 | scraper.py | 12 | 0.692381 | 0.679048 | 0 | 41 | 24.634146 | 64 |
swastikkalyane27/assinmentNo1 | 2,422,361,603,815 | d5e01d70cb9f1065ea6b947d157df3962090232d | aeddc21a0017a03ea0b0c96fdf032f37510f4ad1 | /60. keys having multiple input.py | 2d710c91598a813b781ad63da2c5878b40e4f7cc | [] | no_license | https://github.com/swastikkalyane27/assinmentNo1 | 64df7a3ad5e0c8c32ead9017b2d6d5b19b702800 | 0c669a59695910909fd6da2e2e6e07d82b2f5855 | refs/heads/master | "2023-06-09T15:51:46.265821" | "2021-06-26T07:17:39" | "2021-06-26T07:17:39" | 376,775,055 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# Demonstrate dict keys made of several values: writing my_dict[a, b, c]
# really stores the single tuple (a, b, c) as the key.
my_dict = {}
a, b, c = 15, 26, 38
my_dict[(a, b, c)] = a + b - c
a, b, c = 5, 4, 11
my_dict[(a, b, c)] = a + b - c
print("The dictionary is :")
print(my_dict)
# Dictionary keyed by (latitude, longitude) pairs, mapping to place names.
places = {("19.07'53.2", "72.54'51.0"):"Mumbai", ("28.33'34.1", "77.06'16.6"):"Delhi"}
print(places)
print()
# Walk the multi-part keys, splitting them into separate parallel lists.
lat = []
long = []
plc = []
for latitude, longitude in places:
    lat.append(latitude)
    long.append(longitude)
    plc.append(places[latitude, longitude])
print(lat)
print(long)
print(plc)
# In[ ]:
| UTF-8 | Python | false | false | 633 | py | 34 | 60. keys having multiple input.py | 34 | 0.587678 | 0.516588 | 0 | 44 | 13.295455 | 86 |
Neural-Finance/fully-connected-nn | 8,143,258,039,272 | a7a3044216a621e6e5cecf496043d860ba6c1d10 | 3ed14fbdbfdd4b247b823bd63f6436da5f6e88e5 | /data_handlers.py | 64e8be3ab5758ad2db051c060323f39fb950042a | [
"MIT"
] | permissive | https://github.com/Neural-Finance/fully-connected-nn | 7e4f10790aa86227870ab6499aab9982e2748487 | 831ecedc632d2a68bc6e28d7b335985460907ce3 | refs/heads/master | "2021-06-23T05:27:40.900170" | "2017-07-28T11:53:48" | "2017-07-28T11:53:48" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.cross_validation import train_test_split
def prepare_dataset(dataset):
    """Label-encode every column of a string-valued dataframe.

    A single LabelEncoder is refit on each column in turn, replacing its
    values with integer codes. The dataframe is modified in place (no copy
    is taken) and also returned.

    Input: pandas DataFrame
    Output: the same DataFrame, label-encoded
    """
    encoder = LabelEncoder()
    for col_name in list(dataset.columns.values):
        dataset[col_name] = encoder.fit_transform(dataset[col_name])
    return dataset
def labels_vs_features(dataset):
    """Split a dataframe into features X and labels y.

    The 'class' column becomes the label vector; every other column (in
    original order) is a feature. Originally written for the mushrooms
    classification dataset.

    Args:
        dataset: pandas DataFrame containing a 'class' column.

    Returns:
        (X, y): X as a numpy array of shape (n_samples, n_features) and y
        as the numpy array of labels.
    """
    y = dataset['class']
    # Collect every non-label column; dead X/y list initialisers and the
    # continue/else loop of the original are folded into one comprehension.
    X = [dataset[col] for col in dataset.columns if col != 'class']
    return np.array(X).T, np.array(y)
def data_splitter(X, y, test_size=0.2, verbose=True):
    """Split features/labels into train and test partitions.

    Args:
        X: feature matrix.
        y: label vector.
        test_size: fraction of samples held out for testing (default 0.2).
        verbose: when True, print the shape of each partition.

    Returns:
        X_train, X_test, y_train, y_test
    """
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
    if verbose:
        for name, part in (("X_train", X_train), ("y_train", y_train),
                           ("X_test", X_test), ("y_test", y_test)):
            print(name + " size -->", part.shape)
    return X_train, X_test, y_train, y_test
mkan0141/NLP-100-knock | 5,325,759,452,374 | 1bbab2a57a4fcfd7f9a201b613dc41b304debce6 | 57410d3d4a37482420323eadf6737fe4e71bfb68 | /chap02/01.py | b269d4ef444ba1c4b86ba7fcf47b331b694a798a | [] | no_license | https://github.com/mkan0141/NLP-100-knock | d39fe825b1a43335123e0c3e6a28038af48148e0 | f0251306029b220e718ad5606b96cb8e7d311a50 | refs/heads/master | "2020-03-16T18:49:18.878389" | "2019-03-29T13:09:47" | "2019-03-29T13:09:47" | 132,888,065 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | str = 'パタトクカシーー'
# Slice with step 2 keeps the characters at even indices (0, 2, 4, ...) of
# the katakana string assigned above.
# NOTE(review): the variable shadows the built-in name `str`.
print(str[::2])
| UTF-8 | Python | false | false | 49 | py | 44 | 01.py | 43 | 0.606061 | 0.575758 | 0 | 2 | 15.5 | 16 |
amm0day/FastApi-template | 19,456,201,874,344 | 0a8800d1f348bb84a6b7a491dca3468d4dca0816 | c65498118ad9b21d8d2f36f4c25ebcdb042b97eb | /app/core/config.py | 5e7d32080d78529199f2e39922891b2e9babe8ea | [] | no_license | https://github.com/amm0day/FastApi-template | 76fead044c0955ea2c96a7ed8f0d8273d19d1d6f | 287fc3e365ee1a2dddf9604d75d55285ed0b4135 | refs/heads/master | "2023-02-19T13:15:21.339048" | "2022-11-13T22:46:47" | "2022-11-13T22:50:29" | 328,949,798 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import logging
import sys
from loguru import logger
from starlette.config import Config
from app.core.logging import InterceptHandler
# Application settings, read from a local ".env" file via starlette's Config.
config = Config(".env")
VERSION = config("VERSION", cast=str, default="0.0.1")
DEBUG: bool = config("DEBUG", cast=bool, default=False)
# All routes of this service are mounted under this prefix.
API_PREFIX: str = "/weather"
PROJECT_NAME: str = config("PROJECT_NAME", default="Weather api")
CONNECTION_REDIS: str = config("CONNECTION_REDIS", default="redis://redis:6379/?db=0")
# NOTE(review): the default below looks like a real positionstack API key
# committed to source control — it should be rotated and supplied only via
# the environment.
API_KEY_POSITIONSTACK: str = config("API_KEY_POSITIONSTACK", default="e7ceb1186afb8e5070a5e33a34812f16")
# logging configuration
LOGGING_LEVEL = logging.DEBUG if DEBUG else logging.INFO
LOGGERS = ("uvicorn.asgi", "uvicorn.access")
# Route the root logger and uvicorn's loggers through the project's
# InterceptHandler so their records end up in loguru.
logging.getLogger().handlers = [InterceptHandler()]
for logger_name in LOGGERS:
    logging_logger = logging.getLogger(logger_name)
    logging_logger.handlers = [InterceptHandler(level=LOGGING_LEVEL)]
logger.configure(handlers=[{"sink": sys.stderr, "level": LOGGING_LEVEL}])
| UTF-8 | Python | false | false | 967 | py | 16 | config.py | 12 | 0.752844 | 0.723888 | 0 | 29 | 32.344828 | 104 |
gfYaya/yayaPython | 17,678,085,427,978 | 5f82040fd74c8ded485db3a6b82cb40b6fbcdc81 | 1f9e8dfbe7e33e19f31865d84ebbdccecaa071ea | /fluent python/chapter 7 Function Decorators and Closures/Closures.py | 4a50869cb07206eaad52717f222b82d8baef42dd | [] | no_license | https://github.com/gfYaya/yayaPython | 09fe2aaaa090d78f2f1322843201c7f7aaff3afc | c994f5652ef7c747a2d20d461fdb99471d626f36 | refs/heads/master | "2020-07-09T18:09:53.719306" | "2017-08-28T07:21:57" | "2017-08-28T07:21:57" | 74,029,034 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding = utf-8
# Example 7-8. average_oo.py: A class to calculate a running average
class Averager:
def __init__(self):
self.series = []
def __call__(self, new_value):
self.series.append(new_value)
total = sum(self.series)
return total / len(self.series)
avg = Averager()
print(avg(10))
print(avg(11))
print(avg(12))
# Example 7-9. average.py: A higher-order function to calculate a running average
def make_average():
series = []
def averager(new_value):
series.append(new_value)
total = sum(series)
return total / len(series)
return averager
# Example 7-10. Testing Example 7-9
avg = make_average()
print(avg(10))
print(avg(11))
print(avg(12))
# Example 7-11. Inspecting the function created by make_averager in Example 7-9
print(avg.__code__.co_varnames)
print(avg.__code__.co_freevars)
# Example 7-12. Continuing from Example 7-10
print(avg.__closure__)
print(avg.__closure__[0].cell_contents)
| UTF-8 | Python | false | false | 992 | py | 111 | Closures.py | 108 | 0.652218 | 0.617944 | 0 | 48 | 19.666667 | 81 |
jessicagamio/tree_finder | 14,010,183,357,638 | 764bc07396e7e351eac75d374dd8b85a62535632 | 1a84492a33b8dd85a7f55ec18d92cef74d65d174 | /seed.py | 90acdba3737239fbbd9110f222115b4d803c156a | [] | no_license | https://github.com/jessicagamio/tree_finder | f20ada3306d4ff16356efe36bd03ef5fa1195381 | 05308d95d62187206a0aac5eff85c44667b072e5 | refs/heads/master | "2022-12-09T18:41:33.670223" | "2020-01-16T02:34:52" | "2020-01-16T02:34:52" | 201,641,804 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from sqlalchemy import func
from model import TreeSpecies,Tree,User, Hugs, connect_to_db, db
from server import app
import json
if __name__ == "__main__":
connect_to_db(app)
db.create_all()
def create_species():
"""create tree_species table"""
tree_species = {}
# Create TreeSpecies
scientific_name = 'Platanus x hispanica'
common_name = 'London Plane'
shape = 'palmate'
margin = 'entire'
venation = 'pinnate'
factoid = 'Dicidous Tree. Bare through November-March. Member of the Sycamore Family'
image = '/static/img/platanus_x_hispanica.jpg'
platanus_x_hispanica = TreeSpecies(sci_name=scientific_name,
common_name=common_name,
shape=shape,
factoid=factoid,
margin=margin,
venation=venation,
image=image)
db.session.add(platanus_x_hispanica)
db.session.commit()
tree_species[scientific_name] = platanus_x_hispanica
scientific_name = 'Magnolia grandiflora'
common_name= 'Magnolia'
shape = 'obtuse'
margin = 'entire'
venation = 'pinnate'
factoid = 'Evergreen. Produces long lasting white, fragrant flowers.'
image = '/static/img/magnolia_grandiflora.jpg'
Magnolia_grandiflora = TreeSpecies(sci_name=scientific_name,
common_name=common_name,
shape=shape,
factoid=factoid,
margin=margin,
venation=venation,
image=image)
db.session.add(Magnolia_grandiflora)
db.session.commit()
tree_species[scientific_name]= Magnolia_grandiflora
scientific_name = 'Prunus cerasifera'
common_name = 'Purple-Leaf Plum'
shape = 'obtuse'
margin = 'serrated'
venation = 'Cross Venulate'
factoid = 'Blooms favorite flowers in the spring. Attracts bees.'
image = '/static/img/purple_leaf_plum.jpg'
Prunus_cerasifera = TreeSpecies(sci_name=scientific_name,
common_name=common_name,
shape=shape,
factoid=factoid,
margin=margin,
venation=venation,
image=image)
db.session.add(Prunus_cerasifera)
db.session.commit()
tree_species[scientific_name]= Prunus_cerasifera
scientific_name = 'Tristaniopsis laurina'
common_name = 'Small-Leaf Tristania'
shape = 'lanceolate'
margin = 'entire'
venation = 'pinnate'
factoid = 'Originates from Australia. Disease and pest resistant.'
image = '/static/img/tristaniopsis.jpg'
Tristaniopsis_laurina = TreeSpecies(sci_name=scientific_name,
common_name=common_name,
shape=shape,
factoid=factoid,
margin=margin,
venation=venation,
image=image)
db.session.add(Tristaniopsis_laurina)
db.session.commit()
tree_species[scientific_name]=Tristaniopsis_laurina
scientific_name = 'Ginkgo biloba'
common_name= 'Maidenhair Tree'
shape = 'flabellate'
margin = 'entire'
venation = 'parallel'
factoid = 'Can live as long as 3000 years. Leaves turn yellow in the fall.'
image = '/static/img/ginkgo.jpg'
Ficus_microcarpa_nitida = TreeSpecies(sci_name=scientific_name,
common_name=common_name,
shape=shape,
factoid=factoid,
margin=margin,
venation=venation,
image=image)
db.session.add(Ficus_microcarpa_nitida)
db.session.commit()
tree_species[scientific_name]= Ficus_microcarpa_nitida
return tree_species
def create_trees(tree_species):
"""create trees table"""
TREE_DATA = "trees_sf/rows.json"
trees_json = open(TREE_DATA).read()
tree_info = json.loads(trees_json)
entries = tree_info['data'].__len__()
for i in range(entries):
tree_type =tree_info['data'][i][10]
# split out the scientific and common name from data
scientific_name, common_name = tree_type.split('::')
scientific_name = scientific_name.strip()
latitude = tree_info['data'][i][23]
longitude = tree_info['data'][i][24]
if latitude == None or longitude == None or scientific_name not in ['Platanus x hispanica','Magnolia grandiflora','Prunus cerasifera','Tristaniopsis laurina','Ginkgo biloba']:
continue
tree = Tree(lat= float(latitude), lon= float(longitude), tree_species= tree_species[scientific_name])
db.session.add(tree)
def create_user(user):
""" Create user """
username,password,firstname,lastname = user
new_user = User(username=username, password=password, firstname=firstname,lastname=lastname)
db.session.add(new_user)
def create_hugs(username, tree_species):
"""create Hugs"""
tree_species = create_species()
create_trees(tree_species)
create_user(['jondoe','abc123', 'Jon','Doe'])
db.session.commit()
| UTF-8 | Python | false | false | 5,010 | py | 20 | seed.py | 9 | 0.621557 | 0.618962 | 0 | 161 | 30.093168 | 183 |
apsureda/gcp-group-manager | 11,166,914,970,602 | 095e9059c3641324ccf5d4a10568b71f32440cd8 | f0ae6f71dc9f7debe4c60d6b71dd82043018a2cc | /scripts/tf_generator.py | 9b186aec2f46ebd270ebb619292db730e0190f12 | [
"Apache-2.0"
] | permissive | https://github.com/apsureda/gcp-group-manager | 69c1bbd817e5ffd926480c6a77461cd99dac32c0 | 01088c4bf07dbf47a7bd134973f037055ee28d72 | refs/heads/main | "2023-07-02T04:01:18.469332" | "2021-07-30T09:16:52" | "2021-07-30T09:16:52" | 390,943,144 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
"""Generate Terraform configuration files from a set of predefined templates.
Copyright 2021 Google LLC. This software is provided as-is, without warranty or
representation for any use or purpose. Your use of it is subject to your
agreement with Google.
"""
import os
import sys
import shutil
import argparse
import logging
import yaml
import json
import jinja2
import tf_dump
import glob
# Module-level cache of parsed config files, keyed by file path, so each
# YAML config is read and validated at most once per process (see get_config).
conf_cache = {}
def parse_args(argv):
    """Build the CLI parser and parse *argv* (argument list, no program name)."""
    arg_parser = argparse.ArgumentParser()
    # Options shared by every sub-command.
    arg_parser.add_argument('--template-dir', help='location of tf template files')
    arg_parser.add_argument('--tf-out', help='directory where the generated Terraform files should be written')
    arg_parser.add_argument('--config', help='yaml file containing the common configuration settings')
    arg_parser.add_argument(
        '--revert-forced-updates',
        action='store_true',
        help='set to false any existing force_updates flag found in requests file')
    arg_parser.add_argument('--resources', help='yaml file containing the resources to create')
    arg_parser.add_argument(
        '--log-level',
        required=False,
        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
        default='INFO',
        help='set log level')
    # Sub-commands: each parser binds its handler through the 'func' default.
    subparsers = arg_parser.add_subparsers(help='available commands')
    ci_groups = subparsers.add_parser('ci-groups', help='generates cloud identity groups')
    ci_groups.set_defaults(func=cmd_ci_groups)
    return arg_parser.parse_args(argv)
def get_config(config_file, mandatory_fields):
    """Load a (possibly multi-document) YAML config file into a single dict.

    Results are memoized in the module-level ``conf_cache`` so each file is
    parsed at most once per process.

    Args:
      config_file: path of the YAML file to read.
      mandatory_fields: optional iterable of keys that must be present.

    Returns:
      dict mapping config keys to values, merged across all YAML documents.

    Exits:
      Calls ``sys.exit(1)`` on a duplicate key or a missing mandatory field
      (original CLI-oriented behavior preserved).
    """
    # Serve from cache when this file was already parsed.
    global conf_cache
    if config_file in conf_cache:
        return conf_cache[config_file]
    config_params = {}
    # BUGFIX: the file handle was previously opened and never closed; 'with'
    # guarantees it is released. yaml.load_all() is lazy, so the documents
    # must be consumed while the stream is still open.
    with open(config_file, "r") as stream:
        config_stream = yaml.load_all(stream, Loader=yaml.FullLoader)
        # YAML files can contain multiple "documents"; merge them into one
        # map, rejecting keys defined more than once.
        for doc in config_stream:
            for k, v in doc.items():
                if k in config_params:
                    logging.error('\'%s\' defined twice in config file %s' % (k, config_file))
                    sys.exit(1)
                config_params[k] = v
    # Check that every required parameter is present.
    if mandatory_fields and len(mandatory_fields) > 0:
        for k in mandatory_fields:
            if k not in config_params:
                logging.error('missing required param in config file: \'%s\'' % (k))
                sys.exit(1)
    # Memoize for subsequent calls.
    conf_cache[config_file] = config_params
    return config_params
def generate_tf_files(template_dir, tf_out, tpl_type, context, replace, prefix=None):
    """
    Generates terraform files given a context and template folder.

    Returns True when templates were applied, False when no templates exist
    for the requested type or a previous configuration was kept (replace=False).

    - template_dir: the folder containing the jinja templates to use.
    - tf_out: the folder where the resulting terraform files will be written.
    - tpl_type: the type of template to use (sub-folder of the templates folder).
    - context: the content object that will be passed to the jinja templates.
    - replace: remove previous output files before writing new ones.
    - prefix: used when generating several terraform configurations in the same
      output folder (currently used for projects).
    """
    # Initialize the jinja2 environment rooted at the template directory.
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir), trim_blocks=True)
    # Bail out early if there are no templates for the requested type.
    template_list = []
    if os.path.isdir(template_dir + '/' + tpl_type):
        template_list = os.listdir(template_dir + '/' + tpl_type)
    if len(template_list) == 0:
        logging.warning('no templates found for request of type \'%s\'' % (tpl_type))
        return False
    # Folder where the tf files will be generated (optionally nested under prefix).
    out_folder = tf_out
    if prefix:
        out_folder += '/' + prefix
    # If replace was requested, wipe previous output first. If the folder
    # already exists and replace is off, keep the existing config untouched.
    if os.path.isdir(out_folder):
        if replace:
            shutil.rmtree(out_folder)
            logging.debug('removing previous config: \'%s\'' %(out_folder))
            # The parent necessarily exists (the folder existed), so mkdir suffices.
            os.mkdir(out_folder)
        else:
            logging.info('ignoring request \'%s\'. Found previous terraform config folder.' % (out_folder))
            return False
    else:
        os.makedirs(out_folder)
    # Render the 'common' templates first, then the type-specific ones.
    logging.info('using context: %s' % (json.dumps(context, sort_keys=True)))
    for ttype in ['common', tpl_type]:
        template_list = []
        if os.path.isdir(template_dir + '/' + ttype):
            template_list = os.listdir(template_dir + '/' + ttype)
        else:
            continue
        for tplfile in template_list:
            # Skip hidden/junk files (e.g. .DS_Store, editor droppings).
            if tplfile.startswith('.'):
                continue
            template = env.get_template(ttype + '/' + tplfile)
            out_file_name = out_folder + '/' + tplfile
            # Strip the jinja2 '.j2' extension from the output file name.
            if out_file_name.endswith('.j2'):
                out_file_name = out_file_name[:-3]
            logging.debug('generating config file: \'%s\'' % (out_file_name))
            rendered = template.render(context=context).strip()
            if len(rendered) > 0:
                out_file = open(out_file_name, "w")
                out_file.write(rendered)
                out_file.close()
            elif os.path.exists(out_file_name):
                # A template that renders to nothing removes its stale output.
                logging.debug('empty output. Remving previous file: \'%s\'' % (out_file_name))
                os.remove(out_file_name)
    return True
def cmd_ci_groups(args):
    """
    Generates the terraform files for Cloud Identity groups based on the group
    folder hierarchy.

    Walks args.resources for *.yaml group definition files, derives a unique
    group id from the first two folder components plus the configured domain,
    and writes one .tf file per source yaml file (plus a shared 'common'
    Terraform config per output folder). Returns True on success, False on a
    bad resources path or an unnamed group definition.
    """
    def tf_group(group):
        """
        Generates the terraform code for a google_cloud_identity_group resource.
        Uses the enclosing scope's 'parent' as the Cloud Identity parent.
        """
        tf_block = tf_dump.TFBlock(block_type='resource', labels=['google_cloud_identity_group', group['full_name']])
        tf_block.add_element('display_name', '"%s"' % (group['full_name']))
        tf_block.add_element('initial_group_config', '"WITH_INITIAL_OWNER"')
        tf_block.add_element('parent', '"%s"' % (parent))
        tf_key_block = tf_dump.TFBlock(block_type='group_key')
        tf_key_block.add_element('id', '"%s"' % (group['unique_id']))
        tf_block.add_block(tf_key_block)
        # Mandatory label that marks the group as a discussion forum.
        labels = {
            '"cloudidentity.googleapis.com/groups.discussion_forum"' : '""'
        }
        tf_block.add_element('labels', labels)
        return tf_block.dump_tf()

    def tf_member(group_id, member_id, roles):
        """
        Generates the terraform code for one group membership with its roles.
        """
        member_id = member_id.lower()
        tf_block = tf_dump.TFBlock(block_type='resource', labels=['google_cloud_identity_group_membership', group_id + '_' + member_id.lower().replace('@', '_').replace('.', '_')])
        tf_block.add_element('group', 'google_cloud_identity_group.%s.id' % (group_id))
        tf_key_block = tf_dump.TFBlock(block_type='preferred_member_key')
        tf_key_block.add_element('id', '"%s"' % (member_id))
        tf_block.add_block(tf_key_block)
        # One 'roles' sub-block per role (MEMBER / MANAGER / OWNER).
        for role in roles:
            tf_roles_block = tf_dump.TFBlock(block_type='roles')
            tf_roles_block.add_element('name', '"%s"' % (role))
            tf_block.add_block(tf_roles_block)
        return tf_block.dump_tf()

    # Check that the provided resources path is an existing folder.
    if not os.path.exists(args.resources) or not os.path.isdir(args.resources):
        logging.error('the provided resource path does not exist or is not a folder: ' + args.resources)
        return False
    all_groups = {}
    groups_by_src = {}
    # Collect every group configuration file under the resources tree.
    conf_files = glob.glob(args.resources + '/**/*.yaml', recursive=True)
    # Read the shared configuration file.
    tf_config = get_config(args.config, ['gcs_bucket', 'group_domain', 'group_parent', 'tf_service_account'])
    # Number of folder components used to build the group name prefix.
    prefix_length = 2
    domain = tf_config['group_domain']
    parent = tf_config['group_parent']
    rs_bucket = tf_config['gcs_bucket']
    tf_sa = tf_config['tf_service_account']
    # Parse all group config files, de-duplicating by unique id.
    for conf_file in conf_files:
        # BUGFIX: the read handle was previously never closed.
        with open(conf_file, "r") as f:
            resources = yaml.load(f, Loader=yaml.FullLoader)
        # Ignore empty files.
        if not resources:
            continue
        for group in resources:
            if 'name' not in group:
                logging.error('group definitions must have a name: ' + conf_file)
                return False
            g_name = group['name']
            # The path of the file: remove the root folder and the file name.
            g_path = conf_file[len(args.resources)+1:conf_file.rfind('/')]
            g_prefix = '-'.join(g_path.lower().split('/')[0:prefix_length])
            # Unique ID for the group: prefix + name, qualified by the domain.
            group['full_name'] = g_prefix + '-' + g_name
            g_unique_id = group['full_name'] + '@' + domain
            group['unique_id'] = g_unique_id
            group['path'] = g_path
            group['conf'] = conf_file
            # Ignore the entry if this unique id was already defined.
            if g_unique_id in all_groups:
                # TODO: need to keep a record of previously added groups because a new duplicate could appear before the existing one.
                # BUGFIX: logging.warn is deprecated; use logging.warning.
                logging.warning('group ' + g_unique_id + ' was already defined in ' + all_groups[g_unique_id]['conf'] + '. Ignoring entry from ' + conf_file)
                continue
            # Index the group by unique id and by source file.
            all_groups[g_unique_id] = group
            if conf_file in groups_by_src:
                groups_by_src[conf_file].append(group)
            else:
                groups_by_src[conf_file] = [group]
    # Create a terraform configuration for each one of the config folders.
    refreshed_configs = {}
    for conf_file in groups_by_src.keys():
        groups = groups_by_src[conf_file]
        # The path of the file: remove the root folder and the file name.
        conf_path = conf_file[len(args.resources)+1:conf_file.rfind('/')]
        out_dir = args.tf_out + '/' + conf_path
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        elif out_dir not in refreshed_configs:
            # NOTE(review): the 'common' Terraform config is only generated
            # when out_dir already existed (elif branch); freshly created
            # folders never receive it. Confirm whether this is intentional.
            commons_context = {
                'gcs_bucket' : rs_bucket,
                'gcs_prefix' : 'ci_groups/' + conf_path,
                'tf_sa' : tf_sa
            }
            generate_tf_files(args.template_dir, out_dir, 'common', commons_context, True)
            refreshed_configs[out_dir] = True
        # The resulting file name: replace .yaml by .tf.
        tf_file_name = conf_file[conf_file.rfind('/')+1:conf_file.rfind('.')] + '.tf'
        tf_output = ''
        for group in groups:
            tf_output += tf_group(group) + '\n\n'
            # Consolidate the member list, since each member can hold several roles.
            all_members = {}
            # Everyone (members, managers, owners) gets the base MEMBER role.
            for mtype in ['members', 'managers', 'owners']:
                for member in group[mtype]:
                    if member not in all_members:
                        all_members[member] = ['MEMBER']
            for member in group['owners']:
                all_members[member].append('OWNER')
            for member in group['managers']:
                all_members[member].append('MANAGER')
            for member in all_members:
                tf_output += tf_member(group['full_name'], member, all_members[member]) + '\n\n'
        # Write the tf code for this source file.
        # BUGFIX: ensure the output handle is closed even on write errors.
        with open(out_dir + '/' + tf_file_name, 'w') as f:
            f.write(tf_output)
    return True
if __name__ == '__main__':
    # Parse the CLI, configure logging, then dispatch to the selected sub-command.
    cli_args = parse_args(sys.argv[1:])
    logging.getLogger().setLevel(getattr(logging, cli_args.log_level))
    logging.basicConfig(format='%(asctime)-15s %(levelname)s %(message)s')
    cli_args.func(cli_args)
| UTF-8 | Python | false | false | 11,478 | py | 33 | tf_generator.py | 7 | 0.658216 | 0.656125 | 0 | 283 | 39.558304 | 176 |
Vagacoder/Python_for_everyone | 16,939,351,031,736 | eea9344b03ebe54c9727d8ec1539e001fe3db9e0 | 256746f29f9995accd4fee35b9b8981264ca2e37 | /Ch12/insertionsort.py | 688c85b05a766efe381a80b7d72be8b3c9516632 | [] | no_license | https://github.com/Vagacoder/Python_for_everyone | adadd55561b2200d461afbc1752157ad7326698e | b2a1d1dcbc3cce5499ecc68447e1a04a8e59dc66 | refs/heads/master | "2021-06-22T00:26:02.169461" | "2019-05-25T16:06:04" | "2019-05-25T16:06:04" | 114,508,951 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ##
# The insertionSort function sorts a list, using the insertion sort algorithm.
#
# Sorts a list, using insertion sort.
# @param values the list to sort
#
def insertionSort(values):
for i in range(1, len(values)):
next = values[i]
# Move all larger elements up.
j = i
while j > 0 and values[j - 1] > next:
values[j] = values[j - 1]
j = j - 1
# Insert the element
values[j] = next
| UTF-8 | Python | false | false | 474 | py | 505 | insertionsort.py | 453 | 0.552743 | 0.542194 | 0 | 21 | 21.571429 | 79 |
HiTpXRD/on_the_fly_assessment | 18,365,280,158,054 | 2f66a666a07429501d35fc0e96ae606f94819333 | 687e880be98bbed202f37711c9cc55d4a88d9062 | /peak_detection_ridge_finding_single_image.py | b956886f5c641b0bc9846975b0f1595b3433a23a | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | https://github.com/HiTpXRD/on_the_fly_assessment | 3faac6682b4a9eef87b7425fa7d9894ea674aeb9 | efc58b1a6cfe2b21d00a957a6c7bb59158404149 | refs/heads/master | "2020-04-06T04:01:21.119064" | "2017-02-11T07:40:38" | "2017-02-11T07:40:38" | 83,074,501 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Nov 17 2016
@author: Fang Ren
"""
from scipy.signal import cwt, ricker, find_peaks_cwt
import matplotlib.pyplot as plt
import numpy as np
import glob
import os
from os.path import basename
from scipy import ndimage
path = 'C:\\Research_FangRen\\Data\\July2016\\Sample1\\Processed_old\\'
file = path + 'Sample1_24x24_t30_0001_1D.csv'
data = np.genfromtxt(file, delimiter = ',')
Qlist = data[:,0]
IntAve = data[:,1]
a1 = 1
a2 = 30
widths = np.arange(a1, a2)
cwt_coefficient = cwt(IntAve, ricker, widths)
peaks = find_peaks_cwt(IntAve, np.arange(a1, a2, 0.05))
peaks = peaks[1:-1]
h = 15 # number of points skipped in finite differences
peaks_accepted=[]
window = h
for peak in peaks:
filter = np.nan_to_num(np.sqrt(-(IntAve[2*h:]-2*IntAve[h:-h]+IntAve[0:-2*h])))
filterwindow = filter[max(peak-h - window, 0):min(peak-h + window, len(filter))]
spectrawindow = IntAve[max(peak - window, h):min(peak + window, len(filter))]
try:
if np.any(filterwindow>spectrawindow/200): # np.percentile(filter,85) is also a good threshold
peaks_accepted.append(peak)
except ValueError:
continue
plt.figure(1)
plt.subplot((311))
plt.pcolormesh(Qlist, widths, cwt_coefficient)
plt.plot(Qlist, [a1]* len(Qlist), 'r--')
plt.plot(Qlist, [a2]* len(Qlist), 'r--')
plt.xlim(0.65, 6.45)
plt.ylim(a1, a2)
# plt.clim(np.nanmin(np.log(cwt_coefficient)), np.nanmax(np.log(cwt_coefficient)))
plt.subplot((312))
plt.plot(Qlist[peaks_accepted], IntAve[peaks_accepted], linestyle = 'None', c = 'r', marker = 'o', markersize = 10)
plt.plot(Qlist[peaks], IntAve[peaks], linestyle = 'None', c = 'b', marker = 'o', markersize = 3)
plt.plot(Qlist, IntAve)
plt.xlim(0.65, 6.45)
plt.subplot((313))
plt.plot(Qlist[15:-15], filter)
plt.xlim(0.65, 6.45)
| UTF-8 | Python | false | false | 1,810 | py | 6 | peak_detection_ridge_finding_single_image.py | 5 | 0.671823 | 0.621547 | 0 | 66 | 26.424242 | 115 |
darghex/PARCES-WS | 6,768,868,470,216 | ea79c50fd22aba1bcb4b879ae138bfb2f5fffb18 | c7b44381f18d87cd96f4c25e97af91c1a2e379fd | /estudiante_views.py | fcccc547dfcab4b0304ea81a08fb342b1c14c76f | [
"Apache-2.0"
] | permissive | https://github.com/darghex/PARCES-WS | 1d479386ee2fb8b7614fe5a28747f65926362f6a | 494ab92f7d894cac2d5cdc25092f1a777622ba62 | refs/heads/master | "2016-09-08T02:33:14.559217" | "2015-01-24T14:04:28" | "2015-01-24T14:04:28" | 29,776,256 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
from flask import request
from main import app, session, IES_REST_URL
from db import instancias_curso, propuestas_matricula, actividades, calificaciones, asignaciones
import json
import requests
from sqlalchemy import and_
@app.route("/instanciascurso/<int:curso>/",methods=['GET'])
def view_instancias_curso(curso):
"""
Lista las clases registradas de una asignatura
"""
instancias = session.query(instancias_curso).filter_by(curso = curso)
response = []
for instancia in instancias:
response.append ({'id': instancia.id, 'tema': instancia.tema, 'corte': instancia.corte , 'fecha': instancia.fecha})
return json.dumps(response)
@app.route("/estudiantes/<id_estudiante>/propuesta",methods=['GET'])
def est_read_propuestas_matricula(id_estudiante):
"""
@retorna un ok en caso de que se halla ejecutado la operacion
@except status 500 en caso de presentar algun error
"""
propuestas = session.query(propuestas_matricula.id, propuestas_matricula.tutor, propuestas_matricula.estado).filter_by(estudiante = id_estudiante)
response = []
for propuesta in propuestas:
estado = True if propuesta.estado == 1 else False
response.append({'id': propuesta.id, 'tutor': propuesta.tutor, 'estado': estado})
return json.dumps({"propuesta": response})
@app.route("/estudiantes/<id_estudiante>/curso/<int:curso>/actividades",methods=['GET'])
def view_actividades(id_estudiante, curso):
"""
@retorna un ok en caso de que se halla ejecutado la operacion
@except status 500 en caso de presentar algun error
"""
try:
instancias = session.query(instancias_curso.id, instancias_curso.tema, instancias_curso.corte, instancias_curso.fecha).filter_by(curso = curso).order_by("fecha desc")
tema = []
for instancia_db in instancias:
actividad = []
actividades_db = session.query(actividades.descripcion, calificaciones.valor).filter_by(instancias_curso_id = instancia_db.id).outerjoin(calificaciones, and_(actividades.id == calificaciones.actividades_id , calificaciones.estudiante == id_estudiante ))
for actividad_db in actividades_db:
actividad.append({'actividad': actividad_db.descripcion, 'calificacion': actividad_db.valor})
tema.append({'tema': instancia_db.tema, 'id': instancia_db.id ,'corte': instancia_db.corte, 'fecha': str(instancia_db.fecha), 'actividades': actividad })
return json.dumps({'instancias_curso': {'instancia': tema }})
except Exception, e:
return "Operacion No se pudo llevar a cabo", 500
return "ok"
@app.route("/estudiantes/<id_estudiante>/tutor",methods=['GET'])
def get_tutor(id_estudiante):
"""
Carga el perfil del tutor
@return: json {
}
"""
asignaciones_desc = session.query(asignaciones.tutor).filter_by(estudiante = id_estudiante).first()
parametros = { 'codigo': asignaciones_desc[0], 'token': request.args.get('token') }
rq = requests.get( IES_REST_URL+"/profesor", params = parametros)
return json.dumps(rq.json)
| UTF-8 | Python | false | false | 2,969 | py | 9 | estudiante_views.py | 8 | 0.72516 | 0.721455 | 0 | 96 | 29.927083 | 256 |
brianmanderson/Local_Recurrence_Work | 13,821,204,772,262 | 987bc49c10e168159cd03a38d5c0832949cff4e5 | f79cb10d124eb74970a3efb2dbb1e69e7f2afbce | /Outcome_Analysis/PreProcessingTools/RaystationTools/Update_Exam_Names_Local.py | b2d804052df4b0b052b7f8ca2888873ef04de99b | [] | no_license | https://github.com/brianmanderson/Local_Recurrence_Work | 6225580415043a8d96ac5d56b784183f7779f9df | 92a7fb3cdccf0fea00573bb96ceec12aed4d62c5 | refs/heads/main | "2021-06-22T21:47:17.399210" | "2021-05-27T16:40:28" | "2021-05-27T16:40:28" | 226,168,022 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = 'Brian M Anderson'
# Created on 4/14/2021
"""
This is code created to update the exam names in our excel sheets and folder paths
Exam names changed after the migration from General database to Brocklab database
This script will create a text-file in each folder with the SeriesInstanceUID as well as what exam it used to be called
"""
import os
import pydicom
def main(path=r'H:\Deeplearning_Recurrence_Work\Dicom_Exports'):
    """Record legacy exam names and SeriesInstanceUIDs for exported DICOM series.

    For every folder under *path* that contains more than 20 .dcm files
    (the heuristic for an exported image series), write two marker files
    into that folder:
      - 'Old_Exam_<folder_name>.txt' (empty; its name preserves the
        pre-migration exam name),
      - 'SeriesInstanceUID.txt' holding the SeriesInstanceUID read from one
        DICOM file of the series.

    Args:
      path: root directory to walk. Defaults to the original hard-coded
        export location, so existing callers are unaffected.

    Returns:
      None.
    """
    for root, _, files in os.walk(path):
        dicom_files = [name for name in files if name.endswith('.dcm')]
        # Only treat a folder as an exported series when it holds a
        # substantial number of DICOM slices, mirroring the original heuristic.
        if len(dicom_files) > 20:
            print(root)
            previous_exam_name = os.path.split(root)[-1]
            # Empty marker file whose name records the old exam name.
            with open(os.path.join(root, 'Old_Exam_{}.txt'.format(previous_exam_name)), 'w+'):
                pass
            # The SeriesInstanceUID is shared by all slices of one series,
            # so reading a single DICOM file is sufficient.
            ds = pydicom.read_file(os.path.join(root, dicom_files[0]))
            with open(os.path.join(root, 'SeriesInstanceUID.txt'), 'w+') as fid:
                fid.write(ds.SeriesInstanceUID)
    return None


if __name__ == '__main__':
    main()
| UTF-8 | Python | false | false | 1,184 | py | 65 | Update_Exam_Names_Local.py | 62 | 0.610642 | 0.602196 | 0 | 33 | 34.878788 | 119 |
shashank-subex/datum | 4,028,679,360,883 | 2297296a9602eaa2ce4e8a2ae242bea0fcb15b47 | 9607f45b501c62d0500536e14d134a1aca0a6982 | /datum/reader/dataset.py | 4fde60fee62d1a55b3d928eb4bda663c32123daa | [
"Apache-2.0"
] | permissive | https://github.com/shashank-subex/datum | b8fb552d4180ea3ee6345fa6bf4a2620231c7601 | 089b687fc569c8c6ce613349297997c67ce40c7a | refs/heads/master | "2023-05-28T01:00:04.701429" | "2021-06-10T11:13:38" | "2021-06-10T11:13:38" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Copyright 2020 The OpenAGI Datum Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from typing import Callable, Dict, List, Optional
import tensorflow as tf
from absl import logging
from datum.configs import ConfigBase
from datum.reader.tfrecord_reader import Reader
from datum.utils.common_utils import memoized_property
from datum.utils.types_utils import DatasetType
class Dataset():
    """Public API to read tfrecord as tf.data.Dataset.

    Args:
      path: path to the tfrecord files.
      dataset_configs: Optional configuration for data processing and reading.
    """

    def __init__(self, path: str, dataset_configs: ConfigBase):
        # Root folder holding the tfrecord shards plus the
        # 'datum_to_type_and_shape_mapping.json' metadata file.
        self._path = path
        self._dataset_configs = dataset_configs
        # Low-level reader that resolves split instructions into a tf.data pipeline.
        self._reader = Reader(self._path, self._dataset_configs.read_config)

    @property
    def dataset_configs(self) -> ConfigBase:
        """Returns current object dataset configs."""
        return self._dataset_configs

    @dataset_configs.setter
    def dataset_configs(self, configs: ConfigBase) -> None:
        """Reset dataset configs."""
        self._dataset_configs = configs

    def _read(self,
              instruction: str,
              batch_size: Optional[int] = None,
              repeat: Optional[int] = None,
              bucket_fn: Optional[Callable[[tf.train.Example], int]] = None,
              shuffle: bool = False,
              echoing: Optional[int] = None,
              full_dataset: bool = False,
              pre_batching_callback: Optional[Callable[[Dict], Dict]] = None,
              post_batching_callback: Optional[Callable[[Dict], Dict]] = None) -> DatasetType:
        """Read and process data from tfrecord files.

        Args:
          instruction: instructions to read data split. One single dataset can have data from more
            than one split.
          batch_size: batch size.
          repeat: number of times to repeat the dataset.
          bucket_fn: element length computation fn for bucketing; for sparse inputs data can be
            batched based on element length.
          shuffle: whether to shuffle examples in the dataset.
          echoing: batch echoing factor, if not None perform batch echoing.
          full_dataset: if true, return the dataset as a single batch for dataset with single element.
          pre_batching_callback: data processing to apply before batching.
          post_batching_callback: data processing to apply post batching. This function should support
            batch processing.

        Returns:
          a tf.data.Dataset object.
        """
        # Pipeline stage order: read -> cache -> per-example map -> shuffle ->
        # (bucketed | padded) batch -> batch echoing -> repeat -> batched map -> prefetch.
        dataset = self._reader.read(instruction, self._dataset_configs.shuffle_files)
        if self._dataset_configs.cache:
            # An empty/absent cache filename caches the dataset in memory.
            logging.info(f'Caching dataset to {self._dataset_configs.get("cache_filename", "memory")}')
            dataset = dataset.cache(self._dataset_configs.get('cache_filename', ''))
        if pre_batching_callback:
            logging.info('Applying pre batching callback.')
            dataset = dataset.map(pre_batching_callback)
        if shuffle:
            logging.info('Shuffling dataset examplas.')
            dataset = dataset.shuffle(
                self._dataset_configs.buffer_size,
                seed=self._dataset_configs.seed,
                reshuffle_each_iteration=self._dataset_configs.reshuffle_each_iteration)
        # Bucketing (length-grouped batching) takes precedence over plain
        # padded batching when both bucket_fn and batch_size are provided.
        if bucket_fn:
            logging.info(
                f'Using bucketing to batch data, bucket_params: {self._dataset_configs.bucket_op}')
            bucket_op = tf.data.experimental.bucket_by_sequence_length(
                bucket_fn,
                self._dataset_configs.bucket_op.bucket_boundaries,
                self._dataset_configs.bucket_op.bucket_batch_sizes,
                padded_shapes=tf.compat.v1.data.get_output_shapes(dataset),
                padding_values=None,
                pad_to_bucket_boundary=False)
            dataset = dataset.apply(bucket_op)
        elif batch_size:
            dataset = dataset.padded_batch(batch_size, padded_shapes=self.padded_shapes)
        if echoing:
            # Batch echoing: each batch is yielded `echoing` times in a row.
            dataset = dataset.flat_map(
                lambda example: tf.data.Dataset.from_tensors(example).repeat(echoing))
        if repeat:
            logging.info(f'Dataset repeat is enabled for: {repeat} times.')
            dataset = dataset.repeat(count=repeat)
        if post_batching_callback:
            # Post-batching callbacks see batched tensors.
            logging.info('Applying post batching callback.')
            dataset = dataset.map(post_batching_callback)
        dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
        if full_dataset:
            # Only valid when the pipeline yields exactly one element.
            logging.info('Returning full dataset as a single batch.')
            return tf.data.experimental.get_single_element(dataset)
        return dataset

    @memoized_property
    def padded_shapes(self) -> Dict[str, List]:
        """Returns padded shapes from dataset metadata."""
        # Shape metadata is written next to the tfrecords at export time.
        with open(os.path.join(self._path, 'datum_to_type_and_shape_mapping.json'), 'r') as json_f:
            mapping = json.load(json_f)
        padded_shapes = {}
        for key, value in mapping.items():
            # Unknown (None) per-dimension sizes let padded_batch pad each
            # dimension to the longest element in the batch; scalars get [].
            if len(value['shape']) > 0:
                padded_shapes[key] = [None] * len(value['shape'])
            else:
                padded_shapes[key] = []
        return padded_shapes

    def train_fn(self,
                 instruction: str = 'train',
                 repeat: Optional[int] = None,
                 shuffle: bool = True) -> DatasetType:
        """Get training dataset.

        Args:
          instruction: instruction on how much data to read.
          repeat: number of times to repeat the dataset.
          shuffle: if true, shuffles examples of the dataset.

        Returns:
          a tf.data.Dataset object.
        """
        # Training is the only split that uses batch echoing (when configured).
        return self._read(instruction,
                          batch_size=self._dataset_configs.batch_size_train,
                          repeat=repeat,
                          bucket_fn=self._dataset_configs.bucket_fn,
                          shuffle=shuffle,
                          echoing=self._dataset_configs.echoing,
                          full_dataset=self._dataset_configs.full_dataset,
                          pre_batching_callback=self._dataset_configs.pre_batching_callback_train,
                          post_batching_callback=self._dataset_configs.post_batching_callback_train)

    def val_fn(self,
               instruction: str = 'val',
               repeat: Optional[int] = None,
               shuffle: bool = False) -> DatasetType:
        """Get validation dataset.

        Args:
          instruction: instruction on how much data to read.
          repeat: number of times to repeat the dataset.
          shuffle: if true, shuffles examples of the dataset.

        Returns:
          a tf.data.Dataset object.
        """
        return self._read(instruction,
                          batch_size=self._dataset_configs.batch_size_val,
                          repeat=repeat,
                          bucket_fn=self._dataset_configs.bucket_fn,
                          shuffle=shuffle,
                          echoing=None,
                          full_dataset=self._dataset_configs.full_dataset,
                          pre_batching_callback=self._dataset_configs.pre_batching_callback_val,
                          post_batching_callback=self._dataset_configs.post_batching_callback_val)

    def test_fn(self,
                instruction: str = 'test',
                repeat: int = 1,
                shuffle: bool = False) -> DatasetType:
        """Get test dataset.

        Args:
          instruction: instruction on how much data to read.
          repeat: number of times to repeat the dataset.
          shuffle: if true, shuffles examples of the dataset.

        Returns:
          a tf.data.Dataset object.
        """
        return self._read(instruction,
                          batch_size=self._dataset_configs.batch_size_test,
                          repeat=repeat,
                          bucket_fn=self._dataset_configs.bucket_fn,
                          shuffle=shuffle,
                          echoing=None,
                          full_dataset=self._dataset_configs.full_dataset,
                          pre_batching_callback=self._dataset_configs.pre_batching_callback_test,
                          post_batching_callback=self._dataset_configs.post_batching_callback_test)
| UTF-8 | Python | false | false | 8,313 | py | 49 | dataset.py | 43 | 0.645375 | 0.644051 | 0 | 204 | 39.75 | 99 |
Galva101/PDF-Tools | 12,223,476,931,612 | 5ce0a547d04d7afd2319683d2bf8be86a7b920a1 | 7d03b65a782bcaf7bf93210d14c3c3f1e44a34dc | /cropMargins.py | 02e47ea21efefa75ea3d9dd28ee31ed4c27b98c0 | [
"MIT"
] | permissive | https://github.com/Galva101/PDF-Tools | 3a7459345922a03ddcb46d97f6028d6052bf562e | 6ee41433efaf1a7db497132a46c610a1c83d1bb0 | refs/heads/main | "2023-03-01T13:44:39.351147" | "2021-02-17T13:07:45" | "2021-02-17T13:07:45" | 338,324,216 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import subprocess
import os
# Directory containing this script; PDFs are discovered here.
dir_path = os.path.dirname(os.path.realpath(__file__))
def list_files(directory, extension):
    """Return a generator over the names of files in *directory* whose
    name ends with '.<extension>' (the dot is added automatically)."""
    suffix = '.' + extension
    return (name for name in os.listdir(directory) if name.endswith(suffix))
# Crop the margins of every PDF found next to this script.
pdfs = list_files(dir_path, "pdf")
for pdf in pdfs:
    print("editing " + str(pdf))
    # SECURITY FIX: build the command as an argument list instead of an
    # interpolated shell string; a filename containing shell metacharacters
    # (quotes, $, ;, &) could previously inject arbitrary commands.
    subprocess.call(['pdf-crop-margins', '-u', '-s', pdf], shell=False)
| UTF-8 | Python | false | false | 368 | py | 6 | cropMargins.py | 5 | 0.646739 | 0.646739 | 0 | 13 | 26.307692 | 76 |
Gibbo81/LearningPython | 3,135,326,151,428 | 02344fd860701b353d61272083a5c2ed62e1c87f | 60cf62347e0f4c508d428e7518e16858cf2b1d86 | /LearningPython/files31/UpperCase.py | fabc8a4c35a7842ff09eb9b1da58b559f6334e11 | [] | no_license | https://github.com/Gibbo81/LearningPython | 064df9abbb645b7041e052ed594e7650dc2d0f86 | 6ae3022aa60a9e5c0c5cf7f91f058f255911510e | refs/heads/master | "2021-03-08T19:43:53.776245" | "2017-06-28T12:16:40" | "2017-06-28T12:16:40" | 52,823,469 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | if __name__ == '__main__':
import Processor as p
else: #it take the relative position from the master!!!!!! in this case 31DesigningWithClasses.py
import files31.Processor as p
import sys
#to avoid this problem it will be batter to have a directory for all the file to do not have problem
class UpperCase(p.Processor):
def Converter(self, data):
return data.upper()
if __name__ == '__main__':
worker = UpperCase(open('.\\files31\\Righe.txt',"r"), sys.stdout) #but the file still need to be called with \\files31
worker.process()
print()
import os
print(os.getcwd()) #as you can see from the current directory
| UTF-8 | Python | false | false | 671 | py | 74 | UpperCase.py | 65 | 0.660209 | 0.648286 | 0 | 17 | 38.470588 | 124 |
dzlab/docker-aws | 7,971,459,305,516 | 79188d3e836eabf4bd3be4e72893a0d64a3825b5 | c75fbf3b63c9019a9b93232cad2f041a2455a5c4 | /demoapp/app.py | 515d992cb614d5b810f20fda4f6c119db9fe24dc | [
"MIT"
] | permissive | https://github.com/dzlab/docker-aws | e7e05806b8ad6387662b9407c0ae72b7f2afce79 | e855cd917c8ffa7d79f4d44b4b57d4d194b077c1 | refs/heads/master | "2021-01-10T12:26:56.253767" | "2016-08-26T14:39:46" | "2016-08-26T14:39:46" | 47,754,967 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask, render_template
from redis import Redis
import os
app = Flask(__name__)
# 'redis' resolves through container service discovery (linked service name).
redis = Redis(host='redis', port=6379)
# SRV_NAME identifies this app instance; each instance keeps its own
# health flag in Redis under '<SRV_NAME>_health'.
server_name = os.getenv('SRV_NAME')
server_health_key = '{0}_health'.format(server_name)
@app.route('/health/on')
def health_on():
    """Mark this instance healthy by setting its Redis health flag to 'on'."""
    redis.set(server_health_key, 'on')
    message = 'Health key {0} set to on!'.format(server_health_key)
    return message
@app.route('/health/off')
def health_off():
    """Mark this instance unhealthy by setting its Redis health flag to 'off'."""
    redis.set(server_health_key, 'off')
    message = 'Health key {0} set to off!'.format(server_health_key)
    return message
@app.route('/health/check')
def health_check():
    """Report 200/'healthy' when this instance's health flag is 'on',
    otherwise 500/'not healthy' (suitable for load-balancer health probes)."""
    health = redis.get(server_health_key)
    # BUGFIX: redis-py returns bytes under Python 3 (unless the client is
    # created with decode_responses=True), so comparing against the str 'on'
    # was always False there. Accept both representations.
    if health in (b'on', 'on'):
        return 'healthy', 200
    return 'not healthy', 500
@app.route('/')
def index():
    """Count a page hit in Redis and render the page with the new total."""
    redis.incr('hits')
    hit_count = redis.get('hits')
    return render_template('index.html', hits=hit_count)
if __name__ == '__main__':
    # Start in the healthy state by default, then serve on all interfaces
    # (required when running inside a container).
    health_on()
    app.run(host='0.0.0.0')
| UTF-8 | Python | false | false | 902 | py | 13 | app.py | 5 | 0.63969 | 0.620843 | 0 | 35 | 24.771429 | 65 |
lhammond/ee-meter | 3,667,902,106,978 | 7a5a99eb0f1af042fbe5a4bee2d4deaf11e8e364 | 7a1b34fa1f66e42429307238600c821b688eae9e | /scripts/fetch_ncdc_normals.py | a86701910117e077e1f9a04dba4ccaa08a2a1ce7 | [
"MIT"
] | permissive | https://github.com/lhammond/ee-meter | 91588c31088e257a890431bb8e898da70224af09 | 50ad19eb000b64816502cda5eb9ce4841b710afa | refs/heads/master | "2020-12-25T17:45:12.560357" | "2015-04-17T17:26:17" | "2015-04-17T17:26:17" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ##################
#
# Or just use:
#
# wget -r ftp://ftp.ncdc.noaa.gov/pub/data/normals/1981-2010/products/station/
#
##################
from ftplib import FTP
import argparse
import os
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("data_dir")
parser.add_argument("start_at")
args = parser.parse_args()
ftp = FTP('ftp.ncdc.noaa.gov')
ftp.login()
ftp.cwd('/pub/data/normals/1981-2010/products/station')
filenames = []
def callback(response):
filename = response.split()[-1]
filenames.append(filename)
ftp.retrlines('LIST',callback)
started = False
for filename in filenames:
if not started and filename == args.start_at:
started = True
if started:
print filename
ftp.retrbinary('RETR {}'.format(filename), open(os.path.join(args.data_dir,filename), 'wb').write)
ftp.quit()
| UTF-8 | Python | false | false | 943 | py | 20 | fetch_ncdc_normals.py | 16 | 0.59491 | 0.576882 | 0 | 37 | 24.486486 | 110 |
mapostolides/methylseq | 10,771,777,988,451 | 0e0f457af30ac00bbee0e51ea98c48e956298502 | 2ac9a8ae51ecdb9116aff3bbe01e4747bdbbf71c | /pipelines/pacbio_assembly/pacbio_assembly.py | 759c0db0b2c00bf854b355fd5ce3db20127db129 | [] | no_license | https://github.com/mapostolides/methylseq | 434268ee6f917609ca0749dc34b550faaecab0d7 | 4d7a8565279518fd70e4c6240d022748498dfbba | refs/heads/master | "2020-05-14T07:59:02.779506" | "2019-04-16T17:17:32" | "2019-04-16T17:17:32" | 181,713,903 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
################################################################################
# Copyright (C) 2014, 2015 GenAP, McGill University and Genome Quebec Innovation Centre
#
# This file is part of MUGQIC Pipelines.
#
# MUGQIC Pipelines is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MUGQIC Pipelines is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with MUGQIC Pipelines. If not, see <http://www.gnu.org/licenses/>.
################################################################################
# Python Standard Modules
import logging
import os
import sys
# Append mugqic_pipelines directory to Python library path
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))))
# MUGQIC Modules
from core.config import *
from core.job import *
from bfx.readset import *
from bfx import blast
from bfx import circlator
from bfx import gq_seq_utils
from bfx import mummer
from bfx import pacbio_tools
from bfx import smrtanalysis
from pipelines import common
log = logging.getLogger(__name__)
class PacBioAssembly(common.MUGQICPipeline):
"""
PacBio Assembly Pipeline
========================
Contigs assembly with PacBio reads is done using what is refer as the HGAP workflow.
Briefly, raw subreads generated from raw .ba(s|x).h5 PacBio data files are filtered for quality.
A subread length cutoff value is extracted from subreads, depending on subreads distribution,
and used into the preassembly (aka correcting step) (BLASR) step which consists of aligning
short subreads on long subreads.
Since errors in PacBio reads is random, the alignment of multiple short reads on longer reads
allows to correct sequencing error on long reads.
These long corrected reads are then used as seeds into assembly (Celera assembler) which gives contigs.
These contigs are then *polished* by aligning raw reads on contigs (BLASR) that are then processed
through a variant calling algorithm (Quiver) that generates high quality consensus sequences
using local realignments and PacBio quality scores.
Prepare your readset file as described [here](https://bitbucket.org/mugqic/mugqic_pipelines/src#markdown-header-pacbio-assembly)
(if you use `nanuq2mugqic_pipelines.py`, you need to add and fill manually
the `EstimatedGenomeSize` column in your readset file).
"""
def __init__(self, protocol=None):
self._protocol=protocol
self.argparser.add_argument("-r", "--readsets", help="readset file", type=file)
super(PacBioAssembly, self).__init__(protocol)
@property
def readsets(self):
if not hasattr(self, "_readsets"):
if self.args.readsets:
self._readsets = parse_pacbio_readset_file(self.args.readsets.name)
else:
self.argparser.error("argument -r/--readsets is required!")
return self._readsets
def smrtanalysis_filtering(self):
"""
Filter reads and subreads based on their length and QVs, using smrtpipe.py (from the SmrtAnalysis package).
1. fofnToSmrtpipeInput.py
2. modify RS_Filtering.xml files according to reads filtering values entered in .ini file
3. smrtpipe.py with filtering protocol
4. prinseq-lite.pl: write fasta file based on fastq file
Informative run metrics such as loading efficiency, read lengths and base quality are generated in this step as well.
"""
jobs = []
jobs.append(Job([os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])),config.param('smrtanalysis_filtering', 'celera_settings')),os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])),config.param('smrtanalysis_filtering', 'filtering_settings'))], [config.param('smrtanalysis_filtering', 'celera_settings'), config.param('smrtanalysis_filtering', 'filtering_settings')], command="cp -a -f " + os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), "protocols") + " .", name="smrtanalysis_filtering.config"))
for sample in self.samples:
fofn = os.path.join("fofns", sample.name + ".fofn")
input_files = [config.param('smrtanalysis_filtering', 'celera_settings'), config.param('smrtanalysis_filtering', 'filtering_settings')]
for readset in sample.readsets:
if readset.bax_files:
# New PacBio format is BAX
input_files.extend(readset.bax_files)
else:
# But old PacBio format BAS should still be supported
input_files.extend(readset.bas_files)
filtering_directory = os.path.join(sample.name, "filtering")
jobs.append(concat_jobs([
Job(command="mkdir -p fofns", samples=[sample]),
Job(input_files, [fofn], command="""\
`cat > {fofn} << END
{input_files}
END
`""".format(input_files="\n".join(input_files), fofn=fofn)),
Job(command="mkdir -p " + filtering_directory),
smrtanalysis.filtering(
fofn,
os.path.join(filtering_directory, "input.xml"),
os.path.join(sample.name, "filtering.xml"),
filtering_directory,
os.path.join(filtering_directory, "smrtpipe.log")
)
], name="smrtanalysis_filtering." + sample.name))
return jobs
def pacbio_tools_get_cutoff(self):
"""
Cutoff value for splitting long reads from short reads is done here using
estimated coverage and estimated genome size.
You should estimate the overall coverage and length distribution for putting in
the correct options in the configuration file. You will need to decide a
length cutoff for the seeding reads. The optimum cutoff length will depend on
the distribution of the sequencing read lengths, the genome size and the
overall yield. Here, you provide a percentage value that corresponds to the
fraction of coverage you want to use as seeding reads.
First, loop through fasta sequences, put the length of each sequence in an array,
sort it, loop through it again and compute the cummulative length covered by each
sequence as we loop through the array. Once that length is > (coverage * genome
size) * $percentageCutoff (e.g. 0.10), we have our threshold. The idea is to
consider all reads above that threshold to be seeding reads to which will be
aligned lower shorter subreads.
"""
jobs = []
for sample in self.samples:
log.info("Sample: " + sample.name)
sample_nb_base_pairs = sum([readset.nb_base_pairs for readset in sample.readsets])
log.info("nb_base_pairs: " + str(sample_nb_base_pairs))
estimated_genome_size = sample.readsets[0].estimated_genome_size
log.info("estimated_genome_size: " + str(estimated_genome_size))
estimated_coverage = sample_nb_base_pairs / estimated_genome_size
log.info("estimated_coverage: " + str(estimated_coverage))
for coverage_cutoff in config.param('DEFAULT', 'coverage_cutoff', type='list'):
cutoff_x = coverage_cutoff + "X"
coverage_directory = os.path.join(sample.name, cutoff_x)
log.info("COVERAGE_CUTOFF: " + coverage_cutoff + "_X_coverage")
jobs.append(concat_jobs([
Job(command="mkdir -p " + os.path.join(coverage_directory, "preassembly"), samples=[sample]),
pacbio_tools.get_cutoff(
os.path.join(sample.name, "filtering", "data", "filtered_subreads.fasta"),
estimated_coverage,
estimated_genome_size,
coverage_cutoff,
os.path.join(coverage_directory, "preassemblyMinReadSize.txt")
)
], name="pacbio_tools_get_cutoff." + sample.name + ".coverage_cutoff" + cutoff_x))
return jobs
def preassembly(self):
"""
Having in hand a cutoff value, filtered reads are splitted between short and long reads. Short reads
are aligned against long reads and consensus (e.g. corrected reads) are generated from these alignments.
1. split reads between long and short
2. blasr: aligner for PacBio reads
3. m4topre: convert .m4 blasr output in .pre format
4. pbdagcon (aka HGAP2): generate corrected reads from alignments
"""
jobs = []
for sample in self.samples:
for coverage_cutoff in config.param('DEFAULT', 'coverage_cutoff', type='list'):
cutoff_x = coverage_cutoff + "X"
coverage_directory = os.path.join(sample.name, cutoff_x)
preassembly_directory = os.path.join(coverage_directory, "preassembly", "data")
job_name_suffix = sample.name + ".coverage_cutoff" + cutoff_x
jobs.append(concat_jobs([
Job(command="mkdir -p " + preassembly_directory, samples=[sample]),
pacbio_tools.split_reads(
os.path.join(sample.name, "filtering", "data", "filtered_subreads.fasta"),
os.path.join(coverage_directory, "preassemblyMinReadSize.txt"),
os.path.join(preassembly_directory, "filtered_shortreads.fa"),
os.path.join(preassembly_directory, "filtered_longreads.fa")
)
], name="pacbio_tools_split_reads." + job_name_suffix))
job = smrtanalysis.blasr(
os.path.join(sample.name, "filtering", "data", "filtered_subreads.fasta"),
os.path.join(preassembly_directory, "filtered_longreads.fa"),
os.path.join(preassembly_directory, "seeds.m4"),
os.path.join(preassembly_directory, "seeds.m4.fofn")
)
job.name = "smrtanalysis_blasr." + job_name_suffix
job.samples = [sample]
jobs.append(job)
job = smrtanalysis.m4topre(
os.path.join(preassembly_directory, "seeds.m4.filtered"),
os.path.join(preassembly_directory, "seeds.m4.fofn"),
os.path.join(sample.name, "filtering", "data", "filtered_subreads.fasta"),
os.path.join(preassembly_directory, "aln.pre")
)
job.name = "smrtanalysis_m4topre." + job_name_suffix
job.samples = [sample]
jobs.append(job)
job = smrtanalysis.pbdagcon(
os.path.join(preassembly_directory, "aln.pre"),
os.path.join(preassembly_directory, "corrected.fasta"),
os.path.join(preassembly_directory, "corrected.fastq")
)
job.name = "smrtanalysis_pbdagcon." + job_name_suffix
job.samples = [sample]
jobs.append(job)
return jobs
def assembly(self):
"""
Corrected reads are assembled to generates contigs. Please see the
[Celera documentation](http://wgs-assembler.sourceforge.net/wiki/index.php?title=RunCA).
Quality of assembly seems to be highly sensitive to parameters you give Celera.
1. generate celera config files using parameters provided in the .ini file
2. fastqToCA: generate input file compatible with the Celera assembler
3. runCA: run the Celera assembler
"""
jobs = []
for sample in self.samples:
for coverage_cutoff in config.param('DEFAULT', 'coverage_cutoff', type='list'):
cutoff_x = coverage_cutoff + "X"
coverage_directory = os.path.join(sample.name, cutoff_x)
preassembly_directory = os.path.join(coverage_directory, "preassembly", "data")
for mer_size in config.param('DEFAULT', 'mer_sizes', type='list'):
mer_size_text = "merSize" + mer_size
sample_cutoff_mer_size = "_".join([sample.name, cutoff_x, mer_size_text])
mer_size_directory = os.path.join(coverage_directory, mer_size_text)
assembly_directory = os.path.join(mer_size_directory, "assembly")
jobs.append(concat_jobs([
Job(command="mkdir -p " + assembly_directory, samples=[sample]),
pacbio_tools.celera_config(
mer_size,
config.param('DEFAULT', 'celera_settings'),
os.path.join(mer_size_directory, "celera_assembly.ini")
)
], name="pacbio_tools_celera_config." + sample_cutoff_mer_size))
job = smrtanalysis.fastq_to_ca(
sample_cutoff_mer_size,
os.path.join(preassembly_directory, "corrected.fastq"),
os.path.join(preassembly_directory, "corrected.frg")
)
job.name = "smrtanalysis_fastq_to_ca." + sample_cutoff_mer_size
job.samples = [sample]
jobs.append(job)
jobs.append(concat_jobs([
Job(command="rm -rf " + assembly_directory, samples=[sample]),
smrtanalysis.run_ca(
os.path.join(preassembly_directory, "corrected.frg"),
os.path.join(mer_size_directory, "celera_assembly.ini"),
sample_cutoff_mer_size,
assembly_directory
)
], name="smrtanalysis_run_ca." + sample_cutoff_mer_size))
job = smrtanalysis.pbutgcns(
os.path.join(assembly_directory, sample_cutoff_mer_size + ".gkpStore"),
os.path.join(assembly_directory, sample_cutoff_mer_size + ".tigStore"),
os.path.join(mer_size_directory, "unitigs.lst"),
os.path.join(assembly_directory, sample_cutoff_mer_size),
os.path.join(assembly_directory, "9-terminator"),
os.path.join(assembly_directory, "9-terminator", sample_cutoff_mer_size + ".ctg.fasta"),
os.path.join(config.param('smrtanalysis_pbutgcns', 'tmp_dir'), sample_cutoff_mer_size)
)
job.name = "smrtanalysis_pbutgcns." + sample_cutoff_mer_size
job.samples = [sample]
jobs.append(job)
return jobs
def polishing(self):
"""
Align raw reads on the Celera assembly with BLASR. Load pulse information from bax or bas files into aligned file. Sort that file and run quiver (variantCaller.py).
1. generate fofn
2. upload Celera assembly with smrtpipe refUploader
3. compare sequences
4. load pulses
5. sort .cmp.h5 file
6. variantCaller.py
"""
jobs = []
for sample in self.samples:
for coverage_cutoff in config.param('DEFAULT', 'coverage_cutoff', type='list'):
cutoff_x = coverage_cutoff + "X"
coverage_directory = os.path.join(sample.name, cutoff_x)
preassembly_directory = os.path.join(coverage_directory, "preassembly", "data")
for mer_size in config.param('DEFAULT', 'mer_sizes', type='list'):
mer_size_text = "merSize" + mer_size
mer_size_directory = os.path.join(coverage_directory, mer_size_text)
assembly_directory = os.path.join(mer_size_directory, "assembly")
polishing_rounds = config.param('DEFAULT', 'polishing_rounds', type='posint')
if polishing_rounds > 4:
raise Exception("Error: polishing_rounds \"" + str(polishing_rounds) + "\" is invalid (should be between 1 and 4)!")
for polishing_round in range(1, polishing_rounds + 1):
polishing_round_directory = os.path.join(mer_size_directory, "polishing" + str(polishing_round))
# smrtanalysis.reference_uploader transforms "-" into "_" in fasta filename
sample_cutoff_mer_size_polishing_round = "_".join([sample.name.replace("-", "_"), cutoff_x, mer_size_text, "polishingRound" + str(polishing_round)])
job_name_suffix = "_".join([sample.name, cutoff_x, mer_size_text, "polishingRound" + str(polishing_round)])
if polishing_round == 1:
fasta_file = os.path.join(assembly_directory, "9-terminator", "_".join([sample.name, cutoff_x, mer_size_text]) + ".ctg.fasta")
else:
fasta_file = os.path.join(mer_size_directory, "polishing" + str(polishing_round - 1), "data", "consensus.fasta")
jobs.append(concat_jobs([
Job(command="mkdir -p " + os.path.join(polishing_round_directory, "data"), samples=[sample]),
smrtanalysis.reference_uploader(
polishing_round_directory,
sample_cutoff_mer_size_polishing_round,
fasta_file
)
], name="smrtanalysis_reference_uploader." + job_name_suffix))
job = smrtanalysis.pbalign(
os.path.join(polishing_round_directory, "data", "aligned_reads.cmp.h5"),
os.path.join(sample.name, "filtering", "data", "filtered_regions.fofn"),
os.path.join(sample.name, "filtering", "input.fofn"),
os.path.join(polishing_round_directory, sample_cutoff_mer_size_polishing_round, "sequence", sample_cutoff_mer_size_polishing_round + ".fasta"),
os.path.join(config.param('smrtanalysis_pbalign', 'tmp_dir'), sample_cutoff_mer_size_polishing_round)
)
job.name = "smrtanalysis_pbalign." + job_name_suffix
job.samples = [sample]
jobs.append(job)
jobs.append(concat_jobs([
Job(samples=[sample]),
smrtanalysis.load_chemistry(
os.path.join(polishing_round_directory, "data", "aligned_reads.cmp.h5"),
os.path.join(sample.name, "filtering", "input.fofn"),
os.path.join(polishing_round_directory, "data", "aligned_reads.loadChemistry.cmp.h5")
),
smrtanalysis.load_pulses(
os.path.join(polishing_round_directory, "data", "aligned_reads.loadChemistry.cmp.h5"),
os.path.join(sample.name, "filtering", "input.fofn"),
os.path.join(polishing_round_directory, "data", "aligned_reads.loadPulses.cmp.h5")
)
], name = "smrtanalysis_load_chemistry_load_pulses." + job_name_suffix))
job = smrtanalysis.cmph5tools_sort(
os.path.join(polishing_round_directory, "data", "aligned_reads.loadPulses.cmp.h5"),
os.path.join(polishing_round_directory, "data", "aligned_reads.sorted.cmp.h5")
)
job.name = "smrtanalysis_cmph5tools_sort." + job_name_suffix
job.samples=[sample]
jobs.append(job)
job = smrtanalysis.variant_caller(
os.path.join(polishing_round_directory, "data", "aligned_reads.sorted.cmp.h5"),
os.path.join(polishing_round_directory, sample_cutoff_mer_size_polishing_round, "sequence", sample_cutoff_mer_size_polishing_round + ".fasta"),
os.path.join(polishing_round_directory, "data", "variants.gff"),
os.path.join(polishing_round_directory, "data", "consensus.fasta.gz"),
os.path.join(polishing_round_directory, "data", "consensus.fastq.gz")
)
job.name = "smrtanalysis_variant_caller." + job_name_suffix
job.samples=[sample]
jobs.append(job)
job = smrtanalysis.summarize_polishing(
"_".join([sample.name, cutoff_x, mer_size_text]),
os.path.join(polishing_round_directory, sample_cutoff_mer_size_polishing_round),
os.path.join(polishing_round_directory, "data", "aligned_reads.sorted.cmp.h5"),
os.path.join(polishing_round_directory, "data", "alignment_summary.gff"),
os.path.join(polishing_round_directory, "data", "coverage.bed"),
os.path.join(sample.name, "filtering", "input.fofn"),
os.path.join(polishing_round_directory, "data", "aligned_reads.sam"),
os.path.join(polishing_round_directory, "data", "variants.gff"),
os.path.join(polishing_round_directory, "data", "variants.bed"),
os.path.join(polishing_round_directory, "data", "variants.vcf")
)
job.name = "smrtanalysis_summarize_polishing." + job_name_suffix
job.samples=[sample]
jobs.append(job)
return jobs
def pacbio_tools_assembly_stats(self):
jobs = []
for sample in self.samples:
for coverage_cutoff in config.param('DEFAULT', 'coverage_cutoff', type='list'):
cutoff_x = coverage_cutoff + "X"
coverage_directory = os.path.join(sample.name, cutoff_x)
preassembly_directory = os.path.join(coverage_directory, "preassembly", "data")
for mer_size in config.param('DEFAULT', 'mer_sizes', type='list'):
mer_size_text = "merSize" + mer_size
sample_cutoff_mer_size = "_".join([sample.name, cutoff_x, mer_size_text])
mer_size_directory = os.path.join(coverage_directory, mer_size_text)
blast_directory = os.path.join(mer_size_directory, "blast")
mummer_file_prefix = os.path.join(mer_size_directory, "mummer", sample.name + ".")
report_directory = os.path.join(mer_size_directory, "report")
polishing_rounds = config.param('DEFAULT', 'polishing_rounds', type='posint')
fasta_consensus = os.path.join(mer_size_directory, "polishing" + str(polishing_rounds), "data", "consensus.fasta")
# Number of unique run-smartcells per sample
smartcells = len(set([(readset.run, readset.smartcell) for readset in sample.readsets]))
jobs.append(concat_jobs([
Job(command="mkdir -p " + report_directory, samples=[sample]),
# Generate table(s) and figures
pacbio_tools.assembly_stats(
os.path.join(preassembly_directory, "filtered_shortreads.fa"),
os.path.join(preassembly_directory, "filtered_longreads.fa"),
os.path.join(preassembly_directory, "corrected.fasta"),
os.path.join(sample.name, "filtering", "data", "filtered_summary.csv"),
fasta_consensus,
sample.name,
cutoff_x + "_" + mer_size,
sample.readsets[0].estimated_genome_size,
smartcells,
report_directory
)
], name="pacbio_tools_assembly_stats." + sample_cutoff_mer_size))
report_file = os.path.join(report_directory, "PacBioAssembly.pacbio_tools_assembly_stats.md")
jobs.append(
Job(
[
os.path.join(report_directory, "summaryTableReads.tsv"),
os.path.join(report_directory, "summaryTableReads2.tsv"),
os.path.join(report_directory, "summaryTableAssembly.tsv"),
os.path.join(report_directory, "pacBioGraph_readLengthScore.pdf"),
os.path.join(report_directory, "pacBioGraph_readLengthScore.jpeg"),
os.path.join(report_directory, "pacBioGraph_histoReadLength.pdf"),
os.path.join(report_directory, "pacBioGraph_histoReadLength.jpeg"),
fasta_consensus + ".gz"
],
[report_file],
[['pacbio_tools_assembly_stats', 'module_pandoc']],
command="""\
cp {fasta_consensus}.gz {report_directory}/ && \\
total_subreads=`grep -P '^"Total subreads"\t"' {report_directory}/summaryTableReads.tsv | cut -f2 | sed 's/"//g'` && \\
average_subreads_length=`grep -P '^"Average subread length"\t"' {report_directory}/summaryTableReads.tsv | cut -f2 | sed 's/"//g'` && \\
summary_table_reads2=`LC_NUMERIC=en_CA awk -F "\t" '{{OFS="|"; if (NR == 1) {{$1 = $1; print $0; print "-----|-----:|-----:|-----:|-----:|-----:|-----:|-----:|-----:|-----:|-----:"}} else {{print $1, sprintf("%\\47d", $2), sprintf("%\\47d", $3), sprintf("%\\47d", $4), sprintf("%\\47d", $5), sprintf("%\\47d", $6), sprintf("%\\47d", $7), sprintf("%\\47d", $8), sprintf("%\\47d", $9), sprintf("%\\47d", $10), sprintf("%\\47d", $11)}}}}' {report_directory}/summaryTableReads2.tsv | sed 's/^#//'` && \\
summary_table_assembly=`awk -F"\t" '{{OFS="\t"; if (NR==1) {{print; gsub(/[^\t]/, "-")}} print}}' {report_directory}/summaryTableAssembly.tsv | sed 's/"//g' | sed 's/\t/|/g'` && \\
pandoc --to=markdown \\
--template {report_template_dir}/{basename_report_file} \\
--variable total_subreads="$total_subreads" \\
--variable average_subreads_length="$average_subreads_length" \\
--variable summary_table_reads2="$summary_table_reads2" \\
--variable summary_table_assembly="$summary_table_assembly" \\
--variable smartcells="{smartcells}" \\
{report_template_dir}/{basename_report_file} \\
> {report_file}""".format(
fasta_consensus=fasta_consensus,
report_directory=report_directory,
smartcells=smartcells,
report_template_dir=self.report_template_dir,
basename_report_file=os.path.basename(report_file),
report_file=report_file
),
report_files=[os.path.relpath(report_file, mer_size_directory)],
name="pacbio_tools_assembly_stats_report." + sample_cutoff_mer_size)
)
return jobs
def blast(self):
"""
Blast polished assembly against nr using dc-megablast.
"""
jobs = []
for sample in self.samples:
for coverage_cutoff in config.param('DEFAULT', 'coverage_cutoff', type='list'):
cutoff_x = coverage_cutoff + "X"
coverage_directory = os.path.join(sample.name, cutoff_x)
preassembly_directory = os.path.join(coverage_directory, "preassembly", "data")
for mer_size in config.param('DEFAULT', 'mer_sizes', type='list'):
mer_size_text = "merSize" + mer_size
mer_size_directory = os.path.join(coverage_directory, mer_size_text)
blast_directory = os.path.join(mer_size_directory, "blast")
polishing_rounds = config.param('DEFAULT', 'polishing_rounds', type='posint')
if polishing_rounds > 4:
raise Exception("Error: polishing_rounds \"" + str(polishing_rounds) + "\" is invalid (should be between 1 and 4)!")
polishing_round_directory = os.path.join(mer_size_directory, "polishing" + str(polishing_rounds))
sample_cutoff_mer_size = "_".join([sample.name, cutoff_x, mer_size_text])
blast_report = os.path.join(blast_directory, "blast_report.csv")
# Blast contigs against nt
jobs.append(concat_jobs([
Job(command="mkdir -p " + blast_directory, samples=[sample]),
blast.dcmegablast(
os.path.join(polishing_round_directory, "data", "consensus.fasta"),
"7",
blast_report,
os.path.join(polishing_round_directory, "data", "coverage.bed"),
blast_directory
)
], name="blast_dcmegablast." + sample_cutoff_mer_size))
# Get fasta file of best hit.
job = blast.blastdbcmd(
blast_report,
"$(grep -v '^#' < " + blast_report + " | head -n 1 | awk -F '\\t' '{print $2}' | sed 's/gi|\([0-9]*\)|.*/\\1/' | tr '\\n' ' ')",
os.path.join(blast_directory, "nt_reference.fasta"),
)
job.name = "blast_blastdbcmd." + sample_cutoff_mer_size
job.samples=[sample]
jobs.append(job)
report_directory = os.path.join(mer_size_directory, "report")
report_file = os.path.join(report_directory, "PacBioAssembly.blast.md")
jobs.append(
Job(
[os.path.join(blast_directory, "blastCov.tsv"), os.path.join(blast_directory, "contigsCoverage.tsv")],
[report_file],
[['blast', 'module_pandoc']],
command="""\
cp {blast_directory}/blastCov.tsv {report_directory}/ && \\
cp {blast_directory}/contigsCoverage.tsv {report_directory}/ && \\
blast_table=`awk -F"\t" '{{OFS="\t"; if (NR==1) {{print; gsub(/[^\t]/, "-")}} print}}' {report_directory}/blastCov.tsv | sed 's/|/\\\\\\\\|/g' | sed 's/\t/|/g' | head -21` && \\
pandoc --to=markdown \\
--template {report_template_dir}/{basename_report_file} \\
--variable blast_table="$blast_table" \\
{report_template_dir}/{basename_report_file} \\
> {report_file}""".format(
blast_directory=blast_directory,
report_directory=report_directory,
report_template_dir=self.report_template_dir,
basename_report_file=os.path.basename(report_file),
report_file=report_file
),
report_files=[os.path.relpath(report_file, mer_size_directory)],
name="blast_report." + sample_cutoff_mer_size)
)
return jobs
def mummer(self):
"""
Using MUMmer, align polished assembly against best hit from blast job. Also align polished assembly against itself to detect structure variation such as repeats, etc.
"""
jobs = []
for sample in self.samples:
for coverage_cutoff in config.param('DEFAULT', 'coverage_cutoff', type='list'):
cutoff_x = coverage_cutoff + "X"
coverage_directory = os.path.join(sample.name, cutoff_x)
for mer_size in config.param('DEFAULT', 'mer_sizes', type='list'):
mer_size_text = "merSize" + mer_size
mer_size_directory = os.path.join(coverage_directory, mer_size_text)
fasta_reference = os.path.join(mer_size_directory, "blast", "nt_reference.fasta")
mummer_directory = os.path.join(mer_size_directory, "mummer")
mummer_file_prefix = os.path.join(mummer_directory, sample.name + ".")
polishing_rounds = config.param('DEFAULT', 'polishing_rounds', type='posint')
if polishing_rounds > 4:
raise Exception("Error: polishing_rounds \"" + str(polishing_rounds) + "\" is invalid (should be between 1 and 4)!")
fasta_consensus = os.path.join(mer_size_directory, "polishing" + str(polishing_rounds), "data", "consensus.fasta")
sample_cutoff_mer_size = "_".join([sample.name, cutoff_x, mer_size_text])
sample_cutoff_mer_size_nucmer = sample_cutoff_mer_size + "-nucmer"
# Run nucmer
jobs.append(concat_jobs([
Job(command="mkdir -p " + mummer_directory, samples=[sample]),
mummer.reference(
mummer_file_prefix + "nucmer",
fasta_reference,
fasta_consensus,
sample_cutoff_mer_size_nucmer,
mummer_file_prefix + "nucmer.delta",
mummer_file_prefix + "nucmer.delta",
mummer_file_prefix + "dnadiff",
mummer_file_prefix + "dnadiff.delta",
mummer_file_prefix + "dnadiff.delta.snpflank"
)
], name="mummer_reference." + sample_cutoff_mer_size_nucmer))
jobs.append(concat_jobs([
Job(command="mkdir -p " + mummer_directory, samples=[sample]),
mummer.self(
mummer_file_prefix + "nucmer.self",
fasta_consensus,
sample_cutoff_mer_size_nucmer + "-self",
mummer_file_prefix + "nucmer.self.delta",
mummer_file_prefix + "nucmer.self.delta"
)
], name="mummer_self." + sample_cutoff_mer_size_nucmer))
report_directory = os.path.join(mer_size_directory, "report")
report_file = os.path.join(report_directory, "PacBioAssembly.mummer.md")
jobs.append(
Job(
[mummer_file_prefix + "nucmer.self.delta.png", mummer_file_prefix + "nucmer.delta.png"],
[report_file],
[['mummer', 'module_pandoc']],
command="""\
cp {mummer_file_prefix}nucmer.self.delta.png {mummer_file_prefix}nucmer.delta.png {report_directory}/ && \\
pandoc --to=markdown \\
--template {report_template_dir}/{basename_report_file} \\
--variable sample="{sample}" \\
{report_template_dir}/{basename_report_file} \\
> {report_file}""".format(
mummer_file_prefix=mummer_file_prefix,
sample=sample.name,
report_directory=report_directory,
report_template_dir=self.report_template_dir,
basename_report_file=os.path.basename(report_file),
report_file=report_file
),
report_files=[os.path.relpath(report_file, mer_size_directory)],
name="mummer_report." + sample_cutoff_mer_size)
)
return jobs
def compile(self):
"""
Compile assembly stats of all conditions used in the pipeline (useful when multiple assemblies are performed).
"""
jobs = []
for sample in self.samples:
# Generate table
job = pacbio_tools.compile(
sample.name,
sample.name,
sample.readsets[0].estimated_genome_size,
sample.name + ".compiledStats.csv"
)
# Job input files (all consensus.fasta) need to be defined here since only sample directory is given to pacbio_tools.compile
job.input_files = [os.path.join(
sample.name,
coverage_cutoff + "X",
"merSize" + mer_size,
"polishing" + str(polishing_round),
"data",
"consensus.fasta")
for coverage_cutoff in config.param('DEFAULT', 'coverage_cutoff', type='list')
for mer_size in config.param('DEFAULT', 'mer_sizes', type='list')
for polishing_round in range(1, config.param('DEFAULT', 'polishing_rounds', type='posint') + 1)
]
job.name = "pacbio_tools_compile." + sample.name
job.samples=[sample]
jobs.append(job)
return jobs
def circlator(self):
"""
Circularize the assembly contigs if possible.
User should launch this step after making sure the quality of the assembly is acceptable.
"""
jobs = []
for sample in self.samples:
for coverage_cutoff in config.param('DEFAULT', 'coverage_cutoff', type='list'):
cutoff_x = coverage_cutoff + "X"
coverage_directory = os.path.join(sample.name, cutoff_x)
preassembly_directory = os.path.join(coverage_directory, "preassembly", "data")
for mer_size in config.param('DEFAULT', 'mer_sizes', type='list'):
mer_size_text = "merSize" + mer_size
mer_size_directory = os.path.join(coverage_directory, mer_size_text)
circlator_directory = os.path.join(mer_size_directory, "circlator")
circlator_file = os.path.join(circlator_directory, "output")
polishing_rounds = config.param('DEFAULT', 'polishing_rounds', type='posint')
if polishing_rounds > 4:
raise Exception("Error: polishing_rounds \"" + str(polishing_rounds) + "\" is invalid (should be between 1 and 4)!")
fasta_consensus = os.path.join(mer_size_directory, "polishing" + str(polishing_rounds), "data", "consensus.fasta")
jobs.append(concat_jobs([
Job(command="mkdir -p " + circlator_directory, samples=[sample]),
circlator.circularize(fasta_consensus, os.path.join(preassembly_directory, "corrected.fastq"), circlator_file)
], name = "circlator." + sample.name))
return jobs
def basemodification(self):
"""
Run ipdSummary.py for in silico detection of modified bases
"""
jobs = []
for sample in self.samples:
for coverage_cutoff in config.param('DEFAULT', 'coverage_cutoff', type='list'):
cutoff_x = coverage_cutoff + "X"
coverage_directory = os.path.join(sample.name, cutoff_x)
preassembly_directory = os.path.join(coverage_directory, "preassembly", "data")
for mer_size in config.param('DEFAULT', 'mer_sizes', type='list'):
mer_size_text = "merSize" + mer_size
mer_size_directory = os.path.join(coverage_directory, mer_size_text)
basemodification_directory = os.path.join(mer_size_directory, "basemodification")
basemodification_file_prefix = os.path.join(basemodification_directory, sample.name + ".basemod")
polishing_rounds = config.param('DEFAULT', 'polishing_rounds', type='posint')
if polishing_rounds > 4:
raise Exception("Error: polishing_rounds \"" + str(polishing_rounds) + "\" is invalid (should be between 1 and 4)!")
fasta_consensus = os.path.join(mer_size_directory, "polishing" + str(polishing_rounds - 1), "data", "consensus.fasta")
aligned_reads = os.path.join(mer_size_directory, "polishing" + str(polishing_rounds), "data", "aligned_reads.sorted.cmp.h5")
output_gff = basemodification_file_prefix
jobs.append(concat_jobs([
Job(command="mkdir -p " + basemodification_directory),
smrtanalysis.basemodification(fasta_consensus, aligned_reads, basemodification_file_prefix, mer_size_directory, polishing_rounds)
], name = "basemodification." + sample.name))
return jobs
def motifMaker(self):
"""
Run motifMaker to generate motif_summary.csv
"""
jobs = []
for sample in self.samples:
for coverage_cutoff in config.param('DEFAULT', 'coverage_cutoff', type='list'):
cutoff_x = coverage_cutoff + "X"
coverage_directory = os.path.join(sample.name, cutoff_x)
preassembly_directory = os.path.join(coverage_directory, "preassembly", "data")
for mer_size in config.param('DEFAULT', 'mer_sizes', type='list'):
mer_size_text = "merSize" + mer_size
mer_size_directory = os.path.join(coverage_directory, mer_size_text)
basemodification_directory = os.path.join(mer_size_directory, "basemodification")
basemodification_file_prefix = os.path.join(basemodification_directory, sample.name + ".basemod")
motifMaker_directory = os.path.join(mer_size_directory, "motifMaker")
motifMaker_file = os.path.join(motifMaker_directory, (sample.name + ".motif_summary.csv"))
output_gff = basemodification_file_prefix + ".gff"
polishing_rounds = config.param('DEFAULT', 'polishing_rounds', type='posint')
if polishing_rounds > 4:
raise Exception("Error: polishing_rounds \"" + str(polishing_rounds) + "\" is invalid (should be between 1 and 4)!")
fasta_consensus = os.path.join(mer_size_directory, "polishing" + str(polishing_rounds - 1), "data", "consensus.fasta")
output_gff = basemodification_file_prefix + ".gff"
jobs.append(concat_jobs([
Job(command="mkdir -p " + motifMaker_directory),
smrtanalysis.motifMaker(fasta_consensus, basemodification_file_prefix, mer_size_directory, polishing_rounds, motifMaker_file)
], name = "motifMaker." + sample.name))
return jobs
def report_jobs(self):
"""
Overwrite core pipeline report_jobs method to perform it on every sample/coverage_cutoff/mer_size
"""
for sample in self.samples:
for coverage_cutoff in config.param('DEFAULT', 'coverage_cutoff', type='list'):
cutoff_x = coverage_cutoff + "X"
coverage_directory = os.path.join(sample.name, cutoff_x)
for mer_size in config.param('DEFAULT', 'mer_sizes', type='list'):
mer_size_text = "merSize" + mer_size
mer_size_directory = os.path.join(coverage_directory, mer_size_text)
super(PacBioAssembly, self).report_jobs(os.path.join(self.output_dir, mer_size_directory))
@property
def steps(self):
return [
self.smrtanalysis_filtering,
self.pacbio_tools_get_cutoff,
self.preassembly,
self.assembly,
self.polishing,
self.pacbio_tools_assembly_stats,
self.blast,
self.mummer,
self.compile,
self.circlator,
self.basemodification,
self.motifMaker
]
if __name__ == '__main__':
PacBioAssembly()
| UTF-8 | Python | false | false | 45,177 | py | 40 | pacbio_assembly.py | 15 | 0.548266 | 0.5451 | 0 | 831 | 53.364621 | 530 |
aaronchall/HTML5.py | 5,111,011,123,828 | bdf86e53b67197c6c8bd2596d874cc2383139e0f | c882c9e06cf8453e23c75cdc0e830197328c90e2 | /tests/__main__.py | e8947d7303fffd37aeaa9b4a002520e313cba575 | [
"MIT"
] | permissive | https://github.com/aaronchall/HTML5.py | 0e6b795f70124a1f2d6ede01f683793d570552b5 | ccff1451214adf2d9147a3e253f49e757da5297f | refs/heads/master | "2021-01-10T14:45:00.103592" | "2017-11-16T00:26:21" | "2017-11-16T00:26:21" | 45,585,228 | 3 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import print_function, division, absolute_import
import unittest
import sys
sys.path.append('../')
from __init__ import *
print('loading __main__.py')
def main():
unittest.main(exit=False)
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 253 | py | 12 | __main__.py | 8 | 0.636364 | 0.636364 | 0 | 16 | 14.8125 | 64 |
khanhquoc2920/vidugit | 6,425,271,085,153 | 2af47eca0233c206f0393b3c80aa2b036ad14b61 | 967eb01e56358d219b9d590aef030be25e0eb0cb | /ex1.py | 4f883d0d4b796fd8654f589c1f5ab62e1cc79012 | [] | no_license | https://github.com/khanhquoc2920/vidugit | af3ebf8db7605d150aef7ebcd4e06d9838e15b61 | 7af6e2a2fcdab2146fd8a6de4d356a73930319e4 | refs/heads/master | "2023-08-25T15:27:36.337219" | "2021-09-23T08:32:01" | "2021-09-23T08:32:01" | 407,063,558 | 0 | 0 | null | false | "2021-10-04T04:21:36" | "2021-09-16T07:19:30" | "2021-09-23T08:32:04" | "2021-10-04T04:21:01" | 5 | 0 | 0 | 2 | Python | false | false | print(type(0xFF)) | UTF-8 | Python | false | false | 17 | py | 11 | ex1.py | 11 | 0.764706 | 0.705882 | 0 | 1 | 17 | 17 |
ZiamakEzzat/MyFirstRepo | 17,824,114,316,647 | a6687737a89d9f13876f2ba03d934932c8cb944a | d75b292e508dd5a5b9a4b4ec4304cb011ff20be6 | /unit-list/main.py | ddac79d20f04911e0afed56f9c65c91dd6e01bec | [] | no_license | https://github.com/ZiamakEzzat/MyFirstRepo | 6ea13dcdf0357e0d1f39fe97957aae02bbc00d29 | fee6a98f5217ae0061e783627be83b25b1d838d8 | refs/heads/master | "2021-01-01T17:11:55.967044" | "2017-07-23T20:42:35" | "2017-07-23T20:42:35" | 98,019,946 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def test():
return 5
#add([10,20,30],[1,2,3]) ==> [11,22,33]
#x1+x2
def add(x1, x2):
c=[]
if len(x1) == len(x2):
for a, b in zip(x1, x2):
c.append(a+b)
else:
return [False]
return c
#x1 - x2
def sub(x1, x2):
c=[]
if len(x1) == len(x2):
for a, b in zip(x1, x2):
c.append(a-b)
else:
return [False]
return c
#print(add([10,20,30],[1,2,3]))
####
l = ['a','b','c']
print(l[0],l[1],l[2])
#print(add(x1,x2)) | UTF-8 | Python | false | false | 442 | py | 5 | main.py | 5 | 0.497738 | 0.393665 | 0 | 34 | 12.029412 | 39 |
zenghui0-0/tempfile | 6,339,371,775,906 | d8e40124ca716adb39544cc8603c6b99c68e4c35 | fa3ce9f31523d3e86019762ef8be58923db5bc1a | /script_2021/run_3dmark_memtest_reboot.py | f85d0358a4707d8c936ca098831397a6ab325607 | [] | no_license | https://github.com/zenghui0-0/tempfile | f3eeeaa9a39a3fe7059be1c8f12fb0e9aa8c8c26 | 38295cf1c709dd854061d9c6d5f1c7d0ed132da5 | refs/heads/master | "2021-09-20T20:39:48.647211" | "2021-08-23T05:07:08" | "2021-08-23T05:07:08" | 186,854,937 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import sys
import time
import threading
import conplyent
class RUN(object):
"""
connect was needed
"""
def __init__(self, ip, port, testName="Undefined tests", timeout=60):
self._ip = ip
self._port = port
self._testName = testName
self._timeout = timeout
self._err = "SUCCESS"
self._outDir = os.path.join(os.getcwd(), "log")
self.__conn = conplyent.client.add(self._ip, self._port)
def reinit(self):
self.__conn = conplyent.client.add(self._ip, self._port)
def connect(self, timeout=60):
try:
self.reinit()
self.__conn.connect(timeout=timeout)
except Exception as e:
print(e)
return False
return True
def disconnect(self):
try:
self.__conn.disconnect()
except conplyent.ClientTimeout:
raise ConnectionError
def close(self):
self.__conn.close()
def cd(self, dest):
try:
self.__conn.cd(dest)
except conplyent.ClientTimeout:
raise ConnectionError
def reboot(self):
try:
self.__conn.reboot(complete=False)
except conplyent.ClientTimeout:
raise ConnectionError
def shutdown(self):
try:
self.__conn.shutdown(complete=False)
except conplyent.ClientTimeout:
raise ConnectionError
def sleep(self):
try:
self.__conn.sleep(complete=False)
except conplyent.ClientTimeout:
raise ConnectionError
def heartbeat(self, timeout=5):
return self.__conn.heartbeat(timeout=5)
def checkdevice(self, retrys=10):
for retry in range(0, int(retrys)):
if self.connect(timeout=5):
return True
if self.heartbeat(timeout=5):
return True
self.reinit()
return False
def winReboot(self, num_runs=1, wait_time=10):
"""
run test
"""
startTime = time.time()
nowTime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
rounds = int(num_runs) + 1
retrys = 20
logWritter = self.saveLog(self._testName)
logWritter.write("*"*30 + "\n")
logWritter.write("Start tests: {} at {} \n".format(self._testName, nowTime))
self.__conn.connect(timeout=60)
for round in range(0, int(rounds)):
logWritter.write("-"*30 + "\n")
logWritter.write("Run {} at round {} \n".format(self._testName, round))
print("Run {} at round {} \n".format(self._testName, round))
alive = self.checkdevice(retrys=20)
if not alive:
logWritter.write("Reboot run FAILED at attempt {}, Error:fail to connect with dut\n".format(round))
self._err = "FAILED"
break
try:
print("device alive?{}".format(alive))
time.sleep(int(wait_time))
self.connect(timeout=60)
self.__conn.reboot(complete=True)
#self.__conn.exec("shutdown /r /t 1")
print("Reboot run SUCCESS\n")
time.sleep(60)
logWritter.write("Reboot run SUCCESS\n")
except Exception as e:
self._err = "FAILED"
logWritter.write("Reboot run FAILED at attempt {}, Error: {}\n".format(round, e))
break
logWritter.write("*"*30 + "\n")
logWritter.write("Result of reboot tests: {}, total rounds: {}\n".format(self._err, round))
logWritter.write("Finish runing tests: {} at {} \n".format(self._testName, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
logWritter.close()
def start_test(self, cmd, runTime=0, workDir=None):
startTime = time.time()
nowTime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
runTime = float(runTime) * 60 # minitus to seconds
logWritter = self.saveLog(self._testName)
logWritter.write("*"*30 + "\n")
logWritter.write("Start tests: {} at {} \n".format(self._testName, nowTime))
alive = self.checkdevice()
if not alive:
logWritter.write("FAILED to connect with SUT.")
return False
if workDir:
self.cd(workDir)
try:
if (runTime > 0):
run = 0
while((time.time() - startTime) < runTime): # run tests till the end of runTime
run += 1
logWritter.write("test rounds {} \n".format(run))
for response in self.run_cmd(cmd):
logWritter.write(response)
print(response)
if (self._err != "SUCCESS"):
break
else: #only run 1 rounds
logWritter.write("test rounds {} \n".format(1))
for response in self.run_cmd(cmd):
logWritter.write(response)
print(response)
except Exception as e:
self._err = e
logWritter.write("Error:{} occur when trying to run tests {}".format(e, self._testName))
self.close()
logWritter.write("\n" + "*"*30 + "\n")
logWritter.write("Result of runing tests {} : {} \n".format(self._testName, self._err))
logWritter.write("Finish runing tests: {} at {} \n".format(self._testName, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
logWritter.write("*"*30)
logWritter.close()
def run_cmd(self, cmd, time_between_messages=360, max_re_attempts=5):
"""
run any cmd on remote device
"""
listener = self.__conn.exec(cmd, complete=False, max_interval=2)
#listener = self.__conn.exec(cmd)
last_message = time.time()
exit_flag = False
attempts = max_re_attempts
while(True):
try:
line = listener.next() # 2 second grace
attempts = max_re_attempts
except conplyent.ClientTimeout:
if(not(listener.done)):
try:
if(self.heartbeat()):
if((time.time() - last_message) > time_between_messages):
raise ConnectionError
continue
else:
yield "Executor died unexpectedly without informing client..."
if(exit_flag):
break # Server completed but complete message not received
else:
exit_flag = True
continue
except ConnectionError:
self._err = "ConnectionError"
yield "ConnectionError"
except RuntimeError:
if(attempts > 0):
yield "Runtime error? Re-attempting {}/{}".format(
max_re_attempts - attempts, max_re_attempts)
attempts -= 1
continue
else:
yield "Num attempts exceeded! System is really not responding!"
self._err = "RuntimeError"
else:
yield "Should never happen..."
break # This should never happen..?
if(line is None):
break # Listener exited
else:
if(type(line) is bytes or type(line) is str):
yield line[:-1]
last_message = time.time()
yield line
else:
yield "Test exited with: {}".format(str(line))
self._err = "UNKNOWN_SETUP"
break
if(listener.exit_code != 0):
yield "Application exited with: {}".format(listener.exit_code)
self._err = "APPLICATION_ERROR"
def saveLog(self, testName):
strTime = time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime())
logFile = "_".join(testName.split()) + "_" + strTime + ".txt"
if not os.path.isdir(self._outDir):
os.mkdir(self._outDir)
log = os.path.join(self._outDir, logFile)
fw = open(log, 'w+')
return fw
if __name__ == "__main__" :
#define parameters
if (len(sys.argv) > 1):
DUT_IP = sys.argv[1]
else:
DUT_IP = "10.67.135.19"
ports = [9922, 9933]
workDir = "C:\\Program Files\\Futuremark\\3DMark 11\\bin\\x64"
hostLog = os.path.join(os.getcwd(), "Temp")
"""
#1, reboot test part
rebooter = RUN(DUT_IP, 9922, "reboot test")
rebooter.winReboot(num_runs=500)
#2, memory test part
memTest = RUN(DUT_IP, 9933, "memory test")
test2 = threading.Thread(target=memTest.start_test, args=("winautotester memtestpro -p 5 -c 400 -t 600", ))
test2.start()
#3, 3d mark test part
threeDMarkTest = RUN(DUT_IP, 9922, "3d mark tests")
test3 = threading.Thread(target=threeDMarkTest.start_test, args=("3DMark11Cmd.exe --definition=extreme_definition_window.xml", 240, "C:\\Program Files\\Futuremark\\3DMark 11\\bin\\x64"))
test3.start()
test2.join()
test3.join()
print("all tests finished")
"""
test1 = RUN(DUT_IP, 9922)
test2 = threading.Thread(target=test1.start_test, args=("ipconfig", 0.1,))
test2.start()
test2.join()
| UTF-8 | Python | false | false | 9,678 | py | 4,998 | run_3dmark_memtest_reboot.py | 3,292 | 0.513536 | 0.499483 | 0.000207 | 263 | 35.798479 | 190 |
amchii/Python-Exercises | 7,670,811,613,240 | a0f6e5da3d0cfc1afb4921b3a41c8cbb82dd9529 | a09d7a38481c733ba91627aeb269cb7088aee91c | /25语音识别/test.py | 24b27892d6be8e8e69776ec0039fd402377d414f | [] | no_license | https://github.com/amchii/Python-Exercises | c981b9c62870d712c0ff273e38d738d55bd69201 | 6aa3039870c492feaa080e1009c14ba9f9c9691e | refs/heads/master | "2020-04-10T09:04:04.282262" | "2019-01-10T07:47:50" | "2019-01-10T07:47:50" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Date : 2018-12-02 12:09:49
import wave,requests
from pyaudio import PyAudio, paInt16
import time
framerate = 16000 # 采样率
NUM_SAMPLES = 2000 # 采样点
channels = 1 # 声道
sampwidth = 2 # 采样宽度2bytes
TIME = 4 #秒,2000x4=8000
FILENAME='speech.wav'
def save_wave_file(filename, data):
wf = wave.open(filename, 'wb')
wf.setnchannels(channels)
wf.setsampwidth(sampwidth)
wf.setframerate(framerate)
wf.writeframes(b''.join(data))
wf.close()
def my_record():
pa = PyAudio()
stream = pa.open(format=paInt16, channels=channels,
rate=framerate, input=True, frames_per_buffer=NUM_SAMPLES)
my_buf = []
count = 0
t=time.time()
print('正在录音...')
# while count < TIME * 14:
while time.time()<t+10: #10秒
string_audio_data = stream.read(NUM_SAMPLES)
my_buf.append(string_audio_data)
count += 1
save_wave_file(FILENAME, my_buf)
stream.close()
def play(file):
wf = wave.open(file, 'rb')
pa = PyAudio()
stream = pa.open(format=pa.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(), rate=wf.getframerate(), output=True)
while True:
data=wf.readframes(1024)
if data=='':
break
stream.write(data)
stream.close()
pa.terminate()
if __name__=='__main__':
my_record()
play(FILENAME) | UTF-8 | Python | false | false | 1,463 | py | 43 | test.py | 35 | 0.606316 | 0.569123 | 0 | 59 | 23.169492 | 84 |
lermon2000/PythonBasics_HSEU | 14,860,586,891,562 | d3a5b00ef23177ecc50e227de7c257855fd5a64c | a00b99928192dfd628b096d8b0c91b99c951799e | /week_1/num_div_mod.py | b46cd0f632e540b7a85d53acdab0fb93e63136cb | [] | no_license | https://github.com/lermon2000/PythonBasics_HSEU | b2084fdfe6ea222de66512a871cdf4c3a9bda9a4 | c385a2c7bbb0470fcfd368438780b233844b9f51 | refs/heads/master | "2022-02-17T13:48:51.489585" | "2019-05-08T06:25:24" | "2019-05-08T06:25:24" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # как устроено деление и взятие остатка от деления
A, B = [int(input()) for i in range(2)]
C, D = A // B, A % B
equal_a = C * B + D
if equal_a == A:
print(f"(({A} // {B}) * {B}) + {D} = {A}")
if B > 0:
print('Positive:')
print(f"D={D} & D >= 0, D < B")
elif B < 0:
print('Negative:')
print(f"D={D} & D <= 0, D > B")
"""
# Чему равен остаток от деления - 7 на 3?
Целая часть от деления равна - 3. - 3 * 3 = -9,
значит нужно прибавить 2, чтобы получился результат - 7.
Остаток при делении на положительное число
может быть только неотрицательным
"""
| UTF-8 | Python | false | false | 776 | py | 25 | num_div_mod.py | 20 | 0.569686 | 0.547038 | 0 | 24 | 22.916667 | 56 |
fesenkoaa/barmanager-bot-sqlalchemy | 16,252,156,270,875 | fc5bd5caeb61b38aea97daccdc20f01e625aa9c9 | c67613f4ac98d3f43b14db0c898554a304ec96db | /bot.py | 217748d25f29cefa13be4c159f376377e4c39326 | [] | no_license | https://github.com/fesenkoaa/barmanager-bot-sqlalchemy | ce2f68cc4dd78e4c80cb37249805da557780f1f6 | 6dc3ab8a85e1b1b51afac717a3b0023d8f6913db | refs/heads/master | "2023-09-03T09:19:41.938559" | "2021-11-03T13:13:25" | "2021-11-03T13:13:25" | 424,220,845 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from aiogram import Bot, types
from aiogram.dispatcher import Dispatcher
from aiogram.utils import executor
import db_handler as handler
from config import TOKEN
bot = Bot(token=TOKEN)
dp = Dispatcher(bot)
"""Information command"""
@dp.message_handler(commands=['start']) # works
async def send_welcome(message: types.Message):
await bot.send_message(message.from_user.id, "Bot is ready to work!\n"
"Have a lucky shift, {0.first_name}!\n"
"Please, read it /help".format(message.from_user))
@dp.message_handler(commands=['help']) # works
async def send_welcome(message: types.Message):
await message.answer(
"Commands for waiters and admins:\n"
"Add order to (table) (amount) (drink)\n"
"Delete table(table)\n"
"Delete from (table) (drink)\n"
"Bill (table)\n\n"
"Commands only for admin:\n"
"Add to store (amount) (name)\n"
"Store minus (amount) (name)\n"
"Store plus (amount) (name)\n"
"Delete from store (name)\n\n\n"
"Cheat sheet, or examples:\n"
"table: 001, 012, 777\n"
"drinks: Daiquiri, Gin Tonic\n"
"name: house_gin, water, lime\n"
"amount: 1.2, 1.0, 001, 012")
"""Command to add order"""
@dp.message_handler(lambda message: message.text.startswith('Add order to')) #
async def add_order(message: types.Message):
row_table = message.text[13:16]
row_amount = message.text[17:20]
row_drink = message.text[21:]
handler.add_order(int(row_table), row_drink, int(row_amount))
await message.answer(f"You've ordered {row_amount} {row_drink} to table #{row_table}")
"""Commands to edit store"""
@dp.message_handler(lambda message: message.text.startswith('Delete from store')) # works
async def del_table(message: types.Message):
row_name = message.text[18:]
handler.delete_from_store(row_name)
await message.answer(f"You have already deleted {row_name} from store.")
@dp.message_handler(lambda message: message.text.startswith('Add to store')) # works
async def del_table(message: types.Message):
row_amount = float(message.text[13:16])
row_name = message.text[17:]
handler.add_store(row_name, row_amount)
await message.answer(f"You have already added {row_amount} {row_name} to store.")
@dp.message_handler(lambda message: message.text.startswith('Store plus')) # works
async def del_table(message: types.Message):
row_amount = float(message.text[11:14])
row_name = message.text[15:]
handler.store_subjoin(row_name, row_amount)
await message.answer(f"{row_amount} {row_name} was subjoined to store.")
@dp.message_handler(lambda message: message.text.startswith('Store minus')) # works
async def del_table(message: types.Message):
row_amount = float(message.text[12:15])
row_name = message.text[16:]
handler.store_subtract(row_name, row_amount)
await message.answer(f"{row_amount} {row_name} was subtracted from store.")
"""Commands for edit guest's table"""
@dp.message_handler(lambda message: message.text.startswith('Delete from')) # works
async def del_table(message: types.Message):
row_id = int(message.text[12:15])
row_drink = message.text[16:]
handler.del_from_gtable(row_id, row_drink)
await message.answer(f"From table #{row_id} was deleted all {row_drink}.")
@dp.message_handler(lambda message: message.text.startswith('Delete table')) # works
async def del_table(message: types.Message):
row_id = int(message.text[12:16])
handler.del_gtable(row_id)
await message.answer(f"Table #{row_id} was deleted.")
"""Command to get a bill"""
@dp.message_handler(lambda message: message.text.startswith('Bill')) # works, but doesn't show the bill for user
async def del_table(message: types.Message):
row_id = int(message.text[5:8])
handler.get_bill(row_id)
await message.answer(f"The bill for table #{row_id} is printed.")
if __name__ == '__main__':
executor.start_polling(dp)
| UTF-8 | Python | false | false | 4,088 | py | 5 | bot.py | 3 | 0.661937 | 0.646771 | 0 | 118 | 33.644068 | 113 |
xxglwt/untitled9 | 15,994,458,253,668 | 0adb4156bdae9aa7cd6133de6408337b00aedc53 | 7cf75e1473d3c64b93110eb27c32c442027d43bb | /TestModel/migrations/0006_musician.py | b291e1ff97e3f978c21a774c80716bc41da0dacf | [] | no_license | https://github.com/xxglwt/untitled9 | e78c4f747d7e78cc799c4264c19547022aa05a6e | f5ed19a646e6a45dc7f70a2807f513a08186465b | refs/heads/master | "2022-12-10T19:45:49.131721" | "2020-06-23T12:18:55" | "2020-06-23T12:18:55" | 239,277,406 | 0 | 0 | null | false | "2022-12-08T06:21:20" | "2020-02-09T09:25:40" | "2020-06-23T12:21:14" | "2022-12-08T06:21:19" | 848 | 0 | 0 | 8 | Python | false | false | # Generated by Django 2.2.7 on 2019-12-27 12:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('TestModel', '0005_contact_tag'),
]
operations = [
migrations.CreateModel(
name='Musician',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fristName', models.CharField(max_length=50)),
('lastName', models.CharField(max_length=50)),
('instrument', models.IntegerField()),
],
),
]
| UTF-8 | Python | false | false | 633 | py | 24 | 0006_musician.py | 17 | 0.560821 | 0.524487 | 0 | 22 | 27.772727 | 114 |
SarkerAllen/python-challenge | 3,856,880,678,464 | 9a5c13648a73c38a3cb4b4e91d9199ca708b5e2f | efd62dfae427bff61fa4b6f9180bbc4ee10fd695 | /level_3.py | 6961033b73f2bca185df52353d6bb5febf590bc4 | [] | no_license | https://github.com/SarkerAllen/python-challenge | 1e6c1e3c146e88c312d7e0f1e0580f3409c56899 | 0800800995ac2b507da799f79a9ef63904af0f40 | refs/heads/master | "2020-05-18T01:07:03.149367" | "2020-01-04T03:05:14" | "2020-01-04T03:05:14" | 184,081,262 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
import re
content = requests.get("http://www.pythonchallenge.com/pc/def/equality.html").text
answer = re.sub('\n|\t|\r', '', content)
answer = re.findall("<!--(.*?)-->", answer)
key = re.findall("[a-z]+[A-Z]{3}([a-z])[A-Z]{3}[a-z]+", str(answer))
print(''.join(key))
| UTF-8 | Python | false | false | 284 | py | 5 | level_3.py | 4 | 0.609155 | 0.602113 | 0 | 8 | 34.5 | 82 |
fake-name/ReadableWebProxy | 11,819,750,041,864 | 7796cb65a7a6f93dd1413d4b015baaf3b5c5bc5d | 6189f34eff2831e3e727cd7c5e43bc5b591adffc | /WebMirror/management/rss_parser_funcs/feed_parse_extractAmateurtranslationsCom.py | 02117e66f0f8a1a83cc087a9330ed371955ccefe | [
"BSD-3-Clause"
] | permissive | https://github.com/fake-name/ReadableWebProxy | 24603660b204a9e7965cfdd4a942ff62d7711e27 | ca2e086818433abc08c014dd06bfd22d4985ea2a | refs/heads/master | "2023-09-04T03:54:50.043051" | "2023-08-26T16:08:46" | "2023-08-26T16:08:46" | 39,611,770 | 207 | 20 | BSD-3-Clause | false | "2023-09-11T15:48:15" | "2015-07-24T04:30:43" | "2023-08-07T04:26:14" | "2023-09-11T15:48:14" | 20,299 | 194 | 16 | 7 | Python | false | false | def extractAmateurtranslationsCom(item):
'''
Parser for 'amateurtranslations.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Phoenix Against The World', 'Across the Stunning Beast Princess: Phoenix Against the World', 'translated'),
('Hidden Marriage', 'Hidden Marriage', 'translated'),
('Sonata: FAAAM', 'Sonata: Fleeing To Avoid An Arranged Marriage', 'translated'),
('Princess Husband', 'Princess Husband, Too Mensao!', 'translated'),
('My Chief Husband', 'My Chief Husband, Too Mensao!', 'translated'),
('Level-Up Dr', 'Level Up Doctor Choi Kiseok', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | UTF-8 | Python | false | false | 1,165 | py | 1,155 | feed_parse_extractAmateurtranslationsCom.py | 1,072 | 0.546781 | 0.546781 | 0 | 24 | 47.583333 | 119 |
spykspeigel/Optimal_control_examples | 3,513,283,281,385 | da857abd8278a84f1576f8472e083d5c9ef23dc9 | f5aa2328f2fa7e5d22ee12174497c176badef0fb | /nh_lyapunov/nh_lyapunov_example.py | 66b3d3e6196cfd19295953dacdd02cfc461d81c4 | [] | no_license | https://github.com/spykspeigel/Optimal_control_examples | b35e1e65b85210bf380ae744d68a0681b74990f1 | 1845a8400aa297af2f5572828a4c92669f89f1a9 | refs/heads/master | "2023-06-27T12:51:06.108377" | "2022-02-17T14:04:43" | "2022-02-17T14:04:43" | 299,210,082 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Here I formulate the control problem as a closed loop steering.
# It closely follows the paper
import numpy as np
import math
from scipy.integrate import solve_ivp
import matplotlib.pylab as plt
def main():
t_eval=np.arange(0,8,0.025)
x_i=-1
y_i=1
theta_i=3*np.pi/4
goal=np.array([0,0])
heading_v=np.array([np.cos(theta_i),np.sin(theta_i)])
x_rel = np.array([x_i,y_i])-goal
x_rel=x_rel/np.linalg.norm(x_rel)
e=np.sqrt((x_i-goal[0])**2 + (y_i-goal[1])**2)
alpha=np.arccos(x_rel.dot(heading_v)/np.linalg.norm(heading_v))
theta=np.arctan2(x_rel[1],x_rel[0])
x0=np.array([e,alpha,theta])
print(x0)
sol=solve_ivp(nh_ode,[0,8],x0,method='RK45',t_eval=t_eval)
t_span = sol.t
X = sol.y
X=convert_into_cartesian(X)
return X
# nh_draw(t_span, X, x_min, x_max, p);
def nh_ode(t,x):
e=x[0]
alpha=x[1]
theta=x[2]
gamma=3
h=1
k=6
e_dot=-e*(gamma*np.cos(alpha)**2)
alpha_dot=(-k*alpha -gamma*h*np.cos(alpha)*np.sin(alpha)/alpha)
theta_dot=gamma*np.cos(alpha)*np.sin(alpha)
return [e_dot,alpha_dot,theta_dot]
#Convert the polar coordinates into cartesian coordinates
#and call draw_triangle
def convert_into_cartesian(x):
es=x[0,:]
alphas=x[1,:]
thetas=x[2,:]
xs=es*np.cos(thetas)
ys=es*np.sin(thetas)
phis=thetas-alphas
return np.array([xs,ys,phis])
"""
def draw_triangle():
T = np.array([
[np.cos(z3), -np.sin(z3), z1],
[np.sin(z3), np.cos(z3), z2]
])
# compute cartesian coordinates of the corners of the car
left_back = T.dot([-length/2, width/2, 1])
right_back = T.dot([-length/2, -width/2, 1])
front = T.dot([length/2, 0, 1])
# draw a triangle that symbolizes the robot
robot = patches.Polygon(
np.vstack([left_back, right_back, front]),
facecolor='none',
**kwargs
)
plt.gca().add_patch(robot)
"""
if __name__== '__main__':
x=main() | UTF-8 | Python | false | false | 1,840 | py | 10 | nh_lyapunov_example.py | 2 | 0.638043 | 0.610326 | 0 | 94 | 18.585106 | 64 |
siska199/Belajar-Machine-Learning-untuk-Pemula-by-Dicoding | 7,017,976,571,185 | 9fa6f23cf80a1ab2c9709969fd7066e4325c2bb9 | 85902fc698b4d3cc6401f5278dd6fda7d773e275 | /5-6 Neural Network dan TensorFlow/Latihan Machine Learning.py | 7b9c33255d040ffc9e40a87066b1401c21745a00 | [] | no_license | https://github.com/siska199/Belajar-Machine-Learning-untuk-Pemula-by-Dicoding | 01f4d785fecb15573c03728204412831b8630ea1 | 0b1c0e64e95b6cb5d40a3417686c88732e194ce1 | refs/heads/main | "2023-06-04T13:49:53.529280" | "2021-07-02T23:49:28" | "2021-07-02T23:49:28" | 382,486,754 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun May 9 19:47:54 2021
@author: KyuuChan199
"""
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
import numpy as np
print(tf.__version__)
(X_train, y_train), (X_test, y_test) = datasets.cifar10.load_data()
print(X_train.shape)
print(X_test.shape)
#Melihat data dalam bentuk matriks berupa matriks dengan ukuran 32 x 32:
print(X_train[0])
#melihat data dalam bentuk gambar:
plt.imshow(X_train[0])
#resize ukuran data
plt.figure(figsize = (15,2))
plt.imshow(X_train[0])
| UTF-8 | Python | false | false | 602 | py | 2 | Latihan Machine Learning.py | 1 | 0.69103 | 0.646179 | 0 | 25 | 22 | 72 |
muziyongshixin/flower_classification | 2,765,958,951,798 | 7db7bc8947203565442277549b9f4cab65ed19d8 | 7b75a9ddd8e7b73980e5e1db7666712064dc0ee6 | /models/SeaReader.py | 8c25201a5887a6c140a424301651d5398e627e73 | [] | no_license | https://github.com/muziyongshixin/flower_classification | 1379641f15ad116b8f890b2c174048734d6f2665 | 22b758b83fb64fa4486abafbf3aeb2dfabeab1b1 | refs/heads/master | "2020-03-30T00:25:45.024786" | "2019-04-24T14:02:53" | "2019-04-24T14:02:53" | 150,522,994 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'liyz'
import torch
from models.layers import *
from utils.functions import answer_search, multi_scale_ptr
from IPython import embed
from utils.functions import masked_softmax, compute_mask, masked_flip
class SeaReader(torch.nn.Module):
"""
match-lstm+ model for machine comprehension
Args:
- global_config: model_config with types dictionary
Inputs:
context: (batch, seq_len)
question: (batch, seq_len)
context_char: (batch, seq_len, word_len)
question_char: (batch, seq_len, word_len)
Outputs:
ans_range_prop: (batch, 2, context_len)
ans_range: (batch, 2)
vis_alpha: to show on visdom
"""
def __init__(self, dataset_h5_path, device):
super(SeaReader, self).__init__()
self.device = device
# set config
hidden_size = 128
hidden_mode = 'LSTM'
dropout_p = 0.2
# emb_dropout_p = 0.1
enable_layer_norm = False
word_embedding_size = 200
encoder_word_layers = 1
# char_embedding_size = 64
# encoder_char_layers = 1
encoder_bidirection = True
encoder_direction_num = 2 if encoder_bidirection else 1
match_lstm_bidirection = True
match_rnn_direction_num = 2 if match_lstm_bidirection else 1
ptr_bidirection = False
self.enable_search = True
# construct model
self.embedding = Word2VecEmbedding(dataset_h5_path=dataset_h5_path, trainable=True)
# self.char_embedding = CharEmbedding(dataset_h5_path=dataset_h5_path,
# embedding_size=char_embedding_size,
# trainable=True)
# self.char_encoder = CharEncoder(mode=hidden_mode,
# input_size=char_embedding_size,
# hidden_size=hidden_size,
# num_layers=encoder_char_layers,
# bidirectional=encoder_bidirection,
# dropout_p=emb_dropout_p)
self.context_layer = MyRNNBase(mode=hidden_mode,
input_size=word_embedding_size,
hidden_size=hidden_size,
num_layers=encoder_word_layers,
bidirectional=encoder_bidirection,
dropout_p=dropout_p)
self.reasoning_gating_layer = Conv_gate_layer(256)
self.decision_gating_layer = Conv_gate_layer(256)
self.content_reasoning_layer = MyRNNBase(mode=hidden_mode,
input_size=hidden_size * 4+2,
hidden_size=hidden_size,
num_layers=1,
bidirectional=True,
dropout_p=0.2)
self.question_reasoning_layer = MyRNNBase(mode=hidden_mode,
input_size=hidden_size * 2+2,
hidden_size=hidden_size,
num_layers=1,
bidirectional=True,
dropout_p=0.2)
self.decision_layer = torch.nn.Sequential(
torch.nn.Linear(in_features=6512, out_features=1000, bias=True),
torch.nn.Dropout(0.2), # drop 50% of the neuron
torch.nn.ReLU(True),
torch.nn.BatchNorm1d(1000),
torch.nn.Linear(in_features=1000, out_features=500, bias=True),
torch.nn.Dropout(0.2), # drop 50% of the neuron
torch.nn.ReLU(True),
torch.nn.BatchNorm1d(500),
torch.nn.Linear(in_features=500, out_features=2, bias=True)
)
    def forward(self, contents, question_ans, logics, contents_char=None, question_ans_char=None):
        """Score a (question+answer, multi-document) pair.

        Args (shapes derived from the indexing below and the original size
        annotations — confirm against the data loader):
            contents: (batch, num_docs, doc_len) token ids for the documents.
            question_ans: (batch, q_len) token ids for the question+answer.
            logics: per-sample sign (+1/-1); the output logits are multiplied
                by it so negated statements flip the decision.
            contents_char / question_ans_char: unused (kept for API parity
                with the char-embedding variant).

        Returns:
            (batch, 2) logits multiplied element-wise by `logics`.
        """
        batch_size = question_ans.size()[0]
        max_content_len = contents.size()[2]
        max_question_len = question_ans.size()[1]
        contents_num = contents.size()[1]
        # --- Word-level embedding: (seq_len, batch, embedding_size) ---
        content_vec = []
        content_mask = []
        question_vec, question_mask = self.embedding.forward(question_ans)
        for i in range(contents_num):
            cur_content = contents[:, i, :]
            cur_content_vec, cur_content_mask = self.embedding.forward(cur_content)
            content_vec.append(cur_content_vec)
            content_mask.append(cur_content_mask)
        # --- Contextual encoding with the shared RNN ---
        question_encode, _ = self.context_layer.forward(question_vec,question_mask)  # (q_len, batch, 256)
        content_encode = []  # word-level encode: (seq_len, batch, hidden_size)
        for i in range(contents_num):
            cur_content_vec = content_vec[i]
            cur_content_mask = content_mask[i]
            cur_content_encode, _ = self.context_layer.forward(cur_content_vec,cur_content_mask)  # (doc_len, batch, 256)
            content_encode.append(cur_content_encode)
        # Zero-pad every document encoding to max_content_len and the question
        # encoding to max_question_len so later matrices have fixed shapes.
        same_sized_content_encode = []
        for i in range(contents_num):
            cur_content_encode = content_encode[i]
            cur_content_encode = self.full_matrix_to_specify_size(cur_content_encode, [max_content_len, batch_size,cur_content_encode.size()[2]])  # (200,16,256)
            same_sized_content_encode.append(cur_content_encode)
        same_sized_question_encode = self.full_matrix_to_specify_size(question_encode, [max_question_len, batch_size,question_encode.size()[2]])  # (100,16,256)
        # --- Gating values (one scalar gate per position) ---
        reasoning_content_gating_val = []
        reasoning_question_gating_val = None
        decision_content_gating_val = []
        decision_question_gating_val = None
        for i in range(contents_num):
            cur_content_encode = same_sized_content_encode[i]  # (200,16,256)
            cur_gating_input = cur_content_encode.transpose(0, 1).transpose(1, 2)  # (16,256,200)
            cur_reasoning_content_gating_val = self.reasoning_gating_layer(cur_gating_input)  # (16,1,200)
            # +1e-5 keeps gates strictly positive so downstream pad/pack of
            # gated sequences never sees an all-zero row.
            cur_reasoning_content_gating_val =cur_reasoning_content_gating_val+0.00001
            cur_decision_content_gating_val = self.decision_gating_layer(cur_gating_input)  # (16,1,200)
            cur_decision_content_gating_val =cur_decision_content_gating_val+0.00001
            reasoning_content_gating_val.append(cur_reasoning_content_gating_val)
            decision_content_gating_val.append(cur_decision_content_gating_val)
        question_gating_input = same_sized_question_encode.transpose(0, 1).transpose(1, 2)  # (16,256,100)
        reasoning_question_gating_val = self.reasoning_gating_layer(question_gating_input)  # (16,1,100)
        reasoning_question_gating_val=reasoning_question_gating_val+0.00001  # keep gate > 0
        decision_question_gating_val = self.decision_gating_layer(question_gating_input)  # (16,1,100)
        decision_question_gating_val=decision_question_gating_val+0.00001  # keep gate > 0
        # --- Question/document matching matrices ---
        Matching_matrix = []
        for i in range(contents_num):
            cur_content_encode = same_sized_content_encode[i]
            cur_Matching_matrix = self.compute_matching_matrix(same_sized_question_encode,
                                                               cur_content_encode)  # (batch, q_len, doc_len)
            Matching_matrix.append(cur_Matching_matrix)
        # an: row-normalized (softmax over documents' positions),
        # bn: column-normalized (softmax over question positions).
        an_matrix = []
        bn_matrix = []
        for i in range(contents_num):
            cur_Matching_matrix = Matching_matrix[i]
            cur_an_matrix = torch.nn.functional.softmax(cur_Matching_matrix,
                                                        dim=2)
            cur_bn_matrix = torch.nn.functional.softmax(cur_Matching_matrix,
                                                        dim=1)
            an_matrix.append(cur_an_matrix)
            bn_matrix.append(cur_bn_matrix)
        # RnQ: question positions attending over the document.
        # RnD: document positions attending over the question.
        RnQ = []  # list[(16,100,256)]
        RnD = []
        for i in range(contents_num):
            cur_an_matrix = an_matrix[i]
            cur_content_encode = same_sized_content_encode[i]
            cur_bn_matrix = bn_matrix[i]
            cur_RnQ = self.compute_RnQ(cur_an_matrix,
                                       cur_content_encode)  # (batch, q_len, 256)
            cur_RnD = self.compute_RnD(cur_bn_matrix,
                                       same_sized_question_encode)  # (batch, doc_len, 256)
            RnQ.append(cur_RnQ)
            RnD.append(cur_RnD)
        # --- Cross-document attention over [D; RnD] ---
        D_RnD = []  # concatenation of each document encoding with its RnD
        for i in range(contents_num):
            cur_content_encode = same_sized_content_encode[i].transpose(0, 1)  # (16,200,256)
            cur_RnD = RnD[i]  # (16,200,256)
            cur_D_RnD = torch.cat([cur_content_encode, cur_RnD], dim=2)  # (16,200,512)
            D_RnD.append(cur_D_RnD)
        RmD = []  # list[(16,200,512)]
        for i in range(contents_num):
            D_RnD_m = D_RnD[i]  # (16,200,512)
            Mmn_i = []
            RmD_i = []
            # Attention of document m over every document n (including itself).
            for j in range(contents_num):
                D_RnD_n = D_RnD[j]  # (16,200,512)
                Mmn_i_j = self.compute_cross_document_attention(D_RnD_m,
                                                                D_RnD_n)  # (16,200,200)
                Mmn_i.append(Mmn_i_j)
            Mmn_i = torch.stack(Mmn_i).permute(1, 2, 3, 0)  # (16,200,200,num_docs)
            # Softmax jointly over (positions x documents) for each row of m.
            softmax_Mmn_i = self.reduce_softmax(Mmn_i)  # (16,200,200,num_docs)
            for j in range(contents_num):
                D_RnD_n = D_RnD[j]  # (16,200,512)
                beta_mn_i_j = softmax_Mmn_i[:, :, :, j]
                cur_RmD = torch.bmm(beta_mn_i_j, D_RnD_n)  # (16,200,512)
                RmD_i.append(cur_RmD)
            RmD_i = torch.stack(RmD_i)  # (num_docs,16,200,512)
            RmD_i = RmD_i.transpose(0, 1)  # (16,num_docs,200,512)
            RmD_i = torch.sum(RmD_i, dim=1)  # (16,200,512)
            RmD.append(RmD_i)
        # --- Max/mean pooled matching features per row/column ---
        matching_feature_row = []  # list[(16,200,2)]
        matching_feature_col = []  # list[(16,100,2)]
        for i in range(contents_num):
            cur_Matching_matrix = Matching_matrix[i]  # (16,100,200)
            cur_max_pooling_feature_row, _ = torch.max(cur_Matching_matrix, dim=1)  # (16,200)
            cur_mean_pooling_feature_row = torch.mean(cur_Matching_matrix, dim=1)  # (16,200)
            cur_matching_feature_row = torch.stack(
                [cur_max_pooling_feature_row, cur_mean_pooling_feature_row]).transpose(0, 1).transpose(1,2)  # (16,200,2)
            matching_feature_row.append(cur_matching_feature_row)
            cur_max_pooling_feature_col, _ = torch.max(cur_Matching_matrix, dim=2)  # (16,100)
            cur_mean_pooling_feature_col = torch.mean(cur_Matching_matrix, dim=2)  # (16,100)
            cur_matching_feature_col = torch.stack(
                [cur_max_pooling_feature_col, cur_mean_pooling_feature_col]).transpose(0, 1).transpose(1,2)  # (16,100,2)
            matching_feature_col.append(cur_matching_feature_col)
        # --- Reasoning layer: gate, run RNNs, re-pad, gate again ---
        reasoning_feature = []
        for i in range(contents_num):
            cur_RnQ = RnQ[i]  # (16,100,256)
            cur_RmD = RmD[i]  # (16,200,512)
            cur_matching_feature_col = matching_feature_col[i]  # (16,100,2)
            cur_matching_feature_row = matching_feature_row[i]  # (16,200,2)
            cur_RnQ = torch.cat([cur_RnQ, cur_matching_feature_col], dim=2)  # (16,100,258)
            cur_RmD = torch.cat([cur_RmD, cur_matching_feature_row], dim=2)  # (16,200,514)
            cur_RnQ_mask = compute_mask(cur_RnQ.mean(dim=2), PreprocessData.padding_idx)
            cur_RmD_mask = compute_mask(cur_RmD.mean(dim=2), PreprocessData.padding_idx)
            gated_cur_RnQ=self.compute_gated_value(cur_RnQ,reasoning_question_gating_val)  # (16,100,258)
            gated_cur_RmD=self.compute_gated_value(cur_RmD,reasoning_content_gating_val[i])  # (16,200,514)
            cur_RnQ_reasoning_out, _ = self.question_reasoning_layer.forward(gated_cur_RnQ.transpose(0,1),cur_RnQ_mask)
            cur_RmD_reasoning_out, _ = self.content_reasoning_layer.forward(gated_cur_RmD.transpose(0,1),cur_RmD_mask)
            # Re-pad RNN outputs back to the fixed lengths.
            cur_RnQ_reasoning_out = self.full_matrix_to_specify_size(cur_RnQ_reasoning_out,
                                                                     [max_question_len, batch_size,
                                                                      cur_RnQ_reasoning_out.size()[
                                                                          2]])  # (100,16,256)
            cur_RmD_reasoning_out = self.full_matrix_to_specify_size(cur_RmD_reasoning_out,
                                                                     [max_content_len, batch_size,
                                                                      cur_RmD_reasoning_out.size()[
                                                                          2]])  # (200,16,256)
            # Apply the decision-layer gates.
            cur_RnQ_reasoning_out=cur_RnQ_reasoning_out.transpose(0,1)  # (16,100,256)
            cur_RmD_reasoning_out=cur_RmD_reasoning_out.transpose(0,1)  # (16,200,256)
            gated_RnQ_out=self.compute_gated_value(cur_RnQ_reasoning_out,decision_question_gating_val)
            gated_RmD_out=self.compute_gated_value(cur_RmD_reasoning_out,decision_content_gating_val[i])
            # Concatenate question and document features along the length dim.
            cur_reasoning_feature = torch.cat([gated_RnQ_out, gated_RmD_out], dim=1)
            reasoning_feature.append(cur_reasoning_feature)
        # Stack the per-document features along the length dimension.
        reasoning_feature = torch.cat(reasoning_feature, dim=1)
        # --- Global max/mean pooling over both axes, then the decision MLP ---
        maxpooling_reasoning_feature_column, _ = torch.max(reasoning_feature, dim=1)  # (16,256)
        meanpooling_reasoning_feature_column = torch.mean(reasoning_feature, dim=1)  # (16,256)
        maxpooling_reasoning_feature_row, _ = torch.max(reasoning_feature, dim=2)
        meanpooling_reasoning_feature_row = torch.mean(reasoning_feature, dim=2)
        pooling_reasoning_feature = torch.cat(
            [maxpooling_reasoning_feature_row, meanpooling_reasoning_feature_row, maxpooling_reasoning_feature_column,
             meanpooling_reasoning_feature_column], dim=1).view(batch_size, -1)  # (16,6512)
        output = self.decision_layer.forward(pooling_reasoning_feature)  # (16,2)
        # Reverse the logits for negated statements (logics is +1 or -1).
        logics=logics.resize_(logics.size()[0],1)
        return output*logics
def load_glove_hdf5(self):
with h5py.File(self.dataset_h5_path, 'r') as f:
f_meta_data = f['meta_data']
id2vec = np.array(f_meta_data['id2vec']) # only need 1.11s
word_dict_size = f.attrs['word_dict_size']
embedding_size = f.attrs['embedding_size']
return int(word_dict_size), int(embedding_size), torch.from_numpy(id2vec)
def compute_matching_matrix(self, question_encode, content_encode):
question_encode_trans = question_encode.transpose(0, 1) # (batch, seq_len, embedding_size)
content_encode_trans = content_encode.permute(1, 2, 0) # (batch, embedding_size, seq_len)
Matching_matrix = torch.bmm(question_encode_trans, content_encode_trans) # (batch, question_len , content_len)
return Matching_matrix
def compute_cross_document_attention(self, content_m, content_n):
content_n_trans = content_n.transpose(1, 2) # (batch, 512, 200)
Matching_matrix = torch.bmm(content_m, content_n_trans) # (batch, question_len , content_len)
return Matching_matrix
def compute_RnQ(self, an_matrix, content_encode):
content_encode_trans = content_encode.transpose(0, 1) ## (batch, content_len, embedding_size)
RnQ = torch.bmm(an_matrix, content_encode_trans)
return RnQ
def compute_RnD(self, bn_matrix, question_encode):
bn_matrix_trans = bn_matrix.transpose(1, 2) # size=(batch,content_len,question_len)
question_encode_trans = question_encode.transpose(0, 1) # (batch, question_len, embedding_size)
RnD = torch.bmm(bn_matrix_trans, question_encode_trans)
return RnD
# 将矩阵填充到指定的大小,填充的部分为0
def full_matrix_to_specify_size(self, input_matrix, output_size):
new_matrix = torch.zeros(output_size).to(self.device)
new_matrix[0:input_matrix.size()[0], 0:input_matrix.size()[1], 0:input_matrix.size()[2]] = input_matrix[:, :, :]
return new_matrix
def reduce_softmax(self,input_matrix):
size=input_matrix.size()
viewd_input=input_matrix.contiguous().view(size[0],size[1],size[2]*size[3])
softmax_input=torch.nn.functional.softmax(viewd_input,dim=2)
resized_softmax=softmax_input.contiguous().view(size[0],size[1],size[2],size[3])
return resized_softmax
# 通过gate值来过滤
def compute_gated_value(self, input_matrix, gate_val):
'''
gate_val size=(16,1,100)
input_matrix size=(16,100,258)
return gated_matrix size=(16,100,258)
'''
# batch_size = gate_val.size()[0]
# words_num = gate_val.size()[-1]
# eye_matrix = torch.zeros(batch_size, words_num, words_num).to(self.device) # eg:size=(16,100,100)
# eye_matrix[:, range(words_num), range(words_num)] = gate_val[:, 0, range(words_num)]
# gated_matrix = torch.bmm(eye_matrix, input_matrix)
gated_matrix=input_matrix*gate_val.transpose(1,2)
return gated_matrix
| UTF-8 | Python | false | false | 23,431 | py | 26 | SeaReader.py | 23 | 0.573696 | 0.535132 | 0 | 426 | 52.687793 | 166 |
2k-joker/Algorithms-and-Data-Structures | 15,479,062,161,505 | 4d75ebefddc58f70d7034743b77f97ac9d6c045a | 3b712aef61e3dc1682a641d815bacbb9f79e5727 | /Arrays.py | b25d40fba08d00da8e5e97c777846cbe4603bf92 | [] | no_license | https://github.com/2k-joker/Algorithms-and-Data-Structures | 06848d7ebc1f0fb9a576c4835b1b6e31d56c7fc6 | 204f560357ae5abb6b51dc290b8e137d533480b7 | refs/heads/master | "2022-06-18T15:22:02.491459" | "2020-05-08T13:30:00" | "2020-05-08T13:30:00" | 254,953,825 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | ## Swap is always handy for arrays
def swap(i, j, array):
    """Exchange the elements at positions i and j of array, in place."""
    array[i], array[j] = array[j], array[i]
## Write a method that checks if two arrays of integers are pemutations of each other ##
# Time complexity: O(nlogn) or O(n) if done with hash maps(python dictionary)
def isPemutation(array1, array2):
    """Return True when array2 is a permutation of array1.

    Sorts both lists in place (side effect preserved from the original) and
    compares them position by position.

    Bug fix: the original iterated element *values* and used them as indices
    (`for i in array1: array1[i] ...`), which compares the wrong positions
    and raises IndexError for values >= len(array1).
    """
    if len(array1) != len(array2):
        return False
    array1.sort()
    array2.sort()
    for i in range(len(array1)):
        if array1[i] != array2[i]:
            return False
    return True
## Write a function to determine if an array has all unique elements ##
# Time complexity: O(nlogn) or O(n) with hash maps(python dictionary)
def isUnque(array):
    """Return True when every element of array is distinct.

    Sorts the list in place, then checks adjacent pairs.

    Bug fix: the original iterated element values as indices and read
    array[i+1] past the end of the list, raising IndexError.
    """
    array.sort()
    for i in range(len(array) - 1):
        if array[i] == array[i + 1]:
            return False
    return True
## Write a function that searches an array to find two numbers that sum up to a target integer ##
# Assume array of integers and that at most one correct pair exists
# Time complexity: O(nlogn)
def twoIntegerSum(array, target):
    """Return [a, b] (a <= b) from array with a + b == target, or [] if none.

    Sorts the list in place and runs a two-pointer scan from both ends.

    Bug fix: the original returned the string "Is you dumb??" when fewer than
    two elements were supplied, giving the function an inconsistent return
    type; an empty list now matches the no-pair case.
    """
    if len(array) < 2:
        return []
    array.sort()
    head = 0
    tail = len(array) - 1
    while head < tail:
        current = array[head] + array[tail]
        if current == target:
            return [array[head], array[tail]]
        if current > target:
            tail -= 1
        else:
            head += 1
    return []
## Write a function that takes target number and moves it to the end of the array ##
# Time complexity: O(n)
def moveToEnd(array, target):
    """Partition array in place so every occurrence of target sits at the tail.

    Two pointers close in from both ends; non-target values encountered on the
    right are swapped leftward. Returns the same list for convenience.
    """
    left, right = 0, len(array) - 1
    while left < right:
        if array[right] == target:
            right -= 1
        elif array[left] != target:
            left += 1
        else:
            array[left], array[right] = array[right], array[left]
            left += 1
            right -= 1
    return array
## Write a function that determines how many times a larger element in a sorted array bribed a smaller element to switch positions ##
# Average time complexity O(nlogn)
def minimumBribes(arr):
    """Print the minimum number of bribes that produced queue `arr`.

    Prints "Too chaotic" when any person moved more than two places forward;
    otherwise prints the bribe count. Always returns None (the value of
    print()).
    """
    bribes = 0
    for pos, value in enumerate(arr):
        # Nobody may overtake more than two positions.
        if value - (pos + 1) > 2:
            return print("Too chaotic")
        # Only people originally at value-2 or later can have passed `value`.
        for j in range(max(0, value - 2), pos):
            if arr[j] > value:
                bribes += 1
    return print(bribes)
## Design a function that finds the maximum number of toys that can be bought given an array of toy prices and max amount ##
# Average time complexity: O(nlogn)
def maximumToys(prices, k):
    """Return the maximum number of toys purchasable with budget k.

    Greedy: buy the cheapest affordable toys first.

    Fixes: the original manually incremented the `for` loop index (a no-op,
    since `for` rebinds it) and iterated by index without using it; the loop
    now iterates prices directly.
    """
    if len(prices) == 0 or k <= 0:
        return 0
    # Only toys that could ever fit the budget, cheapest first.
    affordable = sorted(p for p in prices if p <= k)
    max_toys = 0
    for price in affordable:
        k -= price
        if k < 0:
            return max_toys
        max_toys += 1
    return max_toys
| UTF-8 | Python | false | false | 2,977 | py | 7 | Arrays.py | 6 | 0.555257 | 0.543836 | 0 | 113 | 24.327434 | 133 |
nifigator/team-one-backend | 16,853,451,684,339 | 9efc939c77d4ca54eb026083d762abd8da332792 | 9477f7e3feb04817d38ed89cf4888a1bda3ecb4c | /app.py | 1c5448c726bc926eb62647236d8d94d175845063 | [] | no_license | https://github.com/nifigator/team-one-backend | 5e2a38fbe0c0701060f88added3016ac28da83d6 | e4683c22fa618a5d0a437d1d9ba261c45e9e2cb8 | refs/heads/master | "2022-12-10T16:16:52.582359" | "2019-09-29T06:53:51" | "2019-09-29T06:53:51" | 194,398,207 | 0 | 0 | null | false | "2022-12-08T05:49:35" | "2019-06-29T11:29:33" | "2019-09-29T06:54:07" | "2022-12-08T05:49:34" | 46 | 0 | 0 | 6 | Python | false | false | """
Main module of the server file
"""
# 3rd party modules
import connexion
# Local modules
import config
def dummy(**kwargs):
    """Placeholder API handler: acknowledge any call with HTTP 200."""
    message = 'Method not yet implemented.'
    return message, 200
if __name__ == '__main__':
    # Use the Connexion application pre-configured in config.py.
    app = config.connex_app
    # app = connexion.FlaskApp(__name__, specification_dir='api/')
    # Register the OpenAPI specification and serve on port 8080.
    app.add_api('common.yaml')
    app.run(port=8080)
| UTF-8 | Python | false | false | 354 | py | 23 | app.py | 20 | 0.646893 | 0.624294 | 0 | 18 | 18.611111 | 66 |
sirkkalap/iceml | 360,777,281,249 | ba9428fd15b6bb28e4323e826c6a11ad5b570ed9 | 8cfd093089f92f30d1091cec6bdaabaa2c7c466d | /ais/analysis.py | b53c6ed10eda36dc8a1728578ae5baf2e523b6d5 | [
"MIT"
] | permissive | https://github.com/sirkkalap/iceml | 94f7a6d750e2937ca2bdd256a0c970e0b9ee1331 | 442f6513d399fe30e81f9d932210c930f7d40b3c | refs/heads/master | "2022-12-04T21:00:55.244248" | "2020-08-11T20:40:47" | "2020-08-11T20:40:47" | 283,975,825 | 1 | 0 | null | true | "2020-07-31T07:48:31" | "2020-07-31T07:48:30" | "2019-11-11T15:17:06" | "2020-03-31T09:53:07" | 26,148 | 0 | 0 | 0 | null | false | false | import pandas as pd
import numpy as np
import logging
logger = logging.getLogger(__name__)  # module-wide logger for the AIS helpers
# Speed-over-ground (knots) at or above which a vessel counts as moving.
MOVE_SPEED_THRESHOLD = 5
# Speed-over-ground (knots) below which a vessel counts as stopped.
STOP_SPEED_THRESHOLD = 0.5
# NOTE(review): documented as the observation window, but the filtering code
# below uses a hard-coded 15-minute window instead — confirm which is intended.
PREVIOUS_OBSERVATIONS_TIME_FRAME = 5 # store N minutues of observations
def filter_previous_observations_by_timestamp(df):
    """Keep only observations within 15 minutes of df's latest timestamp.

    Bug fix: the original compared each row's timestamp against *itself*
    minus 15 minutes (df['timestamp'] >= df['timestamp'] - 15min), which is
    always true, so the history was never pruned. The window is now anchored
    at the most recent observation.
    """
    if len(df) > 0:
        cutoff = df['timestamp'].max() - pd.Timedelta(15, unit='m')
        return df[df['timestamp'] >= cutoff]
    else:
        return df
def is_sudden_stop(d, prev):
    """True when observation `d` shows a vessel stopping abruptly.

    Conditions (evaluated left to right, short-circuiting): the recent
    history `prev` was moving on average, the current speed is below the
    stop threshold, at least two prior observations exist, and no earlier
    row was already flagged as a sudden stop.
    """
    average_speed = prev['sog'].mean()
    return (average_speed >= MOVE_SPEED_THRESHOLD
            and d['sog'] < STOP_SPEED_THRESHOLD
            and len(prev) > 1
            and (prev['sudden_stopping'] == False).all())
def append_sudden_stopping(ais):
    """Flag sudden stops per vessel, filling a 'sudden_stopping' column.

    Walks the frame in order, keeping a rolling window of recent observations
    per MMSI, and marks each row via is_sudden_stop().

    Bug fixes:
    - The original called ais.assign(sudden_stopping=None) and discarded the
      result (assign returns a *new* frame); the column is now created
      explicitly on ais.
    - DataFrame.set_value was deprecated in pandas 0.21 and removed in 1.0;
      replaced with .at[]. DataFrame.append (removed in pandas 2.0) is
      replaced with pd.concat.
    """
    ais['sudden_stopping'] = None
    vessels = {}
    for i, d in ais.iterrows():
        mmsi = d['mmsi']
        if mmsi not in vessels:
            vessels[mmsi] = {'previous_observations': pd.DataFrame(columns=ais.columns)}
        prev = filter_previous_observations_by_timestamp(vessels[mmsi]['previous_observations'])
        vessels[mmsi]['previous_observations'] = prev
        ais.at[i, 'sudden_stopping'] = is_sudden_stop(d, prev)
        vessels[mmsi]['previous_observations'] = pd.concat(
            [vessels[mmsi]['previous_observations'], ais.loc[[i]]])
    return ais
def merge_vessel_meta_and_location_data(vm, vl):
    """Merge vessel meta data (vm) and location data (vl) into one DataFrame.

    Merges per MMSI via merge_single_vessel_meta_and_location_data and
    concatenates the per-vessel results. Assumes both frames are ordered by
    timestamp within each mmsi.

    Fixes: the bare `except:` (which also swallows SystemExit and
    KeyboardInterrupt) is narrowed to `except Exception:`, the manual loop
    counter is replaced with enumerate, and the removed DataFrame.append is
    replaced with pd.concat.
    """
    logger = logging.getLogger(__name__)
    mmsis = vl['mmsi'].unique()
    df = None
    for i, mmsi in enumerate(mmsis):
        try:
            logger.debug("Merge vessel {} ({}%)".format(mmsi, round(i / len(mmsis) * 100, 1)))
            result = merge_single_vessel_meta_and_location_data(vm[vm['mmsi'] == mmsi], vl[vl['mmsi'] == mmsi])
            if df is None:
                df = result
            else:
                df = pd.concat([df, result], ignore_index=True)
        except Exception:
            # A failure on one vessel should not abort the whole merge.
            logger.exception("Exception in merging vessel {}".format(mmsi))
    return df
def merge_single_vessel_meta_and_location_data(vm, vl):
    """Merge meta and location rows for a single vessel (one mmsi).

    For each location fix, attaches the most recent meta row whose timestamp
    precedes it. Returns one row per matched location fix.

    Bug fix: the emptiness guard now runs *before* min(vm['timestamp']) —
    min() over an empty column raises ValueError, so the original crashed on
    a vessel with no meta rows instead of returning an empty frame.
    """
    vm = vm.sort_values('timestamp')
    vl = vl.sort_values('timestamp')
    vl_colnames = ['timestamp', 'mmsi', 'lon', 'lat', 'sog', 'cog', 'heading']
    vm_colnames = ['name', 'ship_type', 'callsign', 'imo', 'destination', 'eta',
                   'draught', 'pos_type', 'reference_point_a', 'reference_point_b',
                   'reference_point_c', 'reference_point_d']
    if len(vm) == 0 or len(vl) == 0:
        return pd.DataFrame(columns=vl_colnames + vm_colnames)
    # Number of location fixes at or after the first meta record.
    n_rows = len(vl[vl['timestamp'] >= min(vm['timestamp'])])
    df = pd.DataFrame(index=range(n_rows), columns=vl_colnames + vm_colnames)
    vm_index = 0
    vl_index = 0
    i = 0
    while vl_index < len(vl):
        # Advance the meta pointer to the last meta row preceding this fix.
        if vl.iloc[vl_index].timestamp > vm.iloc[vm_index].timestamp:
            while vm_index + 1 < len(vm) and vm.iloc[vm_index + 1].timestamp < vl.iloc[vl_index].timestamp:
                vm_index += 1
        if vm_index >= len(vm):
            break
        df.loc[i] = merge_vessel_row(vl, vl_index, vl_colnames, vm, vm_index, vm_colnames)
        i += 1
        vl_index += 1
    return df
def merge_vessel_row(vl, vl_index, vl_colnames, vm, vm_index, vm_colnames):
    """Build one merged record (dict) from a location row and a meta row.

    Takes the requested columns of vl.iloc[vl_index] and vm.iloc[vm_index];
    meta values overwrite location values on (unexpected) column clashes.
    """
    location_row = vl.iloc[vl_index]
    meta_row = vm.iloc[vm_index]
    merged = {name: location_row[name] for name in vl_colnames}
    merged.update({name: meta_row[name] for name in vm_colnames})
    return merged
def get_ice_conditions(ice, location, day):
    """Return the last ice-grid row at or south-west of `location`, or None.

    Selects rows of `ice` with lat <= location['lat'] and
    lon <= location['lon'], returning the final match as a 1-row frame.

    NOTE(review): the `day` argument is accepted but never used — presumably
    intended to select the ice chart for that date; confirm upstream.
    """
    candidates = ice[(ice['lat'] <= location['lat']) & (ice['lon'] <= location['lon'])]
    if len(candidates) == 0:
        return None
    return candidates.tail(1)
def merge_location_and_ice_condition(vl, ice):
    """Attach ice concentration/thickness to every vessel location row.

    For each row of vl (sorted by timestamp) looks up the matching ice-grid
    cell via get_ice_conditions(); rows without a match keep NaN in the ice
    columns. Returns a new DataFrame with vl's columns plus the ice columns.
    """
    vl = vl.sort_values('timestamp')
    i = 0
    ice_columns = ['concentration', 'thickness']
    df = pd.DataFrame(index=range(len(vl)), columns=np.append(vl.columns.values, ice_columns))
    while i < len(vl):
        ts = vl.iloc[i].timestamp
        day = ts.strftime('%Y-%m-%d')
        ice_conditions = get_ice_conditions(ice, vl.iloc[i][['lat', 'lon']], day)
        if ice_conditions is not None:
            # NOTE(review): Series.append was removed in pandas 2.0 — this
            # line only runs on older pandas; consider pd.concat.
            df.loc[i] = vl.iloc[i].append(ice_conditions[ice_columns].iloc[0])
        else:
            df.loc[i] = vl.iloc[i]
        # Progress log every 1000 rows.
        if i % 1000 == 0:
            logger.debug("Merge ice conditions ({}%)".format(int(i / len(vl) * 100)))
        i += 1
    return df
| UTF-8 | Python | false | false | 4,449 | py | 30 | analysis.py | 11 | 0.589121 | 0.580805 | 0 | 141 | 30.553191 | 188 |
riichi/kcc3 | 12,189,117,197,200 | 97b0999f3163fc60c418a327134a37adac5b8721 | f336b5329a86cb0c0a989edb2bafe55c85f88381 | /yakumans/yakumans.py | b22cd8275cdd1294f0b4b58d9541cb41f326bd8d | [
"MIT"
] | permissive | https://github.com/riichi/kcc3 | 4880a41f05be794abdf395229d7490b8cf20066f | 2cc9674d13483ee59fe7cb83e8b9c9e8b213ba47 | refs/heads/master | "2023-04-27T06:05:04.839349" | "2022-01-19T13:40:07" | "2022-01-19T13:40:07" | 206,862,732 | 1 | 0 | MIT | false | "2023-04-21T21:45:42" | "2019-09-06T19:39:54" | "2023-01-25T23:58:49" | "2023-04-21T21:45:42" | 171 | 4 | 0 | 3 | Python | false | false | from dataclasses import dataclass
from enum import Enum
class YakumanType(Enum):
    """Category of a yakuman hand.

    REGULAR  -- standard yakuman scored from the hand itself.
    INITIAL  -- awarded on the very first draw (tenhou / chiihou below).
    OPTIONAL -- local/optional-rule yakuman, not recognised everywhere.
    """
    REGULAR = 1
    INITIAL = 2
    OPTIONAL = 3
@dataclass
class Yakuman:
    """A yakuman hand definition.

    Attributes:
        id: stable machine identifier.
        name: romanised name.
        japanese_name: name in Japanese script.
        type: YakumanType category.
        double: whether the hand scores as a double yakuman.
    """
    id: str
    name: str
    japanese_name: str
    type: YakumanType
    double: bool

    @property
    def verbose_name(self):
        """Romanised name with the Japanese name in parentheses."""
        return '{} ({})'.format(self.name, self.japanese_name)

    def __str__(self):
        return self.name
# ---------------------------------------------------------------------------
# Catalogue of yakuman definitions.
# ---------------------------------------------------------------------------

# Regular yakuman.
KAZOE_YAKUMAN = Yakuman(
    id='KAZOE_YAKUMAN',
    name='Kazoe Yakuman',
    japanese_name='数え役満',
    type=YakumanType.REGULAR,
    double=False,
)

KOKUSHI_MUSOU = Yakuman(
    id='KOKUSHI_MUSOU',
    name='Kokushi musou',
    japanese_name='国士無双',
    type=YakumanType.REGULAR,
    double=False,
)

KOKUSHI_MUSOU_JUUSAN_MENMACHI = Yakuman(
    id='KOKUSHI_MUSOU_JUUSAN_MENMACHI',
    name='Kokushi musou juusan menmachi',
    japanese_name='国士無双13面待ち',
    type=YakumanType.REGULAR,
    double=True,
)

SUUANKOU = Yakuman(
    id='SUUANKOU',
    name='Suuankou',
    japanese_name='四暗刻',
    type=YakumanType.REGULAR,
    double=False,
)

SUUANKOU_TANKI = Yakuman(
    id='SUUANKOU_TANKI',
    name='Suuankou tanki',
    japanese_name='四暗刻単騎',
    type=YakumanType.REGULAR,
    double=True,
)

DAISANGEN = Yakuman(
    id='DAISANGEN',
    name='Daisangen',
    japanese_name='大三元',
    type=YakumanType.REGULAR,
    double=False,
)

SHOUSUUSHII = Yakuman(
    id='SHOUSUUSHII',
    name='Shousuushii',
    japanese_name='小四喜',
    type=YakumanType.REGULAR,
    double=False,
)

DAISUUSHII = Yakuman(
    id='DAISUUSHII',
    name='Daisuushii',
    japanese_name='大四喜',
    type=YakumanType.REGULAR,
    double=True,
)

TSUUIISOU = Yakuman(
    id='TSUUIISOU',
    name='Tsuuiisou',
    japanese_name='字一色',
    type=YakumanType.REGULAR,
    double=False,
)

CHINROUTOU = Yakuman(
    id='CHINROUTOU',
    name='Chinroutou',
    japanese_name='清老頭',
    type=YakumanType.REGULAR,
    double=False,
)

RYUUIISOU = Yakuman(
    id='RYUUIISOU',
    name='Ryuuiisou',
    japanese_name='緑一色',
    type=YakumanType.REGULAR,
    double=False,
)

CHUUREN_POUTOU = Yakuman(
    id='CHUUREN_POUTOU',
    name='Chuuren poutou',
    japanese_name='九連宝燈',
    type=YakumanType.REGULAR,
    double=False,
)

JUNSEI_CHUUREN_POUTOU = Yakuman(
    id='JUNSEI_CHUUREN_POUTOU',
    name='Junsei chuuren poutou',
    japanese_name='純正九蓮宝燈',
    type=YakumanType.REGULAR,
    double=True,
)

SUUKANTSU = Yakuman(
    id='SUUKANTSU',
    name='Suukantsu',
    japanese_name='四槓子',
    type=YakumanType.REGULAR,
    double=False,
)

# First-turn (initial) yakuman.
TENHOU = Yakuman(
    id='TENHOU',
    name='Tenhou',
    japanese_name='天和',
    type=YakumanType.INITIAL,
    double=False,
)

CHIIHOU = Yakuman(
    id='CHIIHOU',
    name='Chiihou',
    japanese_name='地和',
    type=YakumanType.INITIAL,
    double=False,
)

# Optional (local-rule) yakuman.
DAICHISEI = Yakuman(
    id='DAICHISEI',
    name='Daichisei',
    japanese_name='大七星',
    type=YakumanType.OPTIONAL,
    double=True,
)

DAISHARIN = Yakuman(
    id='DAISHARIN',
    name='Daisharin',
    japanese_name='大車輪',
    type=YakumanType.OPTIONAL,
    double=False,
)

DAICHIKURIN = Yakuman(
    id='DAICHIKURIN',
    name='Daichikurin',
    japanese_name='大竹林',
    type=YakumanType.OPTIONAL,
    double=False,
)

DAISUURIN = Yakuman(
    id='DAISUURIN',
    name='Daisuurin',
    japanese_name='大数林',
    type=YakumanType.OPTIONAL,
    double=False,
)

IISHOKU_YONJUN = Yakuman(
    id='IISHOKU_YONJUN',
    name='Iishoku yonjun',
    japanese_name='一色四順',
    type=YakumanType.OPTIONAL,
    double=False,
)

OPEN_RIICHI = Yakuman(
    id='OPEN_RIICHI',
    name='Open riichi',
    japanese_name='オープン立直',
    type=YakumanType.OPTIONAL,
    double=False,
)

PAARENCHAN = Yakuman(
    id='PAARENCHAN',
    name='Paarenchan',
    japanese_name='八連荘',
    type=YakumanType.OPTIONAL,
    double=False,
)

RENHOU = Yakuman(
    id='RENHOU',
    name='Renhou',
    japanese_name='人和',
    type=YakumanType.OPTIONAL,
    double=False,
)

SUURENKOU = Yakuman(
    id='SUURENKOU',
    name='Suurenkou',
    japanese_name='四連刻',
    type=YakumanType.OPTIONAL,
    double=False,
)

SHIISANPUUTAA = Yakuman(
    id='SHIISANPUUTAA',
    name='Shiisanpuutaa',
    japanese_name='十三不塔',
    type=YakumanType.OPTIONAL,
    double=False,
)

SHIISUUPUUTAA = Yakuman(
    id='SHIISUUPUUTAA',
    name='Shiisuupuutaa',
    japanese_name='十四不塔',
    type=YakumanType.OPTIONAL,
    double=False,
)

# All yakuman in definition order (regular, initial, optional).
YAKUMANS = (
    KAZOE_YAKUMAN,
    KOKUSHI_MUSOU,
    KOKUSHI_MUSOU_JUUSAN_MENMACHI,
    SUUANKOU,
    SUUANKOU_TANKI,
    DAISANGEN,
    SHOUSUUSHII,
    DAISUUSHII,
    TSUUIISOU,
    CHINROUTOU,
    RYUUIISOU,
    CHUUREN_POUTOU,
    JUNSEI_CHUUREN_POUTOU,
    SUUKANTSU,
    TENHOU,
    CHIIHOU,
    DAICHISEI,
    DAISHARIN,
    DAICHIKURIN,
    DAISUURIN,
    IISHOKU_YONJUN,
    OPEN_RIICHI,
    PAARENCHAN,
    RENHOU,
    SUURENKOU,
    SHIISANPUUTAA,
    SHIISUUPUUTAA,
)

# The same catalogue sorted alphabetically by romanised name.
YAKUMANS_BY_NAME = sorted(YAKUMANS, key=lambda x: x.name)
def yakuman_by_id(yaku_id: str) -> Yakuman:
    """Return the yakuman with machine id `yaku_id`.

    Raises StopIteration when no yakuman matches (same as the original
    next()-based lookup).
    """
    for candidate in YAKUMANS:
        if candidate.id == yaku_id:
            return candidate
    raise StopIteration
def yakuman_by_name(name: str) -> Yakuman:
    """Return the yakuman with romanised name `name`.

    Raises StopIteration when no yakuman matches.
    """
    return next(filter(lambda candidate: candidate.name == name, YAKUMANS))
| UTF-8 | Python | false | false | 5,277 | py | 63 | yakumans.py | 48 | 0.645542 | 0.644558 | 0 | 258 | 18.693798 | 57 |
maddarauci/py_maze | 2,628,519,992,848 | 7f4edf8fe35f0e0e0c379c43c79f01526492ef5c | 1c981e538b050bb7bfdfea9d85cdff417c527298 | /py_maze.py | 9c02d9652701f3d84bcd0d2664a16f06aead2d45 | [
"Apache-2.0"
] | permissive | https://github.com/maddarauci/py_maze | 605273b560f94eec19eea86025097388b8a5f940 | 2392d11409b04f6eb7f76188789dbb5527bbd5ce | refs/heads/main | "2023-06-17T04:58:41.916454" | "2021-07-16T16:21:49" | "2021-07-16T16:21:49" | 386,698,250 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # w: walls
# c: cells
# u: unvistied block
from colorama import init, Fore
from colorama import Back, Style
import time, random
def print_maze(maze):
    """Pretty-print the maze grid, one colour per block type.

    'u' (unvisited) prints white, 'c' (cell) green, anything else (walls)
    red; a blank line separates rows.

    Bug fix: colorama exposes upper-case colour constants — Fore.White and
    Fore.GRENNN raise AttributeError; corrected to Fore.WHITE / Fore.GREEN.
    """
    for i in range(0, len(maze)):
        for j in range(0, len(maze[0])):
            if maze[i][j] == 'u':
                print(Fore.WHITE, f'{maze[i][j]}', end='')
            elif maze[i][j] == 'c':
                print(Fore.GREEN, f'{maze[i][j]}', end='')
            else:
                print(Fore.RED, f'{maze[i][j]}', end='')
        print('\n')
# find number of surrounding cells
def surround(rand_wall):
    """Count how many of the four orthogonal neighbours of rand_wall are cells.

    Reads the module-level `maze` grid; rand_wall is a [row, col] pair.

    Bug fixes: the original incremented undefined counters (s_walls/s_wall),
    misspelled `maze` as `maz`, tested the wall's own position instead of the
    neighbour above it, and had malformed double-indexing brackets.
    """
    s_cells = 0
    if maze[rand_wall[0] - 1][rand_wall[1]] == 'c':
        s_cells += 1
    if maze[rand_wall[0] + 1][rand_wall[1]] == 'c':
        s_cells += 1
    if maze[rand_wall[0]][rand_wall[1] - 1] == 'c':
        s_cells += 1
    if maze[rand_wall[0]][rand_wall[1] + 1] == 'c':
        s_cells += 1
    return s_cells
# main code
# NOTE(review): this whole top-level section does not parse — several
# statements below are syntactically invalid (flagged inline). It reads as a
# half-finished transcription of randomized Prim's maze generation.
# init variables
cell = 'c'
wall = 'w'
unvisited = 'u'
height=11
width = 27
# initialize colorama before use
init()
# mark all cells unvisited
# NOTE(review): `maze` is never initialised before this loop and the append
# call is misspelled (`appen`).
for i in range(0, height):
	line = []
	for j in range(0, width):
		line.append(unvisited)
	maze.appen(line)
# randomize starting point and set it a cell
start_height = int(random.random()*height)
start_width = int(random.random()*width)
# NOTE(review): the next two clamp checks are missing their `if` keyword —
# `start_height=0:` / `start_width=0:` are syntax errors.
start_height=0:
	start_height += 1
if start_height ==height-1:
	start_height -= 1
start_width=0:
	start_width += 1
if start_width ==width-1:
	start_width -= 1
# mark it as cell and add surround walls to the list
maze[start_height] [start_width] = cell
# NOTE(review): missing `=` (should be `walls = []`), `starting_width` /
# `starting_widt` are undefined names, and list.append takes one argument.
walls []
walls.append([start_height-1], starting_width)
walls.append([start_height], starting_width-1)
walls.append([start_height], starting_widt+1)
walls.append([start_height+1], starting_width)
# denote walls in cell
# NOTE(review): `maze(...)` calls the list instead of indexing it — should
# be `maze[...]`.
maze(start_height-1)[start_width] ='w'
maze(start_height)[start_width-1] = 'w'
maze(start_height)[start_width+1] = 'w'
maze(start_height+1)[start_width] = 'w'
while(walls):
	# pick a random wall
	# NOTE(review): `walks` is undefined (typo for `walls[...]`) and
	# `random.random` is not called.
	rand_wall = walks(int(random.random)*len(walls)-1)
	# check if it is a left wall
	if (rand_wall[1] != 0):
		# NOTE(review): unbalanced parentheses/brackets in this condition.
		if (maze(rand_wall[0][rand_wall[1]-1] == 'u' and maze(rand_wall[0][rand_wall[1]+1] == 'c'):
			# find the number of surrounding cells
			# NOTE(review): `surroundingCells` is undefined here; the helper
			# above is named `surround`.
			s_cells = surroundingCells(rand_wall)
			if (s_cells < 2):
				# denote new path
				maze[rand_wall[0][rand_wall[1]] = 'c'
				# mark the new walls
				if(rand_wall[0] != 0):
					if(maze[rand_wall[0]-1][rand_wall[1]] !='c'):
						# NOTE(review): `==` compares instead of assigning.
						maze[rand_wall[0]-1][rand_wall[1]] == 'w'
					if([rand_wall[0]-1, rand_wall[1]] not in walls):
						walls.append(rand_wall[0]-1, rand_wall[1])
# bottom cell
# for a fixed height and with create a function to make empty maze
def init_maze(width, height):
maz = []
for i in range(0, height):
line = []
for j in range(0, width):
line.append('u')
maze.append(line)
return maze
maze = init_maze(width, height)
print_maze(maze)
create_maze()
make_walls(width, height)
create_entrance_exit(widht, height)
print(maze)
'''
'''
# for debugging purposes we will a function that prints the maze in a user
# friendly format.
In order to be able to easily distinguish walls, cells and unvisited blocks,
we will paint each letter with a different color,
depending on the letter. To do so, we will use colorama .
'''
print(maze.print_maze)
'''
print('hi')
| UTF-8 | Python | false | false | 3,841 | py | 1 | py_maze.py | 1 | 0.538922 | 0.520958 | 0 | 155 | 22.780645 | 99 |
adventure-capitalist/PythonPractice | 8,315,056,690,871 | 87c0b50e7c9ebccb62e4167aefa09c9419d48954 | d194571c520fa1cfc957d23ff0e6b5672d644c0c | /ex11.py | 0205daa6ece8311a37f4917f09ffb52b918059a2 | [] | no_license | https://github.com/adventure-capitalist/PythonPractice | 0c173adc934b2e25ac2a735d14661fbd1d06012d | 8cf1ce755e5b410949a3ee25b199891c35ea97b1 | refs/heads/master | "2020-04-25T12:25:26.688657" | "2019-03-04T23:10:11" | "2019-03-04T23:10:11" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | DOB = (raw_input("What is your DOB DD-MM-YYY? ")
date_extraction = DOB.split("-")
day = date_extraction[0]
month = date_extraction[1]
year = date_extraction[2]
result = calculate_gen(int(year))
while raw_input is not int:
try:
birth_year = int(raw_input("What year were you born? "))
break
except ValueError:
print("Please enter a valid number")
def calculate_gen()
if year >= 1901: gen = "Interbellum"
if year >= 1910: gen = "Greatest"
if year >= 1946: gen = "Baby Boomer"
if year >= 1965: gen = "X"
if year >= 1975: gen = "Xennial"
if year >= 1985: gen = "Millennial"
if year >= 1995: gen = "Z"
if year >= 2013: gen = "Alpha"
print "So, you're part of the %s generation." % gen
| UTF-8 | Python | false | false | 778 | py | 13 | ex11.py | 13 | 0.583548 | 0.53856 | 0 | 28 | 25.785714 | 64 |
ferasbg/crypton | 9,706,626,119,729 | 322b02e3bb71f5addb3322ed32e85df6b65dbdc8 | 8d0f1e3fc10a2f276be3c4d10c5a01e731708171 | /src/settings.py | 04f8cea77b6d3078dee56b0832e1447ce8c313d2 | [] | no_license | https://github.com/ferasbg/crypton | a30ce8d16a729611adcfe7bd3b2ec2a8279bc882 | cc0ace34131a8c0fc1caaee6ce462f5480391b5c | refs/heads/master | "2023-08-22T23:33:49.966892" | "2021-10-25T03:56:42" | "2021-10-25T03:56:42" | 267,441,167 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import flwr
from flwr.server.strategy import FedAdagrad, FedAvg, FedFSv0, FedFSv1, FaultTolerantFedAvg
from flwr.common import EvaluateIns, EvaluateRes, FitIns, FitRes, ParametersRes, weights_to_parameters
from flwr.server.client_proxy import ClientProxy
from server import *
from client import AdvRegClient, Client
import argparse
import absl
from absl import flags
import tensorflow_datasets as tfds
'''
Configurations:
- dataset to use (dataset="cifar10" or dataset="cifar100")
- adv_grad_norm : str (options: adv_grad_norm="infinity"; adv_grad_norm="l2")
- adv_step_size : float (options: range(0.1, 0.9) --> range differs/depends on the adv_grad_norm argument)
- batch_size : int (default=32)
- epochs : int (default=5)
- steps_per_epoch : int (default=None)
- num_clients : int
- num_partitions : int (default=num_clients)
- num_rounds : int (default=10)
- federated_optimizer_strategy : str (options: federated_optimizer="fedavg", federated_optimizer="fedadagrad", federated_optimizer="faulttolerantfedavg", federated_optimizer="fedsv1", federated_optimizer="fedopt")
- adv_reg : bool (default=False)
- gaussian_layer : bool (default=False)
- pseudorandom_image_distribution_transformation_train : bool (default=False, options=[False, False])
- apply_all_image_degradation_configs : bool (default=False, options=[True, False])
- image_corruption_train : bool
- image_resolution_loss_train : bool
- formal_robustness_analysis : bool
- input_shape = [32, 32, 3]
- num_classes = num_classes
- conv_filters = [32, 64, 64, 128, 128, 256]
- kernel_size = (3, 3)
- pool_size = (2, 2)
- num_fc_units = [64]
- batch_size = 32
- epochs = 5
- adv_multiplier = adv_multiplier
- adv_step_size = adv_step_size
- adv_grad_norm = adv_grad_norm
- fraction_fit: float = 0.1,
- fraction_eval: float = 0.1,
- min_fit_clients: int = 2,
- min_eval_clients: int = 2,
- min_available_clients: int = 2,
- eval_fn: Optional[
Callable[[Weights], Optional[Tuple[float, Dict[str, Scalar]]]]
] = None,
- on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None,
- on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None,
- accept_failures: bool = True,
- initial_parameters: Optional[Parameters] = None
'''
DEFAULT_SERVER_ADDRESS="[::]:8080"
DEFAULT_NUM_CLIENTS = 10
DEFAULT_NUM_ROUNDS = 10
DEFAULT_CLIENT = Client()
ADV_REG_CLIENT = AdvRegClient()
CLIENT_SET = [DEFAULT_CLIENT, ADV_REG_CLIENT]
DEFAULT_ADV_GRAD_NORM = "infinity"
ADV_GRAD_NORM_OPTIONS = ["infinity", "l2"]
DEFAULT_ADV_MULTIPLIER = 0.2
DEFAULT_ADV_STEP_SIZE = 0.05
DEFAULT_CLIENT_LR_SCHEDULE = []
DEFAULT_SERVER_LR_SCHEDULE = []
CLIENT_LEARNING_RATE_SCHEDULER = tf.keras.callbacks.LearningRateScheduler(schedule=DEFAULT_CLIENT_LR_SCHEDULE)
SERVER_LEARNING_RATE_SCHEDULER = tf.keras.callbacks.LearningRateScheduler(schedule=DEFAULT_SERVER_LR_SCHEDULE)
PARAMETERS = HParams(num_classes=DEFAULT_NUM_CLIENTS, adv_multiplier=DEFAULT_ADV_MULTIPLIER, adv_step_size=DEFAULT_ADV_STEP_SIZE, adv_grad_norm=ADV_GRAD_NORM_OPTIONS[0])
ADVERSARIAL_REGULARIZED_MODEL = build_adv_model(parameters=PARAMETERS)
BASE_MODEL = build_base_model(parameters=PARAMETERS)
DEFAULT_MODEL = BASE_MODEL
DEFAULT_FRACTION_FIT = 0.3
DEFAULT_FRACTION_EVAL = 0.2
DEFAULT_MIN_FIT_CLIENTS = 2
DEFAULT_MIN_EVAL_CLIENTS = 2
DEFAULT_MIN_AVAILABLE_CLIENTS = 10
DEFAULT_INITIAL_SERVER_MODEL_PARAMETERS = weights_to_parameters(DEFAULT_MODEL.get_weights())
federated_averaging = flwr.server.strategy.FedAvg(
fraction_fit=DEFAULT_FRACTION_FIT,
fraction_eval=DEFAULT_FRACTION_EVAL,
min_fit_clients=DEFAULT_MIN_FIT_CLIENTS,
min_eval_clients=DEFAULT_MIN_EVAL_CLIENTS,
min_available_clients=DEFAULT_MIN_AVAILABLE_CLIENTS,
eval_fn=get_eval_fn(DEFAULT_MODEL),
on_fit_config_fn=fit_config,
on_evaluate_config_fn=evaluate_config,
initial_parameters=weights_to_parameters(DEFAULT_MODEL.get_weights()),
)
federated_adaptive_optimization = FedAdagrad(
fraction_fit=0.3,
fraction_eval=0.2,
min_fit_clients=101,
min_eval_clients=101,
min_available_clients=110,
eval_fn=get_eval_fn(DEFAULT_MODEL),
on_fit_config_fn=fit_config,
on_evaluate_config_fn=evaluate_config,
accept_failures=False,
initial_parameters=weights_to_parameters(DEFAULT_MODEL.get_weights()),
tau=1e-9,
eta=1e-1,
eta_l=1e-1
)
DEFAULT_FEDERATED_STRATEGY_SET = [federated_averaging, federated_adaptive_optimization]
DEFAULT_TRAIN_EPOCHS = 5
DEFAULT_CLIENT_LEARNING_RATE = 0.1
DEFAULT_SERVER_LEARNING_RATE = 0.1
# DEFAULT_WEIGHT_REGULARIZATION = add in DEFAULT MODEL layer
DEFAULT_GAUSSIAN_STATE = False
DEFAULT_IMAGE_CORRUPTION_STATE = False
# based on the set I want to apply that specific image corruption
MISC_CORRUPTION_SET = ["spatter", "saturate", "fog", "brightness", "contrast"]
BLUR_CORRUPTION_SET = ["motion_blur", "glass_blur", "zoom_blur", "gaussian_blur", "defocus_blur"]
DATA_CORRUPTION_SET = ["jpeg_compression", "elastic_transform", "pixelate"]
NOISE_CORRUPTION_SET = ["gaussian_noise", "shot_noise", "impulse_noise", "speckle_noise"]
# this defines whether a particular set of corruptions are applied
TRAIN_SET_IMAGE_DISTORTION_STATE = False
SERVER_TEST_SET_PERTURBATION_STATE = False
headers = ["Model", "Adversarial Regularization Technique", "Strategy", "Server Model ε-Robust Federated Accuracy", "Server Model Certified ε-Robust Federated Accuracy"]
# define options per variable; adv reg shares pattern of adversarial augmentation, noise (non-uniform, uniform) perturbations/corruptions/degradation as regularization
adv_reg_options = ["Neural Structured Learning", "Gaussian Regularization", "Data Corruption Regularization", "Noise Regularization", "Blur Regularization"]
strategy_options = ["FedAvg", "FedAdagrad", "FaultTolerantFedAvg", "FedFSV1"]
metrics = ["server_loss", "server_accuracy_under_attack", "server_certified_loss"]
variables = ["epochs", "communication_rounds", "client_learning_rate", "server_learning_rate", "adv_grad_norm", "adv_step_size"]
nsl_variables = ["neighbor_loss"]
baseline_adv_reg_variables = ["severity", "noise_sigma"] | UTF-8 | Python | false | false | 6,347 | py | 58 | settings.py | 14 | 0.71253 | 0.696454 | 0 | 134 | 46.358209 | 217 |
brady-vitrano/django-canon | 13,383,118,124,463 | c9d423f8a021f1c891430188178ca02c6a7e4d1a | 8917ef48be453aca05e8cef294a02ac57f3e6940 | /mysite/mysite/urls.py | 1b7062bce2d76409f5a3fb68dd63ceafaba766d3 | [] | no_license | https://github.com/brady-vitrano/django-canon | fc49ff810d5c1373220b118df3ef6eeac8b65f06 | 64183c9a3126f8312005cb12320971ca298c6add | refs/heads/master | "2019-03-08T23:29:06.871472" | "2014-11-17T05:57:41" | "2014-11-17T05:57:41" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^polls/', include('polls.urls')),
(r'^accounts/login/$', 'django.contrib.auth.views.login'),
)
| UTF-8 | Python | false | false | 269 | py | 8 | urls.py | 5 | 0.684015 | 0.684015 | 0 | 8 | 32.625 | 62 |
lauradoc/take-home-challenge-ascii | 13,649,406,102,460 | 743ad754e4c0b56ba73a1bd1fcf27c11f6ff70c5 | 749ce87255d4aef3cf5bde1ba5d6b31d4872ae7a | /functions.py | 385e9100b6bf642db92061b3310fc98a612cae26 | [] | no_license | https://github.com/lauradoc/take-home-challenge-ascii | d7a60f589cae08182b0ca593261b0a4be65114e9 | ebbd0efe724c4754f54f5b9514e5a98be1077699 | refs/heads/master | "2022-12-09T18:16:03.783908" | "2020-09-24T13:39:45" | "2020-09-24T13:39:45" | 298,291,202 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # all functions for ASCII Graphics API for drawing rectangles and squares
#!/usr/bin/env python
print('Hello, World')
def render_canvas():
row = [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']
grid = []
for i in range(11):
grid.append(row)
for i in range(len(grid)):
print(grid[i])
# def add_shape():
# def clear_all_shapes():
# def create_rectangle():
# def change_rectangle_fill():
# def translate(): | UTF-8 | Python | false | false | 449 | py | 1 | functions.py | 1 | 0.550111 | 0.545657 | 0 | 36 | 11.5 | 73 |
jackton1/django-view-breadcrumbs | 18,176,301,618,123 | e2ca5080954fa6b457e36363c05bb3e6b548ea0e | 9de9a4c978aa3488fcc4ef622a6f72c889f02cf8 | /view_breadcrumbs/generic/base.py | 0def474514e7e2cdbac13f46d8b550d7556ef04b | [
"BSD-3-Clause"
] | permissive | https://github.com/jackton1/django-view-breadcrumbs | 8bf684c6bef676f45ce0c279058b3c7cc7f822fd | 9d421b28d742414414097b9769659e07288e1cc2 | refs/heads/main | "2021-05-11T08:44:38.260031" | "2021-05-10T23:07:38" | "2021-05-10T23:07:38" | 118,060,183 | 19 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import logging
from django.conf import settings
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from view_breadcrumbs.constants import (
LIST_VIEW_SUFFIX,
CREATE_VIEW_SUFFIX,
UPDATE_VIEW_SUFFIX,
DELETE_VIEW_SUFFIX,
DETAIL_VIEW_SUFFIX,
)
from ..templatetags.view_breadcrumbs import (
CONTEXT_KEY,
append_breadcrumb,
clear_breadcrumbs,
)
from ..utils import (
get_verbose_name_plural,
get_verbose_name,
)
log = logging.getLogger(__name__)
def add_breadcrumb(context, label, view_name, **kwargs):
return append_breadcrumb(context, label, view_name, (), kwargs)
class BaseBreadcrumbMixin(object):
add_home = True
model = None
home_path = "/"
@cached_property
def home_label(self):
return _(getattr(settings, "BREADCRUMBS_HOME_LABEL", _("Home")))
@property
def crumbs(self):
raise NotImplementedError(
_(
"%(class_name)s should have a crumbs property."
% {"class_name": type(self).__name__}
)
)
def update_breadcrumbs(self, context):
crumbs = self.crumbs
if self.add_home:
crumbs = [(self.home_label, self.home_path)] + crumbs
for crumb in crumbs:
try:
label, view_name = crumb
except (TypeError, ValueError):
raise ValueError(
_("Breadcrumb requires a tuple of label and view name.")
)
else:
if hasattr(self, "object") and self.object:
if callable(label):
label = label(self.object)
if callable(view_name):
view_name = view_name(self.object)
add_breadcrumb(context, label, view_name)
def get_context_data(self, **kwargs):
ctx = {"request": self.request}
if CONTEXT_KEY in self.request.META:
clear_breadcrumbs(ctx)
self.update_breadcrumbs(ctx)
return super(BaseBreadcrumbMixin, self).get_context_data(**kwargs)
class BaseModelBreadcrumbMixin(BaseBreadcrumbMixin):
breadcrumb_use_pk = True
list_view_suffix = LIST_VIEW_SUFFIX
create_view_suffix = CREATE_VIEW_SUFFIX
update_view_suffix = UPDATE_VIEW_SUFFIX
delete_view_suffix = DELETE_VIEW_SUFFIX
detail_view_suffix = DETAIL_VIEW_SUFFIX
@property
def model_name_title(self):
return get_verbose_name(self.model).title()
@property
def model_name_title_plural(self):
return get_verbose_name_plural(self.model).title()
| UTF-8 | Python | false | false | 2,654 | py | 24 | base.py | 13 | 0.609269 | 0.609269 | 0 | 92 | 27.847826 | 76 |
jefflund/archive | 6,047,313,961,940 | 73fa11941ed6125e72d5a369082521471d64f34d | 0e37fa6532db1218342f9eafcbd62fd30ae98953 | /fall17/labs/style/grid.py | 90cf76b93486dc2b3efabaeda49d20816ebbb4e2 | [] | no_license | https://github.com/jefflund/archive | e4100bb79e849448c7784dcb6b69996d128f6c47 | 295e4d28d4b02e7276e7609ea49d603028078dbe | refs/heads/master | "2020-03-09T02:43:11.058223" | "2019-02-03T13:49:44" | "2019-02-03T13:49:44" | 128,547,018 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
from scipy.misc import imread, imresize, imsave
import numpy as np
import os
from glob import glob
D = 224
styles = glob('style/*.png')
contents = glob('content/*.png')
out_name = lambda s, c: 'output/{}-{}.png'.format(s[6:-4], c[8:-4])
for style in styles:
for content in contents:
os.system('./transfer.py {} {} {}'.format(style, content, out_name(style, content)))
grid = np.zeros((D * (len(styles)+1), D * (len(contents)+1), 3), dtype='uint8')
for i, style in enumerate(styles, 1):
grid[i*D: (i+1)*D, 0:D] = imresize(imread(style, mode='RGB'), (D, D))
for j, content in enumerate(contents, 1):
grid[0:D, j*D:(j+1)*D] = imresize(imread(content, mode='RGB'), (D, D))
for i, style in enumerate(styles, 1):
for j, content in enumerate(contents, 1):
grid[i*D: (i+1)*D, j*D:(j+1)*D] = imread(out_name(style, content), mode='RGB')
imsave('grid.png', grid)
| UTF-8 | Python | false | false | 911 | py | 285 | grid.py | 217 | 0.624588 | 0.600439 | 0 | 28 | 31.535714 | 92 |
jbhersch/avero_assessment | 3,418,793,985,999 | dfbf8a96177e4aeecfdf9ef654641b7944b7f590 | 3f4c1efd5abdc917723c2b1f951e06284b015b57 | /virtual_env/lib/python2.7/genericpath.py | 52a7acc50c72e16a5a67b4eb4d53f509d2dfc1cf | [] | no_license | https://github.com/jbhersch/avero_assessment | 57e9b2bf8e7e17df52fef127c5c35db960ae7df8 | ce1677e0cb107b142de9ea8c4a8e46d1a469eb60 | refs/heads/master | "2022-12-10T21:06:31.699616" | "2018-12-26T23:03:04" | "2018-12-26T23:03:04" | 163,022,330 | 0 | 0 | null | false | "2022-12-08T01:30:00" | "2018-12-24T20:41:44" | "2018-12-26T23:03:12" | "2022-12-08T01:29:59" | 48,870 | 0 | 0 | 4 | Python | false | false | /Users/jonathanhersch/anaconda/lib/python2.7/genericpath.py | UTF-8 | Python | false | false | 59 | py | 34 | genericpath.py | 26 | 0.864407 | 0.830508 | 0 | 1 | 59 | 59 |
leaders/Tibet.6 | 15,006,615,768,026 | 767cb24487c675dfeb98c5e81e9d55c1ef734594 | f8f61961b9a8a50c096fabe48aaa93e3803c3724 | /Latchet/src/main.py | c34bfe84fd71812a2ccf863ee4de355e9be28d29 | [
"MIT"
] | permissive | https://github.com/leaders/Tibet.6 | e32515ca9f9e86a1bf7210816b12746121e39b7e | 3c53060edafd80b9c4dafa10699a68d86a410c66 | refs/heads/master | "2022-03-01T14:41:33.430163" | "2019-10-25T16:33:03" | "2019-10-25T16:33:03" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding:utf-8
from mantis.fundamental.application.use_gevent import USE_GEVENT
if USE_GEVENT:
from gevent.queue import Queue
else:
from Queue import Queue
import json
import time
from threading import Thread
from collections import OrderedDict
import datetime
# from vnpy.trader.vtEvent import EVENT_LOG, EVENT_ERROR, EVENT_TICK, EVENT_CONTRACT
# from vnpy.trader.vtObject import VtSubscribeReq,VtTickData
from mantis.fundamental.application.app import instance
# from mantis.fundamental.utils.timeutils import datetime_to_timestamp
from mantis.fundamental.application.service import BaseService
# from mantis.trade.constants import *
from mantis.sg.fisher.model import set_database
class MainService(BaseService):
def __init__(self, name):
BaseService.__init__(self, name)
# self.active = False # 工作状态
self.queue = Queue() # 队列
# self.thread = Thread(target=self.threadDataFanout) # 线程
self.ee = None
self.mainEngine = None
self.logger = instance.getLogger()
self.symbols = {} # 已经订阅的合约
self.contracts = OrderedDict()
self.ticks_counter = 0
self.ticks_samples = []
self.tick_filters = []
self.contract_ticks = {} # { symbol: tick }
self.gatewayName = 'CTP'
def init(self, cfgs, **kwargs):
self.service_id = cfgs.get('id')
# self.service_type = ServiceType.LatchetServer
# super(MainService, self).init(cfgs)
self.cfgs = cfgs
BaseService.init(self, **kwargs)
self.init_database()
def init_database(self):
conn = instance.datasourceManager.get('mongodb').conn
db = conn['CTP_BlackLocust']
set_database(db)
return db
def setupFanoutAndLogHandler(self):
from mantis.trade.log import TradeServiceLogHandler
self.initFanoutSwitchers(self.cfgs.get('fanout'))
handler = TradeServiceLogHandler(self)
self.logger.addHandler(handler)
def start(self, block=True):
import http.gateway # 必须导入
self.setupFanoutAndLogHandler()
# 创建日志引擎
super(MainService, self).start()
self.active = True
# self.thread.start()
def stop(self):
super(MainService, self).stop()
self.mainEngine.exit()
if self.active:
self.active = False
# self.thread.join()
def join(self):
# self.thread.join()
pass
def onTick(self,symbol, tickobj):
pass
# def threadDataFanout(self):
# """运行插入线程"""
# import traceback
# while self.active:
# try:
# # print 'current tick queue size:', self.queue.qsize()
# # dbName, collectionName, d = self.queue.get(block=True, timeout=1)
# tick = self.queue.get(block=True, timeout=1)
# symbol = tick.vtSymbol
#
# #调试,仅允许调试合约发送
#
# if self.cfgs.get('debug_symbols',[]):
# if self.cfgs.get('debug_symbols').count(symbol) == 0:
# continue
#
# dt = datetime.datetime.strptime(' '.join([tick.date, tick.time]),'%Y%m%d %H:%M:%S.%f')
# tick.ts = datetime_to_timestamp( dt ) # 合约生成时间
# tick.ts_host = int(time.time()) # 主机系统时间
# tick.mp = self.contracts.get(symbol).marketProduct # IF,AU,CU,..
#
# # 传播到下级服务系统
# jsondata = json.dumps(tick.__dict__)
# self.dataFanout('switch0', jsondata, symbol=symbol)
#
# # -- cache current tick into redis ---
# key_name = CtpMarketSymbolTickFormat.format(symbol = tick.vtSymbol)
# redis = instance.datasourceManager.get('redis').conn
# redis.hmset(key_name,tick.__dict__)
#
# #-- cache for query --
# self.ticks_counter += 1
# if len(self.ticks_samples) > 2:
# del self.ticks_samples[0]
# self.ticks_samples.append(tick.__dict__)
#
# except Exception as e:
# # self.logger.error( str(e) )
# # traceback.print_exc()
# pass
#
| UTF-8 | Python | false | false | 4,420 | py | 146 | main.py | 97 | 0.572693 | 0.570596 | 0 | 129 | 32.271318 | 104 |
YikSanChan/pyflink-quickstart | 16,131,897,188,571 | e183f56ba12beb9087ec923acd33f3a1ef523a01 | 2e24502d10be1f563f52dd1d796ae66acd0d8a60 | /datagen_mock.py | ed974592407ec8f86e84dba2aa2c4d8d2bd47355 | [] | no_license | https://github.com/YikSanChan/pyflink-quickstart | 9184eb8eae788edf28964f2bfbda86d6b24abcef | d6b43d23a575e6393e1c58dab55587afb004c78d | refs/heads/master | "2023-04-23T06:27:57.354791" | "2021-05-08T11:43:14" | "2021-05-08T11:43:14" | 347,844,350 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from pyflink.table import EnvironmentSettings, StreamTableEnvironment
# https://ci.apache.org/projects/flink/flink-docs-stable/dev/table/connectors/datagen.html
env_settings = EnvironmentSettings.new_instance().in_streaming_mode().use_blink_planner().build()
table_env = StreamTableEnvironment.create(environment_settings=env_settings)
orders_source_ddl = """
CREATE TABLE Orders (
order_number BIGINT,
price DECIMAL(32,2),
buyer ROW<first_name STRING, last_name STRING>,
order_time TIMESTAMP(3)
) WITH (
'connector' = 'datagen',
'number-of-rows' = '10'
)
"""
printorders_sink_ddl = """
CREATE TABLE PrintOrders WITH ('connector' = 'print')
LIKE Orders (EXCLUDING ALL)
"""
table_env.execute_sql(orders_source_ddl)
table_env.execute_sql(printorders_sink_ddl)
table_env.from_path("Orders").insert_into("PrintOrders")
table_env.execute("Datagen Mock")
| UTF-8 | Python | false | false | 892 | py | 14 | datagen_mock.py | 11 | 0.7287 | 0.721973 | 0 | 30 | 28.733333 | 97 |
adolfoBee/algorithms | 953,482,781,118 | 96a8698242208b9ddbc0660f0ac0a3503823c98d | dc792759def3345cdd0ad748dbfe9f70644d8133 | /knapsack.py | 0db1412e3b3a4d2c0e4461869fcc51c2c65494bc | [] | no_license | https://github.com/adolfoBee/algorithms | dfccfd270163186d0bb7c85b93aa1743b6507961 | f2b7c2dcf96f032821feafcd5e8d3132ae297465 | refs/heads/master | "2021-01-20T01:22:19.318047" | "2017-04-24T16:20:57" | "2017-04-24T16:20:57" | 89,244,498 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Uses python3
def optimal_weight(W, w):
n = len(w)
matrix = [[0 for x in range(W+1)] for y in range(n+1)]
for i in range(1, n+1):
for j in range(1, W+1):
matrix[i][j] = matrix[i-1][j]
if w[i-1] <= j:
val = w[i-1] + matrix[i-1][j-w[i-1]]
if matrix[i][j] < val:
matrix[i][j] = val
return matrix[n][W]
W, n = map(int, input().split())
w = list(map(int, input().split()))
print(optimal_weight(W, w)) | UTF-8 | Python | false | false | 502 | py | 8 | knapsack.py | 8 | 0.464143 | 0.438247 | 0 | 21 | 22.952381 | 58 |
mikegilroy/LearningPython | 17,334,488,044,071 | 1fecd4f7eae9c4b1da35f8848f200785fedc2907 | 6cd168cd46b8d88a165cb376288a472ff85aa6c5 | /Treehouse/random_number_game.py | d4670a163c3a03b016ee1178ab421d7c9fa898b7 | [] | no_license | https://github.com/mikegilroy/LearningPython | a72fa5addba30912cb4ca813d1abeeb630f4f462 | f5125326b734bc52820edb4281acc3b4abcfe8e6 | refs/heads/master | "2021-01-25T06:56:33.821739" | "2017-06-07T12:10:35" | "2017-06-07T12:10:35" | 93,631,188 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
level = 100
random_number: int = None
guessed_number: int = None
def show_game_help():
print("Welcome to the random number game!")
print("Aim of the game: guess the number I'm thinking of")
print("The number will be between 0 and whatever level you've chosen...")
print("""
GIVE UP - this will end the game and reveal the number I'm thinking of
NEW GAME - this will start a new game with a new number
HELP - shows this really helpful help message
""")
def get_new_level():
try:
new_level = int(input("Choose your level:\n> "))
except:
print("That's not a number! Try again...")
get_new_level()
else:
return new_level
def start_new_game():
print("Starting new game...\n")
global random_number
random_number = random.randint(0, get_new_level())
print("Now start guessing...")
ask_for_number()
def ask_for_number():
guess = input("> ").lower()
if guess == "give up":
end_game(False)
elif guess == "new game":
start_new_game()
elif guess == "help":
show_game_help()
ask_for_number()
else:
try:
guessed_number = int(guess)
except:
print("That's not a number! Try again...")
ask_for_number()
else:
check_number(guessed_number)
def end_game(won):
if won:
print("YES! That's my number! You won!")
else:
print("Unlucky! The number was {}".format(random_number))
ask_to_play_again()
def ask_to_play_again():
play_again = input("Want to play again? Y/N \n> ")
if play_again.lower() == "y":
start_new_game()
else:
print("Thanks for playing!")
def check_number(number: int):
if number == random_number:
end_game(True)
elif number > random_number:
print("Almost, try a little lower...")
ask_for_number()
elif number < random_number:
print("Almost, try a little higher...")
ask_for_number()
def main():
show_game_help()
start_new_game()
main()
| UTF-8 | Python | false | false | 1,844 | py | 3 | random_number_game.py | 2 | 0.661063 | 0.658351 | 0 | 88 | 19.954545 | 74 |
csaling/cpsc430_game_engine | 14,216,341,786,778 | d37f92a99a16c8d1796717f2a31fb94e00f04665 | b00c3c8c014b1c5b9e82bc35e205a908846cb58e | /behavior_item_transfer.py | 79a2c27dc1be13b4ace2ae1e51f66291c1098224 | [] | no_license | https://github.com/csaling/cpsc430_game_engine | 95669273030a13c3ac8ce01e8dd558f2e5a306f8 | 7a319a7480f16ba0642888a006088776f421b8d7 | refs/heads/master | "2023-04-17T06:25:22.567363" | "2021-04-29T20:46:29" | "2021-04-29T20:46:29" | 331,025,053 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from behavior import Behavior
from game_logic import GameLogic
from sounds import Sounds
class ItemTransfer(Behavior):
def __init__(self, name, value, sound = None):
super(ItemTransfer, self).__init__()
self.name = name
self.value = value
self.sound = sound
def clicked(self, game_object):
if self.game_object.get_property(self.name) == self.value:
self.game_object.set_property(self.name, None)
game_object.set_property(self.name, self.value)
if self.sound:
Sounds.play_sound(self.sound)
| UTF-8 | Python | false | false | 635 | py | 59 | behavior_item_transfer.py | 54 | 0.587402 | 0.587402 | 0 | 19 | 31.736842 | 67 |
YiranH/Viola_Jones_649 | 4,724,464,044,095 | 1bbb364012c27dc8cb65c50e4cedbcf557e448d8 | ab2dc72703daffe59d2e892ef820824f9985df62 | /cascade.py | 38be3cc30df6bcd24e57c4af9d94b1e34913d70d | [] | no_license | https://github.com/YiranH/Viola_Jones_649 | 2dcd4a881c7cb78153177c937e0d2ea32e3acc83 | 1a56356640e826a037bee55007ccb8d163ff1368 | refs/heads/master | "2020-09-11T06:42:28.846673" | "2019-12-10T05:18:15" | "2019-12-10T05:18:15" | 221,976,266 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from haar import *
from adaboost import *
from helpers import *
import numpy as np
def learn_cascade(pos_train, neg_train, max_t):
F = np.ones(max_t + 10)
cascaded_classifiers = []
trains = pos_train + neg_train
img_height, img_width = pos_train[0].shape
img_height -= 1
img_width -= 1
features, n_f = get_feature_list(img_height, img_width)
guess = {}
for i in range(len(trains)):
image = trains[i]
for j in range(len(features)):
feature = features[j]
guess[(i, j)] = feature.feature_classifier(image)
if (i % 10 == 0):
print('guessing: %d / %d' % (i, len(trains)))
skip_img = set()
neg_train_len = len(neg_train)
for t in range(1, max_t + 1):
print('cascade round: %d' % t)
F[t] = F[t - 1]
feature_num = 0
classifier = []
while F[t] >= F[t - 1]:
feature_num += 1
print('cascade feature: %d' % feature_num)
classifier = learn_adaboost(pos_train, neg_train, feature_num, 'emp', guess_matrix=guess, skip_img = skip_img, pre_classifiers=classifier)
accuracy, fp, fn, tp, tn, next_skip_img = find_acc(classifier, pos_train,neg_train,skip_img)
F[t] = fp / neg_train_len
if next_skip_img != None:
skip_img = skip_img.union(next_skip_img)
neg_train_len -= tn
cascaded_classifiers.append(classifier)
return cascaded_classifiers
| UTF-8 | Python | false | false | 1,554 | py | 12 | cascade.py | 5 | 0.539897 | 0.530888 | 0 | 57 | 25.192982 | 150 |
Misterhex/python-bootcamp | 9,397,388,469,607 | a7a6d416e3fbf0d84a22d2c687c3da9f40465180 | 8d056a7ff35f2fa09dac9a2d95b7b5c8e53bc74f | /portscanner/scanner.py | 7f887dbbc6bbc390c1c6c46ca365401bfdade0f2 | [
"MIT"
] | permissive | https://github.com/Misterhex/python-bootcamp | 3e9e8b1d1f27e1856cb28921666a4dcc0b222cfb | 5f4576c1b8340db4ea18da7bec053a62ffc911ca | refs/heads/master | "2020-03-08T08:34:53.213286" | "2018-04-07T15:51:01" | "2018-04-07T15:51:01" | 128,025,308 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from multiprocessing import Pool
import socket
print("-" * 20)
hostname = raw_input("\nplease enter hostname to scan...\n")
print("looking up {}".format(hostname))
ipaddr = socket.gethostbyname(hostname)
print(ipaddr)
def scan(port):
try:
print("scanning port {} ...".format(port))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((ipaddr, port))
if result == 0:
print("port {} is open".format(port))
finally:
sock.close()
pool = Pool(50)
pool.map(scan, range(1, 1025))
print("-" * 8 + " Completed " + "-" * 8)
| UTF-8 | Python | false | false | 611 | py | 6 | scanner.py | 5 | 0.612111 | 0.592471 | 0 | 26 | 22.5 | 64 |
selinon/demo-worker | 17,987,323,045,295 | 1aebad2ce8072a5427caeaab40a702db9285ab36 | e5f40ad414cd4b578f2b7edfca83634467a904fd | /demo_worker/storages/__init__.py | 8d82b72a92a857de81bb9c80de3556feed82a2b5 | [
"MIT"
] | permissive | https://github.com/selinon/demo-worker | 54054ce008289a9e5c3e90373a73e794f7a38b4b | 7adef95eaa5badfb473f33408944fe2e63d96e74 | refs/heads/master | "2023-05-28T00:30:39.605006" | "2023-01-27T14:16:01" | "2023-01-27T14:16:01" | 131,403,187 | 0 | 2 | MIT | false | "2023-05-23T06:08:55" | "2018-04-28T11:18:38" | "2023-01-27T14:45:13" | "2023-05-23T06:08:55" | 153 | 0 | 2 | 3 | Python | false | false |
from .travis_logs import TravisLogsStorage
| UTF-8 | Python | false | false | 44 | py | 24 | __init__.py | 13 | 0.840909 | 0.840909 | 0 | 1 | 42 | 42 |
ren100700/ren | 9,208,409,890,892 | f7ec7f06023448d2a7673a004acc978d30e5e44f | 2f700a61ccb2e005ad086ce734da378b160cb854 | /gerenboke/user/urls.py | d1247a4b863f80ca40b089a953f6b61a5479425c | [] | no_license | https://github.com/ren100700/ren | 1d8becea61b965470ba7a505e5028e79263a19f2 | d95b2146f289a7f1b98af2fee587b898160e369d | refs/heads/master | "2020-03-29T19:27:40.212624" | "2018-11-26T13:02:08" | "2018-11-26T13:02:08" | 150,264,341 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf.urls import url
from user import views
urlpatterns = [
url(r'^index/',views.index,name='index'),
url(r'^share/',views.share,name='share'),
url(r'^about/',views.about,name='about'),
url(r'^gbook/',views.gbook,name='gbook'),
url(r'^info/',views.info,name='info'),
url(r'^list/',views.list,name='list'),
] | UTF-8 | Python | false | false | 327 | py | 56 | urls.py | 46 | 0.669725 | 0.669725 | 0 | 12 | 26.333333 | 42 |
surf3r92/Python_Space_Shooter | 481,036,361,162 | f7f581cfde5cc7c0e23f517ad6766914e54e140c | e10c8f281210c96e85d0e4044199b5cdab61667c | /lib/explosion.py | 2405cc3d763550d53bb557612914f63e515b6837 | [] | no_license | https://github.com/surf3r92/Python_Space_Shooter | fe2b3c94a667d18d45005799e808e13a963a4a79 | 49b9909749859c44b154b441265932cc8f3de651 | refs/heads/master | "2021-01-10T03:05:47.935801" | "2016-02-01T11:40:57" | "2016-02-01T11:40:57" | 48,750,813 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os, sys, pygame, random
from pygame.locals import *
from lib.methods import *
class Explosion(pygame.sprite.Sprite):
def __init__(self, pos):
pygame.sprite.Sprite.__init__(self)
self.image, self.rect = load_image("img/sprites/Blue Explosion/1.png", -1)
self.rect.center = pos
def update(self, pos):
self.rect.center = pos
def changeImage(currentExplosionImage, explosionSprite):
explosionSprite.sprites()[0].image, explosionSprite.sprites()[0].rect = currentExplosionImage | UTF-8 | Python | false | false | 528 | py | 16 | explosion.py | 13 | 0.695076 | 0.6875 | 0 | 17 | 30.117647 | 97 |
openworkouts/OpenWorkouts | 19,224,273,626,137 | fb8ff4feb65f3f4335078c733af3fbf1c0f7d94a | b8a6eb2abed4abdcece1449c19019f0ad85a7479 | /ow/tasks/mail.py | b42130776ea43c472888048fad6b8ac2747d5b3e | [
"BSD-3-Clause"
] | permissive | https://github.com/openworkouts/OpenWorkouts | 060c85bbdec0c62d44ec432704e12fbc58d33e85 | ecfed69e8c654c09bb8c074d8aedda9c13cd2235 | refs/heads/master | "2020-04-23T02:43:29.440162" | "2019-02-28T09:47:21" | "2019-02-28T09:47:21" | 170,855,430 | 4 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Mail handling code. Any periodic task related to send/get emails is here
"""
import fcntl
import logging
from repoze.sendmail.queue import ConsoleApp
log = logging.getLogger(__name__)
def queue_processor(env):
"""
Process the email queue, reusing repoze.sendmail default queue management
machinery.
"""
# This mimics what is done by running the "qp" utility from
# repoze.sendmail, but uses our default .ini settings instead of using a
# separate ini file.
# This function expects some qp.* parameters in the provided settings,
# using the mail.* parameters as a fallback in case the former were not
# there.
# Before doing anything, check if a lock file exists. If it exists, exit
# without doing anything, as another queue processor process is running.
# See ticket #1389 for more information
settings = env['registry'].settings
lock_filename = settings.get('mail.queue_processor_lock')
lock_file = open(lock_filename, 'w')
try:
fcntl.lockf(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
# Can't lock, probably another process is running, report to logging
# and exit
log.warning('Could not run the mail queue processing task, '
'could not acquire lock (maybe another process is '
'running?)')
return False
args = ['qp',
'--hostname', settings.get('qp.host', settings.get('mail.host')),
'--username', settings.get('qp.username',
settings.get('mail.username')),
'--password', settings.get('qp.password',
settings.get('mail.password')),
settings.get('mail.queue_path')]
app = ConsoleApp(args)
app.main()
| UTF-8 | Python | false | false | 1,816 | py | 131 | mail.py | 63 | 0.625551 | 0.623348 | 0 | 49 | 36.061224 | 77 |
vutrungtrieu/policyv1.0 | 9,122,510,542,554 | 763319193b3ef9d773759ceda1e83ac24df0a34b | 617fab1ae9ee78dce70a1df66780044451dcd864 | /policy-k/9/2.py | 5694c43a484f9b143a6c33ba817ce610af0e3f07 | [] | no_license | https://github.com/vutrungtrieu/policyv1.0 | 21b2f3b9fb30fb26e00efba41c37593a47325ff5 | 2ceded51f668ac92367c43dee77e96208eeaccc5 | refs/heads/master | "2016-06-01T09:15:31.350494" | "2015-12-22T08:46:13" | "2015-12-22T08:46:13" | 42,650,703 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/local/bin/python2.7
# -*- coding: utf-8 -*-
import os
f3 = open("k2.dat","a")
f1 = open("k1.dat").readlines()
f2 = open("title.dat").readlines()
for cmd,cmd_re in zip(f1,f2):
#f3.write("\""+cmd.strip()+"\",\""+cmd_re.strip()+"\"\n")
f3.write(cmd.strip()+",\""+cmd_re.strip()+"\"\n") | UTF-8 | Python | false | false | 305 | py | 29 | 2.py | 22 | 0.537705 | 0.498361 | 0 | 12 | 23.583333 | 58 |
Shahzadfarukh100/medical | 4,664,334,530,610 | ea35cd61db6f5cd75ffa8be3e04dc697466fee35 | 040f0bbf3df6d057639e3dfc8490d0f4efc519a3 | /newapp/admin.py | 400792f892dd094c2a052c1354272cf6b75fd0d1 | [] | no_license | https://github.com/Shahzadfarukh100/medical | f8852350058315a151e2edfefbaa2afc8164e039 | 6dcf47fd721911b705309321d3a44db61391f856 | refs/heads/master | "2020-03-28T16:58:45.138463" | "2018-10-16T10:00:23" | "2018-10-16T10:00:23" | 148,747,024 | 0 | 1 | null | false | "2018-10-18T07:02:12" | "2018-09-14T06:44:04" | "2018-10-16T10:00:25" | "2018-10-18T07:01:25" | 8,088 | 0 | 1 | 3 | HTML | false | null | from django.contrib import admin
from newapp.models import Banner, Appointment, Consultant, Input, BlogPost, Newsletter
admin.site.register(Banner)
admin.site.register(Appointment)
admin.site.register(Consultant)
admin.site.register(Input)
admin.site.register(BlogPost)
admin.site.register(Newsletter)
| UTF-8 | Python | false | false | 304 | py | 34 | admin.py | 23 | 0.828947 | 0.828947 | 0 | 10 | 29.4 | 86 |
isabella232/python-client | 5,789,615,955,836 | 199dc2595397e6d10fc64f71715d4d14c99fefe3 | 71212e4521971d85206672fa91f0fdd1a1ab4f1f | /ldclient/redis_requester.py | 89cb1085d9ea697eba2cae033509504625b04a81 | [
"Apache-2.0"
] | permissive | https://github.com/isabella232/python-client | a0f6b56add049e6bc04e5b6bbd9dbcaecdfdf208 | 4e44553d5228c462f8bfa87bcbeb807973a3170b | refs/heads/master | "2021-05-30T11:12:00.259406" | "2016-01-26T21:20:27" | "2016-01-26T21:20:27" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
from ldclient.expiringdict import ExpiringDict
from ldclient.interfaces import FeatureRequester
import redis
# noinspection PyUnusedLocal
def create_redis_ldd_requester(api_key, config, **kwargs):
return RedisLDDRequester(config, **kwargs)
class ForgetfulDict(dict):
def __setitem__(self, key, value):
pass
class RedisLDDRequester(FeatureRequester):
"""
Requests features from redis, usually stored via the LaunchDarkly Daemon (LDD). Recommended to be combined
with the ExpiringInMemoryFeatureStore
"""
def __init__(self, config,
expiration=15,
redis_host='localhost',
redis_port=6379,
redis_prefix='launchdarkly'):
"""
:type config: Config
"""
self._redis_host = redis_host
self._redis_port = redis_port
self._features_key = "{}:features".format(redis_prefix)
self._cache = ForgetfulDict() if expiration == 0 else ExpiringDict(max_len=config.capacity,
max_age_seconds=expiration)
self._pool = None
def _get_connection(self):
if self._pool is None:
self._pool = redis.ConnectionPool(host=self._redis_host, port=self._redis_port)
return redis.Redis(connection_pool=self._pool)
def get(self, key, callback):
cached = self._cache.get(key)
if cached is not None:
return callback(cached)
else:
rd = self._get_connection()
raw = rd.hget(self._features_key, key)
if raw:
val = json.loads(raw.decode('utf-8'))
else:
val = None
self._cache[key] = val
return callback(val)
| UTF-8 | Python | false | false | 1,803 | py | 26 | redis_requester.py | 20 | 0.580144 | 0.575707 | 0 | 54 | 32.388889 | 111 |
InstituteforDiseaseModeling/selvaraj_spatial_covid_vax_2021 | 10,050,223,489,406 | fc53b41546400478ed00591f2f9b22f520cd9069 | 67e5bce1461f6d0b38130c10e97741078a148b0e | /build_input_files/refdat_policy_effect.py | 8d3829294bd1cfe99429c56b629f1633b920ab16 | [] | no_license | https://github.com/InstituteforDiseaseModeling/selvaraj_spatial_covid_vax_2021 | 68aabf337a9aacd9cb6903f8aa8807b12c964ed5 | a4a20bfc07fe71f2fdab8a3addad637072445bfd | refs/heads/main | "2023-05-25T19:38:42.827107" | "2021-06-07T23:41:17" | "2021-06-07T23:41:17" | 374,775,221 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import numpy as np
import os
policy_file = os.path.join('data_input', 'combined_data.csv')
def custom_round(x, base=5):
return int(base * round(float(x) / base))
def social_distancing_effect(countries: list = None, sd_bins=10):
"""
:param countries: list of countries to consider for averaging social distancing effects
:param sd_bins: minimum percentage difference in social distancing effect
:return: dataframe of social distancing efficacy ('Index_value') and date of change to new efficacy
"""
df = pd.read_csv(policy_file, encoding="ISO-8859-1")
df = df[df['Country'].isin(countries)]
df = df.groupby('Date_from_first_case')['Index_value'].apply(np.nanmean).reset_index()
df.dropna(inplace=True)
# round index_value to nearest 10
df['Index_value'] = df['Index_value'].apply(lambda x: custom_round(x, base=sd_bins))
df['Diff'] = df['Index_value'].diff()
df = df[df['Diff'] != 0]
df['Index_value'] = 1 - (df['Index_value'] / 100 * 1.0)
# # Add extra line to open all settings
df_add = pd.DataFrame.from_dict({'Date_from_first_case': [250], 'Index_value': [1], 'Diff': [100]})
df = pd.concat([df, df_add])
return df | UTF-8 | Python | false | false | 1,223 | py | 210 | refdat_policy_effect.py | 21 | 0.656582 | 0.636958 | 0 | 35 | 33.971429 | 103 |
pavankumarjs/GrootFSM | 2,130,303,820,856 | 97c0a1c3af4433b9bc91730dbdc6ce2012f6cf28 | 2de1120d63b77c914a2ce530a22c2f088a62dcf6 | /tests/test_base.py | 7d316b0f09a38c8ec16a21b28386c560c23f1eb8 | [
"MIT"
] | permissive | https://github.com/pavankumarjs/GrootFSM | 804aa1e9480903d22c8442428188bd0f24b84e21 | 29ff50764c8d2bcf4fecb55ef4e8a764b8b3da32 | refs/heads/master | "2021-01-20T08:20:12.407759" | "2017-08-29T21:01:14" | "2017-08-29T21:01:14" | 101,554,254 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from unittest import TestCase
import logging
import sys
from mock import Mock
from fsm.base import FSMBuilder, FSMException
def setUpModule():
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def tearDownModule():
pass
class TestFSM(TestCase):
def setUp(self):
self.builder = FSMBuilder()
def tearDown(self):
pass
def test_fsm_builder_with_random_names(self):
before_exit1, after_entry1 = Mock(), Mock()
state1 = self.builder.add_state( before_exit=before_exit1, after_entry=after_entry1)
before_exit2, after_entry2 = Mock(), Mock()
state2 = self.builder.add_state(before_exit=before_exit2, after_entry=after_entry2)
before_exit3, after_entry3 = Mock(), Mock()
state3 = self.builder.add_state(before_exit=before_exit3, after_entry=after_entry3)
on_transition11, on_transition12, on_transition23, on_transition31 = Mock(), Mock(), Mock(), Mock()
transition11 = self.builder.add_transition(state1.name, state1.name, on_transition=on_transition11)
transition12 = self.builder.add_transition(state1.name, state2.name, on_transition=on_transition12)
transition23 = self.builder.add_transition(state2.name, state3.name, on_transition=on_transition23)
transition31 = self.builder.add_transition(state3.name, state1.name, on_transition=on_transition31)
self.builder.set_initial_state(state1.name)
fsm = self.builder.build()
self.assertEqual(before_exit1.call_count, 0)
self.assertEqual(after_entry1.call_count, 0)
self.assertEqual(on_transition11.call_count, 0)
fsm.execute_transition_to(state1.name, test_arg=111)
self.assertEqual(fsm.state, state1.name)
self.assertEqual(before_exit1.call_count, 1)
before_exit1.assert_called_with(test_arg=111)
self.assertEqual(after_entry1.call_count, 1)
after_entry1.assert_called_with(test_arg=111)
self.assertEqual(on_transition11.call_count, 1)
on_transition11.assert_called_with(test_arg=111)
self.assertRaises(FSMException, fsm.execute_transition_to, state3.name)
self.assertEqual(after_entry2.call_count, 0)
self.assertEqual(on_transition12.call_count, 0)
fsm.execute_transition_to(state2.name)
self.assertEqual(fsm.state, state2.name)
self.assertEqual(before_exit1.call_count, 2)
self.assertEqual(after_entry2.call_count, 1)
self.assertEqual(on_transition12.call_count, 1)
self.assertRaises(FSMException, fsm.execute_transition, transition31.name, **{'test_arg':111})
before_exit2.assert_not_called()
on_transition31.assert_not_called()
self.assertEqual(before_exit2.call_count, 0)
self.assertEqual(after_entry3.call_count, 0)
self.assertEqual(on_transition23.call_count, 0)
fsm.execute_transition(transition23.name)
self.assertEqual(fsm.state, state3.name)
self.assertEqual(before_exit2.call_count, 1)
self.assertEqual(after_entry3.call_count, 1)
self.assertEqual(on_transition23.call_count, 1)
def test_fsm_builder_with_names(self):
before_exit1, after_entry1 = Mock(), Mock()
state1 = self.builder.add_named_state('state1', before_exit=before_exit1, after_entry=after_entry1)
before_exit2, after_entry2 = Mock(), Mock()
state2 = self.builder.add_named_state('state2', before_exit=before_exit2, after_entry=after_entry2)
before_exit3, after_entry3 = Mock(), Mock()
state3 = self.builder.add_named_state('state3', before_exit=before_exit3, after_entry=after_entry3)
on_transition11, on_transition12, on_transition23, on_transition31 = Mock(), Mock(), Mock(), Mock()
transition11 = self.builder.add_named_transition('transition11', state1.name, state1.name, on_transition=on_transition11)
transition12 = self.builder.add_named_transition('transition12', state1.name, state2.name, on_transition=on_transition12)
transition23 = self.builder.add_named_transition('transition23', state2.name, state3.name, on_transition=on_transition23)
transition31 = self.builder.add_named_transition('transition31', state3.name, state1.name, on_transition=on_transition31)
self.builder.set_initial_state(state1.name)
fsm = self.builder.build()
self.assertEqual(before_exit1.call_count, 0)
self.assertEqual(after_entry1.call_count, 0)
self.assertEqual(on_transition11.call_count, 0)
fsm.execute_transition_to('state1', test_arg=111)
self.assertEqual(fsm.state, 'state1')
self.assertEqual(before_exit1.call_count, 1)
self.assertEqual(after_entry1.call_count, 1)
self.assertEqual(on_transition11.call_count, 1)
self.assertRaises(FSMException, fsm.execute_transition_to, 'state3')
self.assertEqual(after_entry2.call_count, 0)
self.assertEqual(on_transition12.call_count, 0)
fsm.execute_transition_to('state2')
self.assertEqual(fsm.state, 'state2')
self.assertEqual(before_exit1.call_count, 2)
self.assertEqual(after_entry2.call_count, 1)
self.assertEqual(on_transition12.call_count, 1)
self.assertRaises(FSMException, fsm.execute_transition, 'transition31', **{'test_arg':111})
before_exit2.assert_not_called()
on_transition31.assert_not_called()
self.assertEqual(before_exit2.call_count, 0)
self.assertEqual(after_entry3.call_count, 0)
self.assertEqual(on_transition23.call_count, 0)
fsm.execute_transition('transition23')
self.assertEqual(fsm.state, 'state3')
self.assertEqual(before_exit2.call_count, 1)
self.assertEqual(after_entry3.call_count, 1)
self.assertEqual(on_transition23.call_count, 1)
def test_fsm_builder_error(self):
before_exit1, after_entry1 = Mock(), Mock()
state1 = self.builder.add_state(before_exit=before_exit1, after_entry=after_entry1)
before_exit2, after_entry2 = Mock(), Mock()
state2 = self.builder.add_state(before_exit=before_exit2, after_entry=after_entry2)
on_transition12 = Mock()
transition12 = self.builder.add_transition(state1.name, state2.name, on_transition=on_transition12)
self.assertRaises(FSMException, self.builder.build)
def test_fsm_builder_duplicate_transition_error(self):
before_exit1, after_entry1 = Mock(), Mock()
state1 = self.builder.add_state(before_exit=before_exit1, after_entry=after_entry1)
before_exit2, after_entry2 = Mock(), Mock()
state2 = self.builder.add_state(before_exit=before_exit2, after_entry=after_entry2)
on_transition11, on_transition12 = Mock(), Mock()
transition11 = self.builder.add_transition(state1.name, state1.name, on_transition=on_transition11)
transition11_duplicate = self.builder.add_transition(state1.name, state1.name, on_transition=on_transition11)
transition12 = self.builder.add_transition(state1.name, state2.name, on_transition=on_transition12)
self.builder.set_initial_state(state1.name)
self.assertRaises(FSMException, self.builder.build)
def test_fsm_builder_duplicate_transition_name_error(self):
before_exit1, after_entry1 = Mock(), Mock()
state1 = self.builder.add_state(before_exit=before_exit1, after_entry=after_entry1)
before_exit2, after_entry2 = Mock(), Mock()
state2 = self.builder.add_state(before_exit=before_exit2, after_entry=after_entry2)
on_transition11, on_transition12 = Mock(), Mock()
transition11 = self.builder.add_named_transition('transition11', state1.name, state1.name, on_transition=on_transition11)
transition11_duplicate = self.builder.add_named_transition('transition11', state1.name, state1.name, on_transition=on_transition11)
transition12 = self.builder.add_transition(state1.name, state2.name, on_transition=on_transition12)
self.builder.set_initial_state(state1.name)
self.assertRaises(FSMException, self.builder.build)
def test_fsm_builder_duplicate_state_error(self):
before_exit1, after_entry1 = Mock(), Mock()
state1 = self.builder.add_named_state('state1', before_exit=before_exit1, after_entry=after_entry1)
state1_duplicate = self.builder.add_named_state('state1', before_exit=before_exit1, after_entry=after_entry1)
before_exit2, after_entry2 = Mock(), Mock()
state2 = self.builder.add_state(before_exit=before_exit2, after_entry=after_entry2)
on_transition11, on_transition12 = Mock(), Mock()
transition11 = self.builder.add_transition(state1.name, state1.name, on_transition=on_transition11)
transition12 = self.builder.add_transition(state1.name, state2.name, on_transition=on_transition12)
self.builder.set_initial_state(state1.name)
self.assertRaises(FSMException, self.builder.build)
| UTF-8 | Python | false | false | 9,009 | py | 3 | test_base.py | 2 | 0.698524 | 0.658786 | 0 | 179 | 49.329609 | 139 |
ThomasThorpe/CallDataRecordCorrelation | 9,302,899,175,232 | 7476ee7ae3cafc12bfda056f53177beb34baa548 | 1444eddad91a939d7573e612175770fac8e6f021 | /ExampleGeneratorTest.py | e1889ac0a7a3c8d7ed0248b83e140fd4ce810ea5 | [] | no_license | https://github.com/ThomasThorpe/CallDataRecordCorrelation | 145aa6fb555e4691f3f439d93105b881b254b4e3 | b7a030b2a6c7cbf2f552edffddfd0a44c799ea6c | refs/heads/master | "2020-06-20T23:04:18.205279" | "2019-07-16T23:26:20" | "2019-07-16T23:26:20" | 197,280,894 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest,math,datetime
from CLIDChecker import CheckCLID
import ExampleGenerator as e
dateTimeFormat = "%Y%m%d%H%M%S"
class TestExampleGenerator(unittest.TestCase):
def test_MobileNumber(self):
self.assertTrue(CheckCLID(e.MobileNumber()))
def test_GeographicalNumber(self):
self.assertTrue(CheckCLID(e.GeographicalNumber()))
def test_FakeNumber(self):
for i in range(0,6):
with self.subTest(i=i):
x = e.FakeNumber(i)
for j in range(len(x)):
with self.subTest(j=j):
self.assertIn(x[j],["0","1","2","3","4","5","6","7","8","9"])
def test_NonSpooferNonSpoofer1(self):
num1 = e.MobileNumber()
num2 = e.MobileNumber()
for i in range(0,5):
with self.subTest(i=i):
if i == 0: #test without parameters
x = e.NonSpooferNonSpoofer1()
r1, r2 = x[0], x[1]
for j in range(0,9):
with self.subTest(j=j):
if j == 0:
self.assertTrue(len(r1) == 3)
elif j == 1:
self.assertTrue(len(r2) == 3)
elif j == 2:
self.assertIn(r1[0],["MT","MO"])
elif j == 3:
self.assertIn(r2[0],["MT","MO"])
elif j == 4:
self.assertTrue(CheckCLID(r1[1]))
elif j == 5:
self.assertTrue(CheckCLID(r2[1]))
elif j == 6:
self.assertTrue(CheckCLID(r1[2]))
elif j == 7:
self.assertTrue(CheckCLID(r2[2]))
elif j == 8:
if r1[0] == "MT" and r2[0] == "MT":
self.assertTrue((r1[1] == r2[1]) and (r1[2] != r2[2]))
else:
self.assertTrue((r1[1] == r2[1]) and (r1[2] == r2[2]))
elif i == 1: #test with num1 being provided
x = e.NonSpooferNonSpoofer1(num1=num1)
r1, r2 = x[0], x[1]
for j in range(0,10):
with self.subTest(j=j):
if j == 0:
self.assertTrue(len(r1) == 3)
elif j == 1:
self.assertTrue(len(r2) == 3)
elif j == 2:
self.assertIn(r1[0],["MT","MO"])
elif j == 3:
self.assertIn(r2[0],["MT","MO"])
elif j == 4:
self.assertTrue(CheckCLID(r1[1]))
elif j == 5:
self.assertTrue(CheckCLID(r2[1]))
elif j == 6:
self.assertTrue(CheckCLID(r1[2]))
elif j == 7:
self.assertTrue(CheckCLID(r2[2]))
elif j == 8:
if r1[0] == "MT" and r2[0] == "MT":
self.assertTrue((r1[1] == r2[1]) and (r1[2] != r2[2]))
else:
self.assertTrue((r1[1] == r2[1]) and (r1[2] == r2[2]))
elif j == 9:
if r1[0] == "MO" and r2[0] == "MT":
self.assertTrue((r1[1] == num1) and (r2[1] == num1))
elif r1[0] == "MT" and r2[0] == "MO":
self.assertTrue((r1[2] == num1) and (r2[2] == num1))
elif r1[0] == "MT" and r2[0] == "MT":
self.assertTrue(r1[2] == num1)
elif i == 2: #test with num2 being provided
x = e.NonSpooferNonSpoofer1(num2=num2)
r1, r2 = x[0], x[1]
for j in range(0,10):
with self.subTest(j=j):
if j == 0:
self.assertTrue(len(r1) == 3)
elif j == 1:
self.assertTrue(len(r2) == 3)
elif j == 2:
self.assertIn(r1[0],["MT","MO"])
elif j == 3:
self.assertIn(r2[0],["MT","MO"])
elif j == 4:
self.assertTrue(CheckCLID(r1[1]))
elif j == 5:
self.assertTrue(CheckCLID(r2[1]))
elif j == 6:
self.assertTrue(CheckCLID(r1[2]))
elif j == 7:
self.assertTrue(CheckCLID(r2[2]))
elif j == 8:
if r1[0] == "MT" and r2[0] == "MT":
self.assertTrue((r1[1] == r2[1]) and (r1[2] != r2[2]))
else:
self.assertTrue((r1[1] == r2[1]) and (r1[2] == r2[2]))
elif j == 9:
if r1[0] == "MO" and r2[0] == "MT":
self.assertTrue((r2[2] == num2) and (r1[2] == num2))
elif r1[0] == "MT" and r2[0] == "MO":
self.assertTrue((r1[1] == num2) and (r2[1] == num2))
elif r1[0] == "MT" and r2[0] == "MT":
self.assertTrue(r2[2] == num2)
elif i == 3: #test with num1 and num2 being provided
x = e.NonSpooferNonSpoofer1(num1=num1,num2=num2)
r1, r2 = x[0], x[1]
for j in range(0,10):
with self.subTest(j=j):
if j == 0:
self.assertTrue(len(r1) == 3)
elif j == 1:
self.assertTrue(len(r2) == 3)
elif j == 2:
self.assertIn(r1[0],["MT","MO"])
elif j == 3:
self.assertIn(r2[0],["MT","MO"])
elif j == 4:
self.assertTrue(CheckCLID(r1[1]))
elif j == 5:
self.assertTrue(CheckCLID(r2[1]))
elif j == 6:
self.assertTrue(CheckCLID(r1[2]))
elif j == 7:
self.assertTrue(CheckCLID(r2[2]))
elif j == 8:
if r1[0] == "MT" and r2[0] == "MT":
self.assertTrue((r1[1] == r2[1]) and (r1[2] != r2[2]))
else:
self.assertTrue((r1[1] == r2[1]) and (r1[2] == r2[2]))
elif j == 9:
if r1[0] == "MO" and r2[0] == "MT":
self.assertTrue((r1[1] == num1) and (r1[2] == num2) and (r2[1] == num1) and (r2[2] == num2))
elif r1[0] == "MT" and r2[0] == "MO":
self.assertTrue((r1[1] == num2) and (r1[2] == num1) and (r2[1] == num2) and (r2[2] == num1))
elif r1[0] == "MT" and r2[0] == "MT":
self.assertTrue((r1[2] == num1) and (r2[2] == num2))
elif i == 4:#test numbers are in correct places when given each pair of call types
for j in range(0,3):
with self.subTest(j=j):
if j == 0:
x = e.NonSpooferNonSpoofer1(num1=num1,num2=num2,types=("MO","MT"))
r1, r2 = x[0], x[1]
self.assertTrue((r1[1] == r2[1]) and (r1[2] == r2[2]))
elif j == 1:
x = e.NonSpooferNonSpoofer1(num1=num1,num2=num2,types=("MT","MO"))
r1, r2 = x[0], x[1]
self.assertTrue((r1[1] == r2[1]) and (r1[2] == r2[2]))
elif j == 2:
x = e.NonSpooferNonSpoofer1(num1=num1,num2=num2,types=("MT","MT"))
r1, r2 = x[0], x[1]
self.assertTrue((r1[1] == r2[1]) and (r1[2] != r2[2]))
def test_NonSpooferNonSpoofer2(self):
t1 = datetime.datetime.today()
t1 = t1.strftime(dateTimeFormat)
gap = 15
for i in range(0,3):
with self.subTest(i=i):
if i == 0: #test with call types MO,MT
x = e.NonSpooferNonSpoofer1(types=("MO","MT"))
r1, r2 = x[0], x[1]
x = e.NonSpooferNonSpoofer2(t1,gap,r1,r2)
r1, r2 = x[0], x[1]
for j in range(0,12):
with self.subTest(j=j):
if j == 0:
self.assertTrue(len(x) == 3)
elif j == 1:
self.assertTrue(type(datetime.datetime.strptime(x[2],dateTimeFormat)) == datetime.datetime)
elif j == 2:
self.assertTrue(len(r1) == 5)
elif j == 3:
self.assertTrue(len(r2) == 5)
elif j == 4:
self.assertTrue(type(datetime.datetime.strptime(r1[0],dateTimeFormat)) ==datetime.datetime)
elif j == 5:
self.assertTrue(type(datetime.datetime.strptime(r1[1],dateTimeFormat)) == datetime.datetime)
elif j == 6:
self.assertTrue(type(datetime.datetime.strptime(r2[0],dateTimeFormat)) == datetime.datetime)
elif j == 7:
self.assertTrue(type(datetime.datetime.strptime(r2[1],dateTimeFormat)) == datetime.datetime)
elif j == 8:
start1 = datetime.datetime.strptime(r1[0],dateTimeFormat)
t2 = datetime.datetime.strptime(t1,dateTimeFormat)
gap2 = datetime.timedelta(seconds = gap)
self.assertTrue(start1 >= (t2 + gap2))
elif j == 9:
start1 = datetime.datetime.strptime(r1[0],dateTimeFormat)
end1 = datetime.datetime.strptime(r1[1],dateTimeFormat)
self.assertTrue(end1 > start1)
elif j == 10:
start1 = datetime.datetime.strptime(r1[0],dateTimeFormat)
start2 = datetime.datetime.strptime(r2[0],dateTimeFormat)
self.assertTrue(start2 >= start1)
elif j == 11:
start2 = datetime.datetime.strptime(r2[0],dateTimeFormat)
end2 = datetime.datetime.strptime(r2[1],dateTimeFormat)
self.assertTrue(end2 > start2)
elif i == 1: #test with call types MT,MO
x = e.NonSpooferNonSpoofer1(types=("MT","MO"))
r1, r2 = x[0], x[1]
x = e.NonSpooferNonSpoofer2(t1,gap,r1,r2)
r1, r2 = x[0], x[1]
for j in range(0,12):
with self.subTest(j=j):
if j == 0:
self.assertTrue(len(x) == 3)
elif j == 1:
self.assertTrue(type(datetime.datetime.strptime(x[2],dateTimeFormat)) == datetime.datetime)
elif j == 2:
self.assertTrue(len(r1) == 5)
elif j == 3:
self.assertTrue(len(r2) == 5)
elif j == 4:
self.assertTrue(type(datetime.datetime.strptime(r1[0],dateTimeFormat)) ==datetime.datetime)
elif j == 5:
self.assertTrue(type(datetime.datetime.strptime(r1[1],dateTimeFormat)) == datetime.datetime)
elif j == 6:
self.assertTrue(type(datetime.datetime.strptime(r2[0],dateTimeFormat)) == datetime.datetime)
elif j == 7:
self.assertTrue(type(datetime.datetime.strptime(r2[1],dateTimeFormat)) == datetime.datetime)
elif j == 8:
start2 = datetime.datetime.strptime(r2[0],dateTimeFormat)
t2 = datetime.datetime.strptime(t1,dateTimeFormat)
gap2 = datetime.timedelta(seconds = gap)
self.assertTrue(start2 >= (t2 + gap2))
elif j == 9:
start1 = datetime.datetime.strptime(r1[0],dateTimeFormat)
end1 = datetime.datetime.strptime(r1[1],dateTimeFormat)
self.assertTrue(end1 > start1)
elif j == 10:
start1 = datetime.datetime.strptime(r1[0],dateTimeFormat)
start2 = datetime.datetime.strptime(r2[0],dateTimeFormat)
self.assertTrue(start1 >= start2)
elif j == 11:
start2 = datetime.datetime.strptime(r2[0],dateTimeFormat)
end2 = datetime.datetime.strptime(r2[1],dateTimeFormat)
self.assertTrue(end2 > start2)
elif i == 2: #test with call types MT,MT
x = e.NonSpooferNonSpoofer1(types=("MT","MT"))
r1, r2 = x[0], x[1]
x = e.NonSpooferNonSpoofer2(t1,gap,r1,r2)
r1, r2 = x[0], x[1]
for j in range(0,12):
with self.subTest(j=j):
if j == 0:
self.assertTrue(len(x) == 3)
elif j == 1:
self.assertTrue(type(datetime.datetime.strptime(x[2],dateTimeFormat)) == datetime.datetime)
elif j == 2:
self.assertTrue(len(r1) == 5)
elif j == 3:
self.assertTrue(len(r2) == 5)
elif j == 4:
self.assertTrue(type(datetime.datetime.strptime(r1[0],dateTimeFormat)) ==datetime.datetime)
elif j == 5:
self.assertTrue(type(datetime.datetime.strptime(r1[1],dateTimeFormat)) == datetime.datetime)
elif j == 6:
self.assertTrue(type(datetime.datetime.strptime(r2[0],dateTimeFormat)) == datetime.datetime)
elif j == 7:
self.assertTrue(type(datetime.datetime.strptime(r2[1],dateTimeFormat)) == datetime.datetime)
elif j == 8:
start1 = datetime.datetime.strptime(r1[0],dateTimeFormat)
t2 = datetime.datetime.strptime(t1,dateTimeFormat)
gap2 = datetime.timedelta(seconds = gap)
self.assertTrue(start1 >= (t2 + gap2))
elif j == 9:
start1 = datetime.datetime.strptime(r1[0],dateTimeFormat)
end1 = datetime.datetime.strptime(r1[1],dateTimeFormat)
self.assertTrue(end1 > start1)
elif j == 10:
start1 = datetime.datetime.strptime(r1[0],dateTimeFormat)
start2 = datetime.datetime.strptime(r2[0],dateTimeFormat)
self.assertTrue(start2 >= start1)
elif j == 11:
start2 = datetime.datetime.strptime(r2[0],dateTimeFormat)
end2 = datetime.datetime.strptime(r2[1],dateTimeFormat)
self.assertTrue(end2 > start2)
def test_SpooferSpooferRC1(self):
num1 = e.MobileNumber()
num2 = e.MobileNumber()
for i in range(0,4):
with self.subTest(i=i):
if i == 0: #test without parameters
x = e.SpooferSpooferRC1()
r1, r2 = x[0], x[1]
for j in range(0,8):
with self.subTest(j=j):
if j == 0:
self.assertTrue(len(r1) == 3)
elif j == 1:
self.assertTrue(len(r2) == 3)
elif j == 2:
self.assertTrue(r1[0] == "MT")
elif j == 3:
self.assertTrue(r2[0] == "MT")
elif j == 4:
self.assertTrue(CheckCLID(r1[2]))
elif j == 5:
self.assertTrue(CheckCLID(r2[2]))
elif j == 6:
self.assertFalse(r1[1] == None)
elif j == 7:
self.assertFalse(r2[1] == None)
elif i == 1: #test with providing num1
x = e.SpooferSpooferRC1(num1=num1)
r1, r2 = x[0], x[1]
for j in range(0,9):
with self.subTest(j=j):
if j == 0:
self.assertTrue(len(r1) == 3)
elif j == 1:
self.assertTrue(len(r2) == 3)
elif j == 2:
self.assertTrue(r1[0] == "MT")
elif j == 3:
self.assertTrue(r2[0] == "MT")
elif j == 4:
self.assertTrue(CheckCLID(r1[2]))
elif j == 5:
self.assertTrue(r1[2] == num1)
elif j == 6:
self.assertTrue(CheckCLID(r2[2]))
elif j == 7:
self.assertFalse(r1[1] == None)
elif j == 8:
self.assertFalse(r2[1] == None)
elif i == 2: #test with providing num2
x = e.SpooferSpooferRC1(num2=num2)
r1, r2 = x[0], x[1]
for j in range(0,9):
with self.subTest(j=j):
if j == 0:
self.assertTrue(len(r1) == 3)
elif j == 1:
self.assertTrue(len(r2) == 3)
elif j == 2:
self.assertTrue(r1[0] == "MT")
elif j == 3:
self.assertTrue(r2[0] == "MT")
elif j == 4:
self.assertTrue(CheckCLID(r1[2]))
elif j == 5:
self.assertTrue(r2[2] == num2)
elif j == 6:
self.assertTrue(CheckCLID(r2[2]))
elif j == 7:
self.assertFalse(r1[1] == None)
elif j == 8:
self.assertFalse(r2[1] == None)
elif i == 3: #test with providing both num1 and num2
x = e.SpooferSpooferRC1(num1=num1,num2=num2)
r1, r2 = x[0], x[1]
for j in range(0,10):
with self.subTest(j=j):
if j == 0:
self.assertTrue(len(r1) == 3)
elif j == 1:
self.assertTrue(len(r2) == 3)
elif j == 2:
self.assertTrue(r1[0] == "MT")
elif j == 3:
self.assertTrue(r2[0] == "MT")
elif j == 4:
self.assertTrue(CheckCLID(r1[2]))
elif j == 5:
self.assertTrue(r2[2] == num2)
elif j == 6:
self.assertTrue(r1[2] == num1)
elif j == 7:
self.assertTrue(CheckCLID(r2[2]))
elif j == 8:
self.assertFalse(r1[1] == None)
elif j == 9:
self.assertFalse(r2[1] == None)
def test_SpooferSpooferRC2(self):
t1 = datetime.datetime.today()
t1 = t1.strftime(dateTimeFormat)
gap = 15
x = e.SpooferSpooferRC1()
r1, r2 = x[0], x[1]
x = e.SpooferSpooferRC2(t1,gap,r1,r2)
r1, r2 = x[0], x[1]
for i in range(0,11):
with self.subTest(i=i):
if i == 0:
self.assertTrue(len(x) == 3)
elif i == 1:
self.assertTrue(type(datetime.datetime.strptime(x[2],dateTimeFormat)) == datetime.datetime)
elif i == 2:
self.assertTrue(len(r1) == 5)
elif i == 3:
self.assertTrue(len(r2) == 5)
elif i == 4:
self.assertTrue(type(datetime.datetime.strptime(r1[0],dateTimeFormat)) == datetime.datetime)
elif i == 5:
self.assertTrue(type(datetime.datetime.strptime(r1[1],dateTimeFormat)) == datetime.datetime)
elif i == 6:
self.assertTrue(type(datetime.datetime.strptime(r2[0],dateTimeFormat)) == datetime.datetime)
elif i == 7:
self.assertTrue(type(datetime.datetime.strptime(r2[1],dateTimeFormat)) == datetime.datetime)
elif i == 8:
start1 = datetime.datetime.strptime(r1[0],dateTimeFormat)
end1 = datetime.datetime.strptime(r1[1],dateTimeFormat)
self.assertTrue(end1 > start1)
elif i == 9:
start2 = datetime.datetime.strptime(r2[0],dateTimeFormat)
end2 = datetime.datetime.strptime(r2[1],dateTimeFormat)
self.assertTrue(end2 > start2)
elif i == 10:
start1 = datetime.datetime.strptime(r1[0],dateTimeFormat)
start2 = datetime.datetime.strptime(r2[0],dateTimeFormat)
if start1 > start2:
delta = start1 - start2
else:
delta = start2 - start1
self.assertTrue((delta.seconds >= 10) and (delta.seconds <= 15))
def test_SpooferSpooferCR1(self):
num1 = e.MobileNumber()
num2 = e.MobileNumber()
for i in range(0,5):
with self.subTest(i=i):
if i == 0: #test without parameters
x = e.SpooferSpooferCR1()
r1, r2 = x[0], x[1]
for j in range(0,8):
with self.subTest(j=j):
if j == 0:
self.assertTrue(len(r1) == 3)
elif j == 1:
self.assertTrue(len(r2) == 3)
elif j == 2:
self.assertIn(r1[0],["MT","MO"])
elif j == 3:
self.assertIn(r2[0],["MT","MO"])
elif j == 4:
if r1[0] == "MO":
self.assertTrue(CheckCLID(r1[1]))
else:
self.assertTrue(CheckCLID(r1[2]))
elif j == 5:
if r2[0] == "MO":
self.assertTrue(CheckCLID(r2[1]))
else:
self.assertTrue(CheckCLID(r2[2]))
elif j == 6:
if r1[0] == "MO":
self.assertFalse(r1[2] == None)
else:
self.assertFalse(r1[1] == None)
elif j == 7:
if r2[0] == "MO":
self.assertFalse(r2[2] == None)
else:
self.assertFalse(r2[1] == None)
elif i == 1: #test with providing num1
x = e.SpooferSpooferCR1(num1=num1)
r1, r2 = x[0], x[1]
for j in range(0,9):
with self.subTest(j=j):
if j == 0:
self.assertTrue(len(r1) == 3)
elif j == 1:
self.assertTrue(len(r2) == 3)
elif j == 2:
self.assertIn(r1[0],["MT","MO"])
elif j == 3:
self.assertIn(r2[0],["MT","MO"])
elif j == 4:
if r1[0] == "MO":
self.assertTrue(CheckCLID(r1[1]))
else:
self.assertTrue(CheckCLID(r1[2]))
elif j == 5:
if r2[0] == "MO":
self.assertTrue(CheckCLID(r2[1]))
else:
self.assertTrue(CheckCLID(r2[2]))
elif j == 6:
if r1[0] == "MO":
self.assertTrue(r1[1] == num1)
else:
self.assertTrue(r1[2] == num1)
elif j == 7:
if r1[0] == "MO":
self.assertFalse(r1[2] == None)
else:
self.assertFalse(r1[1] == None)
elif j == 8:
if r2[0] == "MO":
self.assertFalse(r2[2] == None)
else:
self.assertFalse(r2[1] == None)
elif i == 2: #test with providing num2
x = e.SpooferSpooferCR1(num2=num2)
r1, r2 = x[0], x[1]
for j in range(0,9):
with self.subTest(j=j):
if j == 0:
self.assertTrue(len(r1) == 3)
elif j == 1:
self.assertTrue(len(r2) == 3)
elif j == 2:
self.assertIn(r1[0],["MT","MO"])
elif j == 3:
self.assertIn(r2[0],["MT","MO"])
elif j == 4:
if r1[0] == "MO":
self.assertTrue(CheckCLID(r1[1]))
else:
self.assertTrue(CheckCLID(r1[2]))
elif j == 5:
if r2[0] == "MO":
self.assertTrue(CheckCLID(r2[1]))
else:
self.assertTrue(CheckCLID(r2[2]))
elif j == 6:
if r2[0] == "MO":
self.assertTrue(r2[1] == num2)
else:
self.assertTrue(r2[2] == num2)
elif j == 7:
if r1[0] == "MO":
self.assertFalse(r1[2] == None)
else:
self.assertFalse(r1[1] == None)
elif j == 8:
if r2[0] == "MO":
self.assertFalse(r2[2] == None)
else:
self.assertFalse(r2[1] == None)
elif i == 3: #test with providing both num1 and num2
x = e.SpooferSpooferCR1(num1=num1,num2=num2)
r1, r2 = x[0], x[1]
for j in range(0,9):
with self.subTest(j=j):
if j == 0:
self.assertTrue(len(r1) == 3)
elif j == 1:
self.assertTrue(len(r2) == 3)
elif j == 2:
self.assertIn(r1[0],["MT","MO"])
elif j == 3:
self.assertIn(r2[0],["MT","MO"])
elif j == 4:
if r1[0] == "MO":
self.assertTrue(CheckCLID(r1[1]))
else:
self.assertTrue(CheckCLID(r1[2]))
elif j == 5:
if r2[0] == "MO":
self.assertTrue(CheckCLID(r2[1]))
else:
self.assertTrue(CheckCLID(r2[2]))
elif j == 6:
if r1[0] == "MO":
self.assertTrue(r1[1] == num1)
else:
self.assertTrue(r1[2] == num1)
elif j == 7:
if r2[0] == "MO":
self.assertTrue(r2[1] == num2)
else:
self.assertTrue(r2[2] == num2)
elif j == 7:
if r1[0] == "MO":
self.assertFalse(r1[2] == None)
else:
self.assertFalse(r1[1] == None)
elif j == 8:
if r2[0] == "MO":
self.assertFalse(r2[2] == None)
else:
self.assertFalse(r2[1] == None)
elif i == 4: #test numbers are in correct places when given each pair of call types
for j in range(0,2):
with self.subTest(j=j):
if j == 0:
x = e.SpooferSpooferCR1(num1=num1,num2=num2,types=("MO","MT"))
r1, r2 = x[0], x[1]
self.assertTrue((r1[1] == num1) and (r2[2] == num2))
elif j == 1:
x = e.SpooferSpooferCR1(num1=num1,num2=num2,types=("MT","MO"))
r1, r2 = x[0], x[1]
self.assertTrue((r1[2] == num1) and (r2[1] == num2))
def test_SpooferSpooferCR2(self):
t1 = datetime.datetime.today()
t1 = t1.strftime(dateTimeFormat)
gap = 15
for i in range(0,2):
with self.subTest(i=i):
if i == 0: #test with MO,MT
x = e.SpooferSpooferCR1(types=("MO","MT"))
r1, r2 = x[0], x[1]
x = e.SpooferSpooferCR2(t1,gap,r1,r2)
r1, r2 = x[0], x[1]
for j in range(0,12):
with self.subTest(j=j):
if j == 0:
self.assertTrue(len(x) == 3)
elif j == 1:
self.assertTrue(type(datetime.datetime.strptime(x[2],dateTimeFormat)) == datetime.datetime)
elif j == 2:
self.assertTrue(len(r1) == 5)
elif j == 3:
self.assertTrue(len(r2) == 5)
elif j == 4:
self.assertTrue(type(datetime.datetime.strptime(r1[0],dateTimeFormat)) == datetime.datetime)
elif j == 5:
self.assertTrue(type(datetime.datetime.strptime(r1[1],dateTimeFormat)) == datetime.datetime)
elif j == 6:
self.assertTrue(type(datetime.datetime.strptime(r2[0],dateTimeFormat)) == datetime.datetime)
elif j == 7:
self.assertTrue(type(datetime.datetime.strptime(r2[1],dateTimeFormat)) == datetime.datetime)
elif j == 8:
start1 = datetime.datetime.strptime(r1[0],dateTimeFormat)
end1 = datetime.datetime.strptime(r1[1],dateTimeFormat)
self.assertTrue(end1 > start1)
elif j == 9:
start2 = datetime.datetime.strptime(r2[0],dateTimeFormat)
end2 = datetime.datetime.strptime(r2[1],dateTimeFormat)
self.assertTrue(end2 > start2)
elif j == 10:
start1 = datetime.datetime.strptime(r1[0],dateTimeFormat)
start2 = datetime.datetime.strptime(r2[0],dateTimeFormat)
self.assertTrue(start2 >= start1)
elif j == 11:
start1 = datetime.datetime.strptime(r1[0],dateTimeFormat)
start2 = datetime.datetime.strptime(r2[0],dateTimeFormat)
delta = start2 - start1
self.assertTrue((delta.seconds >= 0) and (delta.seconds <= 3))
elif i == 1: #test with MT,MO
x = e.SpooferSpooferCR1(types=("MT","MO"))
r1, r2 = x[0], x[1]
x = e.SpooferSpooferCR2(t1,gap,r1,r2)
r1, r2 = x[0], x[1]
for j in range(0,12):
with self.subTest(j=j):
if j == 0:
self.assertTrue(len(x) == 3)
elif j == 1:
self.assertTrue(type(datetime.datetime.strptime(x[2],dateTimeFormat)) == datetime.datetime)
elif j == 2:
self.assertTrue(len(r1) == 5)
elif j == 3:
self.assertTrue(len(r2) == 5)
elif j == 4:
self.assertTrue(type(datetime.datetime.strptime(r1[0],dateTimeFormat)) == datetime.datetime)
elif j == 5:
self.assertTrue(type(datetime.datetime.strptime(r1[1],dateTimeFormat)) == datetime.datetime)
elif j == 6:
self.assertTrue(type(datetime.datetime.strptime(r2[0],dateTimeFormat)) == datetime.datetime)
elif j == 7:
self.assertTrue(type(datetime.datetime.strptime(r2[1],dateTimeFormat)) == datetime.datetime)
elif j == 8:
start1 = datetime.datetime.strptime(r1[0],dateTimeFormat)
end1 = datetime.datetime.strptime(r1[1],dateTimeFormat)
self.assertTrue(end1 > start1)
elif j == 9:
start2 = datetime.datetime.strptime(r2[0],dateTimeFormat)
end2 = datetime.datetime.strptime(r2[1],dateTimeFormat)
self.assertTrue(end2 > start2)
elif j == 10:
start1 = datetime.datetime.strptime(r1[0],dateTimeFormat)
start2 = datetime.datetime.strptime(r2[0],dateTimeFormat)
self.assertTrue(start1 >= start2)
elif j == 1:
start1 = datetime.datetime.strptime(r1[0],dateTimeFormat)
start2 = datetime.datetime.strptime(r2[0],dateTimeFormat)
delta = start1 - start2
self.assertTrue((delta.seconds >= 0) and (delta.seconds <= 3))
    def test_NonSpooferSpoofer1(self):
        """Validate the record pairs returned by e.NonSpooferSpoofer1.

        Each record is a 3-tuple (callType, origin, destination).  The
        first record is always "MO" (subscriber number at index 1) and
        the second is always "MT" (subscriber number at index 2).
        Scenarios: no arguments, num1 only, num2 only, both numbers.
        """
        num1 = e.MobileNumber()
        num2 = e.MobileNumber()
        for i in range(0,4):
            with self.subTest(i=i):
                if i == 0: #test without parameters provided
                    x = e.NonSpooferSpoofer1()
                    r1, r2 = x[0], x[1]
                    for j in range(0,8):
                        with self.subTest(j=j):
                            if j == 0:
                                self.assertTrue(len(r1) == 3)
                            elif j == 1:
                                self.assertTrue(len(r2) == 3)
                            elif j == 2:
                                self.assertTrue(r1[0] == "MO")
                            elif j == 3:
                                self.assertTrue(r2[0] == "MT")
                            elif j == 4:
                                # MO record: the origin must be a valid CLID.
                                self.assertTrue(CheckCLID(r1[1]))
                            elif j == 5:
                                # MT record: the destination must be a valid CLID.
                                self.assertTrue(CheckCLID(r2[2]))
                            elif j == 6:
                                self.assertFalse(r1[2] == None)
                            elif j == 7:
                                self.assertFalse(r2[1] == None)
                elif i == 1: #test with providing num1
                    x = e.NonSpooferSpoofer1(num1=num1)
                    r1, r2 = x[0], x[1]
                    for j in range(0,9):
                        with self.subTest(j=j):
                            if j == 0:
                                self.assertTrue(len(r1) == 3)
                            elif j == 1:
                                self.assertTrue(len(r2) == 3)
                            elif j == 2:
                                self.assertTrue(r1[0] == "MO")
                            elif j == 3:
                                self.assertTrue(r2[0] == "MT")
                            elif j == 4:
                                self.assertTrue(CheckCLID(r1[1]))
                            elif j == 5:
                                # num1 must be placed as the MO record's origin.
                                self.assertTrue(r1[1] == num1)
                            elif j == 6:
                                self.assertTrue(CheckCLID(r2[2]))
                            elif j == 7:
                                self.assertFalse(r1[2] == None)
                            elif j == 8:
                                self.assertFalse(r2[1] == None)
                elif i == 2: #test with providing num2
                    x = e.NonSpooferSpoofer1(num2=num2)
                    r1, r2 = x[0], x[1]
                    for j in range(0,9):
                        with self.subTest(j=j):
                            if j == 0:
                                self.assertTrue(len(r1) == 3)
                            elif j == 1:
                                self.assertTrue(len(r2) == 3)
                            elif j == 2:
                                self.assertTrue(r1[0] == "MO")
                            elif j == 3:
                                self.assertTrue(r2[0] == "MT")
                            elif j == 4:
                                self.assertTrue(CheckCLID(r1[1]))
                            elif j == 5:
                                # num2 must be placed as the MT record's destination.
                                self.assertTrue(r2[2] == num2)
                            elif j == 6:
                                self.assertTrue(CheckCLID(r2[2]))
                            elif j == 7:
                                self.assertFalse(r1[2] == None)
                            elif j == 8:
                                self.assertFalse(r2[1] == None)
                elif i == 3: #test with providing both num1 and num2
                    x = e.NonSpooferSpoofer1(num1=num1,num2=num2)
                    r1, r2 = x[0], x[1]
                    for j in range(0,10):
                        with self.subTest(j=j):
                            if j == 0:
                                self.assertTrue(len(r1) == 3)
                            elif j == 1:
                                self.assertTrue(len(r2) == 3)
                            elif j == 2:
                                self.assertTrue(r1[0] == "MO")
                            elif j == 3:
                                self.assertTrue(r2[0] == "MT")
                            elif j == 4:
                                self.assertTrue(CheckCLID(r1[1]))
                            elif j == 5:
                                self.assertTrue(r2[2] == num2)
                            elif j == 6:
                                self.assertTrue(r1[1] == num1)
                            elif j == 7:
                                self.assertTrue(CheckCLID(r2[2]))
                            elif j == 8:
                                self.assertFalse(r1[2] == None)
                            elif j == 9:
                                self.assertFalse(r2[1] == None)
def test_NonSpooferSpoofer2(self):
t1 = datetime.datetime.today()
t1 = t1.strftime(dateTimeFormat)
gap = 15
x = e.NonSpooferSpoofer1()
r1, r2 = x[0], x[1]
x = e.NonSpooferSpoofer2(t1,gap,r1,r2)
r1, r2 = x[0], x[1]
for i in range(0,11):
with self.subTest(i=i):
if i == 0:
self.assertTrue(len(x) == 3)
elif i == 1:
self.assertTrue(type(datetime.datetime.strptime(x[2],dateTimeFormat)) == datetime.datetime)
elif i == 2:
self.assertTrue(len(r1) == 5)
elif i == 3:
self.assertTrue(len(r2) == 5)
elif i == 4:
self.assertTrue(type(datetime.datetime.strptime(r1[0],dateTimeFormat)) == datetime.datetime)
elif i == 5:
self.assertTrue(type(datetime.datetime.strptime(r1[1],dateTimeFormat)) == datetime.datetime)
elif i == 6:
self.assertTrue(type(datetime.datetime.strptime(r2[0],dateTimeFormat)) == datetime.datetime)
elif i == 7:
self.assertTrue(type(datetime.datetime.strptime(r2[1],dateTimeFormat)) == datetime.datetime)
elif i == 8:
start1 = datetime.datetime.strptime(r1[0],dateTimeFormat)
end1 = datetime.datetime.strptime(r1[1],dateTimeFormat)
self.assertTrue(end1 > start1)
elif i == 9:
start2 = datetime.datetime.strptime(r2[0],dateTimeFormat)
end2 = datetime.datetime.strptime(r2[1],dateTimeFormat)
self.assertTrue(end2 > start2)
elif i == 10:
start1 = datetime.datetime.strptime(r1[0],dateTimeFormat)
start2 = datetime.datetime.strptime(r2[0],dateTimeFormat)
delta = start2 - start1
self.assertTrue((delta.seconds >= 0) and (delta.seconds <= 3))
    def test_SpooferNonSpooferCR1(self):
        """Validate the record pairs returned by e.SpooferNonSpooferCR1.

        Each record is a 3-tuple (callType, origin, destination).  The
        first record is always "MO" (subscriber number at index 1) and
        the second is always "MT" (subscriber number at index 2).
        Scenarios: no arguments, num1 only, num2 only, both numbers.
        """
        num1 = e.MobileNumber()
        num2 = e.MobileNumber()
        for i in range(0,4):
            with self.subTest(i=i):
                if i == 0: #test without parameters provided
                    x = e.SpooferNonSpooferCR1()
                    r1, r2 = x[0], x[1]
                    for j in range(0,8):
                        with self.subTest(j=j):
                            if j == 0:
                                self.assertTrue(len(r1) == 3)
                            elif j == 1:
                                self.assertTrue(len(r2) == 3)
                            elif j == 2:
                                self.assertTrue(r1[0] == "MO")
                            elif j == 3:
                                self.assertTrue(r2[0] == "MT")
                            elif j == 4:
                                # MO record: the origin must be a valid CLID.
                                self.assertTrue(CheckCLID(r1[1]))
                            elif j == 5:
                                # MT record: the destination must be a valid CLID.
                                self.assertTrue(CheckCLID(r2[2]))
                            elif j == 6:
                                self.assertFalse(r1[2] == None)
                            elif j == 7:
                                self.assertFalse(r2[1] == None)
                elif i == 1: #test with providing num1
                    x = e.SpooferNonSpooferCR1(num1=num1)
                    r1, r2 = x[0], x[1]
                    for j in range(0,9):
                        with self.subTest(j=j):
                            if j == 0:
                                self.assertTrue(len(r1) == 3)
                            elif j == 1:
                                self.assertTrue(len(r2) == 3)
                            elif j == 2:
                                self.assertTrue(r1[0] == "MO")
                            elif j == 3:
                                self.assertTrue(r2[0] == "MT")
                            elif j == 4:
                                self.assertTrue(CheckCLID(r1[1]))
                            elif j == 5:
                                # num1 must be placed as the MO record's origin.
                                self.assertTrue(r1[1] == num1)
                            elif j == 6:
                                self.assertTrue(CheckCLID(r2[2]))
                            elif j == 7:
                                self.assertFalse(r1[2] == None)
                            elif j == 8:
                                self.assertFalse(r2[1] == None)
                elif i == 2: #test with providing num2
                    x = e.SpooferNonSpooferCR1(num2=num2)
                    r1, r2 = x[0], x[1]
                    for j in range(0,9):
                        with self.subTest(j=j):
                            if j == 0:
                                self.assertTrue(len(r1) == 3)
                            elif j == 1:
                                self.assertTrue(len(r2) == 3)
                            elif j == 2:
                                self.assertTrue(r1[0] == "MO")
                            elif j == 3:
                                self.assertTrue(r2[0] == "MT")
                            elif j == 4:
                                self.assertTrue(CheckCLID(r1[1]))
                            elif j == 5:
                                # num2 must be placed as the MT record's destination.
                                self.assertTrue(r2[2] == num2)
                            elif j == 6:
                                self.assertTrue(CheckCLID(r2[2]))
                            elif j == 7:
                                self.assertFalse(r1[2] == None)
                            elif j == 8:
                                self.assertFalse(r2[1] == None)
                elif i == 3: #test with providing both num1 and num2
                    x = e.SpooferNonSpooferCR1(num1=num1,num2=num2)
                    r1, r2 = x[0], x[1]
                    for j in range(0,10):
                        with self.subTest(j=j):
                            if j == 0:
                                self.assertTrue(len(r1) == 3)
                            elif j == 1:
                                self.assertTrue(len(r2) == 3)
                            elif j == 2:
                                self.assertTrue(r1[0] == "MO")
                            elif j == 3:
                                self.assertTrue(r2[0] == "MT")
                            elif j == 4:
                                self.assertTrue(CheckCLID(r1[1]))
                            elif j == 5:
                                self.assertTrue(r2[2] == num2)
                            elif j == 6:
                                self.assertTrue(r1[1] == num1)
                            elif j == 7:
                                self.assertTrue(CheckCLID(r2[2]))
                            elif j == 8:
                                self.assertFalse(r1[2] == None)
                            elif j == 9:
                                self.assertFalse(r2[1] == None)
def test_SpooferNonSpooferCR2(self):
t1 = datetime.datetime.today()
t1 = t1.strftime(dateTimeFormat)
gap = 15
x = e.SpooferNonSpooferCR1()
r1, r2 = x[0], x[1]
x = e.SpooferNonSpooferCR2(t1,gap,r1,r2)
r1, r2 = x[0], x[1]
for i in range(0,11):
with self.subTest(i=i):
if i == 0:
self.assertTrue(len(x) == 3)
elif i == 1:
self.assertTrue(type(datetime.datetime.strptime(x[2],dateTimeFormat)) == datetime.datetime)
elif i == 2:
self.assertTrue(len(r1) == 5)
elif i == 3:
self.assertTrue(len(r2) == 5)
elif i == 4:
self.assertTrue(type(datetime.datetime.strptime(r1[0],dateTimeFormat)) == datetime.datetime)
elif i == 5:
self.assertTrue(type(datetime.datetime.strptime(r1[1],dateTimeFormat)) == datetime.datetime)
elif i == 6:
self.assertTrue(type(datetime.datetime.strptime(r2[0],dateTimeFormat)) == datetime.datetime)
elif i == 7:
self.assertTrue(type(datetime.datetime.strptime(r2[1],dateTimeFormat)) == datetime.datetime)
elif i == 8:
start1 = datetime.datetime.strptime(r1[0],dateTimeFormat)
end1 = datetime.datetime.strptime(r1[1],dateTimeFormat)
self.assertTrue(end1 > start1)
elif i == 9:
start2 = datetime.datetime.strptime(r2[0],dateTimeFormat)
end2 = datetime.datetime.strptime(r2[1],dateTimeFormat)
self.assertTrue(end2 > start2)
elif i == 10:
start1 = datetime.datetime.strptime(r1[0],dateTimeFormat)
start2 = datetime.datetime.strptime(r2[0],dateTimeFormat)
delta = start2 - start1
self.assertTrue((delta.seconds >= 0) and (delta.seconds <= 3))
def test_SpooferNonSpooferRC1(self):
num1 = e.MobileNumber()
num2 = e.MobileNumber()
for i in range(0,4):
with self.subTest(i=i):
if i == 0: #test without parameters provided
x = e.SpooferNonSpooferRC1()
r1, r2 = x[0], x[1]
for j in range(0,8):
with self.subTest(j=j):
if j == 0:
self.assertTrue(len(r1) == 3)
elif j == 1:
self.assertTrue(len(r2) == 3)
elif j == 2:
self.assertTrue(r1[0] == "MT")
elif j == 3:
self.assertTrue(r2[0] == "MT")
elif j == 4:
self.assertTrue(CheckCLID(r1[2]))
elif j == 5:
self.assertTrue(CheckCLID(r2[2]))
elif j == 6:
self.assertFalse(r1[1] == None)
elif j == 7:
self.assertFalse(r1[1] == None)
elif i == 1: #test with providing num1
x = e.SpooferNonSpooferRC1(num1=num1)
r1, r2 = x[0], x[1]
for j in range(0,9):
with self.subTest(j=j):
if j == 0:
self.assertTrue(len(r1) == 3)
elif j == 1:
self.assertTrue(len(r2) == 3)
elif j == 2:
self.assertTrue(r1[0] == "MT")
elif j == 3:
self.assertTrue(r2[0] == "MT")
elif j == 4:
self.assertTrue(CheckCLID(r1[2]))
elif j == 5:
self.assertTrue(r1[2] == num1)
elif j == 6:
self.assertTrue(CheckCLID(r2[2]))
elif j == 7:
self.assertFalse(r1[1] == None)
elif j == 8:
self.assertFalse(r1[1] == None)
elif i == 2: #test with providing num2
x = e.SpooferNonSpooferRC1(num2=num2)
r1, r2 = x[0], x[1]
for j in range(0,9):
with self.subTest(j=j):
if j == 0:
self.assertTrue(len(r1) == 3)
elif j == 1:
self.assertTrue(len(r2) == 3)
elif j == 2:
self.assertTrue(r1[0] == "MT")
elif j == 3:
self.assertTrue(r2[0] == "MT")
elif j == 4:
self.assertTrue(CheckCLID(r1[2]))
elif j == 5:
self.assertTrue(r2[2] == num2)
elif j == 6:
self.assertTrue(CheckCLID(r2[2]))
elif j == 7:
self.assertFalse(r1[1] == None)
elif j == 8:
self.assertFalse(r1[1] == None)
elif i == 3: #test with providing both num1 and num2
x = e.SpooferNonSpooferRC1(num1=num1,num2=num2)
r1, r2 = x[0], x[1]
for j in range(0,10):
with self.subTest(j=j):
if j == 0:
self.assertTrue(len(r1) == 3)
elif j == 1:
self.assertTrue(len(r2) == 3)
elif j == 2:
self.assertTrue(r1[0] == "MT")
elif j == 3:
self.assertTrue(r2[0] == "MT")
elif j == 4:
self.assertTrue(CheckCLID(r1[2]))
elif j == 5:
self.assertTrue(r2[2] == num2)
elif j == 6:
self.assertTrue(r1[2] == num1)
elif j == 7:
self.assertTrue(CheckCLID(r2[2]))
elif j == 8:
self.assertFalse(r1[1] == None)
elif j == 9:
self.assertFalse(r1[1] == None)
def test_SpooferNonSpooferRC2(self):
t1 = datetime.datetime.today()
t1 = t1.strftime(dateTimeFormat)
gap = 15
x = e.SpooferNonSpooferRC1()
r1, r2 = x[0], x[1]
x = e.SpooferNonSpooferRC2(t1,gap,r1,r2)
r1, r2 = x[0], x[1]
for i in range(0,11):
with self.subTest(i=i):
if i == 0:
self.assertTrue(len(x) == 3)
elif i == 1:
self.assertTrue(type(datetime.datetime.strptime(x[2],dateTimeFormat)) == datetime.datetime)
elif i == 2:
self.assertTrue(len(r1) == 5)
elif i == 3:
self.assertTrue(len(r2) == 5)
elif i == 4:
self.assertTrue(type(datetime.datetime.strptime(r1[0],dateTimeFormat)) == datetime.datetime)
elif i == 5:
self.assertTrue(type(datetime.datetime.strptime(r1[1],dateTimeFormat)) == datetime.datetime)
elif i == 6:
self.assertTrue(type(datetime.datetime.strptime(r2[0],dateTimeFormat)) == datetime.datetime)
elif i == 7:
self.assertTrue(type(datetime.datetime.strptime(r2[1],dateTimeFormat)) == datetime.datetime)
elif i == 8:
start1 = datetime.datetime.strptime(r1[0],dateTimeFormat)
end1 = datetime.datetime.strptime(r1[1],dateTimeFormat)
self.assertTrue(end1 > start1)
elif i == 9:
start2 = datetime.datetime.strptime(r2[0],dateTimeFormat)
end2 = datetime.datetime.strptime(r2[1],dateTimeFormat)
self.assertTrue(end2 > start2)
elif i == 10:
start1 = datetime.datetime.strptime(r1[0],dateTimeFormat)
start2 = datetime.datetime.strptime(r2[0],dateTimeFormat)
delta = start2 - start1
self.assertTrue((delta.seconds >= 10) and (delta.seconds <= 15))
def test_AddTimeSpooferSpooferNormal(self):
t1 = datetime.datetime.today()
t1 = t1.strftime(dateTimeFormat)
gap = 15
num1 = e.MobileNumber()
num2 = e.MobileNumber()
x = e.SpooferNonSpooferCR1(num1=num1)
r1 = x[0]
y = e.SpooferNonSpooferRC1(num2)
r2 = y[0]
x = (r1,r2)
x = e.AddTimeSpooferSpooferNormal(t1,gap,x)
r1, r2 = x[0], x[1]
for i in range(0,11):
with self.subTest(i=i):
if i == 0:
self.assertTrue(len(x) == 3)
elif i == 1:
self.assertTrue(type(datetime.datetime.strptime(x[2],dateTimeFormat)) == datetime.datetime)
elif i == 2:
self.assertTrue(len(r1) == 5)
elif i == 3:
self.assertTrue(len(r2) == 5)
elif i == 4:
self.assertTrue(type(datetime.datetime.strptime(r1[0],dateTimeFormat)) == datetime.datetime)
elif i == 5:
self.assertTrue(type(datetime.datetime.strptime(r1[1],dateTimeFormat)) == datetime.datetime)
elif i == 6:
self.assertTrue(type(datetime.datetime.strptime(r2[0],dateTimeFormat)) == datetime.datetime)
elif i == 7:
self.assertTrue(type(datetime.datetime.strptime(r2[1],dateTimeFormat)) == datetime.datetime)
elif i == 8:
start1 = datetime.datetime.strptime(r1[0],dateTimeFormat)
end1 = datetime.datetime.strptime(r1[1],dateTimeFormat)
self.assertTrue(end1 > start1)
elif i == 9:
start2 = datetime.datetime.strptime(r2[0],dateTimeFormat)
end2 = datetime.datetime.strptime(r2[1],dateTimeFormat)
self.assertTrue(end2 > start2)
def test_AddTimeSpooferNonSpooferNormal(self):
t1 = datetime.datetime.today()
t1 = t1.strftime(dateTimeFormat)
gap = 15
num1 = e.MobileNumber()
num2 = e.MobileNumber()
x = e.SpooferNonSpooferCR1(num1=num1)
r1 = x[0]
y = e.SpooferNonSpooferRC1(num2)
r2 = y[0]
x = (r1,r2)
x = e.AddTimeSpooferNonSpooferNormal(t1,gap,x)
r1, r2 = x[0], x[1]
for i in range(0,11):
with self.subTest(i=i):
if i == 0:
self.assertTrue(len(x) == 3)
elif i == 1:
self.assertTrue(type(datetime.datetime.strptime(x[2],dateTimeFormat)) == datetime.datetime)
elif i == 2:
self.assertTrue(len(r1) == 5)
elif i == 3:
self.assertTrue(len(r2) == 5)
elif i == 4:
self.assertTrue(type(datetime.datetime.strptime(r1[0],dateTimeFormat)) == datetime.datetime)
elif i == 5:
self.assertTrue(type(datetime.datetime.strptime(r1[1],dateTimeFormat)) == datetime.datetime)
elif i == 6:
self.assertTrue(type(datetime.datetime.strptime(r2[0],dateTimeFormat)) == datetime.datetime)
elif i == 7:
self.assertTrue(type(datetime.datetime.strptime(r2[1],dateTimeFormat)) == datetime.datetime)
elif i == 8:
start1 = datetime.datetime.strptime(r1[0],dateTimeFormat)
end1 = datetime.datetime.strptime(r1[1],dateTimeFormat)
self.assertTrue(end1 > start1)
elif i == 9:
start2 = datetime.datetime.strptime(r2[0],dateTimeFormat)
end2 = datetime.datetime.strptime(r2[1],dateTimeFormat)
self.assertTrue(end2 > start2)
    def test_CreatePairCDRsSpooferSpoofer(self):
        """Exercise e.CreatePairCDRsSpooferSpoofer over a grid of call
        proportions.

        For each (size, gap, propNormal, propCR, propRC) combination the
        generated pair of CDRs is checked for: correct lengths, starts
        separated from the previous pair's latest end by at least `gap`
        seconds, the expected count of each call type (flag at column
        index 6: "0" normal, "1" CR, "2" RC), and num1/num2 sitting in
        the field matching each record's call type (column 3 is the
        type, 4 the origin, 5 the destination).
        """
        for size in [10]:
            for gap in [15]:
                # Proportions 0.0, 0.25, 0.5, 0.75 for normal calls.
                x = []
                for i in range(0,76,25):
                    x.append(i/100)
                for propNormal in x:
                    # CR proportions are fractions of what normal leaves over.
                    y = []
                    for j in range(0,76,25):
                        y.append(j/100)
                    for i in range(len(y)):
                        y[i] = y[i] * (1 - propNormal)
                    for propCR in y:
                        # RC takes whatever remains.
                        propRC = 1 - propCR - propNormal
                        for k in range(0,10):
                            with self.subTest(size=size,gap=gap,propNormal=propNormal,propCR=propCR,propRC=propRC,k=k):
                                num1 = e.MobileNumber()
                                num2 = e.MobileNumber()
                                r = e.CreatePairCDRsSpooferSpoofer(size,gap,propNormal,propCR,propRC,num1=num1,num2=num2)
                                CDR1 = r[0]
                                CDR2 = r[1]
                                if k == 0: #ensure sizes are correct
                                    self.assertTrue((len(CDR1) == size) and (len(CDR2) == size))
                                elif k == 1: #ensure in correct order and of right gaps for CDR1
                                    # Walk both CDRs in lockstep: the earlier start of
                                    # each pair must be at least `gap` seconds after the
                                    # later end of the previous pair.
                                    end1 = datetime.datetime.strptime(CDR1[0][2],dateTimeFormat)
                                    end2 = datetime.datetime.strptime(CDR2[0][2],dateTimeFormat)
                                    results = []
                                    if end2 > end1:
                                        time = end2
                                    else:
                                        time = end1
                                    for l in range(1,size):
                                        start1 = datetime.datetime.strptime(CDR1[l][1],dateTimeFormat)
                                        start2 = datetime.datetime.strptime(CDR2[l][1],dateTimeFormat)
                                        end1 = datetime.datetime.strptime(CDR1[l][2],dateTimeFormat)
                                        end2 = datetime.datetime.strptime(CDR2[l][2],dateTimeFormat)
                                        if start1 > start2:
                                            if start2 >= (time + datetime.timedelta(seconds=gap)):
                                                results.append(True)
                                            else:
                                                results.append(False)
                                        else:
                                            if start1 >= (time + datetime.timedelta(seconds=gap)):
                                                results.append(True)
                                            else:
                                                results.append(False)
                                        if end1 > end2:
                                            time = end1
                                        else:
                                            time = end2
                                    f = True
                                    for i in range(len(results)):
                                        if results[i] == False:
                                            f = False
                                    self.assertTrue(f)
                                elif k == 2: #ensure correct number of normal calls in CDR1
                                    t = 0
                                    for l in range(len(CDR1)):
                                        if CDR1[l][6] == "0":
                                            t += 1
                                    # A small surplus (up to +4) is tolerated because the
                                    # normal count absorbs rounding of the other types.
                                    normalSize = math.floor(size * propNormal)
                                    self.assertTrue((t == normalSize) or (t in range(normalSize,normalSize+5)),msg="{0} | {1}".format(t,normalSize))
                                elif k == 3: #ensure correct number of normal calls in CDR2
                                    t = 0
                                    for l in range(len(CDR2)):
                                        if CDR2[l][6] == "0":
                                            t += 1
                                    normalSize = math.floor(size * propNormal)
                                    self.assertTrue((t == normalSize) or (t in range(normalSize,normalSize+5)),msg="{0} | {1}".format(t,normalSize))
                                elif k == 4: #ensure correct number of CR Calls in CDR1
                                    t = 0
                                    for l in range(len(CDR1)):
                                        if CDR1[l][6] == "1":
                                            t += 1
                                    CRSize = math.floor(size * propCR)
                                    self.assertTrue(t == CRSize,msg=t)
                                elif k == 5: #ensure correct number of CR calls in CDR2
                                    t = 0
                                    for l in range(len(CDR2)):
                                        if CDR2[l][6] == "1":
                                            t += 1
                                    CRSize = math.floor(size * propCR)
                                    self.assertTrue(t == CRSize,msg=t)
                                elif k == 6: #ensure correct number of RC calls in CDR1
                                    t = 0
                                    for l in range(len(CDR1)):
                                        if CDR1[l][6] == "2":
                                            t += 1
                                    RCSize = math.floor(size * propRC)
                                    self.assertTrue(t == RCSize,msg=t)
                                elif k == 7: #ensure correct number of RC calls in CDR2
                                    t = 0
                                    for l in range(len(CDR2)):
                                        if CDR2[l][6] == "2":
                                            t += 1
                                    RCSize = math.floor(size * propRC)
                                    self.assertTrue(t == RCSize,msg=t)
                                elif k == 8: #verify num1 position is correct position
                                    # MT records carry the number as destination (5),
                                    # all others as origin (4).
                                    f = True
                                    for record in CDR1:
                                        if record[3] == "MT":
                                            if record[5] != num1:
                                                f = False
                                        else:
                                            if record[4] != num1:
                                                f = False
                                    self.assertTrue(f)
                                elif k == 9: #verify num2 position is correct
                                    f = True
                                    for record in CDR2:
                                        if record[3] == "MT":
                                            if record[5] != num2:
                                                f = False
                                        else:
                                            if record[4] != num2:
                                                f = False
                                    self.assertTrue(f)
def test_CreatePairCDRsSpooferNonSpoofer(self):
for size in [10]:
for gap in [15]:
x = []
for i in range(0,76,25):
x.append(i/100)
for propNormal in x:
remainder = 1 - propNormal
y = []
for j in range(0,76,25):
y.append(j/100)
for i in range(len(y)):
y[i] = y[i] * remainder
for propCR in y:
remainder = 1 - propNormal - propCR
z = []
for k in range(0,76,25):
z.append(k/100)
for i in range(len(z)):
z[i] = z[i] * remainder
for propRC in z:
propAccess = 1 - propNormal - propCR - propRC
for k in range(0,9):
with self.subTest(size=size,gap=gap,propNormal=propNormal,propCR=propCR,propRC=propRC,propAccess=propAccess,k=k):
num1 = e.MobileNumber()
num2 = e.MobileNumber()
r = e.CreatePairCDRsSpooferNonSpoofer(size,gap,propNormal,propCR,propRC,propAccess,num1=num1,num2=num2)
CDR1 = r[0]
CDR2 = r[1]
if k == 0: #ensure sizes are correct
self.assertTrue((len(CDR1) == size) and (len(CDR2) == size),msg="{0} | {1}".format(len(CDR1),len(CDR2)))
elif k == 1: #ensure in correct order and of right gaps
end1 = datetime.datetime.strptime(CDR1[0][2],dateTimeFormat)
end2 = datetime.datetime.strptime(CDR2[0][2],dateTimeFormat)
results = []
if end2 > end1:
time = end2
else:
time = end1
for l in range(1,size):
start1 = datetime.datetime.strptime(CDR1[l][1],dateTimeFormat)
start2 = datetime.datetime.strptime(CDR2[l][1],dateTimeFormat)
end1 = datetime.datetime.strptime(CDR1[l][2],dateTimeFormat)
end2 = datetime.datetime.strptime(CDR2[l][2],dateTimeFormat)
if start1 > start2:
if start2 >= (time + datetime.timedelta(seconds=gap)):
results.append(True)
else:
results.append(False)
else:
if start1 >= (time + datetime.timedelta(seconds=gap)):
results.append(True)
else:
results.append(False)
if end1 > end2:
time = end1
else:
time = end2
f = True
for i in range(len(results)):
if results[i] == False:
f = False
self.assertTrue(f)
elif k == 2: #ensure correct number of normal calls in CDR1
t = 0
for l in range(len(CDR1)):
if CDR1[l][6] == "0":
t += 1
normalSize = math.floor(size * propNormal)
self.assertTrue((t == normalSize) or (t in range(normalSize,normalSize+5)))
elif k == 3: #ensure correct number of normal calls in CDR2
t = 0
for l in range(len(CDR2)):
if CDR2[l][6] == "0":
t += 1
normalSize = math.floor(size * propNormal)
self.assertTrue((t == normalSize) or (t in range(normalSize,normalSize+5)))
elif k == 4: #ensure correct number of CR calls in CDR1
t = 0
for l in range(len(CDR1)):
if CDR1[l][6] == "1":
t += 1
CRSize = math.floor(size * propCR)
self.assertTrue(t == CRSize,msg="{0} | {1}".format(t,CRSize))
elif k == 5: #ensure correct number of CR calls in CDR2
t = 0
for l in range(len(CDR2)):
if CDR2[l][6] == "1":
t += 1
CRSize = math.floor(size * propCR)
self.assertTrue(t == CRSize,msg="{0} | {1}".format(t,CRSize))
elif k == 6: #ensure correct number of RC calls in CDR1
t = 0
for l in range(len(CDR1)):
if CDR1[l][6] == "2":
t += 1
RCSize = math.floor(size * propRC)
self.assertTrue(t == RCSize,msg="{0} | {1}".format(t,RCSize))
elif k == 7: #ensure correct number of RC calls in CDR2
t = 0
for l in range(len(CDR2)):
if CDR2[l][6] == "2":
t += 1
RCSize = math.floor(size * propRC)
self.assertTrue(t == RCSize,msg="{0} | {1}".format(t,RCSize))
elif k == 8: #ensure correct number of Access calls in CDR1
t = 0
for l in range(len(CDR1)):
if CDR1[l][6] == "3":
t += 1
accessSize = math.floor(size * propAccess)
self.assertTrue(t == accessSize,msg="{0} | {1}".format(t,accessSize))
elif k == 9: #ensure correct number of Access calls in CDR2
t = 0
for l in range(len(CDR2)):
if CDR2[l][6] == "3":
t += 1
accessSize = math.floor(size * propAccess)
self.assertTrye(t == accessSize,msg="{0} | {1}".format(t,accessSize))
elif k == 10: #verify num1 position is correct for all records
f = True
for record in CDR1:
if record[3] == "MT":
if record[5] != num1:
f = False
else:
if record[4] != num1:
f = False
self.assertTrue(f)
elif k == 11: #verify num2 position is correct for all records
f = True
for record in CDR2:
if record[3] == "MT":
if record[5] != num2:
f = False
else:
if record[4] != num2:
f = False
self.assertTrue(f)
# Run the whole suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| UTF-8 | Python | false | false | 80,759 | py | 11 | ExampleGeneratorTest.py | 7 | 0.352629 | 0.321747 | 0 | 1,447 | 54.811334 | 148 |
svakulenk0/ODChatbot-v2.0 | 12,086,038,014,985 | 224080f83ac8b3a10e8dce85fa3fab1c2a00d82d | 8bd040f6895f8cdde6dadc3ee3d4c86da632023e | /__init__.py | 747f52459b7ca95a6f3cd564a034e84976baba2c | [] | no_license | https://github.com/svakulenk0/ODChatbot-v2.0 | 908ee9b84e6bad5e0beaf52115700189fd77d2a3 | 7344b0e8e821810c96dc0435c3ef1481ef34dc9d | refs/heads/master | "2022-07-17T14:21:43.706008" | "2018-07-25T08:37:26" | "2018-07-25T08:37:26" | 139,401,333 | 3 | 0 | null | false | "2022-06-21T21:22:08" | "2018-07-02T06:37:45" | "2019-05-01T09:03:19" | "2022-06-21T21:22:08" | 4,404 | 2 | 0 | 5 | Python | false | false | from opsdroid.matchers import match_regex
import logging
import random
from .chatbot import Chatbot
chatbot = Chatbot()
# restart exploration
# chatbot.history = []
# chatbot.goal = []
def setup(opsdroid):
logging.debug("Loaded ODExploration skill")
@match_regex(r'(.*)')
async def searchOD(opsdroid, config, message):
request = message.regex.group(1)
# print(request)
text = chatbot.search(request)
print(text)
await message.respond(text)
| UTF-8 | Python | false | false | 470 | py | 21 | __init__.py | 6 | 0.712766 | 0.710638 | 0 | 23 | 19.434783 | 47 |
Haira505/Pythoncode | 5,205,500,383,604 | bd28ffbb0f35431851662bf699f2dd009c43451a | c7582c299df7fc3f9c270d2ea94f0d9fd1d9e946 | /prueba.py | 89a2919eeab627210f021a00a1e9cdd2dce0e0e4 | [] | no_license | https://github.com/Haira505/Pythoncode | 676deb421e9b376ad435a545bc6ff80bd52d8ab8 | 4751e3b2a5d57181adfa5449b4e2d9e7115ff6a9 | refs/heads/master | "2022-12-04T08:37:25.603113" | "2020-08-21T16:27:41" | "2020-08-21T16:27:41" | 285,740,422 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def main ():
print("hola prritas uwu")
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 76 | py | 17 | prueba.py | 7 | 0.526316 | 0.526316 | 0 | 5 | 14.2 | 26 |
dirkdupont/PythonPlayground | 13,486,197,336,674 | 13c881d5ec821069d37345a2822fa9a5011a580a | 696c7b495250f48ec2ca38889fd74a90d4d2b073 | /statistics/coin_flips.py | 79cf1a42ef3f8cad1202d235d5050db03ad1e6ef | [] | no_license | https://github.com/dirkdupont/PythonPlayground | 7513f03a5979d52134044fb30e4ec2c9de2d4d61 | e9477e8e01a6b2332852a4271a687aae183659c7 | refs/heads/master | "2018-10-10T21:03:31.211798" | "2018-08-02T18:59:39" | "2018-08-02T18:59:39" | 106,595,266 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
This example simulates N sets of coin flips and returns a list of the proportion
of heads in each set of N flips.
"""
import matplotlib.pyplot as plt
import random
from math import sqrt
def mean(data):
return float(sum(data))/len(data)
def variance(data):
mu=mean(data)
return sum([(x-mu)**2 for x in data])/len(data)
def stddev(data):
return sqrt(variance(data))
def flip(N):
return [random.random()>0.5 for _ in range(N)]
def sample(N):
return [mean(flip(N)) for _ in range(N)]
# Running N sets of N coin flips
N=5000
outcomes=sample(N)
print("Mean: {:.2f}".format(mean(outcomes)))
print("Standard Deviation: {:.2f}".format(stddev(outcomes)))
plt.hist(outcomes, bins=30)
plt.show() | UTF-8 | Python | false | false | 730 | py | 341 | coin_flips.py | 292 | 0.675342 | 0.660274 | 0 | 40 | 17.275 | 80 |
xflyyxfl/threepy | 1,812,476,205,243 | bb8f86daa3622eeebf8e4637461d0d2b89cad430 | af381159cf2efcb295a2b67f70fc57d04fa1e269 | /examples/opengl_points_billboards.py | 1e96769e640a917ba7bab416121111baf4e629f7 | [] | no_license | https://github.com/xflyyxfl/threepy | 6e70de3c02188096d0d724b2d7f754c472d24d77 | bcbbd5d04514ecf81d5824d18cbce7ec6cac4829 | refs/heads/master | "2021-06-22T13:27:42.208603" | "2017-08-07T02:45:18" | "2017-08-07T02:45:18" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import division
import math
import random
import os
import pygame
from pygame.locals import *
import THREE
from THREE.utils import Expando
renderer = None
camera = None
material = None
clock = pygame.time.Clock()
width, height = 800, 600
windowHalfX = width / 2
windowHalfY = height / 2
def toAbs( rel ):
return os.path.join( os.path.dirname( __file__ ), rel )
def init():
global renderer, camera, scene, material
pygame.init()
pygame.display.set_mode( (width, height), DOUBLEBUF|OPENGL )
renderer = THREE.OpenGLRenderer
renderer.init()
renderer.setSize( width, height )
camera = THREE.PerspectiveCamera( 55, width / height, 10, 2000 )
camera.position.z = 1000
scene = THREE.Scene()
scene.fog = THREE.FogExp2( 0x000000, 0.001 )
geometry = THREE.Geometry()
sprite = THREE.TextureLoader().load( toAbs( "textures/sprites/disc.png" ) )
for i in xrange( 10000 ):
vertex = THREE.Vector3()
vertex.x = random.uniform( -1000, 1000 )
vertex.y = random.uniform( -1000, 1000 )
vertex.z = random.uniform( -1000, 1000 )
geometry.vertices.append( vertex )
material = THREE.PointsMaterial( size = 35, sizeAttenuation = False, map = sprite, alphaTest = 0.5, transparent = True )
material.color.setHSL( 1.0, 0.3, 0.7 )
particles = THREE.Points( geometry, material )
scene.add( particles )
def animate():
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
clock.tick()
# print( clock.get_fps() )
time = pygame.time.get_ticks() * 0.0001
mouseX, mouseY = pygame.mouse.get_pos()
mouseX -= windowHalfX
mouseY -= windowHalfY
camera.position.x += ( mouseX - camera.position.x ) * 0.05
camera.position.y += ( - mouseY - camera.position.y ) * 0.05
camera.lookAt( scene.position )
h = ( 360 * ( 1.0 + time ) % 360 ) / 360
material.color.setHSL( h, 0.5, 0.5 )
renderer.render( scene, camera )
pygame.display.flip()
pygame.time.wait( 10 )
if __name__ == "__main__":
init()
animate() | UTF-8 | Python | false | false | 2,237 | py | 41 | opengl_points_billboards.py | 39 | 0.606616 | 0.561913 | 0 | 99 | 21.606061 | 124 |
paperstiger/gaolib | 14,705,968,069,089 | ec22f964f9c5ee3d070dd7889a5e2c6d2190ecba | a7a0c440e3bf858c5f4a7541c8ce211334b5d050 | /test/argtest.py | 1187f6a9e963259a8e7736a92c17cc804d11e44a | [] | no_license | https://github.com/paperstiger/gaolib | b8e0ba9dbfe520b8c416aaefaf8a2f6b66243594 | 6c791627b0bceb420caf0d087cba258501bf8ad6 | refs/heads/master | "2020-08-22T03:26:10.510004" | "2019-10-20T04:40:28" | "2019-10-20T04:40:28" | 216,307,906 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 Gao Tang <gt70@duke.edu>
#
# Distributed under terms of the MIT license.
"""
argtest.py
Test getOnOffArgs module, we add numbers
"""
from pyLib.io import getOnOffArgs
def main():
args = getOnOffArgs('a', 'b1', 'c-1', 'd-0.5', 'e1e4', 'f-2.3e-5')
print(args)
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 396 | py | 43 | argtest.py | 37 | 0.602532 | 0.559494 | 0 | 23 | 16.173913 | 70 |
sharithomas/more-examples | 13,116,830,132,524 | 4201290688dd58c227d7a22dfb51865a5831c1e3 | 4c1f8e7b02cfe60da4be1300c051c6525776cc00 | /numpy_programs/matrix_3x3x3.py | 791471a55ec26a6bcacbc6531e94344f25163b3e | [] | no_license | https://github.com/sharithomas/more-examples | 04fea9dcc912eb4546dae1d03090b9e89d5bc157 | e0672e2440968cc6041ca8c2567cbd572515fb7c | refs/heads/master | "2021-02-27T11:50:05.103096" | "2020-10-02T11:46:43" | "2020-10-02T11:46:43" | 245,603,890 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Write a NumPy program to create a 3x3x3 array filled with arbitrary values
import numpy as np
array=np.random.random((3,3,3))
print(array)
| UTF-8 | Python | false | false | 142 | py | 241 | matrix_3x3x3.py | 241 | 0.767606 | 0.725352 | 0 | 5 | 27.4 | 76 |
Eggwise/syncano-python-framework | 2,499,670,988,343 | eb4cb6eaf15b7937126f0a8932f1730a7b00a519 | 8f84c13925db79b395dc9536fddd7185508aef1c | /old/framework/source_manager/main.py | 69a160f54fa5f50e1549f0e5a0268ddaea16132b | [] | no_license | https://github.com/Eggwise/syncano-python-framework | 48e2e0f03f691e10ff5575d40b1f4e833e139ae5 | a9a7afea037d3afb64772e059d61544b7786aaa1 | refs/heads/master | "2020-07-14T08:23:30.474784" | "2016-11-16T17:44:12" | "2016-11-16T17:44:12" | 67,504,198 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from . import source_indexer
from .source_indexer import SourceIndexer
import inspect
import logging, os
_init_config = {
'caller_path' : None
}
_source_indexer = None # type: SourceIndexer
def init():
global _init_config
global _source_indexer
if _init_config['caller_path'] is not None:
error_message = 'initialize for the second time, why?'
logging.error(error_message)
(frame, script_path, line_number,
function_name, lines, index) = inspect.getouterframes(inspect.currentframe())[1]
_init_config['caller_path'] = os.path.realpath(script_path)
SourceIndexer.prepare(_init_config)
_source_indexer = SourceIndexer()
def _check_initialized():
global _init_config
global _source_indexer
if _init_config['caller_path'] is None:
error_message = 'source manager is not initialized. call init() first'
logging.error(error_message)
raise Exception(error_message)
if _source_indexer is None:
error_message = 'something went wrong when initializing the source manager'
logging.error(error_message)
raise Exception(error_message)
def find() -> SourceIndexer:
global _source_indexer
_check_initialized()
return _source_indexer.new
# find = _source_indexer.refresh()
# at = find.at
#
# class Compiler:
#
# def __init__(self, component):
# self.component = component
#
# def to_palla(self):
# print('PALLA {0}'.format(self.component.name))
# return 'palla'
#
# indices = _source_indexer.indices.ok
# compiler = Compiler
def from_this():
(frame, script_path, line_number,
function_name, lines, index) = inspect.getouterframes(inspect.currentframe())[1]
indexer = _source_indexer
return indexer.at_path(script_path) # type: SourceIndexer
#
# #add all the attributes of the main package to the init.py for access
# _current_module = sys.modules[__name__]
#
#
# for i in dir(main):
# if not i.startswith('__'):
# setattr(_current_module, i, getattr(main, i))
#
| UTF-8 | Python | false | false | 2,071 | py | 17 | main.py | 16 | 0.660068 | 0.658619 | 0 | 95 | 20.8 | 85 |
kperun/NESTML-Python-Benchmark | 10,239,202,046,235 | 98f71752edf20a30b9881243e9d439bc0c6cc815 | 9d95070fe76d23fa213b95c955680a6104826ec1 | /ply/Grammar/SimpleExpressionGrammar.py | f2a30d306f5f2689187d6752ea7b5e3b06333c83 | [
"MIT"
] | permissive | https://github.com/kperun/NESTML-Python-Benchmark | 2ec731bf4c13fcf89bcbb9aa01f971bfa034b383 | e96d68fa73f75a25d34d629bbdbde43a849f1229 | refs/heads/master | "2021-03-27T13:09:50.633489" | "2017-07-20T12:28:01" | "2017-07-20T12:28:01" | 94,541,808 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import ply.lex as lex
import sys
sys.path.append('../ASTClasses')
import ASTCalculator
import ASTDeclaration
import ASTComputation
import ASTStatement
import ASTNumericLiteral
import ASTName
import ASTExpr
tokens = ['NUMBER', 'END', 'CALCULATOR', 'STRING', 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MODULO', 'POW',
'LPAREN', 'RPAREN', 'COLON','NEWLINE','DECLARATION','COMPUTATION','EQ']
#Tokens
def t_END(t): r'end';return t
def t_CALCULATOR(t): r'calculator';return t
def t_DECLARATION(t): r'declaration';return t
def t_COMPUTATION(t): r'computation';return t
t_STRING = '[a-zA-Z\\_]+' # a string token
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_POW = r'\*\*'
t_DIVIDE = r'/'
t_MODULO = r'%'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_COLON = r':'
t_EQ = r'='
#t_NEWLINE = r'\n'
def t_NUMBER(t):
r"[0-9]+" # a number token , e.g. 42
t.value = int(t.value)
return t
def t_NEWLINE(t):
r'\n+' # a new lie token
t.lexer.lineno += t.value.count("\n")
def t_error(t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
t_ignore = ' \t \n'
# Precedence rules for the arithmetic operators
precedence = (
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE'),
#('right', 'unaryMinus'),
)
# store the overall context
calculator = None
start = 'astCalculator'
def p_astCalculator(p):
'astCalculator : CALCULATOR STRING COLON astBody END'
p[0] = ASTCalculator.ASTCalculator(str(p[2]),[p[4]])
def p_body(p):
"""
astBody : DECLARATION COLON astDeclaration END
| COMPUTATION COLON astComputation END
| astBody astBody
"""
if p[2] == ':' and p[1]=="declaration":#the header
p[0] = ASTDeclaration.ASTDeclaration([p[3]])
elif p[2] == ':' and p[1]=="computation":
p[0] = ASTComputation.ASTComputation([p[3]])
else:
p[0] = [p[1],p[2]]
def p_declaration(p):
"""
astDeclaration : STRING
| STRING EQ astExpression
| astDeclaration astDeclaration
"""
if len(p) == 2:# case sole declration
p[0] = ASTStatement.ASTStatement.makeDecl(p[1])
elif len(p) == 4:
p[0] = ASTStatement.ASTStatement.makeDeclWithExpression(p[1],p[3])
else:
p[0] = [p[1],p[2]]
def p_computation(p):
"""
astComputation : STRING EQ astExpression
| astComputation astComputation
"""
if len(p)==4:
p[0] = ASTStatement.ASTStatement.makeDeclWithExpression(p[1],p[3])
else:
p[0] = [p[1],p[2]]
def p_expression(p):
"""
astExpression : NUMBER
| STRING
| LPAREN astExpression RPAREN
| astExpression POW astExpression
| PLUS astExpression
| MINUS astExpression
| astExpression TIMES astExpression
| astExpression DIVIDE astExpression
| astExpression PLUS astExpression
| astExpression MINUS astExpression
| astExpression MODULO astExpression
|
"""
if len(p)==2:
if type(p[1]) == int:
p[0] = ASTNumericLiteral.ASTNumericLiteral.makeLiteral(p[1])
else:
p[0] = ASTName.ASTName(p[1])
elif len(p)==3:
if p[1]=='+':
p[0] = ASTExpr.ASTExpr.makeTerm(p[2])
p[0].isUnaryPlus = True
else:
p[0] = ASTExpr.ASTExpr.makeTerm(p[2])
p[0].isUnaryMinus = True
elif len(p)==4:
if p[1]=='(' and p[3] == ')':
p[0] = ASTExpr.ASTExpr.makeTerm(p[2])
p[0].isRightBracket = True
p[0].isLeftBracket = True
elif p[2]=='**':
p[0] = ASTExpr.ASTExpr.makePow(p[1],p[3])
else:
p[0] = ASTExpr.ASTExpr.makeExpr(p[1],p[3],p[2])
def p_error(p):
print("Syntax error at '%s'" % p.value)
"""
astCalculator : 'calculator' name=TString ':' EOL (astDeclaration|astComputation|EOL)* END EOL EOF;
astDeclaration : 'declaration' ':' EOL (astStatement)* END EOL;
astComputation : 'computation' ':' EOL (astStatement)* END EOL;
astStatement : decl=astName '=' expr=astExpr EOL;
astExpr : leftBracket='(' expr=astExpr rightBracket=')'
|<assorc=right> base=astExpr lpow='**' exponent=astExpr
| (unaryPlus='+' | unaryMinus='-') expr=astExpr
| lhs=astExpr (times='*' | div='/' | modulo='%') rhs=astExpr
| lhs=astExpr (plus='+' | minus='-') rhs=astExpr
| term=astTerm;
astTerm : astNumericLiteral | astName;
astNumericLiteral : value=TNumber;
"""
| UTF-8 | Python | false | false | 4,655 | py | 25 | SimpleExpressionGrammar.py | 22 | 0.562406 | 0.548228 | 0 | 173 | 25.895954 | 103 |
ivysandberg/Instagram | 5,806,795,815,794 | a792b560d287a721dc2ecd303b0ae319f1a4bc4b | c716190de713bb5415ce895892d8e27a038797a1 | /instagram_data.py | 29e2788026264eee62b69e3bffcf02c21f72f33c | [] | no_license | https://github.com/ivysandberg/Instagram | 79041a8bbf36e7cc4b99e512a15136f451683314 | 3718f3cf0a3dcf027bd7c7d1568c7871ecd7fe47 | refs/heads/master | "2020-04-15T07:17:49.143610" | "2019-02-03T19:04:59" | "2019-02-03T19:04:59" | 164,490,844 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import pandas as pd
import json
import matplotlib.pyplot as plt
import csv
""" import follower data """
path = '/Users/ivysandberg/MyData/ivysandberg_20190104/connections.json'
# import using read_json
#data = pd.read_json(path)
#df = pd.DataFrame(data)
#followers_bydate = df.sort_values('followers')
#print (followers_bydate)
# reading the JSON data using json.load()
with open(path) as data:
dict_data = json.load(data)
# converting json dataset from dictionary to dataframe
d2 = pd.DataFrame.from_dict(dict_data)#, orient='index')
d2['index1'] = d2.index
d3=d2[['index1', 'followers']]
#print (d3)
#print (type(d3))
# restructure dataframe (make username a column not an index)
d3 = d3.reset_index()
d3 = d3.drop(['index'], axis=1)
d3 = d3.rename(columns={'index1': 'username', 'followers': 'date'})
#print(d3)
#print (d3.columns)
follower_df = d3 # renaming d3
#print (follower_df)
""" import media data """
path2 = '/Users/ivysandberg/MyData/ivysandberg_20190104/media.json'
with open(path2) as data_file:
data2 = json.load(data_file)
#print (data2)
# restructure DataFrame
df2 = pd.DataFrame.from_dict(data2, orient='index')
df2.reset_index(level=0, inplace=True)
df2=df2.drop([0], axis=1)
df2=df2.transpose()
#print(list(df2.columns))
#print (df2)
''' extract just the data on instgram stories from the media data '''
df3=df2[0] # extracts just stories (format: list of dictionaries)
#print(df3)
'''parse string into list'''
var1=df3[1]
#print(var1)
#print(type(var1))
#print(var1.keys())
'''convert keys to column names'''
cap=[] # caption
tak=[] # taken_at
pat=[] # path
newdf = pd.DataFrame()
for i in range(1,195):
cp = df3[i]['caption']
tk = df3[i]['taken_at']
pt = df3[i]['path']
cap.append(cp)
tak.append(tk)
pat.append(pt)
newdf['caption']=cap
newdf['date']=tak
newdf['path']=pat
#print(newdf)
my_posts=newdf['caption']
#print (list(my_posts))
'''Merging data
pd.merge(left=DataFrame, right=DataFrame, on=None, left_on=‘left column name’, right_on=‘right column name’)
'''
#newdf=newdf.merge(d3, how='outer') # outer join
#print(newdf)
''' export dataframes as csv'''
d3.to_csv('/Users/ivysandberg/MyData/instadata.csv', index=False, header=False)
newdf.to_csv('/Users/ivysandberg/MyData/instastorydata.csv', index=False, header=False)
| UTF-8 | Python | false | false | 2,350 | py | 2 | instagram_data.py | 1 | 0.692143 | 0.661401 | 0 | 103 | 21.737864 | 108 |
WonderLuc/brother-bootcamp | 8,461,085,619,755 | 42d442469e092f2b9de50387d2bcdd21557d49cc | b1a7417caf4200557106821a0fe45d848b598467 | /python 101/tasks/dictionary.py | 806d7b96b4fd6a15d16696c628c91b3a28ba5993 | [] | no_license | https://github.com/WonderLuc/brother-bootcamp | 3ab6a197160f71d17be837c0be2001b2abcc6c3c | 841285d516566730d35da7af9690c5d3b94051a4 | refs/heads/main | "2023-08-20T15:44:33.228640" | "2021-10-14T17:43:47" | "2021-10-14T17:43:47" | 404,047,387 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | dictionary = {
"cat": 'кошка',
'house': 'дом',
'word': 'слово',
'score': 'счет',
'apple': 'яблоко'
}
while(True):
userInput = input('Dicitionary: ').split(' ')
command = userInput[0]
if (command == 'close' ):
print('See you later!')
break
elif (command == 'translate'):
# Seacrh by key
if (userInput[1] in dictionary):
print(dictionary[userInput[1]])
# Search by value
elif (userInput[1] in dictionary.values()):
for key,value in dictionary.items():
if (value == userInput[1]):
print(key)
else:
print('Sorry, there is no that word')
elif (command == 'add'):
if (len(userInput) != 3):
print('Wrong arguments number')
continue
dictionary.update({userInput[1]: userInput[2]})
else:
print('There is no that command') | UTF-8 | Python | false | false | 846 | py | 40 | dictionary.py | 38 | 0.585662 | 0.575942 | 0 | 32 | 24.75 | 51 |
SlicingDice/slicingdice-python3 | 14,705,968,065,142 | 885837ef020f90c7be7f701f205e3769c84074ce | 067252d734401a696152e4153eb26e51a3f66efa | /pyslicer/core/requester.py | 1e35efbf4bd6cda62bee73d12101a2dd7817bde1 | [
"MIT"
] | permissive | https://github.com/SlicingDice/slicingdice-python3 | f2375a36b171c40be095f0842a3823ea1ee3518b | 0c8e1b72af0992c32976c8a21b77827d236a6c15 | refs/heads/master | "2021-01-25T13:57:28.346868" | "2018-12-12T05:58:25" | "2018-12-12T05:58:25" | 123,625,426 | 1 | 0 | MIT | false | "2018-12-12T05:58:26" | "2018-03-02T20:07:20" | "2018-12-12T05:52:54" | "2018-12-12T05:58:25" | 198 | 0 | 0 | 0 | Python | false | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import aiohttp
from .. import exceptions
class Requester(object):
def __init__(self, use_ssl, timeout):
self.use_ssl = use_ssl
self.session = aiohttp.ClientSession(read_timeout=timeout)
async def post(self, url, data, headers):
"""Executes a post request result object"""
try:
async with self.session.post(url, data=data, headers=headers,
verify_ssl=self.use_ssl) as resp:
return resp.status, await resp.text()
except aiohttp.ClientConnectorError as e:
raise exceptions.SlicingDiceHTTPError(e)
except aiohttp.ServerTimeoutError as e:
raise exceptions.SlicingDiceHTTPError(e)
async def put(self, url, data, headers):
"""Returns a put request result object"""
try:
async with self.session.put(url, data=data, headers=headers,
verify_ssl=self.use_ssl) as resp:
return resp.status, await resp.text()
except aiohttp.ClientConnectorError as e:
raise exceptions.SlicingDiceHTTPError(e)
except aiohttp.ServerTimeoutError as e:
raise exceptions.SlicingDiceHTTPError(e)
async def get(self, url, headers):
"""Returns a get request result object"""
try:
async with self.session.get(url, headers=headers,
verify_ssl=self.use_ssl) as resp:
return resp.status, await resp.text()
except aiohttp.ClientConnectorError as e:
raise exceptions.SlicingDiceHTTPError(e)
except aiohttp.ServerTimeoutError as e:
raise exceptions.SlicingDiceHTTPError(e)
async def delete(self, url, headers):
"""Returns a delete request result object"""
try:
async with self.session.delete(url, headers=headers,
verify_ssl=self.use_ssl) as resp:
return resp.status, await resp.text()
except aiohttp.ClientConnectorError as e:
raise exceptions.SlicingDiceHTTPError(e)
except aiohttp.ServerTimeoutError as e:
raise exceptions.SlicingDiceHTTPError(e)
| UTF-8 | Python | false | false | 2,294 | py | 12 | requester.py | 10 | 0.601569 | 0.601133 | 0 | 55 | 40.709091 | 76 |
AnkitAvi11/Data-Structures-And-Algorithms | 1,778,116,494,426 | 18700e5ed4c7c63076aef9d5411b36f823640dbf | 1b45d1162bd60a356844fc4dced068da2e6cc438 | /Data Structures/Arrays/StringCompression.py | e838973d2902a744a4505d20732739c755dab5e8 | [
"MIT"
] | permissive | https://github.com/AnkitAvi11/Data-Structures-And-Algorithms | de9584e439861254cdce265af789c8b484c01c69 | 703f78819a41d4dd88caf71156a4a515651edc1b | refs/heads/master | "2023-02-19T21:53:39.405934" | "2021-01-24T17:27:21" | "2021-01-24T17:27:21" | 297,752,655 | 6 | 3 | MIT | false | "2021-01-24T17:27:22" | "2020-09-22T19:33:55" | "2021-01-24T17:20:09" | "2021-01-24T17:27:21" | 227 | 4 | 10 | 0 | Python | false | false |
# program : string compression
"""
Implement a method to perform basic string compression using the counts of characters.
"""
def compressString(string : str) -> dict :
i = 0
result = ""
while i < len(string) :
count = 1
try :
while string[i+1] == string[i] :
i+=1
count+=1
except :
pass
result += "{}{}".format(string[i], count)
i+=1
return result
if __name__ == '__main__' :
string = input("Enter a string : ")
print(compressString(string))
| UTF-8 | Python | false | false | 590 | py | 166 | StringCompression.py | 165 | 0.49661 | 0.486441 | 0 | 27 | 20.62963 | 86 |
cossorzano/scipion | 6,502,580,503,764 | ecd6c20d5532c6562b5b1e4f311d20ff3911a55f | 1cba04adbdfd246d44d8a736f17dc4e847451c54 | /pyworkflow/em/plotter.py | 36fca2051b5ef1dbdd0aba4467949200925824e8 | [] | no_license | https://github.com/cossorzano/scipion | b8988be7a9d96035038f516a40e4f686ae0aca1e | 29617e9b4886f1b1f83055fffc0a8ce81f4c39ba | refs/heads/master | "2020-04-06T04:33:00.739194" | "2018-08-05T07:26:20" | "2018-08-05T07:26:20" | 55,037,672 | 0 | 0 | null | true | "2016-05-03T15:19:34" | "2016-03-30T06:17:03" | "2016-03-30T20:14:23" | "2016-05-03T15:19:27" | 99,933 | 0 | 0 | 0 | Python | null | null | # **************************************************************************
# *
# * Authors: Josue Gomez Blanco (jgomez@cnb.csic.es)
# *
# * Unidad de Bioinformatica of Centro Nacional de Biotecnologia , CSIC
# *
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2 of the License, or
# * (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# * 02111-1307 USA
# *
# * All comments concerning this program package may be sent to the
# * e-mail address 'jgomez@cnb.csic.es'
# *
# **************************************************************************
"""
This module implement the classes to create plots on scipion.
"""
from math import radians
from itertools import izip
import matplotlib.pyplot as plt
from pyworkflow.gui.plotter import Plotter
class EmPlotter(Plotter):
''' Class to create several plots'''
def __init__(self, x=1, y=1, mainTitle="", **kwargs):
Plotter.__init__(self, x, y, mainTitle, **kwargs)
def plotAngularDistribution(self, title, rot,
tilt, weight=[], max_p=40,
min_p=5, max_w=2, min_w=1, color='blue'):
'''Create an special type of subplot, representing the angular
distribution of weight projections. '''
if weight:
max_w = max(weight)
min_w = min(weight)
a = self.createSubPlot(title, 'Min weight=%(min_w).2f, Max weight=%(max_w).2f' % locals(), '', projection='polar')
for r, t, w in izip(rot, tilt, weight):
pointsize = int((w - min_w)/(max_w - min_w + 0.001) * (max_p - min_p) + min_p)
a.plot(r, t, markerfacecolor=color, marker='.', markersize=pointsize)
else:
a = self.createSubPlot(title, 'Empty plot', '', projection='polar')
for r, t in izip(rot, tilt):
a.plot(r, t, markerfacecolor=color, marker='.', markersize=10)
def plotHist(self, yValues, nbins, color='blue', **kwargs):
""" Create an histogram. """
self.hist(yValues, nbins, facecolor=color, **kwargs)
def plotMatrix(self,_matrix,cmap='Greens'
, xticksLablesMajor=None
, yticksLablesMajor=None
, rotationX=90.
, rotationY=0.):
im = plt.imshow(_matrix, interpolation="none", cmap=cmap)
if (xticksLablesMajor is not None):
plt.xticks(range(len(xticksLablesMajor)),
xticksLablesMajor[:len(xticksLablesMajor)],
rotation=rotationX)
if (yticksLablesMajor is not None):
plt.yticks(range(len(yticksLablesMajor)),
yticksLablesMajor[:len(yticksLablesMajor)],
rotation=rotationY)
cax = plt.colorbar(im)
#im.cmap.set_over('g')#outbound values
def plotData(self, xValues, yValues, color='blue', **kwargs):
""" Shortcut function to plot some values.
Params:
xValues: list of values to show in x-axis
yValues: list of values to show as values in y-axis
color: color for the plot.
**kwargs: keyword arguments that accepts:
marker, linestyle
"""
self.plot(xValues, yValues, color, **kwargs)
def plotFile(dbName, dbPreffix, plotType,
columnsStr, colorsStr, linesStr, markersStr,
xcolumn, ylabel, xlabel, title, bins, orderColumn, orderDirection):
columns = columnsStr.split()
colors = colorsStr.split()
lines = linesStr.split()
markers = markersStr.split()
data = PlotData(dbName, dbPreffix, orderColumn, orderDirection)
#setObj = getSetObject(dbName, dbPreffix)
plotter = Plotter(windowTitle=title)
ax = plotter.createSubPlot(title, xlabel, ylabel)
xvalues = data.getColumnValues(xcolumn) if xcolumn else range(0, data.getSize())
#xvalues = range(0, setObj.getSize()) if not isxvalues else []
for i, col in enumerate(columns):
yvalues = data.getColumnValues(col)
color = colors[i]
line = lines[i]
if bins:
ax.hist(yvalues, bins=int(bins), color=color, linestyle=line, label=col)
else:
if plotType == 'Plot':
marker = (markers[i] if not markers[i] == 'none' else None)
ax.plot(xvalues, yvalues, color, marker=marker, linestyle=line, label=col)
else:
ax.scatter(xvalues, yvalues, c=color, label=col, alpha=0.5)
ax.legend(columns)
return plotter
class PlotData():
""" Small wrapper around table data such as: sqlite or metadata
files. """
def __init__(self, fileName, tableName, orderColumn, orderDirection):
self._orderColumn = orderColumn
self._orderDirection = orderDirection
if fileName.endswith(".db") or fileName.endswith(".sqlite"):
self._table = self._loadSet(fileName, tableName)
self.getColumnValues = self._getValuesFromSet
self.getSize = self._table.getSize
else: # assume a metadata file
self._table = self._loadMd(fileName, tableName)
self.getColumnValues = self._getValuesFromMd
self.getSize = self._table.size
def _loadSet(self, dbName, dbPreffix):
from pyworkflow.mapper.sqlite import SqliteFlatDb
db = SqliteFlatDb(dbName=dbName, tablePrefix=dbPreffix)
if dbPreffix:
setClassName = "SetOf%ss" % db.getSelfClassName()
else:
setClassName = db.getProperty('self') # get the set class name
from pyworkflow.em import getObjects
setObj = getObjects()[setClassName](filename=dbName, prefix=dbPreffix)
return setObj
def _getValuesFromSet(self, columnName):
return [self._getValue(obj, columnName)
for obj in self._table.iterItems(orderBy=self._orderColumn,
direction=self._orderDirection)]
def _getValue(self, obj, column):
if column == 'id':
return obj.getObjId()
return obj.getNestedValue(column)
| UTF-8 | Python | false | false | 6,999 | py | 156 | plotter.py | 153 | 0.575225 | 0.569939 | 0 | 162 | 42.203704 | 126 |
ascourge21/Classifiers | 11,751,030,548,515 | cea9f7a2a1749f36de5e1eecdce56952dec44227 | 8dddfca9449a05c20e970aa11b2de98c5fcd8259 | /DataLoader.py | d565c6b2338cf8cfbcfc1a18d4d80a0ba239d348 | [] | no_license | https://github.com/ascourge21/Classifiers | 03206f647e21c2eb8152671733371bc6173eb32c | f719182cda0aafb6928068bcade0fabb0583d890 | refs/heads/master | "2018-01-08T15:37:03.355118" | "2017-12-01T00:32:19" | "2017-12-01T00:32:19" | 54,840,215 | 1 | 0 | null | false | "2017-12-01T00:32:20" | "2016-03-27T17:34:48" | "2017-10-15T18:12:17" | "2017-12-01T00:32:20" | 9 | 1 | 0 | 0 | Python | false | null | import numpy as np
import pickle
import csv
class DataLoader(object):
@staticmethod
def load_mnist_train():
mnist_train = []
with open('data/mnist_train.csv', 'rt') as csvfile:
cread = csv.reader(csvfile)
for row in cread:
vals = np.array([float(x) / 256 for x in row[1:]])
vals = vals.reshape((784,1))
res = np.zeros((10, 1))
res[int(row[0])] = 1
mnist_train.append([vals, res, row[0]])
x = np.zeros((len(mnist_train), 784))
y = np.zeros((len(mnist_train), 10))
y_l = np.zeros(len(mnist_train))
for i in range(len(mnist_train)):
x[i, :] = mnist_train[i][0].T
y[i, :] = mnist_train[i][1].T
y_l[i] = mnist_train[i][2]
return x, y, y_l
@staticmethod
def load_mnist_test():
mnist_test = []
with open('data/mnist_test.csv', 'rt') as csvfile:
cread = csv.reader(csvfile)
for row in cread:
vals = np.array([float(x) / 256 for x in row[1:]])
vals = vals.reshape((784,1))
res = np.int64(row[0])
mnist_test.append([vals, res, row[0]])
x = np.zeros((len(mnist_test), 784))
y = np.zeros((len(mnist_test), 10))
y_l = np.zeros(len(mnist_test))
for i in range(len(mnist_test)):
x[i, :] = mnist_test[i][0].T
y[i, :] = mnist_test[i][1].T
y_l[i] = mnist_test[i][2]
return x, y, y_l
#
# def load_cifar_train():
# mnist_train = []
# with open('data/cifar_train.csv', 'rt') as csvfile:
# cread = csv.reader(csvfile)
# for row in cread:
# vals = np.array([float(x) / 256 for x in row[1:]])
# vals = vals.reshape((3072,1))
# res = np.zeros((10, 1))
# res[int(row[0])] = 1
# mnist_train.append([vals, res])
# return mnist_train
#
#
# def load_cifar_test():
# mnist_test = []
# with open('data/cifar_test.csv', 'rt') as csvfile:
# cread = csv.reader(csvfile)
# for row in cread:
# vals = np.array([float(x) / 256 for x in row[1:]])
# vals = vals.reshape((3072,1))
# res = np.int64(row[0])
# mnist_test.append([vals, res])
# return mnist_test
| UTF-8 | Python | false | false | 2,329 | py | 9 | DataLoader.py | 9 | 0.495921 | 0.466724 | 0 | 69 | 32.73913 | 66 |
ZhaoYiChina/Flask-Project | 13,657,996,004,006 | 94aad27113335d8bdda503255f6ef4994e0276a6 | 99cd5db560876ae6c3153d045c9453b63f8de996 | /app_frontend/api/order.py | 3dc616a9a36e935aad37f4a78a302d03f3610734 | [] | no_license | https://github.com/ZhaoYiChina/Flask-Project | e74009133a4de9a1e29df0fb569c4d0e220ad310 | 509d054ec28a005cb61d1a2fa6b76ecabd2e3353 | refs/heads/master | "2023-02-11T00:26:13.607017" | "2021-01-04T09:05:08" | "2021-01-04T09:05:08" | 326,627,035 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: order.py
@time: 2017/4/13 下午9:45
"""
from app_frontend.models import Order
from app_frontend.tools.db import get_row, get_rows, get_lists, get_row_by_id, add, edit, delete
def get_order_row_by_id(order_id):
"""
通过 id 获取订单信息
:param order_id:
:return: None/object
"""
return get_row_by_id(Order, order_id)
def get_order_row(*args, **kwargs):
"""
获取订单信息
:param args:
:param kwargs:
:return: None/object
"""
return get_row(Order, *args, **kwargs)
def add_order(order_data):
"""
添加订单信息
:param order_data:
:return: None/Value of order.id
"""
return add(Order, order_data)
def edit_order(order_id, order_data):
"""
修改订单信息
:param order_id:
:param order_data:
:return: Number of affected rows (Example: 0/1)
"""
return edit(Order, order_id, order_data)
def delete_order(order_id):
"""
删除订单信息
:param order_id:
:return: Number of affected rows (Example: 0/1)
"""
return delete(Order, order_id)
def get_order_rows(page=1, per_page=10, *args, **kwargs):
"""
获取订单列表(分页)
Usage:
items: 信息列表
has_next: 如果本页之后还有超过一个分页,则返回True
has_prev: 如果本页之前还有超过一个分页,则返回True
next_num: 返回下一页的页码
prev_num: 返回上一页的页码
iter_pages(): 页码列表
iter_pages(left_edge=2, left_current=2, right_current=5, right_edge=2) 页码列表默认参数
:param page:
:param per_page:
:param args:
:param kwargs:
:return:
"""
rows = get_rows(Order, page, per_page, *args, **kwargs)
return rows
def get_order_lists(*args, **kwargs):
"""
获取订单列表信息
:param args:
:param kwargs:
:return:
"""
return get_lists(Order, *args, **kwargs)
| UTF-8 | Python | false | false | 2,050 | py | 306 | order.py | 192 | 0.599448 | 0.587293 | 0 | 91 | 18.89011 | 96 |
FireXStuff/firexapp | 6,665,789,274,145 | 294d9001c2a32fe09b2f3321e3fd35a82d3496cd | f7073a4271ca06c1b7ad1575fc95ff6f050c0613 | /firexapp/celery_manager.py | 8c4a9911be9017e303d8e5e6d57c9b7c8d5fc817 | [
"BSD-3-Clause"
] | permissive | https://github.com/FireXStuff/firexapp | d499d0de72559e088214e499c690819648aad043 | f9f9c55fdadec08e0d937d9e0a20eb7e76d951bd | refs/heads/master | "2023-09-01T02:23:47.557726" | "2023-08-24T20:40:36" | "2023-08-24T20:40:36" | 165,118,024 | 8 | 0 | NOASSERTION | false | "2022-07-19T17:14:12" | "2019-01-10T19:25:04" | "2022-02-02T02:12:34" | "2022-07-19T17:14:10" | 727 | 6 | 0 | 8 | Python | false | false | from firexapp.submit.console import setup_console_logging
from firexapp.submit.uid import Uid
from logging import INFO, DEBUG, WARNING
import os
import re
import subprocess
import psutil
from firexapp.broker_manager.broker_factory import BrokerFactory
from socket import gethostname
from firexapp.common import poll_until_file_not_empty, poll_until_dir_empty, find_procs
from firexapp.plugins import PLUGINS_ENV_NAME, cdl2list
from firexapp.fileregistry import FileRegistry
from collections.abc import Iterable
from firexapp.common import qualify_firex_bin
logger = setup_console_logging(__name__)

# Register the celery-related directories with the global FileRegistry so they
# can be resolved later relative to a run's logs directory.
# Top-level celery debug logs directory: <logs_dir>/<Uid.debug_dirname>/celery
CELERY_LOGS_REGISTRY_KEY = 'celery_logs'
FileRegistry().register_file(CELERY_LOGS_REGISTRY_KEY, os.path.join(Uid.debug_dirname, 'celery'))

# Directory for celery worker pid files, nested under the celery logs directory.
CELERY_PIDS_REGISTRY_KEY = 'celery_pids'
FileRegistry().register_file(CELERY_PIDS_REGISTRY_KEY,
                             os.path.join(FileRegistry().get_relative_path(CELERY_LOGS_REGISTRY_KEY), 'pids'))

# Directory for microservice (task) logs, directly under the logs directory.
MICROSERVICE_LOGS_REGISTRY_KEY = 'microservice_logs'
FileRegistry().register_file(MICROSERVICE_LOGS_REGISTRY_KEY, 'microservice_logs')
class CeleryWorkerStartFailed(Exception):
    """Raised to signal that a Celery worker process failed to start."""
    pass
class CeleryManager(object):
    """Launches, tracks, and shuts down Celery workers on the local host.

    Builds the ``celery worker`` command line, starts workers (optionally
    detached), records their pid files under the run's logs directory, and
    provides helpers to locate worker log/pid files and to terminate workers.
    """

    celery_bin_name = 'celery'

    def __init__(self, plugins=None, logs_dir=None, worker_log_level='debug', cap_concurrency=None,
                 app='firexapp.engine', env=None, broker=None):
        """Configure a manager for starting/stopping Celery workers.

        :param plugins: comma-delimited plugin file list, exported to workers
            via the plugins environment variable.
        :param logs_dir: root logs directory of the current run.
        :param worker_log_level: default celery ``--loglevel``.
        :param cap_concurrency: upper bound applied to concurrency/autoscale values.
        :param app: default celery app module for started workers.
        :param env: extra environment variables merged into the worker environment.
        :param broker: broker URL; discovered via BrokerFactory when not provided.
        """
        # BUG FIX: previously self.broker was only assigned inside the
        # "if not broker" branch, so an explicitly-provided broker left
        # self.broker unset (AttributeError on any later access).
        self.broker = broker if broker else BrokerFactory.get_broker_url(assert_if_not_set=True)

        self.hostname = gethostname()
        self.plugins = plugins
        self.logs_dir = logs_dir
        self.worker_log_level = worker_log_level
        self.cap_concurrency = cap_concurrency
        self.app = app

        self.env = os.environ.copy()
        # Enables celery's rdb signal handler for debugging workers.
        self.env['CELERY_RDBSIG'] = '1'
        self.update_env(self.get_plugins_env(plugins))
        if env:
            self.update_env(env)

        # workername -> pid file path, populated by start()
        self.pid_files = dict()

        # Lazily-created (and cached) log/pid directories
        self._celery_logs_dir = None
        self._celery_pids_dir = None
        self._workers_logs_dir = None

    @property
    def celery_bin(self):
        """Path to the celery executable, qualified against the firex bin location."""
        return qualify_firex_bin(self.celery_bin_name)

    @classmethod
    def log(cls, msg, header=None, level=DEBUG):
        """Log msg at level, prefixed with header (class name by default)."""
        if header is None:
            header = cls.__name__
        if header:
            msg = '[%s] %s' % (header, msg)
        logger.log(level, msg)

    def update_env(self, env):
        """Merge env (a dict) into the worker environment, stringifying all values."""
        assert isinstance(env, dict), 'env needs to be a dictionary'
        self.env.update({k: str(v) for k, v in env.items()})

    @staticmethod
    def get_plugins_env(plugins):
        """Return the env-var mapping that exposes the plugin files to workers."""
        plugin_files = cdl2list(plugins)
        return {PLUGINS_ENV_NAME: ",".join(plugin_files)}

    @staticmethod
    def get_celery_logs_dir(logs_dir):
        """Resolve the celery logs directory for a given run logs_dir."""
        return FileRegistry().get_file(CELERY_LOGS_REGISTRY_KEY, logs_dir)

    @staticmethod
    def get_celery_pids_dir(logs_dir):
        """Resolve the celery pid-files directory for a given run logs_dir."""
        return FileRegistry().get_file(CELERY_PIDS_REGISTRY_KEY, logs_dir)

    @staticmethod
    def get_worker_logs_dir(logs_dir):
        """Resolve the per-worker (microservice) logs directory for a run logs_dir."""
        return FileRegistry().get_file(MICROSERVICE_LOGS_REGISTRY_KEY, logs_dir)

    @property
    def celery_logs_dir(self):
        """Celery logs directory for this run; created on first access."""
        if not self._celery_logs_dir:
            _celery_logs_dir = self.get_celery_logs_dir(self.logs_dir)
            os.makedirs(_celery_logs_dir, exist_ok=True)
            self._celery_logs_dir = _celery_logs_dir
        return self._celery_logs_dir

    @property
    def celery_pids_dir(self):
        """Celery pid-files directory for this run; created on first access."""
        if not self._celery_pids_dir:
            _celery_pids_dir = self.get_celery_pids_dir(self.logs_dir)
            os.makedirs(_celery_pids_dir, exist_ok=True)
            self._celery_pids_dir = _celery_pids_dir
        return self._celery_pids_dir

    @property
    def workers_logs_dir(self):
        """Per-worker logs directory for this run; created on first access."""
        if not self._workers_logs_dir:
            _workers_logs_dir = self.get_worker_logs_dir(self.logs_dir)
            os.makedirs(_workers_logs_dir, exist_ok=True)
            self._workers_logs_dir = _workers_logs_dir
        return self._workers_logs_dir

    @classmethod
    def get_worker_log_file(cls, logs_dir, worker_and_host):
        """Return the html log-file path for a 'worker@host' under logs_dir."""
        return cls.__get_worker_log_file(cls.get_worker_logs_dir(logs_dir), worker_and_host)

    def _get_worker_log_file(self, workername):
        queue_and_worker = self.get_worker_and_host(workername, self.hostname)
        return self.__get_worker_log_file(self.workers_logs_dir, queue_and_worker)

    @staticmethod
    def __get_worker_log_file(worker_logs_dir, worker_and_host):
        return os.path.join(worker_logs_dir, '%s.html' % worker_and_host)

    @classmethod
    def get_pid_file(cls, logs_dir, workername, hostname=gethostname()):
        """Return the pid-file path for workername@hostname under logs_dir."""
        worker_and_host = cls.get_worker_and_host(workername, hostname)
        return cls.__get_pid_file(cls.get_celery_pids_dir(logs_dir), worker_and_host)

    def _get_pid_file(self, workername):
        worker_and_host = self.get_worker_and_host(workername, self.hostname)
        return self.__get_pid_file(self.celery_pids_dir, worker_and_host)

    @staticmethod
    def __get_pid_file(pids_logs_dir, worker_and_host):
        return os.path.join(pids_logs_dir, '%s.pid' % worker_and_host)

    def _get_stdout_file(self, workername):
        return os.path.join(self.celery_logs_dir, '%s@%s.stdout.txt' % (workername, self.hostname))

    @staticmethod
    def get_worker_and_host(workername, hostname):
        """Combine workername and hostname into celery's 'worker@host' form."""
        return '%s@%s' % (workername, hostname)

    @classmethod
    def get_pid_from_file(cls, pid_file):
        """Read and return the integer pid stored in pid_file.

        :raises FileNotFoundError: if the pid file does not exist.
        :raises AssertionError: if the pid file is empty.
        """
        try:
            with open(pid_file) as f:
                pid = f.read().strip()
        except FileNotFoundError:
            cls.log('No pid file found in %s' % pid_file, level=WARNING)
            raise
        else:
            if pid:
                return int(pid)
            else:
                raise AssertionError('no pid')

    @classmethod
    def get_pid(cls, logs_dir, workername, hostname=gethostname()):
        """Return the pid of workername@hostname recorded under logs_dir."""
        pid_file = cls.get_pid_file(logs_dir, workername, hostname)
        return cls.get_pid_from_file(pid_file)

    @classmethod
    def get_worker_pids(cls, logs_dir, hostname, workernames):
        """Return pids for the given workernames; workers without a readable pid are skipped."""
        hostname = gethostname() if hostname == 'localhost' else hostname
        pids = []
        for workername in workernames:
            try:
                pid = cls.get_pid(logs_dir, workername, hostname)
            except Exception as e:
                cls.log(e)
            else:
                pids.append(pid)
        return pids

    @staticmethod
    def cap_cpu_count(count, cap_concurrency):
        """Return count limited to cap_concurrency (no-op when cap is falsy)."""
        return min(count, cap_concurrency) if cap_concurrency else count

    def extract_errors_from_celery_logs(self, celery_log_file, max_errors=20):
        """Return up to max_errors '...Error: ...' lines from celery_log_file, or None."""
        err_list = None
        try:
            with open(celery_log_file, encoding='ascii', errors='ignore') as f:
                logs = f.read()
            err_list = re.findall(r'^\S*Error: .*$', logs, re.MULTILINE)
            if err_list:
                err_list = err_list[0:max_errors]
        except FileNotFoundError:
            pass
        return err_list

    def wait_until_active(self, pid_file, stdout_file, workername, timeout=15*60):
        """Block until pid_file is populated, i.e. the worker came up.

        On timeout, collect errors from stdout_file, attempt to pkill the
        worker invocation, and raise CeleryWorkerStartFailed.
        """
        extra_err_info = ''
        try:
            poll_until_file_not_empty(pid_file, timeout=timeout)
        except AssertionError:
            err_list = self.extract_errors_from_celery_logs(stdout_file)
            if err_list:
                extra_err_info += '\nFound the following errors:\n' + '\n'.join(err_list)
            extra_err_info += '\nAttempting to delete the invocation pids'
            deleted_pids = subprocess.run(['/bin/pkill', '-e', '-f', pid_file],
                                          capture_output=True,
                                          text=True)
            if deleted_pids.stdout:
                extra_err_info += f'\nstdout: {deleted_pids.stdout}'
            if deleted_pids.stderr:
                extra_err_info += f'\nstderr: {deleted_pids.stderr}'
            # BUG FIX: added the missing space between 'worker' and the worker name.
            raise CeleryWorkerStartFailed(f'The worker {workername}@{self.hostname} did not come up after'
                                          f' {timeout} seconds.\n'
                                          f'Please look into {stdout_file!r} for details.'
                                          f'{extra_err_info}')
        pid = self.get_pid_from_file(pid_file)
        self.log('pid %d became active' % pid)

    def start(self, workername, queues=None, wait=True, timeout=15*60, concurrency=None, worker_log_level=None,
              app=None, cap_concurrency=None, cwd=None, soft_time_limit=None, autoscale: tuple = None,
              detach: bool = True):
        """Start a celery worker on this host.

        :param workername: worker name (combined with the hostname).
        :param queues: comma-delimited queues for the worker to consume.
        :param wait: when detaching, block until the worker's pid file appears.
        :param timeout: seconds to wait for the worker to become active.
        :param concurrency: fixed pool size (mutually exclusive with autoscale).
        :param autoscale: (min, max) pool autoscale bounds.
        :param detach: launch in the background ('&') when True.
        :raises CeleryWorkerStartFailed: if the worker does not come up in time.
        """
        # Override defaults if applicable
        worker_log_level = worker_log_level if worker_log_level else self.worker_log_level
        app = app if app else self.app
        cap_concurrency = cap_concurrency if cap_concurrency else self.cap_concurrency

        stdout_file = self._get_stdout_file(workername)
        log_file = self._get_worker_log_file(workername)
        pid_file = self._get_pid_file(workername)
        self.pid_files[workername] = pid_file

        cmd = f'{self.celery_bin} ' \
              f'--app={app} worker ' \
              f'--hostname={workername}@%h ' \
              f'--loglevel={worker_log_level} ' \
              f'--logfile={log_file} ' \
              f'--pidfile={pid_file} ' \
              f'--events ' \
              f'--without-gossip ' \
              f'--without-heartbeat ' \
              f'--without-mingle ' \
              f'-Ofair'
        if queues:
            cmd += ' --queues=%s' % queues
        if concurrency and autoscale:
            raise AssertionError('You can either provide a value of concurrency or autoscale, but not both')
        if concurrency:
            cmd += ' --concurrency=%d' % self.cap_cpu_count(concurrency, cap_concurrency)
        elif autoscale:
            assert isinstance(autoscale, Iterable), 'autoscale should be a tuple of (min, max)'
            assert len(autoscale) == 2, 'autoscale should be a tuple of two elements (min, max)'
            autoscale_v1, autoscale_v2 = autoscale
            autoscale_min = min(autoscale_v1, autoscale_v2)
            autoscale_max = max(autoscale_v1, autoscale_v2)
            autoscale_min = self.cap_cpu_count(autoscale_min, cap_concurrency)
            autoscale_max = self.cap_cpu_count(autoscale_max, cap_concurrency)
            cmd += f' --autoscale={autoscale_max},{autoscale_min}'
        if soft_time_limit:
            cmd += f' --soft-time-limit={soft_time_limit}'

        # piping to ts is helpful for debugging if available
        try:
            subprocess.check_call(["which", "ts"], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
        except subprocess.CalledProcessError:
            pass
        else:
            cmd += " | ts '[%Y-%m-%d %H:%M:%S]'"

        if detach:
            cmd += ' &'

        self.log('Starting %s on %s...' % (workername, self.hostname))
        self.log(cmd)
        if cwd:
            self.log('cwd=%s' % cwd)
        with open(stdout_file, 'ab') as fp:
            subprocess.check_call(cmd, shell=True, stdout=fp, stderr=subprocess.STDOUT, env=self.env,
                                  cwd=cwd)
        if detach and wait:
            self.wait_until_active(pid_file=pid_file, timeout=timeout, stdout_file=stdout_file, workername=workername)

    @staticmethod
    def find_procs(pid_file):
        """Return celery processes whose cmdline references this pid file."""
        return find_procs('celery', cmdline_contains='--pidfile=%s' % pid_file)

    def find_all_procs(self):
        """Return all celery processes tracked via pid files in this run's pid dir."""
        procs = []
        for pid_file in os.listdir(self.celery_pids_dir):
            procs += self.find_procs(os.path.join(self.celery_pids_dir, pid_file))
        return procs

    def kill_all_forked(self, pid_file):
        """Forcibly kill every process associated with pid_file (best-effort)."""
        for proc in self.find_procs(pid_file):
            self.log('Killing pid %d' % proc.pid, level=INFO)
            try:
                proc.kill()
            except Exception:
                self.log('Failed to kill pid %d' % proc.pid, level=WARNING)

    @classmethod
    def terminate(cls, pid, timeout=60):
        """SIGTERM pid and wait up to timeout seconds for it to exit."""
        cls.log('Terminating pid %d' % pid, level=INFO)
        p = psutil.Process(pid)
        p.terminate()
        p.wait(timeout=timeout)

    def shutdown(self, timeout=60):
        """Terminate all workers this manager knows about (best-effort).

        Falls back to killing forked children when graceful termination
        fails or times out.
        """
        if self.pid_files:
            name_to_pid_file = self.pid_files
        else:
            # self.pid_files is only populated when starting celery, so if this manager didn't start the celery
            # instance being operated on, fallback to the pid directory.
            name_to_pid_file = {pf: os.path.join(self.celery_pids_dir, pf) for pf in os.listdir(self.celery_pids_dir)}
        for name, pid_file in name_to_pid_file.items():
            self.log('Attempting shutdown of %s' % name)
            try:
                pid = self.get_pid_from_file(pid_file)
            except Exception as e:
                self.log(e)
            else:
                try:
                    self.terminate(pid, timeout=timeout)
                except (psutil.TimeoutExpired, psutil.NoSuchProcess):
                    self.kill_all_forked(pid_file)
                except Exception as e:
                    self.log(e)

    def wait_for_shutdown(self, timeout=15):
        """Poll until all pid files are gone; return True iff the dir emptied in time."""
        return poll_until_dir_empty(self.celery_pids_dir, timeout=timeout)
| UTF-8 | Python | false | false | 13,246 | py | 78 | celery_manager.py | 61 | 0.597916 | 0.595878 | 0 | 338 | 38.189349 | 118 |