Dataset schema (one row per source file; ⌀ marks a nullable column):

| column | dtype | min | max |
|---|---|---|---|
| hexsha | string (length) | 40 | 40 |
| size | int64 | 5 | 2.06M |
| ext | string (10 classes) | | |
| lang | string (1 class) | | |
| max_stars_repo_path | string (length) | 3 | 248 |
| max_stars_repo_name | string (length) | 5 | 125 |
| max_stars_repo_head_hexsha | string (length) | 40 | 78 |
| max_stars_repo_licenses | sequence (length) | 1 | 10 |
| max_stars_count ⌀ | int64 | 1 | 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | string (length) | 24 | 24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string (length) | 24 | 24 |
| max_issues_repo_path | string (length) | 3 | 248 |
| max_issues_repo_name | string (length) | 5 | 125 |
| max_issues_repo_head_hexsha | string (length) | 40 | 78 |
| max_issues_repo_licenses | sequence (length) | 1 | 10 |
| max_issues_count ⌀ | int64 | 1 | 67k |
| max_issues_repo_issues_event_min_datetime ⌀ | string (length) | 24 | 24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string (length) | 24 | 24 |
| max_forks_repo_path | string (length) | 3 | 248 |
| max_forks_repo_name | string (length) | 5 | 125 |
| max_forks_repo_head_hexsha | string (length) | 40 | 78 |
| max_forks_repo_licenses | sequence (length) | 1 | 10 |
| max_forks_count ⌀ | int64 | 1 | 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | string (length) | 24 | 24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string (length) | 24 | 24 |
| content | string (length) | 5 | 2.06M |
| avg_line_length | float64 | 1 | 1.02M |
| max_line_length | int64 | 3 | 1.03M |
| alphanum_fraction | float64 | 0 | 1 |
| count_classes | int64 | 0 | 1.6M |
| score_classes | float64 | 0 | 1 |
| count_generators | int64 | 0 | 651k |
| score_generators | float64 | 0 | 1 |
| count_decorators | int64 | 0 | 990k |
| score_decorators | float64 | 0 | 1 |
| count_async_functions | int64 | 0 | 235k |
| score_async_functions | float64 | 0 | 1 |
| count_documentation | int64 | 0 | 1.04M |
| score_documentation | float64 | 0 | 1 |
26c8199913901f96201fe9b8091ee36c1351a53e | 347 | py | Python | examples/prompt.py | nelice/bullet | aafec4d0ca8f628d2be9b0667c50477929c2cca7 | ["MIT"] | 1 | 2021-03-22T07:55:30.000Z | 2021-03-22T07:55:30.000Z | examples/prompt.py | nelice/bullet | aafec4d0ca8f628d2be9b0667c50477929c2cca7 | ["MIT"] | null | null | null | examples/prompt.py | nelice/bullet | aafec4d0ca8f628d2be9b0667c50477929c2cca7 | ["MIT"] | null | null | null |
from bullet import Bullet, Prompt, Check, Input, YesNo
from bullet import styles
cli = Prompt(
[
Bullet("Choose from a list: ", **styles.Example),
Check("Choose from a list: ", **styles.Example),
Input("Who are you? "),
YesNo("Are you a student? ")
],
spacing = 2
)
result = cli.launch()
print(result)
| 23.133333 | 57 | 0.599424 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 80 | 0.230548 |
26c8a00c561378714f8ad7990f244b2a1e695121 | 671 | py | Python | testsuite/tests/T618-047__Ada_2012/run_test.py | AdaCore/style_checker | 17108ebfc44375498063ecdad6c6e4430458e60a | ["CNRI-Python"] | 2 | 2017-10-22T18:04:26.000Z | 2020-03-06T11:07:41.000Z | testsuite/tests/T618-047__Ada_2012/run_test.py | AdaCore/style_checker | 17108ebfc44375498063ecdad6c6e4430458e60a | ["CNRI-Python"] | null | null | null | testsuite/tests/T618-047__Ada_2012/run_test.py | AdaCore/style_checker | 17108ebfc44375498063ecdad6c6e4430458e60a | ["CNRI-Python"] | 4 | 2018-05-22T12:08:54.000Z | 2020-12-14T15:25:27.000Z |
def test_pck_2012_adb(style_checker):
"""Style check test against pck_2012.adb."""
style_checker.set_year(2006)
p = style_checker.run_style_checker('repo_name', 'pck_2012.ads')
style_checker.assertEqual(p.status, 0, p.image)
style_checker.assertRunOutputEmpty(p)
def test_pck_2012_adb_with_alt_config_forcing_gnat2012(style_checker):
"""Style check test against pck_2012.adb with gnat12 config option."""
style_checker.set_year(2006)
p = style_checker.run_style_checker(
'--config', 'gnat2012_config.yaml', 'repo_name', 'pck_2012.ads')
style_checker.assertEqual(p.status, 0, p.image)
style_checker.assertRunOutputEmpty(p)
| 41.9375 | 74 | 0.754098 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 196 | 0.292101 |
26cacd8b2394e2ededf66d1f7ced4b0560e95348 | 594 | py | Python | src/volume_0/0011_Drawing_Lots.py | DaikiShimada/aoj-exercise | dd4b70d4fd64aa28bc4cc75f5cdb8d02ea796803 | ["MIT"] | null | null | null | src/volume_0/0011_Drawing_Lots.py | DaikiShimada/aoj-exercise | dd4b70d4fd64aa28bc4cc75f5cdb8d02ea796803 | ["MIT"] | null | null | null | src/volume_0/0011_Drawing_Lots.py | DaikiShimada/aoj-exercise | dd4b70d4fd64aa28bc4cc75f5cdb8d02ea796803 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import sys
def amida(w, side_bar):
result = []
side_bar.reverse()
for x in range(1, w+1):
status = x
for bar in side_bar:
if status == bar[0]:
status = bar[1]
elif status == bar[1]:
status = bar[0]
result.append(status)
return result
def main():
W = int(input())
N = int(input())
side_bar = [tuple(map(int, input().split(','))) for line in range(N)]
result = amida(W, side_bar)
for r in result:
print(r)
if __name__ == '__main__':
main()
| 21.214286 | 73 | 0.503367 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.060606 |
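The amida() tracer above swaps the current line every time it crosses a horizontal bar, after reversing the bar list. A standalone check of that behavior, assuming the function above is in scope:

```python
# One bar between lines 1 and 2: the endpoints swap (bar order is moot here).
assert amida(2, [(1, 2)]) == [2, 1]

# Bars (1, 2) then (2, 3): amida() reverses the list first, i.e. it traces the
# ladder bottom-up, producing the inverse of the top-down mapping [3, 1, 2].
assert amida(3, [(1, 2), (2, 3)]) == [2, 3, 1]
```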
26cbd6df4059d6dbdf0c29f052b92ccdc1a7a881 | 1,832 | py | Python | mglg/util/profiler.py | aforren1/mglg | a9b703e109a66377dd404929fc0b13ccc12b5214 | ["MIT"] | null | null | null | mglg/util/profiler.py | aforren1/mglg | a9b703e109a66377dd404929fc0b13ccc12b5214 | ["MIT"] | 9 | 2019-08-05T21:11:09.000Z | 2021-11-18T18:19:33.000Z | mglg/util/profiler.py | aforren1/mglg | a9b703e109a66377dd404929fc0b13ccc12b5214 | ["MIT"] | null | null | null |
from timeit import default_timer
import numpy as np
class Profiler:
__slots__ = ('active', 'gpuquery', 't0',
'cpubuffer', 'gpubuffer', 'counter',
'_size', 'worst_cpu', 'worst_gpu')
def __init__(self, gpu=False, ctx=None, buffer_size=200):
self.active = False
self.gpuquery = None
if gpu and ctx is not None:
self.gpuquery = ctx.query(time=True)
self.cpubuffer = np.zeros(buffer_size, dtype='f4')
self.gpubuffer = np.zeros(buffer_size, dtype='f4')
self._size = buffer_size
self.counter = 0
self.worst_cpu = 0
self.worst_gpu = 0
def begin(self):
if self.active:
if self.gpuquery:
self.gpuquery.mglo.begin()
self.t0 = default_timer()
def end(self):
t1 = default_timer()
if self.active:
if self.gpuquery:
self.gpuquery.mglo.end()
if self.counter < self._size:
self.worst_gpu = 0
self.worst_cpu = 0
cpu_time = (t1 - self.t0) * 1000 # ms
self.cpubuffer[self.counter % self._size] = cpu_time
self.worst_cpu = cpu_time if cpu_time > self.worst_cpu else self.worst_cpu
if self.gpuquery:
gpu_time = self.gpuquery.elapsed/1000000.0 # ms
self.gpubuffer[self.counter % self._size] = gpu_time
self.worst_gpu = gpu_time if gpu_time > self.worst_gpu else self.worst_gpu
self.counter += 1
def reset(self):
self.cpubuffer[:] = 0
self.gpubuffer[:] = 0
self.counter = 0
self.worst_cpu = 0
self.worst_gpu = 0
def __enter__(self):
self.begin()
return self
def __exit__(self, *args):
self.end()
| 31.586207 | 90 | 0.555131 | 1,777 | 0.969978 | 0 | 0 | 0 | 0 | 0 | 0 | 98 | 0.053493 |
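A minimal usage sketch for the Profiler above, timing CPU work only (no moderngl context, so the GPU query stays disabled); the sleep is a stand-in for real frame work:

```python
import time

prof = Profiler(gpu=False)      # GPU timing would need a moderngl context via ctx=
prof.active = True              # profiling is opt-in through the active flag

for _ in range(5):
    with prof:                  # __enter__/__exit__ call begin()/end()
        time.sleep(0.01)        # pretend this is one frame of work

print(prof.counter, prof.worst_cpu)   # frames timed, worst frame time in ms
```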
26ccd5bc1d5e387e612a0f077f3e861929e6b021 | 2,972 | py | Python | toolbox/exp/Experiment.py | LinXueyuanStdio/KGE-toolbox | 916842835e61ba99dde1409592977a2ec55f8aae | ["Apache-2.0"] | 2 | 2021-10-17T17:50:24.000Z | 2021-12-13T05:22:46.000Z | toolbox/exp/Experiment.py | LinXueyuanStdio/KGE-toolbox | 916842835e61ba99dde1409592977a2ec55f8aae | ["Apache-2.0"] | null | null | null | toolbox/exp/Experiment.py | LinXueyuanStdio/KGE-toolbox | 916842835e61ba99dde1409592977a2ec55f8aae | ["Apache-2.0"] | null | null | null |
import numpy as np
from toolbox.exp.OutputSchema import OutputSchema
from toolbox.utils.LaTeXSotre import EvaluateLaTeXStoreSchema
from toolbox.utils.MetricLogStore import MetricLogStoreSchema
from toolbox.utils.ModelParamStore import ModelParamStoreSchema
from toolbox.utils.Visualize import VisualizeSchema
class Experiment:
def __init__(self, output: OutputSchema, local_rank: int = -1):
self.output = output
self.local_rank = local_rank
self.debug = self.log_in_main_node(output.logger.debug)
self.log = self.log_in_main_node(output.logger.info)
self.warn = self.log_in_main_node(output.logger.warn)
self.error = self.log_in_main_node(output.logger.error)
self.critical = self.log_in_main_node(output.logger.critical)
self.success = self.log_in_main_node(output.logger.success)
self.fail = self.log_in_main_node(output.logger.failed)
self.vis = VisualizeSchema(str(output.pathSchema.dir_path_visualize))
self.model_param_store = ModelParamStoreSchema(output.pathSchema)
self.metric_log_store = MetricLogStoreSchema(str(output.pathSchema.dir_path_log))
self.latex_store = EvaluateLaTeXStoreSchema(output.pathSchema)
def re_init(self, output: OutputSchema, local_rank: int = -1):
self.output = output
self.local_rank = local_rank
self.debug = self.log_in_main_node(output.logger.debug)
self.log = self.log_in_main_node(output.logger.info)
self.warn = self.log_in_main_node(output.logger.warn)
self.error = self.log_in_main_node(output.logger.error)
self.critical = self.log_in_main_node(output.logger.critical)
self.success = self.log_in_main_node(output.logger.success)
self.fail = self.log_in_main_node(output.logger.failed)
self.vis = VisualizeSchema(str(output.pathSchema.dir_path_visualize))
self.model_param_store = ModelParamStoreSchema(output.pathSchema)
self.metric_log_store = MetricLogStoreSchema(str(output.pathSchema.dir_path_log))
self.latex_store = EvaluateLaTeXStoreSchema(output.pathSchema)
def log_in_main_node(self, log_func):
if self.local_rank == 0:
return log_func
return lambda x: [x]
def dump_model(self, model):
self.debug(model)
self.debug("")
self.debug("Trainable parameters:")
num_params = 0
for name, param in model.named_parameters():
if param.requires_grad:
ps = np.prod(param.size())
num_params += ps
self.debug(f"{name}: {sizeof_fmt(ps)}")
self.log('Total Parameters: %s' % sizeof_fmt(num_params))
self.debug("")
def sizeof_fmt(num, suffix='B'):
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
| 44.358209 | 89 | 0.685397 | 2,411 | 0.811238 | 0 | 0 | 0 | 0 | 0 | 0 | 134 | 0.045087 |
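sizeof_fmt() above divides by 1024 until the value fits a prefix, so parameter counts print with binary units. Two values worked through exactly as the function computes them:

```python
# 1536 = 1.5 * 1024  -> one division,    unit 'Ki'
# 3 * 1024**3        -> three divisions, unit 'Gi'
assert sizeof_fmt(1536) == "1.5KiB"
assert sizeof_fmt(3 * 1024**3) == "3.0GiB"
```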
26cdda5a2dd54f427c59a8a3d865986d8ec6b5ee | 893 | py | Python | src/dataloader/tests/runner.py | ODM2/ODM2DataSharingPortal | 4ea1d633fe8e1cc39916e83041f2dbc830339e55 | ["BSD-3-Clause"] | 18 | 2018-11-27T11:57:24.000Z | 2022-03-19T16:52:35.000Z | src/dataloader/tests/runner.py | ODM2/ODM2DataSharingPortal | 4ea1d633fe8e1cc39916e83041f2dbc830339e55 | ["BSD-3-Clause"] | 362 | 2018-02-21T16:27:00.000Z | 2022-03-31T18:48:48.000Z | src/dataloader/tests/runner.py | ODM2/ODM2DataSharingPortal | 4ea1d633fe8e1cc39916e83041f2dbc830339e55 | ["BSD-3-Clause"] | 5 | 2018-07-04T17:13:09.000Z | 2021-12-19T22:51:40.000Z |
import json
import os
from django.core.management import call_command
from django.test.runner import DiscoverRunner
from django.db import connections
from dataloader.tests.data import data_manager
class ODM2TestRunner(DiscoverRunner):
test_connection = None
database_alias = u'odm2'
def setup_test_environment(self, **kwargs):
data_manager.load_models_data()
call_command('makemigrations', 'dataloader')
super(ODM2TestRunner, self).setup_test_environment(**kwargs)
def setup_databases(self, **kwargs):
old_names = []
self.test_connection = connections[self.database_alias]
old_names.append((self.test_connection, self.test_connection.settings_dict['NAME'], True))
self.test_connection.creation.create_test_db(
verbosity=self.verbosity,
keepdb=self.keepdb
)
return old_names
| 29.766667 | 98 | 0.721165 | 691 | 0.773796 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.045913 |
26cf29a0e44e798901be0b42a84cea83caaf14fe | 364 | py | Python | plugins/rain.py | xditya/PikaBotPlugins | 2c5c52716158cd8964220bcc71fa383ccaf1210a | ["Apache-2.0"] | 2 | 2021-02-16T05:35:41.000Z | 2021-05-25T16:59:47.000Z | plugins/rain.py | xditya/PikaBotPlugins | 2c5c52716158cd8964220bcc71fa383ccaf1210a | ["Apache-2.0"] | null | null | null | plugins/rain.py | xditya/PikaBotPlugins | 2c5c52716158cd8964220bcc71fa383ccaf1210a | ["Apache-2.0"] | 2 | 2021-02-07T03:09:40.000Z | 2021-05-25T16:59:59.000Z |
#Originally created By KingMars ✅ Rain Sequence 2 {Updated}
from telethon import events
import asyncio
from collections import deque
# ItzSjDude is presumably the event-handler decorator injected by the bot's plugin loader
@ItzSjDude(outgoing=True, pattern=r"km_rain2")
async def _(event):
if event.fwd_from:
return
deq = deque(list("☁️⛈Ř/~\İŇ🌬⚡🌪"))
for _ in range(100):
await asyncio.sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
| 22.75 | 59 | 0.717033 | 0 | 0 | 0 | 0 | 245 | 0.639687 | 198 | 0.516971 | 105 | 0.274151 |
26cf746287a13ed33dacec35f0898c4fe183c37a | 73 | py | Python | preprocessing/__init__.py | WiktorSa/Music-Generation-with-LSTM-and-.wav-files | 37b713b5e6193788a7710cc0fac4134efb74fa62 | ["MIT"] | 1 | 2022-03-09T20:13:57.000Z | 2022-03-09T20:13:57.000Z | preprocessing/__init__.py | WiktorSa/Music-Generation-with-LSTM-and-.wav-files | 37b713b5e6193788a7710cc0fac4134efb74fa62 | ["MIT"] | 1 | 2021-10-01T16:20:06.000Z | 2021-10-01T17:25:30.000Z | preprocessing/__init__.py | WiktorSa/Music-Generation-with-LSTM-and-.wav-files | 37b713b5e6193788a7710cc0fac4134efb74fa62 | ["MIT"] | null | null | null |
from preprocessing.generate_and_save_data import generate_and_save_data
| 36.5 | 72 | 0.917808 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
26cfb507f5245413925f5d6ffbbfcea4aa484298 | 6,126 | py | Python | plot.py | lizzieayton/PrimordialOozebot | 1e330b1ac6f27bd167734ad6c6ecff70f816986a | ["MIT"] | null | null | null | plot.py | lizzieayton/PrimordialOozebot | 1e330b1ac6f27bd167734ad6c6ecff70f816986a | ["MIT"] | null | null | null | plot.py | lizzieayton/PrimordialOozebot | 1e330b1ac6f27bd167734ad6c6ecff70f816986a | ["MIT"] | null | null | null |
import matplotlib.pyplot as plt
import csv
import statistics
import math
plt.title('Population Diversity')
plt.ylabel('Diversity Score')
plt.xlabel('Iteration Number')
random = []
randombars = []
rmin = []
rmax = []
hill = []
hillbars = []
hmin = []
hmax = []
evo = []
emin = []
emax = []
evobars = []
cross = []
crossbars = []
cmin = []
cmax = []
numRuns = 5
numIterations = 100000000
sqrtRuns = math.sqrt(numRuns)
iterationDataRandom = []
iterationDataHill = []
iterationDataEvo = []
iterationDataCross = []
indicesToPlot = [10, 15, 20, 25]
index = 60
while indicesToPlot[-1] < numIterations:
indicesToPlot.append(index)
index = int(index * 1.02)
indicesToPlot[-1] = numIterations - 1
#xtiks = []
#for i in range(10):
# xtiks.append(int(numIterations / 5 * i))
#plt.xticks(xtiks)
for i in range(1, numRuns + 1):
iterationDataRandom.append({})
iterationDataHill.append({})
iterationDataEvo.append({})
iterationDataCross.append({})
with open('rand' + str(i) + '.csv', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=' ', quotechar='|')
index = 0
for row in reversed(list(reader)):
vals = row[0].split(',')
iteration = int(vals[0])
val = float(vals[1])
while index < len(indicesToPlot) - 1 and indicesToPlot[index + 1] < iteration:
index += 1
iterationDataRandom[-1][indicesToPlot[index]] = val
with open('hill' + str(i) + '.csv', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=' ', quotechar='|')
index = 0
for row in reversed(list(reader)):
vals = row[0].split(',')
iteration = int(vals[0])
val = float(vals[2])
while index < len(indicesToPlot) - 1 and indicesToPlot[index] < iteration:
index += 1
iterationDataHill[-1][indicesToPlot[index]] = val
with open('evo' + str(i) + '.csv', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=' ', quotechar='|')
index = 0
for row in reversed(list(reader)):
vals = row[0].split(',')
iteration = int(vals[0]) * 100
val = float(vals[2])
while index < len(indicesToPlot) - 1 and indicesToPlot[index] < iteration:
index += 1
iterationDataEvo[-1][indicesToPlot[index]] = val
with open('ed' + str(i) + '.csv', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=' ', quotechar='|')
index = 0
for row in reversed(list(reader)):
vals = row[0].split(',')
iteration = int(vals[0])
val = float(vals[2])
while index < len(indicesToPlot) - 1 and indicesToPlot[index] < iteration:
index += 1
iterationDataCross[-1][indicesToPlot[index]] = val
print("Done reading data")
unifiedRandom = []
unifiedHill = []
unifiedEvo = []
unifiedCross = []
index = 0
for iteration in indicesToPlot:
currentRandom = []
currentHill = []
currentEvo = []
currentCross = []
unifiedRandom.append(currentRandom)
unifiedHill.append(currentHill)
unifiedEvo.append(currentEvo)
unifiedCross.append(currentCross)
for run in range(numRuns):
valRandom = -1
if iteration in iterationDataRandom[run]:
valRandom = iterationDataRandom[run][iteration]
else:
# unchanged
valRandom = unifiedRandom[-2][run]
currentRandom.append(valRandom)
valHill = -1
if iteration in iterationDataHill[run]:
valHill = iterationDataHill[run][iteration]
else:
# unchanged
valHill = unifiedHill[-2][run]
currentHill.append(valHill)
valEvo = -1
if iteration in iterationDataEvo[run]:
valEvo = iterationDataEvo[run][iteration]
else:
#unchanged
valEvo = unifiedEvo[-2][run]
currentEvo.append(valEvo)
valCross = -1
if iteration in iterationDataCross[run]:
valCross = iterationDataCross[run][iteration]
else:
#unchanged
valCross = unifiedCross[-2][run]
currentCross.append(valCross)
randomAverage = statistics.mean(currentRandom)
randomError = statistics.stdev(currentRandom) / sqrtRuns
random.append(randomAverage)
randombars.append(randomError)
hillAverage = statistics.mean(currentHill)
hillError = statistics.stdev(currentHill) / sqrtRuns
hill.append(hillAverage)
hillbars.append(hillError)
evoAverage = statistics.mean(currentEvo)
evoError = statistics.stdev(currentEvo) / sqrtRuns
evo.append(evoAverage)
evobars.append(evoError)
crossAverage = statistics.mean(currentCross)
crossError = statistics.stdev(currentCross) / sqrtRuns
cross.append(crossAverage)
crossbars.append(crossError)
for i in range(len(random)):
rmin.append(random[i] - randombars[i])
rmax.append(random[i] + randombars[i])
hmin.append(hill[i] - hillbars[i])
hmax.append(hill[i] + hillbars[i])
emin.append(evo[i] - evobars[i])
emax.append(evo[i] + evobars[i])
cmin.append(cross[i] - crossbars[i])
cmax.append(cross[i] + crossbars[i])
print("Done processing data")
plt.xscale('log')
#plt.yscale('log')
#plt.plot(indicesToPlot, random, color='blue', linewidth=1, label='Random Search')
plt.plot(indicesToPlot, hill, color='green', linewidth=1, label='Parallel Hill Climb')
plt.plot(indicesToPlot, evo, color='red', linewidth=1, label='Weighted Selection')
plt.plot(indicesToPlot, cross, color='blue', linewidth=1, label='Parental Replacement')
plt.fill_between(indicesToPlot, hmin, hmax, facecolor='green', lw=0, alpha=0.5)
plt.fill_between(indicesToPlot, emin, emax, facecolor='red', lw=0, alpha=0.5)
plt.fill_between(indicesToPlot, cmin, cmax, facecolor='blue', lw=0, alpha=0.5)
#plt.fill_between(indicesToPlot, rmin, rmax, facecolor='blue', lw=0, alpha=0.5)
plt.legend(loc='best')
plt.savefig('diversityp.png', dpi=500)
plt.show()
| 30.939394 | 90 | 0.623572 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 628 | 0.102514 |
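The indicesToPlot list above grows by 2% per step, so the sampled iterations are roughly log-spaced and the log-x plot keeps an even point density. A sketch of the same spacing with numpy (the num=800 sample count is an assumption, not taken from the script):

```python
import numpy as np

num_iterations = 100_000_000
# multiplicative spacing from 60 up to the last iteration, deduplicated after
# truncation to integers; equivalent in spirit to the 1.02 growth loop above
log_spaced = np.unique(np.geomspace(60, num_iterations - 1, num=800).astype(int))
```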
26cfea22c43edc42786c9199d503d77927f66e4d | 1,918 | py | Python | python/obra_hacks/backend/commands.py | brandond/obra-hacks | df451c6c6cd78b48f6e32bbd102a8e8a6bd77cb3 | ["Apache-2.0"] | null | null | null | python/obra_hacks/backend/commands.py | brandond/obra-hacks | df451c6c6cd78b48f6e32bbd102a8e8a6bd77cb3 | ["Apache-2.0"] | null | null | null | python/obra_hacks/backend/commands.py | brandond/obra-hacks | df451c6c6cd78b48f6e32bbd102a8e8a6bd77cb3 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from datetime import date
import click
from .data import DISCIPLINE_MAP
from .outputs import OUTPUT_MAP
@click.command()
@click.option('--discipline', type=click.Choice(DISCIPLINE_MAP.keys()), required=True)
@click.option('--output', type=click.Choice(sorted(OUTPUT_MAP.keys())), default='text')
@click.option('--scrape/--no-scrape', default=True)
@click.option('--debug/--no-debug', default=False)
def cli(discipline, output, scrape, debug):
log_level = 'DEBUG' if debug else 'INFO'
logging.basicConfig(level=log_level, format='%(levelname)s:%(module)s.%(funcName)s:%(message)s')
# Import these after setting up logging otherwise we don't get logs
from .scrapers import clean_events, scrape_year, scrape_new, scrape_parents, scrape_recent
from .upgrades import confirm_pending_upgrades, recalculate_points, print_points, sum_points
from .rankings import calculate_race_ranks
from .models import db
with db.atomic('IMMEDIATE'):
if scrape:
# Scrape last 5 years of results
cur_year = date.today().year
for year in range(cur_year - 6, cur_year + 1):
scrape_year(year, discipline)
scrape_parents(year, discipline)
clean_events(year, discipline)
# Load in anything new
scrape_new(discipline)
# Check for updates to anything touched in the last three days
scrape_recent(discipline, 3)
# Calculate points from new data
if recalculate_points(discipline, incremental=False):
calculate_race_ranks(discipline, incremental=False)
sum_points(discipline)
confirm_pending_upgrades(discipline)
# Finally, output data
print_points(discipline, output)
if __name__ == '__main__':
cli()
| 34.25 | 100 | 0.684567 | 0 | 0 | 0 | 0 | 1,668 | 0.869656 | 0 | 0 | 438 | 0.228363 |
26d2a8925926b05405485ed3b4fa01550942c26f | 657 | py | Python | join_json.py | ryavorsky/med_robo | 56f8d2067921ef7208166380e50af0600c10032a | ["CC0-1.0"] | null | null | null | join_json.py | ryavorsky/med_robo | 56f8d2067921ef7208166380e50af0600c10032a | ["CC0-1.0"] | null | null | null | join_json.py | ryavorsky/med_robo | 56f8d2067921ef7208166380e50af0600c10032a | ["CC0-1.0"] | null | null | null |
import json
with open('bibliography.json', 'r', encoding='utf-8') as bib_data:
bib = sorted(json.load(bib_data), key=lambda d: d['ID'])
with open('abstracts.json', 'r', encoding='utf-8') as tex_data:
tex = sorted(json.load(tex_data), key=lambda d: d['ID'])
ID1 = [b['ID'] for b in bib]
ID2 = [t['ID'] for t in tex]
# positional merge: assumes both files contain the same IDs in the same sorted order
for i in range(len(ID1)):
bib[i]['reference'] = tex[i]['title']
bib[i]['abstract'] = tex[i]['abstract']
print('Done')
with open('med_robo_papers.json', 'w', encoding='utf-8') as res_file:
res_file.write(json.dumps(bib, indent=4, ensure_ascii=False, sort_keys=True))
| 28.565217 | 82 | 0.614916 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 147 | 0.223744 |
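The merge above pairs the two sorted lists by position, which silently misaligns if either file lacks an ID the other has. A dict keyed by ID is a safer variant (a sketch, not the author's code; bib and tex as loaded above):

```python
# Keying by ID makes a missing abstract fail loudly instead of misaligning.
tex_by_id = {t['ID']: t for t in tex}
for b in bib:
    t = tex_by_id[b['ID']]        # raises KeyError on any ID mismatch
    b['reference'] = t['title']
    b['abstract'] = t['abstract']
```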
26d58240f4233e1d13f48a78a83f734ca262cc13 | 147 | py | Python | Qcover/simulator/__init__.py | BAQIS-Quantum/Qcover | ca3776ed73fefa0cfef08042143a8cf842f8dad5 | ["Apache-2.0"] | 38 | 2021-12-22T03:12:01.000Z | 2022-03-17T06:57:10.000Z | Qcover/simulator/__init__.py | BAQIS-Quantum/Qcover | ca3776ed73fefa0cfef08042143a8cf842f8dad5 | ["Apache-2.0"] | null | null | null | Qcover/simulator/__init__.py | BAQIS-Quantum/Qcover | ca3776ed73fefa0cfef08042143a8cf842f8dad5 | ["Apache-2.0"] | 13 | 2021-12-22T07:32:44.000Z | 2022-02-28T06:47:41.000Z |
from .qton import Qcircuit, Qcodes
import warnings
warnings.filterwarnings("ignore")
__all__ = [
# 'Simulator',
'Qcircuit',
'Qcodes'
]
| 16.333333 | 34 | 0.680272 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.272109 |
26d8d630adbf36e69e2b1f614c164c0bdbf94301 | 7,563 | py | Python | pizzerias/pizzerias_search.py | LiushaHe0317/pizzerias_block_search | 16dd7fb20b1a29a4f16b28ac7e5a84b30f7f9a7b | ["MIT"] | null | null | null | pizzerias/pizzerias_search.py | LiushaHe0317/pizzerias_block_search | 16dd7fb20b1a29a4f16b28ac7e5a84b30f7f9a7b | ["MIT"] | null | null | null | pizzerias/pizzerias_search.py | LiushaHe0317/pizzerias_block_search | 16dd7fb20b1a29a4f16b28ac7e5a84b30f7f9a7b | ["MIT"] | null | null | null |
from typing import Sequence
import numpy
class PizzeriasSearcher:
"""
This object takes the size of the city and number of shops, and construct the matrices each shop delivery can cover
and number of delivery for each cell in the city. It can also computes number of delivery for a given cell,
maximum of number of delivery, and a sequence of cell coordinates which have the maximum.
:param n_of_block: An integer which indicates the size of the city.
:param shop_covers: A sequence of sequences, each sequence contains a tuple of two integers representing the
coordinate of a pizzerias shop and an integer representing the distance the shop could cover.
"""
def __init__(self, n_of_block: int, shop_covers: Sequence):
self.n_of_block = n_of_block
self.shop_covers = shop_covers
def each_shop_matrix(self, shop_loc: Sequence):
"""
This method takes the location of a shop and dimensionality of the city, converts to a 2D ``numpy.ndarray``
which indicates the whole area a pizzerias shop delivery service can cover.
:param shop_loc: A sequence containing a tuple of two integers which indicate the coordinates on x- and y- axis and
an integer which indicates the farthest distance a delivery guy can go.
:return: A 2D ``numpy.ndarray``.
"""
(x_initial, y_initial), r = shop_loc
matrix = numpy.zeros([self.n_of_block, self.n_of_block])
# convert x, y coordinates
x_center = x_initial - 1 # in numpy, x axis = 1
y_center = self.n_of_block - y_initial # in numpy, y axis = 0
# create a list of x or y coordinate which indicates the cells the shop could cover
x_list = [x for x in range(x_center-r, x_center+r+1) if x >= 0 and x < self.n_of_block]
# y_list = [y for y in range(y_center-r, y_center+r+1) if y >= 0 and y <= n_of_block-1]
for d1 in x_list:
high_bound = y_center + r - numpy.abs(d1 - x_center) + 1
low_bound = y_center - r + numpy.abs(d1 - x_center)
matrix[low_bound:high_bound, d1] = 1
return matrix
def area_matrix(self, loc: Sequence, radius: int):
"""
This method takes a tuple of coordinates and a radius, construct a sub-matrix of the city matrix accordingly.
:param loc: A tuple of integers.
:param radius: An integer.
:return: A 2D ``numpy.ndarray``.
"""
x_initial, y_initial = loc
if y_initial < 0 or x_initial > self.n_of_block or x_initial < 0 or y_initial > self.n_of_block:
raise ValueError('The location is out of city range.')
else:
y_center = self.n_of_block - y_initial
x_center = x_initial - 1
low0 = y_center - radius if y_center - radius >= 0 else 0
high0 = y_center + radius + 1 if y_center + radius + 1 <= self.n_of_block else self.n_of_block
left1 = x_center - radius if x_center - radius >= 0 else 0
right1 = x_center + radius + 1 if x_center + radius + 1 <= self.n_of_block else self.n_of_block
return self.pizzerias_matrix[low0: high0, left1: right1]
def maximum_in_matrix(self, matrix=None):
"""
This method returns the maximum a city block could have.
:param matrix: A ``numpy.ndarray``.
:return: An integer.
"""
if isinstance(matrix, numpy.ndarray):
return int(numpy.amax(matrix))
elif matrix is None:
return int(numpy.amax(self.pizzerias_matrix))
else:
raise Exception('Accept numpy.ndarray only!')
def max_locations(self, matrix=None, d0_start=0, d1_start=0):
"""
This method returns a set of cells which have maximum.
        :param matrix: A ``numpy.ndarray``.
:param d0_start: An integer.
:param d1_start: An integer.
:return: A set of tuples.
"""
if matrix is None:
d0, d1 = numpy.where(self.pizzerias_matrix == numpy.amax(self.pizzerias_matrix))
return {(x + 1, self.n_of_block - d0[i]) for i, x in enumerate(d1)}
elif isinstance(matrix, numpy.ndarray):
d0, d1 = numpy.where(matrix == numpy.amax(matrix))
return {(x + 1 + d1_start, self.n_of_block - (d0[i] + d0_start)) for i, x in enumerate(d1)}
else:
raise Exception('Accept numpy.ndarray only!')
@property
def no_of_pizzeriass(self):
"""
This method returns the total number of shops in the city.
"""
return len(self.shop_covers)
@property
def pizzerias_matrix(self):
"""
This method returns a matrix indicating the whole picture of pizzerias delivery services.
"""
p_matrix = numpy.zeros([self.n_of_block, self.n_of_block])
for shop_loc in self.shop_covers:
p_matrix += self.each_shop_matrix(shop_loc)
return p_matrix
def check_location(self, home_loc: Sequence, report=False):
"""
This method takes a tuple of two integers which indicate the coordinate of a given home location.
:param home_loc: A tuple of integers.
:return: number of delivery in the current location.
"""
num = self.pizzerias_matrix[self.n_of_block - home_loc[1], home_loc[0] - 1]
if report:
if num == 0:
print("Unfortunately, there is no delivery service in your current location.")
else:
print(f'Cool, {int(num)} pizzerias could cover your current location.')
return num
def check_area(self, loc: Sequence, radius: int, report=False):
"""
This method takes a location coordinate and a radius and search the delivery services around this specified area.
:param loc: A tuple of integers.
:param radius: An integer.
:param report: A boolean that indicates whether or not print a report.
return:
- A sub-matrix of the pizzerias matrix which is created in terms of specified range.
- A maximum in this area.
- A set of cells that have maximum.
"""
matrix = self.area_matrix(loc, radius)
x_initial, y_initial = loc
y_center = self.n_of_block - y_initial
x_center = x_initial - 1
low0 = y_center - radius if y_center - radius >= 0 else 0
left1 = x_center - radius if x_center - radius >= 0 else 0
maximum = self.maximum_in_matrix(matrix)
max_set = self.max_locations(matrix=matrix, d0_start=low0, d1_start=left1)
if report:
print(f"In the given area, there are {len(max_set)} areas where {maximum} Pizzerias delivery service "
f"can cover, they are: ", max_set)
return matrix, maximum, max_set
def check_city(self, report=False):
"""
This method returns the matrix, the maximum and a set of maximum tuple of cells.
:param report: A boolean indicating whether or not print report.
:return:
- The pizzerias matrix.
- A maximum in this the pizzerias matrix.
- A set of cells that have maximum.
"""
if report:
print(f"There are {len(self.max_locations())} area(s) where {self.maximum_in_matrix()} Pizzerias can cover, "
f"they are: ", self.max_locations())
        return self.pizzerias_matrix, self.maximum_in_matrix(), self.max_locations()
| 42.728814 | 123 | 0.627793 | 7,520 | 0.994314 | 0 | 0 | 521 | 0.068888 | 0 | 0 | 3,689 | 0.487769 |
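A small smoke test of the searcher above, using a hypothetical 5 by 5 city with two shops: one at (3, 3) with delivery radius 2 and one at (1, 1) with radius 1:

```python
searcher = PizzeriasSearcher(
    n_of_block=5,
    shop_covers=[((3, 3), 2), ((1, 1), 1)],
)

searcher.check_location((3, 3), report=True)      # cell covered by the first shop
matrix, best, cells = searcher.check_city(report=True)
print(best, cells)                                # max coverage and its cells
```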
26d8feef12cddd2dca60e0f08ac5f863599108a2 | 1,213 | py | Python | analysis/hist_javelin.py | LiyrAstroph/MICA | 2592b8ad3011880898f557a69b22cad63fcd47e0 | ["MIT"] | 1 | 2016-10-25T06:32:33.000Z | 2016-10-25T06:32:33.000Z | analysis/hist_javelin.py | LiyrAstroph/MICA | 2592b8ad3011880898f557a69b22cad63fcd47e0 | ["MIT"] | null | null | null | analysis/hist_javelin.py | LiyrAstroph/MICA | 2592b8ad3011880898f557a69b22cad63fcd47e0 | ["MIT"] | 3 | 2016-12-29T06:04:13.000Z | 2020-04-12T11:48:42.000Z |
import numpy as np
import matplotlib.pyplot as plt
import corner
mcmc = np.loadtxt("mychain1.dat")
ntheta = mcmc.shape[1]
fig = plt.figure(1, figsize=(15, 6))
ax = fig.add_subplot(231)
ax.hist(mcmc[:, 0]/np.log(10.0), 100, normed=True, range=(-0.9, -0.1))
ax = fig.add_subplot(232)
ax.hist(mcmc[:, 1]/np.log(10.0), 100, normed=True, range=(0.0, 2.0))
ax = fig.add_subplot(234)
ax.hist(mcmc[:, 2], 100, normed=True, range=(1.0, 2.8))
ax = fig.add_subplot(235)
ax.hist(mcmc[:, 3], 100, normed=True, range=(0.0, 1.2))
ax = fig.add_subplot(236)
ax.hist(mcmc[:, 4], 100, normed=True, range=(5, 13))
mcmc = np.loadtxt("../data/mcmc.txt")
ntheta = mcmc.shape[1]
nb = 20000
fig = plt.figure(2, figsize=(15, 6))
ax = fig.add_subplot(231)
ax.hist( (mcmc[nb:, 1]+0.5*mcmc[nb:, 2]-0.5*np.log(2.0))/np.log(10.0), 100, normed=True, range=(-0.9, -0.1))
ax = fig.add_subplot(232)
ax.hist(mcmc[nb:, 2]/np.log(10), 100, normed=True, range=(0.0, 2.0))
ax = fig.add_subplot(234)
ax.hist(mcmc[nb:, 5], 100, normed=True, range=(1.0, 2.8))
ax = fig.add_subplot(235)
ax.hist(mcmc[nb:, 3], 100, normed=True, range=(0.0, 1.2))
ax = fig.add_subplot(236)
ax.hist(mcmc[nb:, 4], 100, normed=True, range=(5, 13))
plt.show()
| 24.26 | 108 | 0.635614 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.026381 |
26da85c2640497939b911d5705595d7671906491 | 1,158 | py | Python | tests/test_stats.py | janjaappape/pastas | 521b27efd921e240df0717038f8389d62099b8ff | ["MIT"] | 252 | 2017-01-25T05:48:53.000Z | 2022-03-31T17:46:37.000Z | tests/test_stats.py | janjaappape/pastas | 521b27efd921e240df0717038f8389d62099b8ff | ["MIT"] | 279 | 2017-02-14T10:59:01.000Z | 2022-03-31T09:17:37.000Z | tests/test_stats.py | janjaappape/pastas | 521b27efd921e240df0717038f8389d62099b8ff | ["MIT"] | 57 | 2017-02-14T10:26:54.000Z | 2022-03-11T14:04:48.000Z |
import numpy as np
import pandas as pd
import pastas as ps
def acf_func(**kwargs):
index = pd.to_datetime(np.arange(0, 100, 1), unit="D", origin="2000")
data = np.sin(np.linspace(0, 10 * np.pi, 100))
r = pd.Series(data=data, index=index)
acf_true = np.cos(np.linspace(0.0, np.pi, 11))[1:]
acf = ps.stats.acf(r, lags=np.arange(1.0, 11.), min_obs=1, **kwargs).values
return acf, acf_true
def test_acf_rectangle():
acf, acf_true = acf_func(bin_method="rectangle")
assert abs((acf - acf_true)).max() < 0.05
def test_acf_gaussian():
acf, acf_true = acf_func(bin_method="gaussian")
assert abs((acf - acf_true)).max() < 0.05
def test_runs_test():
"""
http://www.itl.nist.gov/div898/handbook/eda/section3/eda35d.htm
True Z-statistic = 2.69
Read NIST test data
"""
data = pd.read_csv("tests/data/nist.csv")
test, _ = ps.stats.runs_test(data)
assert test[0] - 2.69 < 0.02
def test_stoffer_toloi():
res = pd.Series(index=pd.date_range(start=0, periods=1000, freq="D"),
data=np.random.rand(1000))
_, pval = ps.stats.stoffer_toloi(res)
assert pval > 1e-10
| 27.571429 | 79 | 0.638169 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 185 | 0.159758 |
26db23f57ee2cf9c420d9e5404d2b60d7671991a | 320 | py | Python | venv/lib64/python3.8/site-packages/tld/registry.py | nrfkhira/dnx-engine | 99a326d83058bcfe54a0f455672d90637fe753c6 | ["MIT"] | null | null | null | venv/lib64/python3.8/site-packages/tld/registry.py | nrfkhira/dnx-engine | 99a326d83058bcfe54a0f455672d90637fe753c6 | ["MIT"] | null | null | null | venv/lib64/python3.8/site-packages/tld/registry.py | nrfkhira/dnx-engine | 99a326d83058bcfe54a0f455672d90637fe753c6 | ["MIT"] | null | null | null |
import warnings
from .base import Registry
__author__ = "Artur Barseghyan"
__copyright__ = "2013-2021 Artur Barseghyan"
__license__ = "MPL-1.1 OR GPL-2.0-only OR LGPL-2.1-or-later"
__all__ = ("Registry",)
warnings.warn(
"The `Registry` class is moved from `tld.registry` to `tld.base`.",
DeprecationWarning,
)
| 24.615385 | 71 | 0.721875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 168 | 0.525 |
26dc6cf0e3afad0c2ebf41ec4b792f1e330897c5 | 2,963 | py | Python | hvo_api/model/gps.py | wtollett-usgs/hvo_api | cdd39cb74d28a931cac4b843a71c5d8435f4620c | ["CC0-1.0"] | null | null | null | hvo_api/model/gps.py | wtollett-usgs/hvo_api | cdd39cb74d28a931cac4b843a71c5d8435f4620c | ["CC0-1.0"] | null | null | null | hvo_api/model/gps.py | wtollett-usgs/hvo_api | cdd39cb74d28a931cac4b843a71c5d8435f4620c | ["CC0-1.0"] | null | null | null |
# -*- coding: utf-8 -*-
from valverest.database import db7 as db
from sqlalchemy.ext.hybrid import hybrid_property
class Solution(db.Model):
__tablename__ = 'solutions'
__bind_key__ = 'gps'
sid = db.Column(db.Integer, primary_key=True)
cid = db.Column(db.Integer, primary_key=True)
x = db.Column(db.Float)
y = db.Column(db.Float)
z = db.Column(db.Float)
sxx = db.Column(db.Float)
syy = db.Column(db.Float)
szz = db.Column(db.Float)
sxy = db.Column(db.Float)
sxz = db.Column(db.Float)
syz = db.Column(db.Float)
# Relationships
source = db.relationship('GPSSource', uselist=False)
channel = db.relationship('GPSChannel', uselist=False)
class GPSSource(db.Model):
__tablename__ = 'sources'
__bind_key__ = 'gps'
sid = db.Column(db.Integer, db.ForeignKey('solutions.sid'),
primary_key=True)
name = db.Column(db.String(255))
hash = db.Column(db.String(32))
date0 = db.Column('j2ksec0', db.Float, primary_key=True)
date1 = db.Column('j2ksec1', db.Float, primary_key=True)
rid = db.Column(db.Integer)
# Relationships
rank = db.relationship('GPSRank', uselist=False)
@hybrid_property
def avgdate(self):
return (self.date0 + self.date1) / 2
class GPSChannel(db.Model):
__tablename__ = 'channels'
__bind_key__ = 'gps'
cid = db.Column(db.Integer, db.ForeignKey('solutions.cid'),
primary_key=True)
code = db.Column(db.String(16))
name = db.Column(db.String(255))
lon = db.Column(db.Float)
lat = db.Column(db.Float)
height = db.Column(db.Float)
ctid = db.Column(db.Integer)
class GPSRank(db.Model):
__tablename__ = 'ranks'
__bind_key__ = 'gps'
rid = db.Column(db.Integer, db.ForeignKey('sources.rid'), primary_key=True)
name = db.Column(db.String(24))
rank = db.Column(db.Integer)
class GPSDataPoint(object):
def __init__(self, t, r, x, y, z, sxx, syy, szz, sxy, sxz, syz, nlen):
self._t = t
self._r = r
self._x = x
self._y = y
self._z = z
self._sxx = sxx
self._syy = syy
self._szz = szz
self._sxy = sxy
self._sxz = sxz
self._syz = syz
self._nlen = nlen
@property
def t(self):
return self._t
@property
def r(self):
return self._r
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def z(self):
return self._z
@property
def sxx(self):
return self._sxx
@property
def syy(self):
return self._syy
@property
def szz(self):
return self._szz
@property
def sxy(self):
return self._sxy
@property
def sxz(self):
return self._sxz
@property
def syz(self):
return self._syz
@property
def nlen(self):
return self._nlen
| 22.44697 | 79 | 0.59838 | 2,833 | 0.956126 | 0 | 0 | 702 | 0.236922 | 0 | 0 | 203 | 0.068512 |
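GPSDataPoint above is several dozen lines of read-only property boilerplate. A frozen dataclass expresses the same contract in a few lines; this is a sketch of the alternative, not the deployed model:

```python
from dataclasses import dataclass

@dataclass(frozen=True)
class GPSDataPointAlt:
    t: float
    r: float
    x: float
    y: float
    z: float
    sxx: float
    syy: float
    szz: float
    sxy: float
    sxz: float
    syz: float
    nlen: int
```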
26ddb52d2be72d7d4dbeca2609c7ac5ce525625e | 2,091 | py | Python | SingleIRdetection/get_data.py | biqute/QTLab2122 | 4d53d4c660bb5931615d8652e698f6d689a4dead | ["MIT"] | 3 | 2021-11-30T18:41:11.000Z | 2022-03-19T16:52:35.000Z | SingleIRdetection/get_data.py | biqute/QTLab2122 | 4d53d4c660bb5931615d8652e698f6d689a4dead | ["MIT"] | null | null | null | SingleIRdetection/get_data.py | biqute/QTLab2122 | 4d53d4c660bb5931615d8652e698f6d689a4dead | ["MIT"] | null | null | null |
from instruments import VNA_handler, Fridge_handler
import os
import time
from datetime import date, datetime
today = date.today()
d1 = today.strftime("_%d_%m")
directory = "data"+d1
dir_path=os.path.join(os.path.dirname(os.path.abspath(__file__)),directory)
if not os.path.isdir(dir_path):
try:
os.mkdir(directory)
except:
pass
VNA_lab=VNA_handler()
Fridge=Fridge_handler()
temps=[]
freqs1=[]
freqs2=[]
r = Fridge.execute("C3")
file_log = open(directory + "\\log.txt", "w")
def log_sensori():
file_log.write(f"\n{datetime.now():%H:%M:%S}")
for i in range(0, 36):
file_log.write(f"\n\tsens({i}): {Fridge.get_T(i)}")
with open('temperatures_gap.txt', encoding='utf-8') as file:
for line in file:
line = line.replace('\n', '')
temps.append(int(line))
with open('frequency_ranges_gap_1.txt', encoding='utf-8') as file:
for line in file:
line = line.replace('\n', '')
splitted = [float(x) for x in line.split('\t')]
freqs1.append(splitted)
with open('frequency_ranges_gap_2.txt', encoding='utf-8') as file:
for line in file:
line = line.replace('\n', '')
splitted = [float(x) for x in line.split('\t')]
freqs2.append(splitted)
for T in temps:
try:
print("Set temp: " + str(T))
print(f"{datetime.now():%H:%M:%S}\tsens_1:{Fridge.get_T(1)}\tsens_2:{Fridge.get_T(2)}\tsens_3:{Fridge.get_T(3)}\tG1: {Fridge.get_T(14)}\tG2: {Fridge.get_T(15)}")
log_sensori()
time.sleep(10)
Fridge.wait_for_T(T)
if T >= 200:
freqs = freqs2
else:
freqs = freqs1
for idx,f in enumerate(freqs):
file_name=str(T)+'mK_range'+str(idx+1)+'.txt'
print("Set freqs: " + str(f[0]) + " - "+ str(f[1]))
VNA_lab.set_sweep_freq(f[0],f[1])
VNA_lab.inst.write('AVERREST;')
time.sleep(40)
VNA_lab.save_sweep_data(directory + '\\' + file_name, 'polar')
    except Exception:
        # skip this temperature setpoint if the sweep fails and move on
        pass
log_sensori()
Fridge.set_T(0)
log_sensori()
file_log.close()
| 27.155844 | 169 | 0.595887 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 444 | 0.212339 |
26ddc48f78a12f6195556b4fffb431166aa3a248 | 1,356 | py | Python | repos.py | gigamonkey/git-utils | ac26ccab836b276fb7061167b4b2dc2a6bd87e66 | ["BSD-3-Clause"] | null | null | null | repos.py | gigamonkey/git-utils | ac26ccab836b276fb7061167b4b2dc2a6bd87e66 | ["BSD-3-Clause"] | 1 | 2021-05-04T19:45:16.000Z | 2021-05-04T19:45:16.000Z | repos.py | gigamonkey/git-utils | ac26ccab836b276fb7061167b4b2dc2a6bd87e66 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python3
"""
Get a json dump of all the repos belonging to a GitHub org or user.
"""
import json
import os
import sys
from functools import reduce
import requests
url = "https://api.github.com/graphql"
token = os.environ["GITHUB_TOKEN"]
headers = {"Authorization": "bearer {}".format(token)}
FIELDS = [
"name",
"description",
"sshUrl",
"isArchived",
"isFork",
"isPrivate",
"pushedAt",
]
def query(who, after):
args = f'first:100, after:"{after}"' if after else "first:100"
fields = " ".join(FIELDS)
return f'query {{ organization(login: "{who}") {{ repositories({args}) {{ edges {{ cursor node {{{fields} defaultBranchRef {{ name }} }} }} }} }} }}'
def maybe_get(top, *path):
return reduce(lambda d, k: None if d is None else d.get(k), path, top)
def node(edge):
n = edge["node"]
return {
**{f: n.get(f) for f in FIELDS},
"defaultBranch": maybe_get(n, "defaultBranchRef", "name"),
}
if __name__ == "__main__":
who = sys.argv[1]
edges = True
after = None
while edges:
r = requests.post(url, json={"query": query(who, after)}, headers=headers)
edges = json.loads(r.text)["data"]["organization"]["repositories"]["edges"]
for e in edges:
print(json.dumps(node(e)))
after = edges[-1]["cursor"]
| 22.229508 | 153 | 0.597345 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 533 | 0.393068 |
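query() above splices the cursor and field list into a GraphQL document with an f-string. For the first page (after=None) and a hypothetical login "octocat" it emits:

```python
q = query("octocat", None)            # first page, so args is just first:100
assert q.startswith(
    'query { organization(login: "octocat") '
    '{ repositories(first:100) { edges { cursor node {'
)
```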
26de76c7a526dbcb257d0562f65b8f5f56302812 | 994 | py | Python | tfLego/logger/BasicLogger.py | FrancescoSaverioZuppichini/tfLego | 485653eff6d3b8c6677b600a4e0d3623c844749f | ["MIT"] | null | null | null | tfLego/logger/BasicLogger.py | FrancescoSaverioZuppichini/tfLego | 485653eff6d3b8c6677b600a4e0d3623c844749f | ["MIT"] | null | null | null | tfLego/logger/BasicLogger.py | FrancescoSaverioZuppichini/tfLego | 485653eff6d3b8c6677b600a4e0d3623c844749f | ["MIT"] | null | null | null |
class BasicLogger:
def __init__(self):
self.loss_history = []
self.accuracy_history = []
self.val_loss_history = []
self.val_accuracy_history = []
self.initialise()
def initialise(self):
self.total_loss = 0
self.total_accuracy = 0
self.current = 0
def log_batch(self, loss, outputs, accuracy, *args, **kwargs):
self.current += 1
self.total_loss += loss
self.total_accuracy += accuracy
def log_epoch(self, i, X, is_val=False, *args, **kwargs):
loss = self.total_loss / len(X)
accuracy = self.total_accuracy / len(X)
if(is_val):
self.val_loss_history.append(loss)
self.val_accuracy_history.append(accuracy)
else:
self.loss_history.append(loss)
self.accuracy_history.append(accuracy)
print('EPOCH: {0}. AVG Loss: {1:0.4f} Acc: {2:0.4f}'.format(i,loss, accuracy))
self.initialise()
| 23.116279 | 86 | 0.585513 | 991 | 0.996982 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.046278 |
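A minimal training-loop sketch around the logger above; the loss and accuracy numbers are made up:

```python
logger = BasicLogger()
batches = [(0.9, None, 0.5), (0.7, None, 0.6), (0.5, None, 0.7)]

for loss, outputs, acc in batches:     # one epoch of three batches
    logger.log_batch(loss, outputs, acc)

logger.log_epoch(0, batches)           # averages over len(batches), resets totals
print(logger.loss_history, logger.accuracy_history)
```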
26def15c65ab2e4480b9091dca33bf04179a4722 | 3,705 | py | Python | test_package/conanfile.py | sintef-ocean/conan-casadi | 70a14829ca3b3ec4cdff8b254e3c060b345c1e79 | ["MIT"] | null | null | null | test_package/conanfile.py | sintef-ocean/conan-casadi | 70a14829ca3b3ec4cdff8b254e3c060b345c1e79 | ["MIT"] | null | null | null | test_package/conanfile.py | sintef-ocean/conan-casadi | 70a14829ca3b3ec4cdff8b254e3c060b345c1e79 | ["MIT"] | null | null | null |
from conans import ConanFile, CMake, tools, RunEnvironment
class CasadiTestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = ("cmake_paths", "cmake", "virtualrunenv", "virtualenv")
_cmake = None
def _configure_cmake(self):
if self._cmake is None:
self._cmake = CMake(self)
cmake = self._cmake
# TODO: non-free interfaces
# ampl, cplex, gurobi, knitro, ooqp, snopt, sqic, worhp
# TODO: swig_json, swig_matlab, swig_octave, clang_jit
# TODO: opencl, hpmpc, blasfeo, dsdp(?)
# LinSol
cmake.definitions["CASADI_CONAN_CSPARSE"] = self.options["casadi"].csparse
cmake.definitions["CASADI_CONAN_LAPACK"] = self.options["casadi"].lapack
cmake.definitions["CASADI_CONAN_MUMPS"] = self.options["casadi"].mumps
cmake.definitions["CASADI_CONAN_HSL"] = self.options["casadi"].hsl
# Conic
cmake.definitions["CASADI_CONAN_HPMPC"] = self.options["casadi"].hpmpc
cmake.definitions["CASADI_CONAN_OSQP"] = self.options["casadi"].osqp
cmake.definitions["CASADI_CONAN_QPOASES"] = self.options["casadi"].qpoases
cmake.definitions["CASADI_CONAN_SUPERSCS"] = self.options["casadi"].superscs
# NLP
cmake.definitions["CASADI_CONAN_BLOCKSQP"] = self.options["casadi"].blocksqp
cmake.definitions["CASADI_CONAN_IPOPT"] = self.options["casadi"].ipopt
# Integration
cmake.definitions["CASADI_CONAN_SUNDIALS"] = self.options["casadi"].sundials
# Other
cmake.definitions["CASADI_CONAN_SLICOT"] = self.options["casadi"].slicot
cmake.definitions["CASADI_CONAN_BLASFEO"] = self.options["casadi"].blasfeo
cmake.definitions["CASADI_CONAN_BONMIN"] = self.options["casadi"].bonmin
cmake.definitions["CASADI_CONAN_CBC"] = self.options["casadi"].cbc
cmake.definitions["CASADI_CONAN_CLP"] = self.options["casadi"].clp
cmake.definitions["CASADI_CONAN_DSDP"] = self.options["casadi"].dsdp
cmake.definitions["CASADI_CONAN_TINYXML"] = self.options["casadi"].tinyxml
self._cmake.configure()
return self._cmake
def build(self):
if not tools.cross_building(self.settings):
cmake = self._configure_cmake()
cmake.build()
def test(self):
if tools.cross_building(self.settings):
print("NOT RUN (cross-building)")
return
# swig_python
if self.options["casadi"].swig_python:
self.output.info("Try to load 'casadi' python module")
try:
import casadi
A = casadi.SX.eye(2)
if casadi.trace(A) == 2:
self.output.info("Completed conanized casadi climax")
except ModuleNotFoundError:
try:
import numpy
except ModuleNotFoundError:
self.output.error("Casadi requires 'numpy', but it was not found")
self.output.error("Unable to proplerly load python casadi module")
self.output.info("Run consumer tests for library interfaces")
cmake = self._configure_cmake()
if self.options["casadi"].hpmpc:
self.output.warn("HPMPC plugin interface is not tested")
if self.options["casadi"].dsdp:
self.output.warn("DSDP interface is not tested(?)")
env_build = RunEnvironment(self)
with tools.environment_append(env_build.vars):
cmake.test()
self.output.info("Casadi OK!")
| 42.102273 | 88 | 0.612146 | 3,643 | 0.983266 | 0 | 0 | 0 | 0 | 0 | 0 | 1,164 | 0.31417 |
26e0374db2378f11fc9bfc31927fa2a8ccdcf58c | 1,995 | py | Python | src/blog/templatetags/timediffer.py | codewithrakib/first-django-blog | 339f5833025b0758f391c7c8e0979ca2eefd1b52 | ["MIT"] | null | null | null | src/blog/templatetags/timediffer.py | codewithrakib/first-django-blog | 339f5833025b0758f391c7c8e0979ca2eefd1b52 | ["MIT"] | 7 | 2021-03-19T02:00:00.000Z | 2022-02-10T10:26:38.000Z | src/blog/templatetags/timediffer.py | codewithrakib/first-django-blog | 339f5833025b0758f391c7c8e0979ca2eefd1b52 | ["MIT"] | null | null | null |
from django import template
from datetime import datetime
from datetime import date
from datetime import time
from datetime import timedelta
register = template.Library()
@register.filter
def timediffer(now, posttime):
posttime = posttime.replace(tzinfo=None)
timedif= now -posttime
timestr=""
if timedif.days >= 365:
gettime = (int)(timedif.days/365)
if gettime==1:
timestr = f"about {gettime} year ago"
else:
timestr = f"about {gettime} years ago"
elif timedif.days >= 30 and timedif.days < 365:
gettime = (int)(timedif.days/30)
if gettime==1:
timestr= f"about {gettime} month ago"
else:
timestr= f"about {gettime} months ago"
elif timedif.days>=7 and timedif.days < 30:
gettime = (int)(timedif.days/7)
if gettime==1:
timestr=f"about {gettime} week ago"
else:
timestr=f"about {gettime} weeks ago"
elif timedif.days>=1 and timedif.days < 7:
gettime = (int)(timedif.days)
if gettime==1:
timestr=f"about {gettime} day ago"
else:
timestr=f"about {gettime} days ago"
elif timedif.seconds>=3600 and timedif.days < 1:
gettime = (int)(timedif.seconds/3600)
if gettime==1:
timestr=f"about {gettime} hour ago"
else:
timestr=f"about {gettime} hours ago"
elif timedif.seconds>=60 and timedif.seconds < 3600:
gettime = (int)(timedif.seconds/60)
if gettime==1:
timestr = f"about {gettime} minute ago"
else:
timestr = f"about {gettime} minutes ago"
elif timedif.seconds>=1 and timedif.seconds < 60:
gettime = (int)(timedif.seconds)
if gettime==1:
timestr = f"about {gettime} second ago"
else:
timestr = f"about {gettime} seconds ago"
else:
timestr='now'
    return timestr
| 30.227273 | 56 | 0.578947 | 0 | 0 | 0 | 0 | 1,822 | 0.913283 | 0 | 0 | 400 | 0.200501 |
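Worked through the branches above: a three-day gap lands in the days branch and a 90-second gap in the minutes branch. A standalone check, assuming the filter function is importable:

```python
from datetime import datetime, timedelta

now = datetime(2021, 1, 10)
assert timediffer(now, now - timedelta(days=3)) == "about 3 days ago"
assert timediffer(now, now - timedelta(seconds=90)) == "about 1 minute ago"
```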
26e08af32bb2b5d0bbed9fd354924f064bde0ecf | 254 | py | Python | ma.py | AmandaKhol/DLT-Blockchain | ee464b0d7b55bffe791eb0b814513620430bfa1c | ["MIT"] | 1 | 2021-04-05T18:59:04.000Z | 2021-04-05T18:59:04.000Z | ma.py | AmandaKhol/DLT-Blockchain | ee464b0d7b55bffe791eb0b814513620430bfa1c | ["MIT"] | null | null | null | ma.py | AmandaKhol/DLT-Blockchain | ee464b0d7b55bffe791eb0b814513620430bfa1c | ["MIT"] | null | null | null |
"""
title : ma.py
description : Marshmallow object
author : Amanda Garcia-Garcia
version : 0
usage : python server_api.py
python_version : 3.6.1
"""
from flask_marshmallow import Marshmallow
ma = Marshmallow()
| 19.538462 | 41 | 0.629921 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 191 | 0.751969 |
26e3cb56bf5c43ffe1ebc53ce33bf565445ae974 | 6,107 | py | Python | FGMabiotic.py | tjscott214/long-term-conflict-with-1nFGM | 1c701e83c71ebe21fbc1192ca3d523a000614819 | ["MIT"] | 2 | 2019-09-13T13:46:33.000Z | 2020-05-14T17:21:09.000Z | FGMabiotic.py | tjscott214/long-term-conflict-with-1nFGM | 1c701e83c71ebe21fbc1192ca3d523a000614819 | ["MIT"] | null | null | null | FGMabiotic.py | tjscott214/long-term-conflict-with-1nFGM | 1c701e83c71ebe21fbc1192ca3d523a000614819 | ["MIT"] | null | null | null |
#!/usr/bin/env python
### This program simulates Fisher's geometric model with abiotic change equal to fixations during conflict simulations (from FGMconflict.py) ###
### python3 FGMabiotic.py -help for input options ###
### Written by Trey J Scott 2018 ###
### python --version ###
### Python 3.5.2 :: Anaconda 4.2.0 (x86_64) ###
# Import programs
import random
import numpy as np
from scipy.spatial import distance as dist
from scipy.stats import norm
import scipy.stats as stats
import matplotlib.pyplot as plt
import pandas as pd
import argparse
import scipy.special as spc
from itertools import groupby
### FUNCTIONS ###
# Function to generate random mutations with a specified average size
def generate_random_vector():
if distribution == 'uniform':
radial = np.random.uniform(0,uni)
    if distribution == 'chi':
        # note: the degrees-of-freedom variable `n` is never defined in this
        # script, so only the other distribution branches run without error
        radial = np.random.chisquare(n)
if distribution == 'exponential':
radial = np.random.exponential(expo)
if distribution == 'normal':
radial = abs(np.random.normal(0, sd_1d))
vector = np.array(radial * (-1)**random.randint(1,2))
return radial, vector
# Gaussian fitness function
def fitness_function(distance,d):
return np.exp(-(d*(distance**Q)))
# Calculates probability of fixation for new mutations
def calculate_u(new_distance, old_distance, N = 'infinite', denominator = 0.5):
fitness_new = fitness_function(new_distance, denominator)
fitness_old = fitness_function(old_distance, denominator)
s_coefficient = (fitness_new/fitness_old) - 1
if N == 'infinite':
probability_of_fixation = (1 - np.exp(-2*s_coefficient))
elif N > 0:
probability_of_fixation = ((1 - np.exp(-2*s_coefficient))/(1 - np.exp(-4*s_coefficient*N)))
return probability_of_fixation, s_coefficient
# Functon that simulates adaptation to a moving optimum with Fisher's geometric model
def abiotic_change(position, optimum, mut_list, samp):
counter = 0
distance_to_optimum = dist.euclidean(position, optimum)
moving_optimum = optimum
for d in range(0,len(mut_list)):
moving_optimum = moving_optimum + (mut_list[d])*((-1)**(random.randint(1,2)))
distance_to_optimum = dist.euclidean(position, moving_optimum)
mutation_size, vector = generate_random_vector()
future_position = position + vector
new_dist_to_optimum = dist.euclidean(future_position, moving_optimum)
u, s = calculate_u(new_dist_to_optimum, distance_to_optimum, N_1,d1)
if random.random() <= u:
mutation_fitness = vector
position = future_position
distance_to_optimum = dist.euclidean(position, moving_optimum)
if counter >= burn_in:
output.write(str(d) + ',' + str(samp) + ',' + str(position[0]) + ',' + str(s) + ',' + str(mutation_size) + ',' + str(fitness_function(distance_to_optimum,d1)) + ',Abiotic Change,Fixed\n')
else:
if counter >= burn_in:
output.write(str(d) + ',' + str(samp) + ',' + str(position[0]) + ',' + str(s) + ',' + str(mutation_size)+ ',' + str(fitness_function(distance_to_optimum,d1)) + ',Abiotic Change,Unfixed\n')
counter += 1
# Runs simulations multiple times
def run_simulations(position, num_samples):
df = pd.read_csv(shake_file)
optimum = np.array([(-(1/d1)*np.log(r))**(1/Q)])
master_mut_list = df.groupby('Population')['Mutation'].apply(list)[1]
index = 0
for sample in range(num_samples):
mut_list = master_mut_list[index:index + m]
abiotic_change(position, optimum, mut_list, sample)
index += m
output.close()
### SET ARGUMENTS
ap = argparse.ArgumentParser()
ap.add_argument('-x', '--samples', help = 'number of resamples', type = int)
ap.add_argument('-p', '--population_size1', help = 'population size for one population', type = int)
ap.add_argument('-pp', '--population_size2', help = 'population size for second population', type = int)
ap.add_argument('-m', '--mutations', help = 'mutation distribution for mutation vectors')
ap.add_argument('-q', '--Q', help = 'changes Q parameter in fitness function', type = float)
ap.add_argument('-z', '--attempts', help = 'number of generations per walk', type = int)
ap.add_argument('-c', '--init_fit', help = 'changes the distance optimal values by a factor of the input value', type = float)
ap.add_argument('-r', '--rate', help = 'mutation rate for population 1', type = int)
ap.add_argument('-b', '--burn_in', help = 'define burn in period for equilibrium', type = int)
ap.add_argument('-a', '--ave_mut', help = 'average mutation norm', type = float)
ap.add_argument('-d', '--selection', help = 'Adjust strength of selection', type = float)
ap.add_argument('-mut', '--changes', help = 'mutation file for moving optimum', type = str)
args = ap.parse_args()
# get arguments
if args.samples:
samples = args.samples
else:
samples = 500
# Define initial position and optima
position1 = np.zeros(1)
position = position1
position2 = position1
if args.init_fit:
r = 1-args.init_fit
else:
r = 1-0.2
# Set average norm size for mutations
if args.ave_mut:
average_mutation = args.ave_mut
else:
average_mutation = 0.1
# Get population sizes
# Population 1
if args.population_size1:
N_1 = 10**(args.population_size1)
else:
N_1 = 'infinite'
# Population 2
if args.population_size2:
N_2 = 10**(args.population_size2)
else:
N_2 = 'infinite'
# Get distributions
# Mutation distribution (default is uniform)
if args.mutations:
distribution = args.mutations
else:
distribution = 'normal'
# Number of mutations
if args.attempts:
m = args.attempts
else:
m = 50000
# Get mutation rate
if args.rate:
rate = args.rate
else:
rate = 1
# Calculate normalization factor (used in mutation function)
sd_1d = average_mutation*((np.pi)**(1/2))/(2**(1/2))
uni = 2*average_mutation
expo = average_mutation
if args.burn_in:
burn_in = args.burn_in
else:
burn_in = 0
if args.Q:
Q = args.Q
q_string = 'Q_' + str(Q) + '_'
else:
Q = 2
q_string = ''
if args.selection:
d1 = args.selection
else:
d1 = 0.5
if args.changes:
shake_file = args.changes[:-7] + 'mut.csv'
# Open output file
output = open('abiotic_data.csv', 'w')
output.write('Iteration,Simulation,z,s,Mutation Size,Fitness,Population,Status\n')
### Run simulations
run_simulations(position, samples)
| 34.117318 | 192 | 0.7159 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,913 | 0.313247 |
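Numbers through the two formulas above: with Q = 2 and d = 0.5 the fitness function is the Gaussian w(z) = exp(-0.5 z^2), and for an 'infinite' population the fixation probability is Haldane's 1 - e^(-2s), roughly 2s for small s. A standalone recomputation, not part of the simulation:

```python
import numpy as np

Q, d = 2, 0.5
fitness = lambda z: np.exp(-d * z**Q)

z_old, z_new = 1.0, 0.9                  # mutation steps toward the optimum
s = fitness(z_new) / fitness(z_old) - 1  # selection coefficient
u = 1 - np.exp(-2 * s)                   # fixation probability, N = 'infinite'
print(f"s = {s:.4f}, u = {u:.4f}")       # s ~ 0.0997, u ~ 0.1807
```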
26e5678c410804c82e1a66c1a1c30cc2e8b118d5 | 873 | py | Python | epdif.py | cvasqxz/rpi-epd | b7921190dd84b1187364902f0e3059cba5a1973f | ["MIT"] | null | null | null | epdif.py | cvasqxz/rpi-epd | b7921190dd84b1187364902f0e3059cba5a1973f | ["MIT"] | null | null | null | epdif.py | cvasqxz/rpi-epd | b7921190dd84b1187364902f0e3059cba5a1973f | ["MIT"] | null | null | null |
import spidev
import RPi.GPIO as GPIO
import time
import yaml
with open("config.yml", 'r') as f:
cfg = yaml.load(f, Loader=yaml.FullLoader)
# Pin definition
RST_PIN = cfg['pinout']['RST_PIN']
DC_PIN = cfg['pinout']['DC_PIN']
CS_PIN = cfg['pinout']['CS_PIN']
BUSY_PIN = cfg['pinout']['BUSY_PIN']
# SPI device, bus = 0, device = 0
SPI = spidev.SpiDev(0, 0)
def epd_digital_write(pin, value):
GPIO.output(pin, value)
def epd_digital_read(pin):
    return GPIO.input(pin)  # read the state of the requested pin
def epd_delay_ms(delaytime):
time.sleep(delaytime / 1000.0)
def spi_transfer(data):
SPI.writebytes(data)
def epd_init():
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(RST_PIN, GPIO.OUT)
GPIO.setup(DC_PIN, GPIO.OUT)
GPIO.setup(CS_PIN, GPIO.OUT)
GPIO.setup(BUSY_PIN, GPIO.IN)
SPI.max_speed_hz = 2000000
SPI.mode = 0b00
    return 0
| 21.292683 | 46 | 0.683849 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.150057 |
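A usage sketch for the interface above, runnable only on a Raspberry Pi wired per config.yml; the reset timing and busy polarity are assumptions to check against the panel datasheet:

```python
epd_init()                            # configure GPIO directions and SPI clock

# hardware reset pulse on the RST line
epd_digital_write(RST_PIN, GPIO.LOW)
epd_delay_ms(200)
epd_digital_write(RST_PIN, GPIO.HIGH)
epd_delay_ms(200)

while epd_digital_read(BUSY_PIN):     # wait until the panel reports idle
    epd_delay_ms(10)
```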
26e616bae86ed51b35013c799f67005f184552f2 | 2,469 | py | Python | main.py | amankumarjsr/BinanceDataScrapper | e3d56c4bd274a8e472de1fbe1c9603c9e94e1d14 | [
"Apache-2.0"
] | null | null | null | main.py | amankumarjsr/BinanceDataScrapper | e3d56c4bd274a8e472de1fbe1c9603c9e94e1d14 | [
"Apache-2.0"
] | null | null | null | main.py | amankumarjsr/BinanceDataScrapper | e3d56c4bd274a8e472de1fbe1c9603c9e94e1d14 | [
"Apache-2.0"
] | null | null | null | import requests
from bs4 import BeautifulSoup as bs
import pandas as pd
import datetime
import os
import zipfile
CoinName = input('Enter the coin name: ').upper()
duration = input('Enter the duration of data you want (1m,1h,2h): ').lower()
start_date = input('Enter the start date (dd-mm-yyyy): ')
end_date = input('Enter the end date (dd-mm-yyyy): ')
coin = requests.get('https://data.binance.vision/?prefix=data/spot/daily/klines/')
ucoin = bs(coin.content, 'html.parser')
start = datetime.datetime.strptime(start_date, "%d-%m-%Y")
end = datetime.datetime.strptime(end_date, "%d-%m-%Y")
date_generated = [start + datetime.timedelta(days=x) for x in range(0, (end-start).days)]
date_list=[]
for date in date_generated:
x=date.strftime("%Y-%m-%d")
date_list.append(x)
file_name_list= []
cols=['opening time', 'opening price','highest price','lowest price','closing price','volume','closing time','turnover','number of transactions','active buy volume','NA','NAN']
for item in date_list:
try:
file_name=(f'{CoinName}-{duration}-{item}.zip')
download_mainurl= (f'https://data.binance.vision/data/spot/daily/klines/{CoinName}/{duration}/{CoinName}-{duration}-{item}.zip')
download= requests.get(download_mainurl, allow_redirects= True)
        print(f'Scraping data of {item}')
with open(file_name, 'wb') as f:
f.write(download.content)
with zipfile.ZipFile(file_name, 'r') as zip_ref:
zip_ref.extractall('C:/Users/rocka/Desktop/Practice python/Binance data scrapper/data')
file_name_list.append(file_name+'.csv')
os.remove(file_name)
    except Exception:
print('skipped')
continue
master_df = pd.DataFrame()
for file in os.listdir('C:/Users/rocka/Desktop/Practice python/Binance data scrapper/data'):
    if file.endswith('.csv'):
        master_df = pd.concat([master_df, pd.read_csv('C:/Users/rocka/Desktop/Practice python/Binance data scrapper/data/' + file, names=cols)])
master_df.to_csv(f'{CoinName}-{duration}-master file.csv', index=False)
for file in os.listdir('C:/Users/rocka/Desktop/Practice python/Binance data scrapper/data'):
if file.endswith('.csv'):
os.remove('C:/Users/rocka/Desktop/Practice python/Binance data scrapper/data/'+file)
print('Data scraped successfully!!!')
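# A minimal alternative sketch, not used above: pandas can build the same
# end-exclusive date list in one call. `inclusive='left'` needs pandas >= 1.4
# (older versions use closed='left'); `date_strings` is a placeholder name.
def date_strings(start, end):
    """Illustrative only: '01-01-2021'..'05-01-2021' -> ['2021-01-01', ...]."""
    rng = pd.date_range(start=pd.to_datetime(start, format='%d-%m-%Y'),
                        end=pd.to_datetime(end, format='%d-%m-%Y'),
                        inclusive='left')
    return rng.strftime('%Y-%m-%d').tolist()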
| 34.291667 | 177 | 0.681247 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,011 | 0.409478 |
26e61f306df9220c42f34738c067514777287317 | 19,370 | py | Python | api/api.py | geoai-lab/GeoAnnotator | 6d5ee22888571f5ffefdb1d2f2455eaa9e5054f3 | [
"MIT"
] | 1 | 2022-02-14T20:43:41.000Z | 2022-02-14T20:43:41.000Z | api/api.py | geoai-lab/GeoAnnotator | 6d5ee22888571f5ffefdb1d2f2455eaa9e5054f3 | [
"MIT"
] | null | null | null | api/api.py | geoai-lab/GeoAnnotator | 6d5ee22888571f5ffefdb1d2f2455eaa9e5054f3 | [
"MIT"
] | null | null | null | from flask import Flask, jsonify, request, session,redirect, url_for
import bcrypt
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import func
from sqlalchemy.exc import IntegrityError
import os
from sqlalchemy.orm import load_only
from flask_bcrypt import Bcrypt
import urllib.parse
from itertools import groupby
from operator import attrgetter
import json
from flask_cors import CORS, cross_origin
from flask_session import Session
import redis
from werkzeug.utils import secure_filename
from datetime import datetime, timedelta, timezone
from models import db, tweet_database, User, LoginForm, Project, Submission, CompareSubmission
from dotenv import load_dotenv
from flask_login import LoginManager, login_required, login_user, current_user, logout_user
from sqlalchemy.orm import sessionmaker
import pandas as pd
import requests
from sqlalchemy.types import String, DateTime
import io
load_dotenv()
app = Flask(__name__, static_folder="../build", static_url_path='/')
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///HarveyTwitter.db"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.config["SECRET_KEY"] = "6236413AA53537DE57D1F6931653B"
app.config['SQLALCHEMY_ECHO'] = True
app.config['SESSION_TYPE'] = "filesystem" # TODO: move session storage to Redis; the filesystem backend has caused bugs here
app.config['SESSION_USE_SIGNER'] = True
#app.config['SESSION_COOKIE_NAME']
#app.config['SESSION_COOKIE_DOMAIN']
#app.config['SESSION_COOKIE_SECURE'] = True # enable to send session cookies over HTTPS only
bcrypt = Bcrypt(app) # enables bcrypt password hashing for the app
CORS(app, supports_credentials=True)
server_session = Session(app)
db.__init__(app)
with app.app_context():
db.create_all()
login_manager = LoginManager()
login_manager.init_app(app)
with app.app_context():
    # These commands run once at startup, before the app serves requests.
    # Replace the paths below with the commented alternatives when running:
    #   gunicorn3 -w 3 GeoAnnotator.api:app
optionsData = jsonify(json.load(open('../../createProjectOptions.json'))) # 'GeoAnnotator/api/createProjectOptions.json'
configurationsData = json.load(open('../../configuration_data.json')) # 'GeoAnnotator/api/configuration_data.json'
@login_manager.user_loader
def load_user(user_id):
"""
Loads current user data
---
"""
return User.query.filter_by(id=user_id).first()
@app.route('/')
def index():
"""
    Serves the application's index page.
    ---
    return:
        returns the index.html produced by React's build step.
"""
return app.send_static_file("index.html")
@app.route("/@me", methods = ["GET"]) # might need to change
def get_current_user():
"""
User session data is retrieved through this callback.
---
GET:
description: Get session data
security:
- Session Token
responses:
200:
content:
User/json
"""
if not session["project_name"]:
return jsonify({"error": "did not select project"}), 401
if not current_user.is_authenticated:
return jsonify({"error": "Unauthorized"}), 401
return jsonify({
"id": str(current_user.id),
"email": current_user.email,
"username": current_user.username,
"projectName":session["project_name"]
}),200
@app.route("/login", methods=["POST"])
def login():
"""
Function that handles login of user
---
POST:
        description: Log the user into the session
responses:
200:
description:
                    Successfully logged the user into the session.
            401:
                description:
                    The email/password did not match any record in the database.
"""
loginform = LoginForm()
email = request.json["email"]
password = request.json["password"]
project_name = request.json["project"]
session["project_name"] = project_name
user = User.query.filter_by(email=loginform.email.data).first()
if user is None:
return jsonify({"error": "Wrong Email/Password"}), 401
if not bcrypt.check_password_hash(user.password, loginform.password.data):
return jsonify({"error": "Wrong Email/Password"}), 401
login_user(user)
return jsonify({
"id": str(user.id),
"email": user.email
}),200
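# A minimal client-side sketch of the /login route above; it is not part of the
# API itself. The host, port, and credentials are placeholder assumptions.
def example_login(base_url='http://localhost:5000'):
    """Illustrative only: log in, then call /@me with the same session."""
    s = requests.Session()
    s.post(base_url + '/login', json={'email': 'user@example.com',
                                      'password': 'secret',
                                      'project': 'demo'})
    return s.get(base_url + '/@me').json()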
@app.route("/logout", methods=["POST"])
@login_required
def logout():
"""
Function that handles logout of user
---
POST:
        description: remove current user from the session
responses:
200:
description:
                    Successfully logged the user out of the session.
"""
logout_user() # flask logout library
return redirect("/", code=200) # successful log out will redirect to the homepage
@app.route("/createprojects", methods=["GET"])
@login_required
def create():
"""
    Returns the project-creation options (state GeoJSON and related data)
    used by the create-projects page.
    ---
    GET:
        responses:
            200:
                description:
                    Options data returned successfully.
"""
return optionsData, 200
@app.route("/project+descriptions", methods=["GET"])
def project_descriptions():
"""
    Returns all projects in the database that have not been deleted by the user.
---
GET:
responses:
200:
data:
{"project-name": <Project.project_name>, "geo_json":<Project.geo_json>}
"""
projects = Project.query.filter_by(isDeleted = 0).all()
print(projects)
list_of_projects = []
for project in projects:
list_of_projects.append({"project-name": project.project_name, "geo_json": project.geo_json})
return jsonify(list_of_projects), 200
@app.route("/createproject-submit", methods=["POST"])
@login_required
def createproject_submission():
"""
Creation of a new project
---
POST:
description: adds a new project item onto the Projects table of the database
responses:
200:
description:
new project added
409:
description:
* if the project name given already exists within the database
"""
projectName = request.json["Project Name"]
mapLayers = request.json["map-layers"]
project_exists = Project.query.filter_by(project_name = projectName).first() is not None
if(project_exists):
return jsonify({"error": "project already exists"}), 409
session['project_name'] = projectName
new_project = Project(project_name = projectName, geo_json = mapLayers, isDeleted = 0 )
db.session.add(new_project)
db.session.commit()
return jsonify({"success": "project created"}), 200
@app.route("/register", methods=["POST"])
def register_user():
"""
    Registers a new user by adding their credentials to the database.
---
POST:
description: Add new user in the database
responses:
200:
description:
                    The new username and password were added to the database.
409:
description:
* if the username used to register already exists in the database
* if the password entered and the password retyped do not match
"""
email = request.json["email"]
password = request.json["password"]
retype = request.json["retypepassword"]
username = request.json["username"]
user_exists = User.query.filter_by(email=email).first() is not None
if user_exists:
return jsonify({"error": "User already exists"}), 409
elif password != retype:
return jsonify({"error":"password do not match"}), 409
hashed_password = bcrypt.generate_password_hash(password)
new_user = User(email=email, username=username ,password=hashed_password)
db.session.add(new_user)
db.session.commit()
return jsonify({
"id": str(new_user.id),
"email": new_user.email
}), 200
@app.route('/comparison', methods =['GET'])
@login_required
def compare_data():
"""
    Obtain data for the comparison page.
    When a resolver requests data to compare, this method returns only
    submissions that the resolver has not resolved before (the value of
    the notYet_submitted variable).
---
GET:
responses:
200:
data:
list of data that the resolver can compare and resolve
format:
{
text:<tweet_database.text>,
submission_id:<Submission.submission_id>,
annotation:<Submission.annotation>,
username:<Submission.username>,
projectGeojson:<Project.geo_json>,
tweetid:<tweet_database.id>,
userid:<Submission.userid>
}
where current_user=Submission.id values are not in current_user=CompareSubmission.id values
"""
project_name = session["project_name"]
to_send_data = []
alreadySubmitted_ids = [idvid for subid in CompareSubmission.query.filter_by(userid = current_user.id).options(load_only(CompareSubmission.submissionid_1, CompareSubmission.submissionid_2)).all() for idvid in [subid.submissionid_1,subid.submissionid_2]]
# need to change the tweet id here later on
# grab submissions you haven't looked at yet
notYet_submitted = Submission.query.filter_by(project_name= project_name).filter(Submission.submission_id.notin_(alreadySubmitted_ids)) \
.join(tweet_database, Submission.tweetid == tweet_database.id) \
.join(Project, Submission.project_name == project_name) \
.filter_by(project_name = project_name).add_columns(tweet_database.text, Submission.submission_id, Submission.annotation,Submission.username, Project.geo_json, tweet_database.id, Submission.userid)
df = pd.DataFrame(notYet_submitted, columns = ["SubmissionObject","text","submission_id","annotation","username","geo_json","id","userid"]).astype(str)
    to_iterate = None # grab the first group of unique IDs
    # an alternate to the for loop below is df.groupby('id', sort=False).first()
for name, group in df.groupby('id',sort=False):
to_iterate = group
break
for index,filtered_submission in to_iterate.iterrows(): # each group is a tweet set
to_send_data.append({"text": filtered_submission.text,
"submission_id": str(filtered_submission.submission_id),
"annotation": json.loads(filtered_submission.annotation)["annotation"],
"username":filtered_submission.username,
"projectGeojson": json.loads(filtered_submission.geo_json),
"tweetid":str(filtered_submission.id),
"userid":str(filtered_submission.userid)})
return jsonify(to_send_data), 200
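# A minimal sketch isolating the "first group of unique IDs" step used in
# compare_data() above; `df` is assumed to be the DataFrame built inside that
# view, and `first_tweet_group` is a placeholder name.
def first_tweet_group(df):
    """Illustrative only: every submission row for the first unique tweet id."""
    first_id = df['id'].iloc[0]
    return df[df['id'] == first_id]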
@app.route('/api-grab/<tweetid>', methods=['GET'])
@login_required
def app_data(tweetid):
"""
    Obtain data for the annotation page.
    When an annotator requests data to annotate, this method returns only
    tweets that the annotator has not annotated previously.
---
@param:
tweetid: Grab the data in the database where Tweet_database.id == tweetid if this parameter exists.
---
GET:
responses:
200:
data:
data that the annotator can annotate
format:
{
id:<tweet_database.id>,
content:<tweet_data.text>,
neuro_result: Model rest api data,
project_description:{label:<Project.project_name>,geo_json:<Project.geo_json>}
}
409:
description:
                * If the model prediction link did not yield results (e.g. the response from the UB servers is not 200)
* If there is no project in session
"""
    submissions_exists = Submission.query.filter_by(userid = current_user.id).first() is not None
if(submissions_exists): # if User already annotated data before, find data that the user has not annotated before and return that
tweet_ids = [ids.tweetid for ids in Submission.query.filter_by(userid = current_user.id, project_name = session["project_name"]).options(load_only(Submission.tweetid)).all()]
tweets = tweet_database.query.filter_by(projectName = session["project_name"]).filter(tweet_database.id.notin_(tweet_ids)).first()
else: # It's the user's first time annotating, therefore pick the first tweet in the database
tweets = tweet_database.query.filter_by(projectName = session["project_name"]).first()
if(tweetid != 'any'):
tweets = tweet_database.query.filter_by(id = str(tweetid)).first()
content = tweets.text
project_name = session["project_name"]
if project_name: # if the session has a project, then query the project GeoJson
project_json = Project.query.filter_by(project_name = project_name).first()
else: # Since users must first register a project before signing in, this is extremely unlikely to occur.
return jsonify({"error": "No Project on session"}), 409
urlEncoded = urllib.parse.quote(tweets.text) #encode the text content of a tweet so that it may be converted into a url format
    toRequestModel = "{}={}".format(configurationsData['modelLink'], urlEncoded) # build the model request URL from configuration.json using the URL-encoded text
response = requests.get(toRequestModel)
if response.status_code != 200:
# If the model url link does not return a response of 200, send a 409 since we do not have model prediction data.
# Cases of where the code fires here is when the servers at the University at Buffalo are down.
return jsonify({"error": "Rest Api Model unable to grab data"}), 409
neuro_results_json = response.json()['annotation'] # data from the response
toSend = {'id': str(tweets.id),
'content': content,
'neuro_result':neuro_results_json,
'project_description': {"label":project_json.project_name, "geo_json": json.loads(project_json.geo_json)}}
return jsonify(toSend), 200
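# A minimal sketch of the model call made in app_data() above; it reuses the
# same configurationsData['modelLink'] key and returns None on any non-200
# response. `example_model_request` is a placeholder name.
def example_model_request(text):
    """Illustrative only: URL-encode tweet text and query the prediction model."""
    url = "{}={}".format(configurationsData['modelLink'], urllib.parse.quote(text))
    resp = requests.get(url)
    return resp.json()['annotation'] if resp.status_code == 200 else None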
@app.route('/uploadfile', methods=['POST'])
@login_required
def uploading_textFile():
"""
    Part of the create-project flow: Twitter data submitted by a user
    passes through this method to be preprocessed and stored in the
    database.
---
POST:
responses:
200:
description:
The data from tweets has been successfully preprocessed and should now be available in the database.
401:
description:
* Preprocessing failed due to data format.
"""
try:
projectName = request.form['projectName'] #The name of the project on which the user wishes to upload new tweets
project_exists = Project.query.filter_by(project_name = projectName).first() is not None
if project_exists: # if the project name already exists, then tell the user
return jsonify({"error":"Project Name Already Exists"}), 401
file = request.files['file']
df = pd.read_json(file.stream.read().decode("UTF8"), lines=True, encoding="utf8")[['text','id','created_at']]
df['projectName'] = projectName
dtype={"text": String(),"id":String(), "created_at":DateTime(), "projectName":String()}
rowsAffected = df.to_sql(name = 'TwitterDataSet',con = db.engine, index = False, if_exists='append',dtype=dtype) # upload onto the database
    except Exception as e: # if anything above fails, print the line number where the error occurred
print(
type(e).__name__, # TypeError
__file__, # /tmp/example.py
e.__traceback__.tb_lineno # 2
)
return jsonify({"error": "File Upload Fail"}), 401
return jsonify({"success": "Upload Complete"}), 200
@app.route('/deleteproject', methods=['POST'])
@login_required
def deleting_projects():
"""
    Sets the isDeleted column to 1 for each selected Project row.
    Rows flagged this way are no longer shown to the user, who requested
    that they be removed.
---
POST:
responses:
200:
description:
                Project data has successfully been deleted/hidden from the user's view
"""
projects = request.json['projects'] # contains a list of projects that the user desires to get deleted
queried_projects = Project.query.filter(Project.project_name.in_(projects))
for query in queried_projects: # we replace the value with 1
query.isDeleted = 1
db.session.commit()
return jsonify({"success": "Upload Complete"}), 200
@app.route('/api/submit', methods=['POST'])
@login_required
def submission():
"""
This method handles the event when a user submits an annotation.
---
POST:
responses:
200:
description:
adds a new row value in the Submission table on the HarveyTwitter.db
"""
json_object = request.json
tweetid =json_object["tweetid"]
project = session["project_name"]
highlight = json_object["highlight"]
spatial_footprint = json_object["spatial-footprint"]
timestamp = json_object["timestamp"]
category = json_object["category"]
annotation = json.dumps({"annotation": {
"highlight": highlight ,
"spatial-footprint": spatial_footprint,
"category": category
}})
new_submission = Submission(userid = current_user.id, tweetid = tweetid, project_name = project,
timestamp = timestamp, annotation = annotation, username = current_user.username)
db.session.add(new_submission)
db.session.commit()
return jsonify("Success"), 200
@app.route('/compare/submit', methods=['POST'])
@login_required
def compare_submission():
"""
When a resolver submits a resolution from the compare submissions page, this method handles the event.
---
POST:
responses:
200:
description:
adds a new row value in the compare-submission table on the HarveyTwitter.db
"""
json_object = request.json
userId1 = json_object['submission-userid-1']
userId2 = json_object['submission-userid-2']
submissionid1 = json_object['submissionid-1']
submissionid2 = json_object['submissionid-2']
choosenId = json_object['choosing-correct-submission']
CurrentUserId = current_user.id
new_submission = CompareSubmission(userid = CurrentUserId,
submission_userid_1 = userId1,
submission_userid_2 = userId2,
submissionid_1 = submissionid1,
submissionid_2 = submissionid2,
choosing_correct_submission = choosenId)
db.session.add(new_submission)
db.session.commit()
return jsonify("Success"), 200
if __name__ == '__main__':
app.run(host='0.0.0.0') | 40.10352 | 257 | 0.663087 | 0 | 0 | 0 | 0 | 17,008 | 0.878014 | 0 | 0 | 9,652 | 0.498271 |
26e97e5ea8220154eb41374939938275b9e537b0 | 741 | py | Python | AppPython/app/core/src/forms.py | denalme/AplicacionPython | eb99af3c21f003135192ad040a0a04a40b63ea70 | [
"MIT"
] | null | null | null | AppPython/app/core/src/forms.py | denalme/AplicacionPython | eb99af3c21f003135192ad040a0a04a40b63ea70 | [
"MIT"
] | null | null | null | AppPython/app/core/src/forms.py | denalme/AplicacionPython | eb99af3c21f003135192ad040a0a04a40b63ea70 | [
"MIT"
] | null | null | null | from django import forms
from .pqrsf import pqrsf
class ContactForm(forms.Form):
    # Attributes of the contact form
usuario = forms.CharField(label="Nombre", required=True, widget=forms.TextInput(attrs={'class':'formulario input', 'placeholder':'Nombre'}))
correo = forms.EmailField(label="Correo Electrónico", required=True,widget=forms.EmailInput(attrs={'class':'formulario input','placeholder':'Correo Electrónico'}))
tipomsj = forms.ChoiceField(label="Asunto", required=True, choices=pqrsf, widget=forms.Select(attrs={'class':'formulario input'}))
mensaje = forms.CharField(label="Mensaje", required=True, widget=forms.Textarea(attrs={'class':'formulario input', 'rows':'5','placeholder':'Escribe tu Mensaje'}))
| 74.1 | 167 | 0.747638 | 691 | 0.930013 | 0 | 0 | 0 | 0 | 0 | 0 | 281 | 0.378197 |
26ec29318bc12813be99da269d94707649d0104c | 3,244 | py | Python | prisma/generated/models.py | mao-shonen/prisma-client-py-tortoise-orm | e26d8451ea0775bd7ddfec42f663510434537a77 | [
"MIT"
] | null | null | null | prisma/generated/models.py | mao-shonen/prisma-client-py-tortoise-orm | e26d8451ea0775bd7ddfec42f663510434537a77 | [
"MIT"
] | null | null | null | prisma/generated/models.py | mao-shonen/prisma-client-py-tortoise-orm | e26d8451ea0775bd7ddfec42f663510434537a77 | [
"MIT"
] | null | null | null | __doc__ = '''
This file is generated by the `prisma-client-py-tortoise-orm (0.2.2)`,
Please do not modify directly.
repository: https://github.com/mao-shonen/prisma-client-py-tortoise-orm
'''
import typing
from enum import Enum
from tortoise import fields
from tortoise.models import Model
from prisma import base as base
class Role(str, Enum):
'''
- USER
- ADMIN
'''
USER = 'USER'
ADMIN = 'ADMIN'
class User(base.User, Model):
'''
model comment
fields:
- 🔟 id [Int] 🔑
- default: auto_increment()
- doc: field comment
- 🆎 *password [String]
- 🆎 *email [String] 📌
- 🔟 weight [Float?]
- ✅ is18 [Boolean?]
- 🆎 name [String?]
- 🔟 *wallet [Decimal]
- 🪢 successor [User?]
- 🪢 predecessor [User?]
- 🪢 role [Role]
- default: Role.USER
- 🪢 posts [Post]
- 🪢 *biography [Json]
- 🪢 group [group?]
- 🕑 createdAt [DateTime]
- default: now()
- 🕑 *updatedAt [DateTime]
'''
id = fields.IntField(pk=True, description='field comment')
password = fields.CharField(max_length=255)
email = fields.CharField(max_length=255, unique=True)
weight = fields.FloatField(null=True)
is_18 = fields.BooleanField(null=True)
name = fields.CharField(max_length=255, null=True)
wallet = fields.DecimalField(max_digits=12, decimal_places=2)
successor: fields.OneToOneRelation[typing.Union['User', typing.Any]] = fields.OneToOneField(source_field='successorId', model_name='models.User', to_field='id', related_name='predecessor', null=True)
predecessor: fields.ReverseRelation['User']
role = fields.CharEnumField(enum_type=Role, default=Role.USER)
posts: fields.ReverseRelation['Post']
biography = fields.JSONField()
group: fields.ForeignKeyRelation[typing.Union['Group', typing.Any]] = fields.ForeignKeyField(source_field='groupId', model_name='models.Group', to_field='id', related_name='user', null=True)
created_at = fields.DatetimeField(auto_now_add=True)
updated_at = fields.DatetimeField(auto_now=True)
class Meta(base.User.Meta):
table = 'User'
def __str__(self) -> str:
return f'User<{self.id}>'
class Post(base.Post, Model):
'''
fields:
- 🔟 id [Int] 🔑
- default: auto_increment()
- 🪢 user [User?]
'''
id = fields.IntField(pk=True)
user: fields.ForeignKeyRelation[typing.Union['User', typing.Any]] = fields.ForeignKeyField(model_name='models.User', to_field='id', related_name='posts', on_delete=fields.SET_NULL, null=True)
class Meta:
table = 'Post'
def __str__(self) -> str:
return f'Post<{self.id}>'
class Group(base.Group, Model):
'''
fields:
- 🔟 id [Int] 🔑
- default: auto_increment()
- 🆎 name [String]
- default: default
- ✅ public [Boolean]
- default: true
- 🪢 User [User]
'''
id = fields.IntField(pk=True)
name = fields.CharField(max_length=255, default='default')
public = fields.BooleanField(default=True)
user: fields.ReverseRelation['User']
class Meta:
table = 'groups'
def __str__(self) -> str:
return f'Group<{self.id}>'
__all__ = ['Role', 'User', 'Post', 'Group'] | 30.317757 | 203 | 0.633785 | 2,941 | 0.886645 | 0 | 0 | 0 | 0 | 0 | 0 | 1,379 | 0.415737 |
26ed69ff9590d721e4368e521015afe41d5f9df5 | 2,536 | py | Python | samples/people_on_stairs/classify_overspeeding/classify_overspeeding.py | vgvoleg/gst-video-analytics | 7e4006551f38334bc59b2ef3d205273d07d40ce4 | [
"MIT"
] | null | null | null | samples/people_on_stairs/classify_overspeeding/classify_overspeeding.py | vgvoleg/gst-video-analytics | 7e4006551f38334bc59b2ef3d205273d07d40ce4 | [
"MIT"
] | null | null | null | samples/people_on_stairs/classify_overspeeding/classify_overspeeding.py | vgvoleg/gst-video-analytics | 7e4006551f38334bc59b2ef3d205273d07d40ce4 | [
"MIT"
] | 1 | 2020-05-14T15:30:03.000Z | 2020-05-14T15:30:03.000Z | from os.path import join, realpath
from os import listdir, environ
import shlex
import subprocess
import pickle
import json
MODEL_PATH = ("/root/Projects/models/intel/person-detection-retail-0013/FP32"
"/person-detection-retail-0013.xml")
DATASET_PATH = "/root/Projects/train/"
ALPHA = 0.1
ALPHA_HW = 0.01
RES_PATH = ("/root/Projects/gst-video-analytics-0.7.0/samples/"
"people_on_stairs/classify_overspeeding/res.json")
SVM_PATH = '/root/Projects/models/overspeed_classify/SVM_Classifier_without_interval.sav'
CLASSIFY_PIPELINE_TEMPLATE = """gst-launch-1.0 filesrc \
location={} \
! decodebin ! videoconvert ! video/x-raw,format=BGRx ! gvadetect \
model={} ! queue \
! gvaspeedometer alpha={} alpha-hw={} interval=0.03333333 \
! gvapython module={} class=OverspeedClassifier arg=[\\"{}\\"] \
! fakesink sync=false"""
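# A minimal helper sketch that fills the template the same way the __main__
# block below does; only the two paths are parameters, and `build_pipeline`
# is a placeholder name.
def build_pipeline(video_path, result_path):
    """Illustrative only: render the gst-launch command for one video."""
    return CLASSIFY_PIPELINE_TEMPLATE.format(
        video_path, MODEL_PATH, ALPHA, ALPHA_HW, realpath(__file__), result_path)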
class OverspeedClassifier():
def __init__(self, out_path=RES_PATH):
self.velocities = []
self._result_path = out_path
self.frames_processed = 0
def process_frame(self, frame):
for region in frame.regions():
for tensor in region.tensors():
if tensor.has_field("velocity"):
self.velocities.append(tensor['velocity'])
self.__updateJSON()
self.frames_processed += 1
def __updateJSON(self):
with open(self._result_path, "w") as write_file:
json.dump(self.velocities,
write_file, indent=4, sort_keys=True)
def __dump_data(self):
with open(self._result_path, "a") as write_file:
write_file.write("{} \n".format(self.velocities))
if __name__ == "__main__":
svclassifier = pickle.load(open(SVM_PATH, 'rb'))
for file_name in listdir(DATASET_PATH):
if file_name.endswith(".mp4"):
video_path = join(DATASET_PATH, file_name)
pipeline_str = CLASSIFY_PIPELINE_TEMPLATE.format(
video_path,
MODEL_PATH,
ALPHA,
ALPHA_HW,
realpath(__file__),
join(DATASET_PATH, file_name.replace('.mp4', '.json'))
)
print(pipeline_str)
proc = subprocess.run(
shlex.split(pipeline_str), env=environ.copy())
if proc.returncode != 0:
print("Error while running pipeline")
exit(-1)
| 32.101266 | 89 | 0.615536 | 801 | 0.315852 | 0 | 0 | 0 | 0 | 0 | 0 | 725 | 0.285883 |
26f0496f5cee5563d72ece3864af6c3cc42f430c | 2,883 | py | Python | indicators/migrations/0035_make_indicators_programs_foreignkey.py | mercycorps/TolaWorkflow | 59542132fafd611081adb0e8cfaa04abc5886d7a | [
"Apache-2.0"
] | null | null | null | indicators/migrations/0035_make_indicators_programs_foreignkey.py | mercycorps/TolaWorkflow | 59542132fafd611081adb0e8cfaa04abc5886d7a | [
"Apache-2.0"
] | 268 | 2020-03-31T15:46:59.000Z | 2022-03-31T18:01:08.000Z | indicators/migrations/0035_make_indicators_programs_foreignkey.py | Falliatcom-sa/falliatcom | 39fb926de072c296ed32d50cccfb8003ca870739 | [
"Apache-2.0"
] | 1 | 2021-01-05T01:58:24.000Z | 2021-01-05T01:58:24.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-11-06 08:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
def set_temp_program_id(apps, schema_editor):
Indicator = apps.get_model('indicators', 'Indicator')
for indicator in Indicator.objects.all():
if indicator.program.all().count() == 0:
program_id = None
else:
program_id = indicator.program.all().first().id
indicator.program_temp = program_id
indicator.save()
def move_temp_program_id(apps, schema_editor):
Indicator = apps.get_model('indicators', 'Indicator')
for indicator in Indicator.objects.all():
indicator.program_id = indicator.program_temp
indicator.save()
def test_program_id_uniqueness(apps, schema_editor):
Indicator = apps.get_model('indicators', 'Indicator')
for indicator in Indicator.objects.all():
if indicator.program.all().count() > 1:
raise Exception('Indicator {0} has more than 1 program, fix then rerun'.format(indicator.id))
class Migration(migrations.Migration):
dependencies = [
('workflow', '0020_auto_20180918_1554'),
('indicators', '0034_ipttindicator_programwithmetrics'),
]
operations = [
migrations.RunPython(test_program_id_uniqueness),
migrations.AddField(
model_name='historicalindicator',
name='program',
field=models.ForeignKey(
blank=True, db_constraint=False, null=True,
on_delete=django.db.models.deletion.DO_NOTHING,
related_name='+', to='workflow.Program'),
),
# first add a temp field to hold the foreign key
migrations.AddField(
model_name='indicator',
name='program_temp',
field=models.IntegerField(
null=True, blank=True, help_text=b'Program Temp',
verbose_name='Program Temp'),
preserve_default=False,
),
# put the value of the program the indicator is assigned to there
migrations.RunPython(set_temp_program_id),
migrations.RemoveField(
model_name='indicator',
name='program',
),
# add the real foreign key
migrations.AddField(
model_name='indicator',
name='program',
field=models.ForeignKey(
null=True, blank=True, help_text=b'Program',
on_delete=django.db.models.deletion.CASCADE,
to='workflow.Program', verbose_name='Program'),
preserve_default=False,
),
# copy the temp key over
migrations.RunPython(move_temp_program_id),
migrations.RemoveField(
model_name='indicator',
name='program_temp',
),
]
| 36.493671 | 105 | 0.622962 | 1,776 | 0.616025 | 0 | 0 | 0 | 0 | 0 | 0 | 651 | 0.225806 |
26f1b913f1ee12f1e92139c51f5d8c9e44276d06 | 4,335 | py | Python | pymockserver/client.py | MXWest/py-mockserver | cd0783aac2e5c1b8a021c29a4c70ef5414b7f7cc | [
"MIT"
] | 3 | 2018-06-14T19:44:05.000Z | 2020-12-14T04:33:21.000Z | pymockserver/client.py | MXWest/py-mockserver | cd0783aac2e5c1b8a021c29a4c70ef5414b7f7cc | [
"MIT"
] | 4 | 2020-02-01T16:20:18.000Z | 2021-03-23T14:43:54.000Z | pymockserver/client.py | MXWest/py-mockserver | cd0783aac2e5c1b8a021c29a4c70ef5414b7f7cc | [
"MIT"
] | 2 | 2020-02-01T16:25:50.000Z | 2021-03-23T13:06:25.000Z | import requests
import json
from urllib3.exceptions import HTTPError
class Client(object):
"""Client to connect to the mockserver"""
def __init__(self, host='localhost', port=1080):
"""
Class initialization
:param str host: host of the mockserver
:param int port: port of the mockserver
"""
self.host = host
self.port = port
self.headers = {
'Content-Type': 'application/json'
}
def _get_url(self):
"""Get full URL of the mockserver
:return str url of the mockserver
"""
return 'http://{}:{}'.format(self.host, self.port)
def expectation(self, request, response, times=None):
"""create expectation on mockserver
        :param request: httpRequest object
        :param response: httpResponse object
        :param times: times object (optional)
"""
data = {
'httpRequest': request.dict(),
'httpResponse': response.dict(),
'times': {
'remainingTimes': 1,
'unlimited': True
}
}
if times:
data['times'] = vars(times)
req = requests.put('{}/expectation'.format(self._get_url()),
json.dumps(data))
return req
def forward(self, request, forward, times=None):
"""create forwarding on mockserver
        :param request: httpRequest object
        :param forward: httpForward object
        :param times: times object (optional)
"""
data = {
'httpRequest': request.dict(),
'httpForward': forward.dict(),
'times': {
'remainingTimes': 1,
'unlimited': True
}
}
if times:
data['times'] = vars(times)
req = requests.put('{}/expectation'.format(self._get_url()),
json.dumps(data))
return req
def active_expectations(self):
"""Get list of active expectations
:return Array active expectations
"""
req = requests.put(
'{}/retrieve'.format(self._get_url()), params={'type': 'active_expectations'})
if req.status_code == 200:
try:
return req.json()
except ValueError:
return []
return []
def retrieve_requests(self, request=None):
"""Get all recorded requests
:return Array recorded requests
"""
data = {}
if request:
data = request.dict()
req = requests.put('{}/retrieve'.format(self._get_url()),
params={'type': 'requests'}, data=json.dumps(data))
if req.status_code == 200:
try:
return req.json()
except ValueError:
return []
return []
def verify(self, request, times=None):
"""Verify if a request has been received in specific number of times
:param Request request: Request object to verify
:param Times times: Times object for count. Default=None, count=1
:return Boolean true if verified, false if not
"""
data = {
'httpRequest': request.dict()
}
if times:
data['times'] = vars(times)
else:
data['times'] = {
'count': 1,
'exact': True
}
req = requests.put('{}/verify'.format(self._get_url()),
headers=self.headers,
data=json.dumps(data))
resp = {
'status': 'OK',
'reason': req.content.decode('utf-8'),
'found': None
}
if req.status_code == 202:
resp['reason'] = None
resp['found'] = True
elif req.status_code == 406:
resp['found'] = False
else:
resp['status'] = 'ERROR'
return resp
def reset(self):
"""delete all active expectations and recorded requests"""
requests.put('{}/reset'.format(self._get_url()))
def clear(self, request):
"""Delete active expectation and recorded request
:param Request request: Request to clear
"""
requests.put('{}/clear'.format(self._get_url()), data=request.json())
| 29.290541 | 90 | 0.514418 | 4,262 | 0.98316 | 0 | 0 | 0 | 0 | 0 | 0 | 1,624 | 0.374625 |
26f481dfc45ad24d352172f8f79006991163fc28 | 5,277 | py | Python | workflow/executors/validation_tasks.py | mettadatalabs1/oncoscape-datapipeline | 9c3209ba88831c3f1c598182c719ce45b4724fff | [
"Apache-2.0"
] | null | null | null | workflow/executors/validation_tasks.py | mettadatalabs1/oncoscape-datapipeline | 9c3209ba88831c3f1c598182c719ce45b4724fff | [
"Apache-2.0"
] | null | null | null | workflow/executors/validation_tasks.py | mettadatalabs1/oncoscape-datapipeline | 9c3209ba88831c3f1c598182c719ce45b4724fff | [
"Apache-2.0"
] | null | null | null | from validators.validation_configurator import ValidationConfigurator
from pipeline.models import InputFile
class HugoValidator(object):
# hugo_genes_map (Dictionary): a dictionary that has the hugo genes and
# respective aliases. Each entry is db:{gene: Set(aliases),}.
# This is created the first time the class is loaded and is static.
# We use set because alias look up will be O(1) and the overall complexity
# for each row is O(n), yielding a total complexity of O(n^2)
# for an input file. The assumption is that different projects might have
# different gene maps and we want to create the map per project once.
hugo_genes_map = {}
@classmethod
def populate_hugo_genes_map(cls, mongo_connector,collection):
"""
Populates the hugo_genes_map for a given database.
Args:
mongo_connector (db.mongo_connector.MongoConnector): The mongo
connection holding the db name and the connection to the db
collection: the name of the collection to query
"""
db = mongo_connector.db.name
if db not in HugoValidator.hugo_genes_map:
gene_maps_from_db = mongo_connector.find(query=None,
collection=collection)
gene_maps_local = {}
for gene_map in gene_maps_from_db:
gene_maps_local[gene_map["hugo"]] =\
frozenset(gene_map["symbols"])
HugoValidator.hugo_genes_map[db] = gene_maps_local
print (len(HugoValidator.hugo_genes_map[db]))
@classmethod
def validate_hugo(cls, db, gene_symbol):
"""
Validates if a given gene symbol is a gene name, an alias, or is an
invalid entry.
Args:
db (string): The database in which we want to check
            gene_symbol (string): The gene symbol to check
Returns:
(string, string): A 2 tuple with gene_symbol that was sent and the
parent if it is an alias. If a match, the tuple is (None, gene_symbol).
If invalid, the tuple is (None, None)
"""
gene_valid_status = (None, None)
db_genes_map = HugoValidator.hugo_genes_map[db]
if gene_symbol in db_genes_map:
gene_valid_status = (None, gene_symbol)
else:
for gene in db_genes_map:
if gene_symbol in db_genes_map[gene]:
gene_valid_status = (gene_symbol, gene)
break
return gene_valid_status
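# A minimal usage sketch, assuming `connector` is a MongoConnector as described
# in the docstrings above; 'genemaps' is a placeholder collection name and
# 'TP53' a placeholder symbol.
def example_hugo_check(connector):
    """Illustrative only: build the alias map once, then look a symbol up."""
    HugoValidator.populate_hugo_genes_map(connector, 'genemaps')
    return HugoValidator.validate_hugo(connector.db.name, 'TP53')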
def validate_file(input_file_obj):
if not input_file_obj.directory and not input_file_obj.s3_path:
return None
if not input_file_obj.file:
return None
input_file = (input_file_obj.directory
if input_file_obj.directory else input_file_obj.s3_path)
input_file += "/" + input_file_obj.file
# validation_configurator (ValidationConfigurator)
validation_configurator = ValidationConfigurator(input_file_obj.datatype)
with open(input_file, "r") as file_to_validate:
header = file_to_validate.readline().strip("\n")
# header row: gene sample1 sample2 sample 3
# valid_samples(list(dictionary): A list of dictionary to store all the
# valid rows for a given sample. The dictionary has sample as the key
# and a dictionary with 2 lists, one for valid values and other for
# the genes. The values and genes are 1-1 meaning value[0] corresponds
# to the value of the first gene for the sample. If we have an invalid
# value, then we will not store the gene for the sample.
# todo: add documentation link to the datastructure.
valid_samples = [{"sample": sample, "values":[],"genes":[],}
for sample in header.split("\t")[1:]]
print (valid_samples[-1])
for line in file_to_validate:
line_tokens = line.strip("\n").split("\t")
gene = line_tokens[0]
hugo_validation = HugoValidator.validate_hugo("tcga", gene)
gene_valid = False
if hugo_validation[1]:
# the gene is alias if first token is not None else valid
gene_valid = "alias" if hugo_validation[0] else "valid"
enumerated_tokens = enumerate(line_tokens[1:])
# parse rest of the line only for valid genes
for idx,line_token in enumerated_tokens:
# the element is valid
is_valid, value = validation_configurator.validate(
line_token)
if is_valid:
# the index refers to the sample location in valid_samples.
# append the gene and the value at the end
valid_samples[idx]["genes"].append(gene)
valid_samples[idx]["values"].append(value)
# THIS HAS TO CHANGE. IF THERE IS ONE INVALID ENTRY
# the whole sample should change.
# HANDLE NULL. Default is NA. Put this in job_config
# sklearn.decomposition.PCA lib for PCA
input_file_obj.valid_samples = valid_samples
| 49.783019 | 83 | 0.617965 | 2,438 | 0.462005 | 0 | 0 | 1,863 | 0.353042 | 0 | 0 | 2,287 | 0.43339 |
26f602e46a5eecf3c443505b6bc8ba0c321a760e | 1,290 | py | Python | pytglib/api/types/input_message_video_note.py | iTeam-co/pytglib | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 6 | 2019-10-30T08:57:27.000Z | 2021-02-08T14:17:43.000Z | pytglib/api/types/input_message_video_note.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 1 | 2021-08-19T05:44:10.000Z | 2021-08-19T07:14:56.000Z | pytglib/api/types/input_message_video_note.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 5 | 2019-12-04T05:30:39.000Z | 2021-05-21T18:23:32.000Z |
from ..utils import Object
class InputMessageVideoNote(Object):
"""
A video note message
Attributes:
ID (:obj:`str`): ``InputMessageVideoNote``
Args:
video_note (:class:`telegram.api.types.InputFile`):
Video note to be sent
thumbnail (:class:`telegram.api.types.inputThumbnail`):
Video thumbnail, if available
duration (:obj:`int`):
Duration of the video, in seconds
length (:obj:`int`):
Video width and height; must be positive and not greater than 640
Returns:
InputMessageContent
Raises:
:class:`telegram.Error`
"""
ID = "inputMessageVideoNote"
def __init__(self, video_note, thumbnail, duration, length, **kwargs):
self.video_note = video_note # InputFile
self.thumbnail = thumbnail # InputThumbnail
self.duration = duration # int
self.length = length # int
@staticmethod
def read(q: dict, *args) -> "InputMessageVideoNote":
video_note = Object.read(q.get('video_note'))
thumbnail = Object.read(q.get('thumbnail'))
duration = q.get('duration')
length = q.get('length')
return InputMessageVideoNote(video_note, thumbnail, duration, length)
| 28.666667 | 77 | 0.615504 | 1,258 | 0.975194 | 0 | 0 | 324 | 0.251163 | 0 | 0 | 714 | 0.553488 |
26f6c233aae91fb0635319c24ac7a5452088a65f | 520 | py | Python | gdc_readgroups/exceptions.py | NCI-GDC/gdc-readgroups | 874387bb3473b0a0680551339e50b072cc058eb6 | [
"Apache-2.0"
] | null | null | null | gdc_readgroups/exceptions.py | NCI-GDC/gdc-readgroups | 874387bb3473b0a0680551339e50b072cc058eb6 | [
"Apache-2.0"
] | null | null | null | gdc_readgroups/exceptions.py | NCI-GDC/gdc-readgroups | 874387bb3473b0a0680551339e50b072cc058eb6 | [
"Apache-2.0"
] | 1 | 2020-01-23T22:07:10.000Z | 2020-01-23T22:07:10.000Z | """
Exceptions for Read Group headers
"""
class NoReadGroupError(Exception):
"""NoReadGroupError"""
class SamtoolsViewError(Exception):
"""SamtoolsViewError"""
class InvalidPlatformError(Exception):
"""InvalidPlatformError"""
class InvalidPlatformModelError(Exception):
"""InvalidPlatformError"""
class MissingReadgroupIdError(Exception):
"""MissingReadgroupIdError"""
class InvalidDatetimeError(Exception):
"""InvalidDatetimeError"""
class NotABamError(Exception):
"""NotABamError"""
| 20.8 | 43 | 0.746154 | 464 | 0.892308 | 0 | 0 | 0 | 0 | 0 | 0 | 211 | 0.405769 |
26f984eeef056e7ffe65f198d0e3689278e5fc57 | 2,098 | py | Python | aiida_logger/calculations/test_calculations.py | SINTEF/aiida-logger | d97aced2ec8967cb359f488d2218cc3b47c92f6b | [
"MIT"
] | null | null | null | aiida_logger/calculations/test_calculations.py | SINTEF/aiida-logger | d97aced2ec8967cb359f488d2218cc3b47c92f6b | [
"MIT"
] | null | null | null | aiida_logger/calculations/test_calculations.py | SINTEF/aiida-logger | d97aced2ec8967cb359f488d2218cc3b47c92f6b | [
"MIT"
] | null | null | null | """
Tests for calculations.
"""
from __future__ import print_function
from __future__ import absolute_import
import os
import numpy as np
def test_process(logger_code):
"""
Test running a calculation.
Also checks its outputs.
"""
from aiida.plugins import DataFactory, CalculationFactory
from aiida.engine import run
from aiida.common.extendeddicts import AttributeDict
from aiida_logger.tests import TEST_DIR # pylint: disable=wrong-import-position
# Prepare input parameters
parameters = AttributeDict()
parameters.comment_string = '#'
parameters.labels = True
# Define input files to use
SinglefileData = DataFactory('singlefile')
datafile = SinglefileData(
file=os.path.join(TEST_DIR, 'input_files', 'datafile'))
# Set up calculation
inputs = {
'code': logger_code,
'parameters': DataFactory('dict')(dict=parameters),
'datafiles': {
'datafile': datafile
},
'metadata': {
'options': {
'resources': {
'num_machines': 1,
'num_mpiprocs_per_machine': 1
},
'parser_name': 'logger',
'withmpi': False,
'output_filename': 'logger.out'
},
'description': 'Test job submission with the aiida_logger plugin'
},
}
result = run(CalculationFactory('logger'), **inputs)
assert 'data' in result
assert 'metadata' in result
data = result['data']
metadata = result['metadata']
metadata = metadata.get_dict()
assert 'labels' in metadata
assert 'comments' in metadata
assert metadata['labels'] == ['time', 'param1', 'param2', 'param3']
assert metadata['comments'][0] == '# This is an example file'
test_array = np.array([[1.0e+00, 3.0e+00, 4.0e+00, 5.0e+00],
[2.0e+00, 4.0e+00, 5.7e+00, -1.0e-01],
[3.0e+00, 1.0e-03, 1.0e+03, 8.0e-01]])
np.testing.assert_allclose(data.get_array('content'), test_array)
| 29.138889 | 84 | 0.594376 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 635 | 0.302669 |
26fb61ab1de0e800a79d50ca5f9597cf8788e433 | 2,155 | py | Python | db_sheet/sheet_table.py | chowmean/DBSheet | 3f1c521320cb3564c4ff55cd70c8a1978dd32a4c | [
"Apache-2.0"
] | 3 | 2017-08-18T20:04:12.000Z | 2021-01-08T12:23:43.000Z | db_sheet/sheet_table.py | chowmean/DBSheet | 3f1c521320cb3564c4ff55cd70c8a1978dd32a4c | [
"Apache-2.0"
] | 1 | 2021-06-01T23:13:57.000Z | 2021-06-01T23:13:57.000Z | db_sheet/sheet_table.py | chowmean/DBSheet | 3f1c521320cb3564c4ff55cd70c8a1978dd32a4c | [
"Apache-2.0"
] | null | null | null | class Table:
def __init__(self, columns, name):
self.columns = columns
self.name = name
class CreateTable:
def __init__(self, table_obj, sheet_obj):
self.table = table_obj
self.sheet = sheet_obj
def create_table(self):
cols = len(self.table.columns)
name = self.table.name
rows = 1
wk_sheet = self.sheet.add_worksheet(name, rows, cols)
return self.addRows(wk_sheet)
def addRows(self,worksheet):
worksheet.append_row(self.table.columns)
worksheet.delete_row(1)
return True
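# A minimal usage sketch of the two classes above, assuming `sheet` is an
# authenticated gspread spreadsheet handle; the column and worksheet names
# are placeholders.
def example_create(sheet):
    """Illustrative only: create a 3-column worksheet named 'users'."""
    table = Table(columns=['id', 'name', 'email'], name='users')
    return CreateTable(table, sheet).create_table()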
class WorkingTable:
def __init__(self, name, sheet_obj):
for i in range(0,100):
self.wk_sheet = sheet_obj.get_worksheet(i)
if self.wk_sheet.title == name:
break
elif self.wk_sheet.title == "":
return "No sheet with this name found"
def get_all(self):
return self.wk_sheet.get_all_records()
def insert(self, row):
if len(row) != self.wk_sheet.col_count:
return "Column count doest match"
return self.wk_sheet.append_row(row)
def get_one(self,key, value):
return self.find(key, value, 1)
def get_attr(self, key, value):
return self.find(key,value)
def find(self,key, value, count=-1):
all_data = self.get_all()
matched_rows = []
matched_count = 0
for each_d in all_data:
if each_d[key] == value:
matched_rows.append(each_d)
matched_count = matched_count +1
if matched_count == count:
                    break
return matched_rows
def get_index(self,key, value):
all_data = self.get_all()
matched_rows = []
index = 1
for each_d in all_data:
if each_d[key] == value:
return index + 1
index = index + 1
return -1
def delete(self,key, value):
index = self.get_index(key, value)
        print(index)
while index != -1:
self.wk_sheet.delete_row(index)
index = self.get_index(key, value) | 28.733333 | 61 | 0.570766 | 2,149 | 0.997216 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.027378 |
26fd4d122991c7a14eaad9bffe766e315791616a | 90 | py | Python | root/ilikeit/MySQLCrashCourse/dbcom/tests/__init__.py | ChyiYaqing/chyidlTutorial | 77e7f6f84f21537a58a8a8a42e31cf2e3dd31996 | [
"MIT"
] | 5 | 2018-10-17T05:57:39.000Z | 2021-07-05T15:38:24.000Z | root/ilikeit/MySQLCrashCourse/dbcom/tests/__init__.py | ChyiYaqing/chyidlTutorial | 77e7f6f84f21537a58a8a8a42e31cf2e3dd31996 | [
"MIT"
] | 2 | 2021-04-14T00:48:43.000Z | 2021-04-14T02:20:50.000Z | root/ilikeit/MySQLCrashCourse/dbcom/tests/__init__.py | ChyiYaqing/chyidlTutorial | 77e7f6f84f21537a58a8a8a42e31cf2e3dd31996 | [
"MIT"
] | 3 | 2019-03-02T14:36:19.000Z | 2022-03-18T10:12:09.000Z | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""Requests test package initialisation.""" | 22.5 | 43 | 0.655556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.966667 |
26fdabbca3431190e788d02f52c14a320298b8ac | 9,425 | py | Python | discopy/components/sense/explicit/bert_conn_sense.py | rknaebel/discopy | 5507d656987af2df9e595434a82c0a12bbc713e4 | [
"MIT"
] | 14 | 2019-04-14T16:10:23.000Z | 2022-03-09T14:56:10.000Z | discopy/components/sense/explicit/bert_conn_sense.py | rknaebel/discopy | 5507d656987af2df9e595434a82c0a12bbc713e4 | [
"MIT"
] | 15 | 2019-04-15T16:44:40.000Z | 2021-11-23T17:36:41.000Z | discopy/components/sense/explicit/bert_conn_sense.py | rknaebel/discopy | 5507d656987af2df9e595434a82c0a12bbc713e4 | [
"MIT"
] | 1 | 2020-02-28T23:36:35.000Z | 2020-02-28T23:36:35.000Z | import json
import logging
import os
from typing import List, Dict
import click
import numpy as np
import tensorflow as tf
from sklearn.metrics import cohen_kappa_score, precision_recall_fscore_support, accuracy_score
from tqdm import tqdm
from discopy.components.component import Component
from discopy.components.connective.base import get_connective_candidates
from discopy.evaluate.conll import evaluate_docs, print_results
from discopy.utils import init_logger
from discopy_data.data.doc import Document
from discopy_data.data.loaders.conll import load_bert_conll_dataset
from discopy_data.data.relation import Relation
logger = logging.getLogger('discopy')
def get_conn_model(in_size, out_size, hidden_size, hidden_size2=256):
x = y = tf.keras.layers.Input(shape=(in_size,), name='connective')
y = tf.keras.layers.Dense(hidden_size, kernel_initializer='lecun_normal', activation='selu')(y)
y = tf.keras.layers.Dropout(0.3)(y)
y = tf.keras.layers.Dense(hidden_size2, kernel_initializer='lecun_normal', activation='selu')(y)
y = tf.keras.layers.Dropout(0.3)(y)
y = tf.keras.layers.Dense(out_size, activation='softmax')(y)
model = tf.keras.models.Model(x, y)
optimizer = tf.keras.optimizers.RMSprop()
model.compile(optimizer, 'sparse_categorical_crossentropy', metrics=[
"accuracy",
])
return model
def get_bert_features(idxs, doc_bert, used_context=0):
idxs = list(idxs)
pad = np.zeros_like(doc_bert[0])
embd = doc_bert[idxs].mean(axis=0)
if used_context > 0:
left = [doc_bert[i] if i >= 0 else pad for i in range(min(idxs) - used_context, min(idxs))]
right = [doc_bert[i] if i < len(doc_bert) else pad for i in range(max(idxs) + 1, max(idxs) + 1 + used_context)]
embd = np.concatenate(left + [embd] + right).flatten()
return embd
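# A minimal sketch, not used by the pipeline itself: with context, the left,
# centre, and right vectors are concatenated, so the feature width mirrors the
# in_size computed in ConnectiveSenseClassifier below. `feature_width` is a
# placeholder name.
def feature_width(embedding_dim, used_context):
    """Illustrative only: (2 * used_context + 1) * embedding_dim."""
    return (2 * used_context + 1) * embedding_dim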
def generate_pdtb_features(docs: List[Document], sense_map: Dict[str, int], used_context=0):
features = []
for doc in tqdm(docs):
doc_bert = doc.get_embeddings()
global_id_map = {(s_i, t.local_idx): t.idx for s_i, s in enumerate(doc.sentences) for t in s.tokens}
conns = {tuple(t.idx for t in r.conn.tokens): r.senses[0] for r in doc.get_explicit_relations()}
for sent_i, sentence in enumerate(doc.sentences):
for connective_candidate in get_connective_candidates(sentence):
conn_idxs = tuple(global_id_map[(sent_i, i)] for i, c in connective_candidate)
if conn_idxs in conns:
sense = sense_map.get(conns[conn_idxs])
if not sense:
continue
features.append((get_bert_features(conn_idxs, doc_bert, used_context), sense))
else:
features.append((get_bert_features(conn_idxs, doc_bert, used_context), 0))
x, y = list(zip(*features))
return np.stack(x), np.array(y)
def get_sense_mapping(docs):
sense_map = {
'NoSense': 0,
}
senses = sorted({s for doc in docs for rel in doc.relations for s in rel.senses})
i = 1
for s in senses:
if s in sense_map:
sense_map[s] = sense_map[s]
else:
sense_map[s] = i
i += 1
classes = []
for sense, sense_id in sorted(sense_map.items(), key=lambda x: x[1]):
if len(classes) > sense_id:
continue
classes.append(sense)
return sense_map, classes
class ConnectiveSenseClassifier(Component):
model_name = 'explicit_sense_bert_classifier'
used_features = ['vectors']
def __init__(self, input_dim, used_context: int = 0, hidden_dim: int = 2048):
self.input_dim = input_dim
self.used_context = used_context
self.in_size = input_dim + 2 * used_context * input_dim
self.hidden_dim = hidden_dim
self.sense_map = {}
self.classes = []
self.model = None
self.batch_size = 512
def get_config(self):
return {
'model_name': self.model_name,
'input_dim': self.input_dim,
'hidden_dim': self.hidden_dim,
'used_context': self.used_context,
'sense_map': self.sense_map,
'classes': self.classes,
}
@staticmethod
def from_config(config: dict):
clf = ConnectiveSenseClassifier(config['input_dim'], config['used_context'], config['hidden_dim'])
clf.sense_map = config['sense_map']
clf.classes = config['classes']
return clf
def load(self, path):
self.sense_map = json.load(open(os.path.join(path, self.model_name, 'senses.json'), 'r'))
self.classes = []
for sense, sense_id in sorted(self.sense_map.items(), key=lambda x: x[1]):
if len(self.classes) > sense_id:
continue
self.classes.append(sense)
if not os.path.exists(os.path.join(path, self.model_name)):
raise FileNotFoundError("Model not found.")
self.model = tf.keras.models.load_model(os.path.join(path, self.model_name),
compile=False)
def save(self, path):
if not os.path.exists(path):
os.makedirs(path)
self.model.save(os.path.join(path, self.model_name))
json.dump(self.sense_map, open(os.path.join(path, self.model_name, 'senses.json'), 'w'))
def fit(self, docs_train: List[Document], docs_val: List[Document] = None):
if docs_val is None:
raise ValueError("Validation data is missing.")
self.sense_map, self.classes = get_sense_mapping(docs_train)
self.model = get_conn_model(self.in_size, len(self.sense_map), self.hidden_dim, 128)
self.model.summary()
print(self.sense_map, self.classes)
x_train, y_train = generate_pdtb_features(docs_train, self.sense_map, used_context=self.used_context)
x_val, y_val = generate_pdtb_features(docs_val, self.sense_map, used_context=self.used_context)
self.model.fit(x_train, y_train, validation_data=(x_val, y_val), verbose=1, shuffle=True, epochs=20,
batch_size=self.batch_size,
callbacks=[
tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0.001, patience=7, verbose=0,
restore_best_weights=True),
tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.75, patience=3, verbose=0)
])
def score_on_features(self, x, y):
y_pred = self.model.predict(x, batch_size=self.batch_size).argmax(-1)
logger.info("Evaluation: Connective")
logger.info(" Acc : {:<06.4}".format(accuracy_score(y, y_pred)))
prec, recall, f1, support = precision_recall_fscore_support(y, y_pred, average='macro')
logger.info(" Macro: P {:<06.4} R {:<06.4} F1 {:<06.4}".format(prec, recall, f1))
logger.info(" Kappa: {:<06.4}".format(cohen_kappa_score(y, y_pred)))
def score(self, docs: List[Document]):
if not self.model:
raise ValueError("Score of untrained model.")
x, y = generate_pdtb_features(docs, self.sense_map, used_context=self.used_context)
self.score_on_features(x, y)
def parse(self, doc: Document, relations=None, **kwargs):
if not self.model:
raise ValueError("Score of untrained model.")
relations: List[Relation] = []
doc_bert = doc.get_embeddings()
global_id_map = {(s_i, t.local_idx): t.idx for s_i, s in enumerate(doc.sentences) for t in s.tokens}
for sent_i, sent in enumerate(doc.sentences):
for connective_candidate in get_connective_candidates(sent):
conn_idxs = tuple(global_id_map[(sent_i, i)] for i, c in connective_candidate)
features = get_bert_features(conn_idxs, doc_bert, self.used_context)
pred = self.model.predict(np.expand_dims(features, axis=0)).argmax(-1).flatten()[0]
if pred > 0:
conn_tokens = [sent.tokens[i] for i, c in connective_candidate]
relations.append(Relation(
conn=conn_tokens,
type='Explicit',
senses=[self.classes[pred]]
))
return relations
@click.command()
@click.argument('conll-path')
def main(conll_path):
logger = init_logger()
docs_val = load_bert_conll_dataset(os.path.join(conll_path, 'en.dev'),
cache_dir=os.path.join(conll_path, 'en.dev.bert-base-cased.joblib'))
docs_train = load_bert_conll_dataset(os.path.join(conll_path, 'en.train'),
cache_dir=os.path.join(conll_path, 'en.train.bert-base-cased.joblib'))
clf = ConnectiveSenseClassifier(input_dim=docs_val[0].get_embedding_dim(), used_context=2)
logger.info('Train model')
clf.fit(docs_train, docs_val)
logger.info('Evaluation on TRAIN')
clf.score(docs_train)
logger.info('Evaluation on TEST')
clf.score(docs_val)
# logger.info('Parse one document')
# print(docs_val[0].to_json())
print(clf.parse(docs_val[0], []))
preds = [d.with_relations(clf.parse(d)) for d in docs_val]
print_results(evaluate_docs(docs_val, preds))
if __name__ == "__main__":
main()
| 44.042056 | 119 | 0.634589 | 4,954 | 0.525623 | 0 | 0 | 1,240 | 0.131565 | 0 | 0 | 794 | 0.084244 |
f80409abd20022882a95d524c2584bb72123403a | 533 | py | Python | Perfect Squares.py | ngdeva99/Fulcrum | 3a5c69005bbaf2a5aebe13d1907f13790210fb32 | [
"MIT"
] | null | null | null | Perfect Squares.py | ngdeva99/Fulcrum | 3a5c69005bbaf2a5aebe13d1907f13790210fb32 | [
"MIT"
] | null | null | null | Perfect Squares.py | ngdeva99/Fulcrum | 3a5c69005bbaf2a5aebe13d1907f13790210fb32 | [
"MIT"
] | null | null | null | from math import sqrt
class Solution:
def numSquares(self, n: int) -> int:
if n==0:
return 0
dp = [float('inf')]*(n+1)
dp[0] = 0
c = n
n = int(sqrt(n))
a = [i**2 for i in range(1,n+1)]
for i in range(1,len(dp)):
for j in a:
if i-j>=0:
dp[i] = min(dp[i-j]+1,dp[i])
print(dp)
        if dp[c] == float('inf'):
return -1
return dp[c]
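# A minimal usage sketch, not part of the original solution.
if __name__ == '__main__':
    # 12 = 4 + 4 + 4, so the minimum number of perfect squares is 3
    assert Solution().numSquares(12) == 3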
| 22.208333 | 48 | 0.320826 | 523 | 0.981238 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.018762 |
f8053be6ee69e87199ea558062ed1fe681dca092 | 361 | py | Python | busshaming/models/agency.py | katharosada/busshaming | c8d7cd4baf9ff049cda49c92da4d5ca10f68e6a9 | [
"MIT"
] | 42 | 2018-01-20T01:12:25.000Z | 2022-02-02T01:40:17.000Z | busshaming/models/agency.py | katharosada/busshaming | c8d7cd4baf9ff049cda49c92da4d5ca10f68e6a9 | [
"MIT"
] | 2 | 2018-01-24T03:58:17.000Z | 2018-06-10T01:05:57.000Z | busshaming/models/agency.py | katharosada/busshaming | c8d7cd4baf9ff049cda49c92da4d5ca10f68e6a9 | [
"MIT"
] | 7 | 2018-01-24T05:49:13.000Z | 2018-12-03T08:47:43.000Z | from django.db import models
class Agency(models.Model):
gtfs_agency_id = models.CharField(max_length=200)
feed = models.ForeignKey('Feed')
name = models.CharField(max_length=200)
class Meta:
unique_together = ('gtfs_agency_id', 'feed')
def __str__(self):
return f'{self.feed.slug} - {self.gtfs_agency_id} ({self.name})'
| 25.785714 | 72 | 0.67867 | 329 | 0.911357 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.235457 |
f805d1c83458a20d7c8be553923b41b8d8630a7f | 328 | py | Python | sqltask/base/dq.py | mjalo/sqltask | f6eefd624614a464ae5697ac61405416244518e2 | [
"MIT"
] | 10 | 2019-10-09T15:34:13.000Z | 2022-02-21T07:44:03.000Z | sqltask/base/dq.py | mjalo/sqltask | f6eefd624614a464ae5697ac61405416244518e2 | [
"MIT"
] | 23 | 2019-10-09T15:20:01.000Z | 2020-02-08T11:51:24.000Z | sqltask/base/dq.py | mjalo/sqltask | f6eefd624614a464ae5697ac61405416244518e2 | [
"MIT"
] | 4 | 2019-10-09T15:20:51.000Z | 2020-02-11T08:43:03.000Z | from enum import Enum
class Priority(Enum):
MANDATORY = "mandatory"
HIGH = "high"
MEDIUM = "medium"
LOW = "low"
class Source(Enum):
SOURCE = "source"
TRANSFORM = "transform"
LOOKUP = "lookup"
class Category(Enum):
MISSING = "missing"
INCORRECT = "incorrect"
DUPLICATE = "duplicate"
| 15.619048 | 27 | 0.618902 | 297 | 0.905488 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.268293 |
f80628afca18060801db523544822b454ace8ecb | 2,386 | py | Python | main.py | deadpoool69/MediCare | eb45149dd14dc3792ef3ea724c61d46a29718068 | [
"MIT"
] | null | null | null | main.py | deadpoool69/MediCare | eb45149dd14dc3792ef3ea724c61d46a29718068 | [
"MIT"
] | null | null | null | main.py | deadpoool69/MediCare | eb45149dd14dc3792ef3ea724c61d46a29718068 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request, url_for, redirect
from forms import *
from model import generate_recommendations, get_desc
import os
app = Flask(__name__)
SECRET_KEY = os.urandom(32)
app.config['SECRET_KEY'] = SECRET_KEY
@app.route('/', methods=["GET", "POST"])
def landing():
title = 'Neurolens'
form = DiagnosisForm()
form_name = 'Try your tailored neurological treatment'
if request.method == 'POST':
diagnosis = request.form.get("diagnosis")
return redirect(url_for('drugs', diagnosis=diagnosis))
return render_template('landing.html', title = title, form = form, form_name = form_name)
@app.route('/drugs', methods=["GET", "POST"])
def drugs():
title = 'Neurolens'
diagnosis = request.args.get("diagnosis")
if diagnosis == 'Schizophrenia':
form = SchizophreniaDrugForm()
else:
form = DepressionDrugForm()
form_name = 'Select your prescribed drug'
if request.method == 'POST':
drug = request.form.get("drug")
return redirect(url_for('symptoms', diagnosis=diagnosis, drug=drug))
return render_template('drugs.html', title=title, form=form, form_name=form_name)
@app.route('/symptoms', methods=["GET", "POST"])
def symptoms():
title = 'Neurolens'
diagnosis = request.args.get("diagnosis")
drug = request.args.get("drug")
form = SymptomsForm()
form_name = 'Tell us how {drug} has treated you'.format(drug=drug)
if request.method == 'POST':
symptoms = request.form
return redirect(url_for('results', diagnosis=diagnosis, drug=drug, **symptoms))
return render_template('symptoms.html', title=title, form=form, form_name=form_name)
@app.route('/results', methods=["GET", "POST"])
def results():
title = 'Based on your responses, we recommend'
data = request.args
recs = generate_recommendations(data)
drug_1 = recs[0]
# Score, name, description, id list
drug_1 = (int(drug_1[0][0]), drug_1[1], get_desc(drug_1[1]), drug_1[0][1])
drug_2 = recs[1]
drug_2 = (int(drug_2[0][0]), drug_2[1], get_desc(drug_2[1]), drug_2[0][1])
drug_3 = recs[2]
drug_3 = (int(drug_3[0][0]), drug_3[1], get_desc(drug_3[1]), drug_3[0][1])
return render_template('results.html', title = title, drug_1=drug_1, drug_2=drug_2, drug_3=drug_3)
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
| 38.483871 | 102 | 0.670159 | 0 | 0 | 0 | 0 | 2,069 | 0.867142 | 0 | 0 | 480 | 0.201174 |
f8065cbbdc71ae71f6d602d2671a71b28b0eea4a | 2,057 | py | Python | tools/draw_comparison_head_design_choices.py | twangnh/Calibration_mrcnn | e5f3076cefbe35297a403a753bb57e11503db818 | [
"Apache-2.0"
] | 87 | 2020-07-24T01:28:39.000Z | 2021-08-29T08:40:18.000Z | tools/draw_comparison_head_design_choices.py | twangnh/Calibration_mrcnn | e5f3076cefbe35297a403a753bb57e11503db818 | [
"Apache-2.0"
] | 3 | 2020-09-27T12:59:28.000Z | 2022-01-06T13:14:08.000Z | tools/draw_comparison_head_design_choices.py | twangnh/Calibration_mrcnn | e5f3076cefbe35297a403a753bb57e11503db818 | [
"Apache-2.0"
] | 20 | 2020-09-05T04:37:19.000Z | 2021-12-13T02:25:48.000Z | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
labels = ['AP on bin (0,10)', 'AP on bin (10,100)']
baseline = [0.0, 13.3]
fc2_ncm = [6.0, 18.9]
fc2 = [8.6, 22.0]
fc3_rand = [9.1, 18.8]
fc3_ft = [13.2, 23.1]
x = np.arange(len(labels)) # the label locations
width = 0.15 # the width of the bars
matplotlib.rcParams.update({'font.size': 16})
# plt.rc('ytick', labelsize=10)
fig, ax = plt.subplots()
# rects1 = ax.bar(x - width, baseline, width, label='baseline')
# rects2 = ax.bar(x - width/2, fc2_ncm, width, label='2fc_ncm')
# rects3 = ax.bar(x , baseline, fc2, label='baseline')
# rects4 = ax.bar(x + width/2, fc3_rand, width, label='2fc_ncm')
# rects5 = ax.bar(x + width, fc3_ft, width, label='baseline')
# Set position of bar on X axis
r1 = np.arange(len(labels))
r2 = [x + width for x in r1]
r3 = [x + width for x in r2]
r4 = [x + width for x in r3]
r5 = [x + width for x in r4]
# Make the plot
rects1 = ax.bar(r1, baseline, color='#7f6d5f', width=width, edgecolor='white', label='baseline')
rects2 = ax.bar(r2, fc2_ncm, color='#557f2d', width=width, edgecolor='white', label='2fc_ncm')
rects3 = ax.bar(r3, fc2, width=width, edgecolor='white', label='2fc_rand')
rects4 = ax.bar(r4, fc3_rand, width=width, edgecolor='white', label='3fc_rand')
rects5 = ax.bar(r5, fc3_ft, width=width, edgecolor='white', label='3fc_ft')
ax.set_ylim([0,25])
ax.set_xticks([0.3, 1.3])
ax.set_xticklabels(labels)
ax.legend()
def autolabel(rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
autolabel(rects3)
autolabel(rects4)
autolabel(rects5)
fig.tight_layout()
plt.savefig('head_design_choices.eps', format='eps', dpi=1000)
plt.show()
| 31.166667 | 96 | 0.6456 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 741 | 0.360233 |
f807e6a714508c55a5204cce88f3927910a26a1e | 9,916 | py | Python | src/entry.py | akilmarshall/vash-2 | 5307bc414afba24b235ae0ae9b2583c33ea69b1f | [
"MIT"
] | null | null | null | src/entry.py | akilmarshall/vash-2 | 5307bc414afba24b235ae0ae9b2583c33ea69b1f | [
"MIT"
] | null | null | null | src/entry.py | akilmarshall/vash-2 | 5307bc414afba24b235ae0ae9b2583c33ea69b1f | [
"MIT"
] | null | null | null | from datetime import datetime
from itertools import count
from tkinter import *
import tkinter.ttk as ttk
from functools import partial
from tkcalendar import DateEntry
from case import COD, CONTRIES, Case, INCIDENT, ORGANIZATION, POLICESTATION, STATES
from db import referred_other_agency
from preview import CasePreview
class CaseEntry(ttk.Frame):
def __init__(self, parent):
Frame.__init__(self, parent)
row = count(0, 1)
# first name
r = next(row)
ttk.Label(self, text='First Name').grid(row=r, column=0)
self.first_name = StringVar()
self.fname_entry = ttk.Entry(self, textvariable=self.first_name)
self.fname_entry.grid(row=r, column=1)
# last name
r = next(row)
ttk.Label(self, text='Last name').grid(row=r, column=0)
self.last_name = StringVar()
self.lname_entry = ttk.Entry(self, textvariable=self.last_name)
self.lname_entry.grid(row=r, column=1)
# incident date
r = next(row)
ttk.Label(self, text='Incident Date (m/d/y)').grid(row=r, column=0)
self.date = StringVar()
DateEntry(self, textvariable=self.date).grid(row=r, column=1)
# incident type
self.other_incident_death_label = None
self.other_incident_label = None
def variable_incident_entry(value):
if self.incident.get() == 'death':
self.destroy_other_incident()
self.other_incident_death_label = ttk.Label(self, text='Cause of Death')
self.other_incident_death_label.grid(row=incident_row, column=2)
self.cod = StringVar()
self.cod_combobox = ttk.Combobox(self, textvariable=self.cod)
self.cod_combobox['values'] = [''] + COD
self.cod_combobox.set('')
self.cod_combobox.grid(row=incident_row, column=3)
elif self.incident.get() == 'other':
self.destroy_other_death()
self.other_incident = StringVar()
self.other_incident_label = ttk.Label(self, text='Other Incident')
self.other_incident_label.grid(row=incident_row, column=2)
self.other_incident_entry = ttk.Entry(self, textvariable=self.other_incident)
self.other_incident_entry.grid(row=incident_row, column=3)
else:
self.destroy_other_incident()
self.destroy_other_death()
incident_row = r = next(row)
ttk.Label(self, text='Incident Type').grid(row=r, column=0)
self.incident = StringVar()
incident = ttk.Combobox(self, textvariable=self.incident)
incident.bind('<<ComboboxSelected>>', variable_incident_entry)
incident['values'] = [''] + INCIDENT
incident.set('')
incident.grid(row=r, column=1)
# water related?
r = next(row)
ttk.Label(self, text='Water Related?').grid(row=r, column=0)
self.water_related = StringVar()
ttk.Radiobutton(self, text='True', value=True,
variable=self.water_related).grid(row=r, column=1)
ttk.Radiobutton(self, text='False', value=False,
variable=self.water_related).grid(row=r, column=2)
# party size
r = next(row)
ttk.Label(self, text='Party Size').grid(row=r, column=0)
self.party_size = StringVar()
party_size = ttk.Combobox(self, textvariable=self.party_size)
party_size['values'] = list(range(1, 10))
party_size.set(1)
party_size.grid(row=r, column=1)
# incident location
r = next(row)
ttk.Label(self, text='Incident Location').grid(row=r, column=0)
self.location = StringVar()
ttk.Entry(self, textvariable=self.location).grid(row=r, column=1)
# referred by
self.other_referred_label = None
self.other_referred = StringVar()
def referred_entry(_):
if self.referred.get() == 'other':
self.other_referred_label = ttk.Label(self, text='Other Agency')
self.other_referred_label.grid(row=referred_row, column=2)
self.other_referred_entry = ttk.Entry(self, textvariable=self.other_referred)
self.other_referred_entry.grid(row=referred_row, column=3)
else:
self.destroy_other_referred()
referred_row = r = next(row)
ttk.Label(self, text='Referred by').grid(row=r, column=0)
self.referred = StringVar()
referred = ttk.Combobox(self, textvariable=self.referred)
referred.bind('<<ComboboxSelected>>', referred_entry)
referred['values'] = [''] + ORGANIZATION
referred.set('')
referred.grid(row=r, column=1)
# police station
r = next(row)
ttk.Label(self, text='Police Station').grid(row=r, column=0)
self.police = StringVar()
police = ttk.Combobox(self, textvariable=self.police)
police['values'] = [''] + POLICESTATION
police.grid(row=r, column=1)
# visitor type
r = next(row)
ttk.Label(self, text='Visitor Type').grid(row=r, column=0)
self.visitor_type = StringVar()
visitor_type = ttk.Combobox(self, textvariable=self.visitor_type)
visitor_type['values'] = ['land', 'cruise']
visitor_type.grid(row=r, column=1)
# country of origin
self.state_label = None
def state_entry(_):
if self.country.get() == 'United States':
# state of origin
self.state = StringVar()
self.state_label = ttk.Label(self, text='State')
self.state_label.grid(row=country_row, column=2)
self.state_combobox = ttk.Combobox(self, textvariable=self.state)
self.state_combobox['values'] = [''] + STATES
self.state_combobox.set('')
self.state_combobox.grid(row=country_row, column=3)
else:
self.destroy_other_state()
country_row = r = next(row)
ttk.Label(self, text='Country').grid(row=r, column=0)
self.country = StringVar()
country = ttk.Combobox(self, textvariable=self.country)
country.bind('<<ComboboxSelected>>', state_entry)
country['values'] = [''] + CONTRIES
country.set('')
country.grid(row=r, column=1)
# case notes
r = next(row)
self.notes = Text(self, height=10)
ttk.Label(self, text='Notes').grid(row=r, column=0)
self.notes.grid(row=r, column=1)
# Buttons
r = next(row)
ttk.Button(self, text='Submit', command=self.submit).grid(
row=r, column=1)
r = next(row)
ttk.Button(self, text='Clear', command=self.clear).grid(
row=r, column=1)
r = next(row)
def destroy_other_incident(self):
if self.other_incident_label is not None:
self.other_incident_label.destroy()
self.other_incident_entry.destroy()
def destroy_other_death(self):
if self.other_incident_death_label is not None:
self.other_incident_death_label.destroy()
self.cod_combobox.destroy()
def destroy_other_referred(self):
if self.other_referred_label is not None:
self.other_referred_label.destroy()
self.other_referred_entry.destroy()
def destroy_other_state(self):
if self.state_label is not None:
self.state_label.destroy()
self.state_combobox.destroy()
def submit(self):
fname = self.first_name.get()
lname = self.last_name.get()
date = datetime.strptime(self.date.get(), '%m/%d/%y')
incident = self.incident.get()
cod = ''
incident_other = ''
if incident == 'death':
cod = self.cod.get()
elif incident == 'other':
incident_other = self.other_incident_entry.get()
party_size = int(self.party_size.get())
location = self.location.get()
water_related = True if self.water_related.get() == '1' else False
referred = self.referred.get()
referred_other = self.other_referred.get() if referred == 'other' else ''
police = self.police.get()
visitor_type = self.visitor_type.get()
country = self.country.get()
state = self.state.get() if country == 'United States' else ''
notes = self.notes.get('1.0', 'end')
case = Case(
fname,
lname,
date,
incident,
incident_other,
cod,
party_size,
location,
water_related,
referred,
referred_other,
police,
visitor_type,
country,
state,
notes
)
CasePreview(self, case)
# self.clear() # somehow need pass an asyn message and check
# if the write was successfull
def clear(self):
self.first_name.set('')
self.first_name.set('')
date = datetime.today()
y, m, d = date.year, date.month, date.day
self.date.set(f'{m}/{d}/{y - 2000}')
self.last_name.set('')
self.incident.set('')
# self.cod.set('')
self.party_size.set(1)
self.location.set('')
self.water_related.set('')
self.referred.set('')
self.police.set('')
self.visitor_type.set('')
self.country.set('')
# self.state.set('')
self.notes.delete('1.0', END)
self.destroy_other_state()
self.destroy_other_referred()
self.destroy_other_death()
self.destroy_other_incident()
if __name__ == '__main__':
root = Tk()
entry = CaseEntry(root)
entry.pack()
root.mainloop()
| 38.583658 | 93 | 0.588846 | 9,479 | 0.95593 | 0 | 0 | 0 | 0 | 0 | 0 | 903 | 0.091065 |
f8082f1e3f5f385cac811686714cd680277f4584 | 7,406 | py | Python | repro_eval/__main__.py | irgroup/repro_eval | 35a4cf083dbb5f4b29d6ef602a604f0686a537c9 | [
"MIT"
] | 8 | 2020-10-27T02:11:53.000Z | 2022-03-02T11:00:10.000Z | repro_eval/__main__.py | irgroup/repro_eval | 35a4cf083dbb5f4b29d6ef602a604f0686a537c9 | [
"MIT"
] | 2 | 2021-01-25T19:59:39.000Z | 2021-12-07T09:29:01.000Z | repro_eval/__main__.py | irgroup/repro_eval | 35a4cf083dbb5f4b29d6ef602a604f0686a537c9 | [
"MIT"
] | 1 | 2021-04-16T16:21:16.000Z | 2021-04-16T16:21:16.000Z | """
Use repro_eval from the command line with e.g.
python -m repro_eval -t rpd -q qrel_orig -r orig_b rpd_b
python -m repro_eval -t rpd -q qrel_orig -r orig_b orig_a rpd_b rpd_a
python -m repro_eval -t rpd -m rmse -q qrel_orig -r orig_b rpd_b
python -m repro_eval -t rpl -q qrel_orig qrel_rpl -r orig_b rpl_b
python -m repro_eval -t rpl -q qrel_orig qrel_rpl -r orig_b orig_a rpl_b rpl_a
after having installed the Python package.
For other more specific examples also have a look at the README file.
Depending on the provided parameters and input run files,
evaluation measures will be printed.
"""
import argparse
from repro_eval.Evaluator import RpdEvaluator, RplEvaluator
from repro_eval.util import print_simple_line, print_base_adv
from repro_eval.util import arp
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--type')
parser.add_argument('-m', '--measure', nargs='+')
parser.add_argument('-q', '--qrels', nargs='+')
parser.add_argument('-r', '--runs', nargs='+')
args = parser.parse_args()
if args.type in ['rpd', 'reproducibility']:
if len(args.runs) == 4:
rpd_eval = RpdEvaluator(qrel_orig_path=args.qrels[0],
run_b_orig_path=args.runs[0],
run_a_orig_path=args.runs[1],
run_b_rep_path=args.runs[2],
run_a_rep_path=args.runs[3])
if len(args.runs) == 2:
rpd_eval = RpdEvaluator(qrel_orig_path=args.qrels[0],
run_b_orig_path=args.runs[0],
run_a_orig_path=None,
run_b_rep_path=args.runs[1],
run_a_rep_path=None)
rpd_eval.trim()
rpd_eval.evaluate()
measure_list = args.measure if args.measure is not None else []
# KTU
if 'ktu' in measure_list or args.measure is None:
ktu = rpd_eval.ktau_union()
print("Kendall's tau Union (KTU)")
print('------------------------------------------------------------------')
for topic, value in ktu.get('baseline').items():
value_adv = ktu.get('advanced').get(topic) if ktu.get('advanced') is not None else None
print_base_adv(topic, 'KTU', value, value_adv)
value_adv = arp(ktu.get('advanced')) if ktu.get('advanced') is not None else None
print_base_adv('ARP', 'KTU', arp(ktu.get('baseline')), value_adv)
print()
# RBO
if 'rbo' in measure_list or args.measure is None:
rbo = rpd_eval.rbo()
print("Rank-biased Overlap (RBO)")
print('------------------------------------------------------------------')
for topic, value in rbo.get('baseline').items():
value_adv = rbo.get('advanced').get(topic) if rbo.get('advanced') is not None else None
print_base_adv(topic, 'RBO', value, value_adv)
value_adv = arp(rbo.get('advanced')) if rbo.get('advanced') is not None else None
print_base_adv('ARP', 'RBO', arp(rbo.get('baseline')), value_adv)
print()
# RMSE
if 'rmse' in measure_list or args.measure is None:
rmse = rpd_eval.rmse()
print("Root mean square error (RMSE)")
print('------------------------------------------------------------------')
for measure, value in rmse.get('baseline').items():
value_adv = rmse.get('advanced').get(measure) if rmse.get('advanced') is not None else None
print_base_adv(measure, 'RMSE', value, value_adv)
print()
# ER
if 'er' in measure_list or args.measure is None and len(args.runs) == 4:
print("Effect ratio (ER)")
print('------------------------------------------------------------------')
er = rpd_eval.er()
for measure, value in er.items():
print_simple_line(measure, 'ER', value)
print()
# DRI
if 'dri' in measure_list or args.measure is None and len(args.runs) == 4:
print("Delta Relative Improvement (DRI)")
print('------------------------------------------------------------------')
dri = rpd_eval.dri()
for measure, value in dri.items():
print_simple_line(measure, 'DRI', value)
print()
# ttest
if 'ttest' in measure_list or args.measure is None:
pvals = rpd_eval.ttest()
print("Two-tailed paired t-test (p-value)")
print('------------------------------------------------------------------')
for measure, value in pvals.get('baseline').items():
value_adv = pvals.get('advanced').get(measure) if pvals.get('advanced') is not None else None
print_base_adv(measure, 'PVAL', value, value_adv)
print()
if args.type in ['rpl', 'replicability']:
if len(args.runs) == 4:
rpl_eval = RplEvaluator(qrel_orig_path=args.qrels[0],
run_b_orig_path=args.runs[0],
run_a_orig_path=args.runs[1],
run_b_rep_path=args.runs[2],
run_a_rep_path=args.runs[3],
qrel_rpl_path=args.qrels[1])
if len(args.runs) == 2:
rpl_eval = RplEvaluator(qrel_orig_path=args.qrels[0],
run_b_orig_path=args.runs[0],
run_a_orig_path=None,
run_b_rep_path=args.runs[1],
run_a_rep_path=None,
qrel_rpl_path=args.qrels[1])
rpl_eval.trim()
rpl_eval.evaluate()
measure_list = args.measure if args.measure is not None else []
# ER
if 'er' in measure_list or args.measure is None and len(args.runs) == 4:
print("Effect ratio (ER)")
print('------------------------------------------------------------------')
er = rpl_eval.er()
for measure, value in er.items():
print_simple_line(measure, 'ER', value)
print()
# DRI
if 'dri' in measure_list or args.measure is None and len(args.runs) == 4:
print("Delta Relative Improvement (DRI)")
print('------------------------------------------------------------------')
dri = rpl_eval.dri()
for measure, value in dri.items():
print_simple_line(measure, 'DRI', value)
print()
# ttest
if 'ttest' in measure_list or args.measure is None:
pvals = rpl_eval.ttest()
print("Two-tailed unpaired t-test (p-value)")
print('------------------------------------------------------------------')
for measure, value in pvals.get('baseline').items():
value_adv = pvals.get('advanced').get(measure) if pvals.get('advanced') is not None else None
print_base_adv(measure, 'PVAL', value, value_adv)
print()
if __name__ == "__main__":
main()
| 43.309942 | 109 | 0.498785 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,967 | 0.265595 |
f809139d6c632c257d27b2da4aee81ff3ca5dcc2 | 2,377 | py | Python | main.py | juligreen/towerdefense-prototype | 1cdac58acf697ca856a60dec6533caed17acf656 | [
"MIT"
] | null | null | null | main.py | juligreen/towerdefense-prototype | 1cdac58acf697ca856a60dec6533caed17acf656 | [
"MIT"
] | null | null | null | main.py | juligreen/towerdefense-prototype | 1cdac58acf697ca856a60dec6533caed17acf656 | [
"MIT"
] | null | null | null | import math
from game_objects import Turret, Troop
players = []
class Location:
def __init__(self, x: int, y: int):
self.x = x
self.y = y
class Lane:
# for this prototype we are going to imagine our lanes as straight lines
def __init__(self, left_start_location: Location, right_start_location: Location):
self.left_start_location = left_start_location
self.right_start_location = right_start_location
def calculate_distance(entity1: Location, entity2: Location) -> float:
# distance between vectors: https://brilliant.org/wiki/distance-formula/
distance = math.sqrt((entity1.x - entity2.x) ** 2 + (entity1.y + entity2.y) ** 2)
return distance
class Player:
def __init__(self, position: str, location: Location):
self.position = position
self.location = location
self.turrets = []
self.troops = []
self.enemy_player: Player = Player()
self.health = 100
def add_turret(self, grid_location: Location, strenght_level: int):
turret = Turret(grid_location, strenght_level)
self.turrets.append(turret)
def add_troops(self, lane: Lane, count: int, strength_level: int):
troops = []
for _ in range(count):
troop = Troop(lane, strength_level, self.enemy_player.position, self.enemy_player)
troops.append(troop)
self.troops.append(troops)
def turret_fire_check(self):
for turret in self.turrets:
for troop in self.enemy_player.troops:
distance = calculate_distance(turret.location, troop.location)
if distance < turret.range:
turret.attack(troop)
break
def init():
players[0] = Player('left')
players[1] = Player('right')
players[0].enemy_player = players[1]
players[1].enemy_player = players[0]
init()
while True:
# most of this is pseudocode, as I have no way of handling user input currently
for index, player in enumerate(players):
if 'player places turret':
player.add_turret(Location(1, 1))
if 'player places troops':
player.add_troops('bla')
for troop in player.troops:
troop.move()
player.turret_fire_check()
if player.health <= 0:
print(f'Player {index} won the game!')
| 30.088608 | 94 | 0.636096 | 1,401 | 0.589398 | 0 | 0 | 0 | 0 | 0 | 0 | 316 | 0.132941 |
f8094b25e0893a5bce69fe2d108d090003595a0e | 7,110 | py | Python | bib_processing.py | GAIGResearch/GAIGResearch.github.io | 90d0555348ad8f3f500b6480168ad65fa0226dce | [
"MIT"
] | null | null | null | bib_processing.py | GAIGResearch/GAIGResearch.github.io | 90d0555348ad8f3f500b6480168ad65fa0226dce | [
"MIT"
] | null | null | null | bib_processing.py | GAIGResearch/GAIGResearch.github.io | 90d0555348ad8f3f500b6480168ad65fa0226dce | [
"MIT"
] | 2 | 2019-07-09T11:08:15.000Z | 2020-12-04T14:55:00.000Z | import os
from pathlib import Path
from difflib import SequenceMatcher
supported_bibtex_types = {"article", "book", "booklet", "inbook", "incollection", "inproceedings", "manual",
"mastersthesis", "misc", "phdthesis", "proceedings", "techreport", "unpublished"}
supported_fields = ["author", "title", "year", "month", "pages", "note",
"journal", "booktitle",
"volume", "number", "series", "edition",
"editor", "publisher", "address",
"howpublished", "type",
"chapter",
"organization", "school", "institution"]
extra_fields = ["doi", "issn", "isbn", "keywords", "abstract", "url", "archivePrefix", "eprint", "timestamp", "biburl",
"bibsource"]
data_path = Path("_data/papers.yml")
bib_path = Path("bibfiles")
year_from = 2017
similarity_threshold = 0.8
def find_all_files(path_to_search):
"""Recursively find all bib files in root path given"""
list_of_files = os.listdir(path_to_search)
all_files = []
# Iterate over all the entries
for e in list_of_files:
# Create full path
full_path = path_to_search / e
# If entry is a directory then get the list of files in this directory
if os.path.isdir(full_path):
all_files = all_files + find_all_files(full_path)
elif full_path.with_suffix(".bib"):
all_files.append(full_path)
return all_files
def process_entry(entry_to_process):
"""
Turns a string of an entry into a dictionary mapping from fields to field values
:param entry_to_process
:return: dictionary.
"""
dict_entry = {}
entry_lines = entry_to_process.split("\n")
first_line = entry_lines[0].split("=")
entry_type = first_line[0].replace("@", "")
entry_id = first_line[1]
# Type validation
if entry_type.lower() not in supported_bibtex_types:
print("Type " + entry_type + " not supported for bibtex entry " + entry_id)
return dict_entry
dict_entry["id"] = entry_id
dict_entry["type"] = entry_type
# Process the rest of the fields
field_value = "" # Keep this up here to be able to access previous values in case of multi-line field
field = ""
for l in entry_lines:
split_line = l.split("=")
if len(split_line) == 1 and field != "": # No = found on this line, it's a multi-line field
field_value += " " + split_line[0].strip()
dict_entry[field] = field_value.strip()
else:
field = split_line[0].strip()
field_value = split_line[1].strip()
if field.lower() in supported_fields or field.lower() in extra_fields:
if field.lower() == "pages" and "--" not in field_value:
field_value = field_value.replace("-", "--")
dict_entry[field] = field_value
# Try to find pdf of this paper
pdf = find_pdf(entry_id, dict_entry["year"])
dict_entry["pdf"] = str(pdf).lower()
return dict_entry
def find_pdf(entry_id, year):
"""
Returns true if a pdf for this paper exists in the pdf/pub/year directory (must have name as paper ID)
"""
return os.path.isfile("pdf/pub/" + year + "/" + entry_id + ".pdf")
def output_entries(entries):
"""
Prints the given bibtex entries into yaml supported format
"""
with open(data_path.absolute(), 'w+', encoding='utf-8') as wf:
for entry in entries:
if int(entry["year"]) < year_from:
continue
wf.write("- id: " + entry["id"] + "\n")
for e in entry:
if e != "id":
if ":" in entry[e]:
entry[e] = '"' + entry[e] + '"'
wf.write(" " + e + ": " + entry[e] + "\n")
def check_equality(entry1, entry2):
"""
Checks if 2 entries are the same
"""
sim_fields = 0
common_fields = 0
for field1 in entry1:
for field2 in entry2:
if field1 == field2:
common_fields += 1
if similar(entry1[field1], entry2[field2]) >= similarity_threshold:
sim_fields += 1
if common_fields == 0:
return False
if sim_fields / common_fields >= similarity_threshold:
return True
return False
def similar(a, b):
"""
Checks if 2 strings are similar, returns a similarity measure.
"""
return SequenceMatcher(None, a, b).ratio()
def process_yml_entries(lines):
"""
Processes entries in yml format
:param lines: list of lines from yml file to process
:return: list of entries as dictionaries
"""
entry_list = []
entry = {}
ln = 0
for line in lines:
if "- id:" in line or ln == len(lines) - 1: # Starting a new entry
if len(entry) > 0:
entry_list.append(entry)
entry = {}
line = line.replace("\"", "")
if "- id:" in line:
line = line[1:] # Ignore first dash
stripped_line = line.strip()
if stripped_line != "": # Adding to current entry
split_line = stripped_line.split(':')
entry[split_line[0].strip()] = ':'.join(split_line[1:]).strip()
ln += 1
return entry_list
def main():
"""
Main function to process bibtex entries in a given path and output a file in yaml supported format.
"""
# Read in current entries
lines = data_path.read_text(encoding='utf-8').split('\n')
entries = process_yml_entries(lines)
# Find new entries
files = find_all_files(bib_path)
for bibfile in files:
entry = ""
full_pth = Path(bibfile)
lines = full_pth.read_text(encoding='utf-8').split('\n')
line_number = 0
for line in lines:
if "@" in line or line_number == len(lines)-1: # Starting a new entry
if entry != "":
entry = entry.translate({ord(c): None for c in '\\"{}~\'"'})
processed_entry = process_entry(entry)
entries.append(processed_entry)
entry = ""
if "@" in line:
line = line.replace("{", "=")
stripped_line = line.strip()
if stripped_line != "": # Adding to current entry
if stripped_line.endswith(","):
stripped_line = stripped_line[:-1]
entry += stripped_line + "\n"
line_number += 1
# Check for duplication
duplicate_entries = []
for i in range(len(entries)-1):
for j in range(i+1, len(entries)):
if check_equality(entries[i], entries[j]):
print("Duplicate found: " + entries[i]["id"] + " = " + entries[j]["id"])
duplicate_entries.append(j)
duplicate_entries.sort()
for i in range(len(duplicate_entries)):
e = duplicate_entries[i] - i
del entries[e]
# Finally, save entries
output_entries(entries)
if __name__ == "__main__":
main()
| 33.696682 | 119 | 0.568636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,093 | 0.294374 |
f80a066211d5845a2d19529db9ed13271bcad6dc | 2,105 | py | Python | browser.py | 7Cortez7/instagram-giveaway-bot | 43246e3ded06ea3a6cbf2ef20164b229fe90ee0e | [
"MIT"
] | null | null | null | browser.py | 7Cortez7/instagram-giveaway-bot | 43246e3ded06ea3a6cbf2ef20164b229fe90ee0e | [
"MIT"
] | null | null | null | browser.py | 7Cortez7/instagram-giveaway-bot | 43246e3ded06ea3a6cbf2ef20164b229fe90ee0e | [
"MIT"
] | null | null | null | from selenium import webdriver
import time
import userdata as udata
import random
randomUsers = set()
class Browser:
def __init__(self, link):
self.link = link
self.browser = webdriver.Chrome()
Browser.Instagram(self)
Browser.Login(self)
Browser.goFollowers(self)
def Instagram(self):
self.browser.get(self.link)
time.sleep(2)
def goFollowers(self):
self.browser.find_element_by_xpath("//*[@id=\"react-root\"]/section/main/div/header/section/ul/li[2]/a").click()
time.sleep(5)
Browser.scrollDown(self)
followers = self.browser.find_elements_by_css_selector("._7UhW9.xLCgt.qyrsm.KV-D4.se6yk.T0kll")
for follower in followers:
randomUsers.add(follower.text)
print("Çekiliş başlıyor! {totaluser} kişi katılmaya hak kazandı.".format(totaluser = len(randomUsers)))
time.sleep(5)
randomUsersList = list(randomUsers)
print("Kazanan:", random.choice(randomUsersList))
time.sleep(5)
exit()
def scrollDown(self):
jsCode = """
page = document.querySelector(".isgrP");
page.scrollTo(0, page.scrollHeight);
var pageEnd = page.scrollHeight;
return pageEnd;
"""
pageEnd = self.browser.execute_script(jsCode)
while True:
end = pageEnd
time.sleep(1)
pageEnd = self.browser.execute_script(jsCode)
if end == pageEnd:
break
def Login(self):
username = self.browser.find_element_by_name("username")
password = self.browser.find_element_by_name("password")
loginBtn = self.browser.find_element_by_css_selector("#loginForm > div > div:nth-child(3) > button > div")
username.send_keys(udata.username)
password.send_keys(udata.password)
time.sleep(1)
loginBtn.click()
time.sleep(2)
self.browser.get(self.link + udata.username)
time.sleep(2)
| 31.893939 | 121 | 0.59715 | 1,979 | 0.937027 | 0 | 0 | 0 | 0 | 0 | 0 | 434 | 0.205492 |
f80b2ee49671a1d6b544de429dd777345fa6df27 | 246 | py | Python | HackerRank/PythonHackerRankSolutions/Numpy/LinearAlgebra.py | accidentalgenius09/competitive-programming-solution | 210746a7928dcd601ad9a735de52cf7135851070 | [
"MIT"
] | 8 | 2020-08-03T01:53:13.000Z | 2022-01-09T14:47:58.000Z | HackerRank/PythonHackerRankSolutions/Numpy/LinearAlgebra.py | accidentalgenius09/competitive-programming-solution | 210746a7928dcd601ad9a735de52cf7135851070 | [
"MIT"
] | null | null | null | HackerRank/PythonHackerRankSolutions/Numpy/LinearAlgebra.py | accidentalgenius09/competitive-programming-solution | 210746a7928dcd601ad9a735de52cf7135851070 | [
"MIT"
] | 4 | 2020-09-29T11:28:53.000Z | 2021-06-02T15:34:55.000Z | '''
Title : Linear Algebra
Subdomain : Numpy
Domain : Python
Author : codeperfectplus
Created : 10 May 2020
'''
import numpy
n=int(input())
a=numpy.array([input().split() for _ in range(n)],float)
print(round(numpy.linalg.det(a),2))
| 18.923077 | 56 | 0.670732 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 123 | 0.5 |
f80c608952146d7fe3d7ed75d5f4bc0dc27ba8ce | 774 | py | Python | pyretri/index/dim_processor/dim_processors_impl/l2_normalize.py | dongan-beta/PyRetri | 8756d5d5813a5211b58855373b6c6cd33d7a11f6 | [
"Apache-2.0"
] | 1,063 | 2020-04-21T12:42:05.000Z | 2022-03-31T06:32:50.000Z | pyretri/index/dim_processor/dim_processors_impl/l2_normalize.py | dongan-beta/PyRetri | 8756d5d5813a5211b58855373b6c6cd33d7a11f6 | [
"Apache-2.0"
] | 39 | 2020-05-07T07:24:19.000Z | 2022-02-02T23:49:23.000Z | pyretri/index/dim_processor/dim_processors_impl/l2_normalize.py | dongan-beta/PyRetri | 8756d5d5813a5211b58855373b6c6cd33d7a11f6 | [
"Apache-2.0"
] | 174 | 2020-04-26T04:33:11.000Z | 2022-03-17T02:58:45.000Z | # -*- coding: utf-8 -*-
import numpy as np
from ..dim_processors_base import DimProcessorBase
from ...registry import DIMPROCESSORS
from sklearn.preprocessing import normalize
from typing import Dict, List
@DIMPROCESSORS.register
class L2Normalize(DimProcessorBase):
"""
L2 normalize the features.
"""
default_hyper_params = dict()
def __init__(self, feature_names: List[str], hps: Dict or None = None):
"""
Args:
feature_names (list): a list of features names to be loaded.
hps (dict): default hyper parameters in a dict (keys, values).
"""
super(L2Normalize, self).__init__(feature_names, hps)
def __call__(self, fea: np.ndarray) -> np.ndarray:
return normalize(fea, norm="l2")
| 27.642857 | 75 | 0.666667 | 539 | 0.696382 | 0 | 0 | 563 | 0.72739 | 0 | 0 | 246 | 0.317829 |
f80ccbd3e3b59f33892aafb3cc6b1f95f360dd40 | 1,631 | py | Python | test_csv_write.py | wandyrandy/Groupme-Group-Stats-Report | 25a59b715a7555540695639de81db390f09eb122 | [
"MIT"
] | 2 | 2019-08-13T21:50:32.000Z | 2019-08-14T00:49:29.000Z | test_csv_write.py | wandyrandy/Groupme-Group-Stats-Report | 25a59b715a7555540695639de81db390f09eb122 | [
"MIT"
] | null | null | null | test_csv_write.py | wandyrandy/Groupme-Group-Stats-Report | 25a59b715a7555540695639de81db390f09eb122 | [
"MIT"
] | null | null | null | import csv
import person
from random import randrange
headers = ['Name', 'Messages', 'Char Count', 'Likes Given', 'Likes Received', 'Image URL']
#tester code
people = ['bob', 'joe', 'gmo']
bob = person.Person(111, 'bob', 'www.bob.com', people)
joe = person.Person(222, 'joe', 'www.joe.com', people)
gmo = person.Person(333, 'gmo', 'www.gmo.com', people)
members = [bob, joe, gmo]
bob.msgs = randrange(40)
bob.likes_given = randrange(40)
bob.likes_received = randrange(40)
bob.chars = randrange(40)
bob.friends['gmo'] = randrange(40)
bob.friends['joe'] = randrange(40)
bob.friends['bob'] = randrange(40)
joe.msgs = randrange(40)
joe.likes_given = randrange(40)
joe.likes_received = randrange(40)
joe.chars = randrange(40)
joe.friends['gmo'] = randrange(40)
joe.friends['joe'] = randrange(40)
joe.friends['bob'] = randrange(40)
gmo.msgs = randrange(40)
gmo.likes_given = randrange(40)
gmo.likes_received = randrange(40)
gmo.chars = randrange(40)
gmo.friends['gmo'] = randrange(40)
gmo.friends['joe'] = randrange(40)
gmo.friends['bob'] = randrange(40)
# loop through the list of members and add their names to the headers
for member in members:
headers.append(member.name)
with open('raw_groupme_data.csv', 'w') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(headers)
for member in members:
row = [member.name, member.msgs, member.chars, member.likes_given,
member.likes_received, member.image_url]
for friend in member.friends:
row.append(member.friends[friend])
csv_writer.writerow(row) | 31.980392 | 91 | 0.676272 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 290 | 0.177805 |
f80e19316ce840fcc2138b746a64f522d8f4566b | 866 | py | Python | app/wqFull/G200/testAll.py | fkwai/geolearn | 30cb4353d22af5020a48100d07ab04f465a315b0 | [
"MIT"
] | null | null | null | app/wqFull/G200/testAll.py | fkwai/geolearn | 30cb4353d22af5020a48100d07ab04f465a315b0 | [
"MIT"
] | null | null | null | app/wqFull/G200/testAll.py | fkwai/geolearn | 30cb4353d22af5020a48100d07ab04f465a315b0 | [
"MIT"
] | 2 | 2021-04-04T02:45:59.000Z | 2022-03-19T09:41:39.000Z |
import pandas as pd
from hydroDL.data import usgs, gageII, gridMET, ntn, GLASS, transform, dbBasin
import numpy as np
import matplotlib.pyplot as plt
from hydroDL.post import axplot, figplot
from hydroDL import kPath, utils
import json
import os
import importlib
from hydroDL.master import basinFull
from hydroDL.app.waterQuality import WRTDS
# dataName = 'G200N'
# labelLst = ['QFPRT2C', 'FPRT2C', 'FPRT2QC', 'QFPT2C', 'QFRT2C']
dataName = 'G200'
labelLst = ['QFPRT2C']
trainLst = ['rmR20', 'rmL20', 'rmRT20', 'rmYr5', 'B10']
testLst = ['pkR20', 'pkL20', 'pkRT20', 'pkYr5', 'A10']
DF = dbBasin.DataFrameBasin(dataName)
for label in labelLst:
for trainSet, testSet in zip(trainLst, testLst):
outName = '{}-{}-{}'.format(dataName, label, trainSet)
print(outName)
yP, ycP = basinFull.testModel(outName, DF=DF, testSet=testSet, ep=500)
| 29.862069 | 78 | 0.706697 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 178 | 0.205543 |
f80f8be872541cb1fed210e79dd3fff53a87f8a4 | 9,733 | py | Python | tests/test_Dirichlet_NL_Poisson.py | bond-anton/BDPoisson1D | 538cedc187ce83e90f340cc085738671d325d2e1 | [
"Apache-2.0"
] | null | null | null | tests/test_Dirichlet_NL_Poisson.py | bond-anton/BDPoisson1D | 538cedc187ce83e90f340cc085738671d325d2e1 | [
"Apache-2.0"
] | 2 | 2017-07-21T22:10:19.000Z | 2018-07-14T21:39:07.000Z | tests/test_Dirichlet_NL_Poisson.py | bond-anton/BDPoisson1D | 538cedc187ce83e90f340cc085738671d325d2e1 | [
"Apache-2.0"
] | null | null | null | import math as m
import numpy as np
from BDMesh import Mesh1DUniform
from BDFunction1D import Function
from BDFunction1D.Functional import Functional
from BDFunction1D.Interpolation import InterpolateFunction
from BDPoisson1D.DirichletNonLinear import dirichlet_non_linear_poisson_solver_arrays
from BDPoisson1D.DirichletNonLinear import dirichlet_non_linear_poisson_solver
from BDPoisson1D.DirichletNonLinear import dirichlet_non_linear_poisson_solver_mesh_arrays
from BDPoisson1D.DirichletNonLinear import dirichlet_non_linear_poisson_solver_mesh
from BDPoisson1D.DirichletNonLinear import dirichlet_non_linear_poisson_solver_recurrent_mesh
from BDPoisson1D.DirichletNonLinear import dirichlet_non_linear_poisson_solver_amr
import unittest
class testPsi(Function):
def evaluate_point(self, x):
return m.exp(-x * 3.0)
class testF(Functional):
def __init__(self, Nd, kT, f):
super(testF, self).__init__(f)
self.Nd = Nd
self.kT = kT
def evaluate_point(self, x):
return self.Nd * (1 - (m.exp(-self.f.evaluate_point(x) / self.kT)))
class testdFdPsi(Functional):
def __init__(self, Nd, kT, f):
super(testdFdPsi, self).__init__(f)
self.Nd = Nd
self.kT = kT
def evaluate_point(self, x):
return self.Nd / self.kT * m.exp(-self.f.evaluate_point(x) / self.kT)
class TestDirichletNL(unittest.TestCase):
def setUp(self):
self.Nd = 1.0
self.kT = 0.05
self.bc1 = 1.0
self.bc2 = 0.0
def test_dirichlet_poisson_solver_arrays(self):
start = 0.0
stop = 4.0
nodes = np.linspace(start, stop, num=51, endpoint=True, dtype=np.float)
Psi = testPsi()
f = testF(self.Nd, self.kT, Psi)
dfdPsi = testdFdPsi(self.Nd, self.kT, Psi)
for i in range(100):
result_1 = np.asarray(dirichlet_non_linear_poisson_solver_arrays(nodes, Psi.evaluate(nodes),
f.evaluate(nodes),
dfdPsi.evaluate(nodes),
bc1=1, bc2=0, j=1, w=1))
Psi = InterpolateFunction(nodes, result_1[:, 0])
f.f = Psi
dfdPsi.f = Psi
nodes = np.linspace(start, stop, num=101, endpoint=True, dtype=np.float)
Psi = testPsi()
f.f = Psi
dfdPsi.f = Psi
for i in range(100):
result_2 = np.asarray(dirichlet_non_linear_poisson_solver_arrays(nodes, Psi.evaluate(nodes),
f.evaluate(nodes),
dfdPsi.evaluate(nodes),
bc1=1, bc2=0, j=1, w=1))
Psi = InterpolateFunction(nodes, result_2[:, 0])
f.f = Psi
dfdPsi.f = Psi
self.assertTrue(max(abs(result_2[:, 2])) < max(abs(result_1[:, 2])))
def test_dirichlet_poisson_solver(self):
start = 0.0
stop = 4.0
nodes = np.linspace(start, stop, num=51, endpoint=True, dtype=np.float)
Psi = testPsi()
f = testF(self.Nd, self.kT, Psi)
dfdPsi = testdFdPsi(self.Nd, self.kT, Psi)
for i in range(100):
Psi = dirichlet_non_linear_poisson_solver(nodes, Psi, f, dfdPsi,
bc1=1, bc2=0, j=1, w=1)
f.f = Psi
dfdPsi.f = Psi
error_1 = np.asarray(Psi.error(nodes))
nodes = np.linspace(start, stop, num=101, endpoint=True, dtype=np.float)
Psi = testPsi()
f.f = Psi
dfdPsi.f = Psi
for i in range(100):
Psi = dirichlet_non_linear_poisson_solver(nodes, Psi, f, dfdPsi,
bc1=1, bc2=0, j=1, w=1)
f.f = Psi
dfdPsi.f = Psi
error_2 = np.asarray(Psi.error(nodes))
self.assertTrue(max(abs(error_2)) < max(abs(error_1)))
def test_dirichlet_poisson_solver_mesh_arays(self):
start = 0.0
stop = 4.0
step = 0.5
mesh_1 = Mesh1DUniform(start, stop, boundary_condition_1=1, boundary_condition_2=0, physical_step=step)
Psi = testPsi()
f = testF(self.Nd, self.kT, Psi)
dfdPsi = testdFdPsi(self.Nd, self.kT, Psi)
for i in range(100):
dirichlet_non_linear_poisson_solver_mesh_arrays(mesh_1,
Psi.evaluate(mesh_1.physical_nodes),
f.evaluate(mesh_1.physical_nodes),
dfdPsi.evaluate(mesh_1.physical_nodes),
w=1)
Psi = InterpolateFunction(mesh_1.physical_nodes, mesh_1.solution)
f.f = Psi
dfdPsi.f = Psi
step = 0.1
mesh_2 = Mesh1DUniform(start, stop, boundary_condition_1=1, boundary_condition_2=0, physical_step=step)
Psi = testPsi()
f.f = Psi
dfdPsi.f = Psi
for i in range(100):
dirichlet_non_linear_poisson_solver_mesh_arrays(mesh_2,
Psi.evaluate(mesh_2.physical_nodes),
f.evaluate(mesh_2.physical_nodes),
dfdPsi.evaluate(mesh_2.physical_nodes),
w=1)
Psi = InterpolateFunction(mesh_2.physical_nodes, mesh_2.solution)
f.f = Psi
dfdPsi.f = Psi
self.assertTrue(max(abs(np.asarray(mesh_2.residual))) < max(abs(np.asarray(mesh_1.residual))))
def test_dirichlet_poisson_solver_mesh(self):
start = 0.0
stop = 4.0
step = 0.5
mesh_1 = Mesh1DUniform(start, stop, boundary_condition_1=1, boundary_condition_2=0, physical_step=step)
Psi = testPsi()
f = testF(self.Nd, self.kT, Psi)
dfdPsi = testdFdPsi(self.Nd, self.kT, Psi)
for i in range(100):
dirichlet_non_linear_poisson_solver_mesh(mesh_1, Psi, f, dfdPsi, w=1)
Psi = InterpolateFunction(mesh_1.physical_nodes, mesh_1.solution)
f.f = Psi
dfdPsi.f = Psi
step = 0.1
mesh_2 = Mesh1DUniform(start, stop, boundary_condition_1=1, boundary_condition_2=0, physical_step=step)
Psi = testPsi()
f.f = Psi
dfdPsi.f = Psi
for i in range(100):
dirichlet_non_linear_poisson_solver_mesh(mesh_2, Psi, f, dfdPsi, w=1)
Psi = InterpolateFunction(mesh_2.physical_nodes, mesh_2.solution)
f.f = Psi
dfdPsi.f = Psi
self.assertTrue(max(abs(np.asarray(mesh_2.residual))) < max(abs(np.asarray(mesh_1.residual))))
def test_dirichlet_poisson_solver_recurrent_mesh(self):
start = 0.0
stop = 4.0
step = 0.5
threshold = 1e-6
max_iter = 1000
mesh_1 = Mesh1DUniform(start, stop, boundary_condition_1=1, boundary_condition_2=0, physical_step=step)
Psi = testPsi()
f = testF(self.Nd, self.kT, Psi)
dfdPsi = testdFdPsi(self.Nd, self.kT, Psi)
dirichlet_non_linear_poisson_solver_recurrent_mesh(mesh_1, Psi, f, dfdPsi,
max_iter=max_iter, threshold=threshold)
self.assertTrue(mesh_1.integrational_residual < threshold)
def test_dirichlet_poisson_solver_mesh_amr(self):
Psi = testPsi()
f = testF(self.Nd, self.kT, Psi)
dfdPsi = testdFdPsi(self.Nd, self.kT, Psi)
start = 0.0
stop = 5
step = 0.433
bc1 = 1
bc2 = 0
residual_threshold = 1.5e-3
int_residual_threshold = 4e-4
mesh_refinement_threshold = 1e-7
max_iter = 1000
max_level = 20
# print('start')
sol = dirichlet_non_linear_poisson_solver_amr(start, stop, step, Psi, f, dfdPsi, bc1, bc2,
max_iter=max_iter, residual_threshold=residual_threshold,
int_residual_threshold=int_residual_threshold,
max_level=max_level,
mesh_refinement_threshold=mesh_refinement_threshold)
int_residual = np.trapz(sol.error(sol.x), sol.x)
self.assertTrue(int_residual < int_residual_threshold)
self.assertTrue(max(abs(np.asarray(sol.error(sol.x)))) < residual_threshold)
residual_threshold = 1.5e-6
int_residual_threshold = 1.5e-4
mesh_refinement_threshold = 1e-5
max_iter = 1000
max_level = 20
sol = dirichlet_non_linear_poisson_solver_amr(start, stop, step, Psi, f, dfdPsi, bc1, bc2,
max_iter=max_iter, residual_threshold=residual_threshold,
int_residual_threshold=int_residual_threshold,
max_level=max_level,
mesh_refinement_threshold=mesh_refinement_threshold)
int_residual = np.trapz(sol.error(sol.x), sol.x)
self.assertTrue(int_residual < int_residual_threshold)
self.assertTrue(max(abs(np.asarray(sol.error(sol.x)))) < residual_threshold)
| 44.646789 | 111 | 0.552348 | 8,976 | 0.922223 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.001644 |
f810064772dd89a3265f0776de267483682a707d | 23,282 | py | Python | trtools/dumpSTR/tests/test_dumpSTR.py | Kulivox/TRTools | ea05f9126f5145405cced8fd85821ce929657b3a | [
"MIT"
] | 14 | 2020-04-20T15:38:52.000Z | 2022-02-07T11:45:23.000Z | trtools/dumpSTR/tests/test_dumpSTR.py | Kulivox/TRTools | ea05f9126f5145405cced8fd85821ce929657b3a | [
"MIT"
] | 74 | 2020-03-02T23:34:53.000Z | 2022-03-21T18:32:10.000Z | trtools/dumpSTR/tests/test_dumpSTR.py | Kulivox/TRTools | ea05f9126f5145405cced8fd85821ce929657b3a | [
"MIT"
] | 15 | 2018-10-29T19:41:33.000Z | 2020-02-21T18:41:51.000Z | import argparse
import gzip
import os
import pytest
from ..dumpSTR import *
from trtools.testsupport.utils import assert_same_vcf, assert_same_file
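# Note: main(args) used throughout is dumpSTR's entry point (imported via the
# wildcard import above); it returns 0 on success and 1 on error, which is
# what the retcode asserts below check.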
# Set up base argparser
@pytest.fixture
def args(tmpdir):
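    """Build the full set of dumpSTR arguments, standing in for the parsed
    CLI namespace. All filters are off by default; each test overrides only
    the options it exercises and points args.vcf at a test file."""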
args = argparse.ArgumentParser()
args.vcf = None
args.vcftype = "auto"
args.out = str(tmpdir / "test")
args.zip = False
args.min_locus_callrate = None
args.min_locus_hwep = None
args.min_locus_het = None
args.max_locus_het = None
args.use_length = False
args.filter_regions = None
args.filter_regions_names = None
args.filter_hrun = False
args.drop_filtered = False
args.hipstr_min_call_DP = None
args.hipstr_max_call_DP = None
args.hipstr_min_call_Q = None
args.hipstr_max_call_flank_indel = None
args.hipstr_max_call_stutter = None
args.hipstr_min_supp_reads = None
args.gangstr_expansion_prob_het = None
args.gangstr_expansion_prob_hom = None
args.gangstr_expansion_prob_total = None
args.gangstr_filter_span_only = False
args.gangstr_filter_spanbound_only = False
args.gangstr_filter_badCI = None
#args.gangstr_require_support = None
args.gangstr_readlen = None
args.gangstr_min_call_DP = None
args.gangstr_max_call_DP = None
args.gangstr_min_call_Q = None
args.advntr_min_call_DP = None
args.advntr_max_call_DP = None
args.advntr_min_spanning = None
args.advntr_min_flanking = None
args.advntr_min_ML = None
args.eh_min_ADFL = None
args.eh_min_ADIR = None
args.eh_min_ADSP = None
args.eh_min_call_LC = None
args.eh_max_call_LC = None
args.popstr_min_call_DP = None
args.popstr_max_call_DP = None
args.popstr_require_support = None
args.num_records = None
args.die_on_warning = False
args.verbose = False
return args
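# For orientation: a test that sets, e.g., args.gangstr_min_call_DP = 10 is
# roughly equivalent to a command line like
#     dumpSTR --vcf in.vcf.gz --out out --gangstr-min-call-DP 10
# (flag spellings here are illustrative; the CLI parser is not under test).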
@pytest.fixture
def testDumpSTRdir(vcfdir):
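    """Directory containing the VCF files used throughout these tests."""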
return vcfdir + "/dumpSTR_vcfs"
# Test no such file or directory
def test_WrongFile(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "test_non_existent.vcf")
if os.path.exists(fname):
os.remove(fname)
args.vcf = fname
retcode = main(args)
assert retcode==1
# Test a file that already has Filter IDs defined
# that we want to use but that have the wrong number or type.
# Since cyvcf2 currently won't let us overwrite them,
# error out
def test_BadPreexistingFields(args, testDumpSTRdir, capsys):
fname = os.path.join(testDumpSTRdir, "bad_preexisting_hrun.vcf")
args.vcf = fname
retcode = main(args)
assert retcode == 1
captured = capsys.readouterr()
assert "HRUN" in captured.err
fname = os.path.join(testDumpSTRdir, "bad_preexisting_het_hwep.vcf")
args.vcf = fname
retcode = main(args)
assert retcode == 1
captured = capsys.readouterr()
assert "HWEP" in captured.err and "HET" in captured.err
fname = os.path.join(testDumpSTRdir, "bad_preexisting_filter_ac_refac.vcf")
args.vcf = fname
retcode = main(args)
assert retcode == 1
captured = capsys.readouterr()
assert ("FILTER" in captured.err and "AC" in captured.err
and "REFAC" in captured.err)
# Test a file that already has a HWE Filter ID defined.
# If the field is of the correct type and number, as in this case,
# we overwrite it and emit a warning instead of failing.
# This allows dumpSTR to be run multiple times in succession
# on the same file.
def test_WorrisomePreexistingFilter(args, testDumpSTRdir, capsys):
fname = os.path.join(testDumpSTRdir, "worrisome_preexisting_filter.vcf")
args.vcf = fname
args.min_locus_hwep = 0.5
retcode = main(args)
assert retcode == 0
captured = capsys.readouterr()
assert 'HWE0.5' in captured.err
# Test that basic inputs and threshold filters work for each genotyper's file
def test_GangSTRFile(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "trio_chr21_gangstr.sorted.vcf.gz")
args.vcf = fname
args.num_records = 10
args.gangstr_min_call_DP = 10
args.gangstr_max_call_DP = 20
args.gangstr_min_call_Q = 0.99
args.gangstr_filter_span_only = True
args.gangstr_filter_spanbound_only = True
args.gangstr_filter_badCI = True
#args.gangstr_require_support = 2
args.gangstr_readlen = 100
retcode = main(args)
assert retcode==0
# Test expansion options
args.gangstr_expansion_prob_het = 0.8
retcode = main(args)
assert retcode==0
args.gangstr_expansion_prob_het = None
args.gangstr_expansion_prob_hom = 0.8
retcode = main(args)
assert retcode==0
args.gangstr_expansion_prob_het = None
args.gangstr_expansion_prob_hom = None
args.gangstr_expansion_prob_total = 0.8
retcode = main(args)
assert retcode==0
def test_HipSTRFile(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "trio_chr21_hipstr.sorted.vcf.gz")
args.vcf = fname
args.num_records = 10
args.hipstr_min_call_DP = 10
args.hipstr_max_call_DP = 100
args.hipstr_min_call_Q = 0.9
args.hipstr_min_supp_reads = 2
args.hipstr_max_call_flank_indel = 0.05
args.hipstr_max_call_stutter = 0.01
args.vcftype = 'hipstr'
retcode = main(args)
assert retcode==0
def test_AdVNTRFile(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "test_advntr.vcf.gz")
args.vcf = fname
args.num_records = 10
args.advntr_min_call_DP = 10
args.advntr_max_call_DP = 20
args.advntr_min_spanning = 2
args.advntr_min_flanking = 2
args.advntr_min_ML = 0
retcode = main(args)
assert retcode==0
def test_EHFile(args, testDumpSTRdir):
# TODO add EH options
fname = os.path.join(testDumpSTRdir, "NA12878_chr21_eh.sorted.vcf.gz")
args.vcf = fname
args.use_length = True
args.num_records = 10
retcode = main(args)
assert retcode==0
def test_PopSTRFile(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "NA12878_chr21_popstr.sorted.vcf.gz")
args.vcf = fname
args.num_records = 10
args.use_length = True
args.popstr_min_call_DP = 5
args.popstr_max_call_DP = 100
args.popstr_require_support = 2
retcode = main(args)
assert retcode==0
# confirm that producing zipped output doesn't crash
def test_zippedOutput(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "trio_chr21_gangstr.sorted.vcf.gz")
args.vcf = fname
args.num_records = 10
args.gangstr_min_call_DP = 10
args.gangstr_max_call_DP = 20
args.gangstr_min_call_Q = 0.99
args.gangstr_filter_span_only = True
args.gangstr_filter_spanbound_only = True
args.gangstr_filter_badCI = True
#args.gangstr_require_support = 2
args.gangstr_readlen = 100
args.zip = True
retcode = main(args)
assert retcode==0
# Test invalid options
def test_InvalidOptions(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "NA12878_chr21_popstr.sorted.vcf.gz")
args.vcf = fname
# HWE
args.min_locus_hwep = -1
retcode = main(args)
assert retcode==1
args.min_locus_hwep = 2
retcode = main(args)
assert retcode==1
# Het
args.min_locus_hwep = None
args.min_locus_het = -1
retcode = main(args)
assert retcode==1
args.min_locus_het = 2
retcode = main(args)
assert retcode==1
args.min_locus_het = None
args.max_locus_het = -1
retcode = main(args)
assert retcode==1
args.max_locus_het = 2
retcode = main(args)
assert retcode==1
args.min_locus_het = 0.5
args.max_locus_het = 0.2
retcode = main(args)
assert retcode==1
# Test locus-level filters
def test_LocusLevel(args, testDumpSTRdir):
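    """Apply the locus-level filters (call rate, HWE p-value, heterozygosity
    bounds, homopolymer run) to one VCF per supported genotyper, both with
    and without drop_filtered."""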
tool_files = [
"trio_chr21_hipstr.sorted.vcf.gz",
"trio_chr21_gangstr.sorted.vcf.gz",
"NA12878_chr21_eh.sorted.vcf.gz",
"NA12878_chr21_popstr.sorted.vcf.gz",
"NA12878_chr21_popstr.sorted.vcf.gz",
"NA12878_chr21_advntr.sorted.vcf.gz"
]
for fname in tool_files:
args.vcf = os.path.join(testDumpSTRdir, fname)
args.num_records = 10
args.min_locus_callrate = 0.8
args.min_locus_hwep = 10e-4
args.min_locus_het = 0.1
args.max_locus_het = 0.3
args.use_length = True
args.drop_filtered = False
args.filter_hrun = True
if 'hipstr' in fname:
args.vcftype = 'hipstr'
else:
args.vcftype = 'auto'
assert main(args)==0
args.drop_filtered = True
assert main(args)==0
def test_RegionFilters(args, regiondir, testDumpSTRdir):
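    """Exercise region-based filtering: single and multiple BED files,
    mismatched region names, a nonexistent file, a missing tabix index, and
    chromosome names with and without the 'chr' prefix."""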
fname = os.path.join(testDumpSTRdir, "test_gangstr.vcf.gz")
args.vcf = fname
args.num_records = 10
# Correct filters
args.filter_regions = os.path.join(regiondir, "test_regions1.bed.gz")
retcode = main(args)
assert retcode==0
args.filter_regions_names = "test"
retcode = main(args)
assert retcode==0
# Correct filters, multiple regions
args.filter_regions = os.path.join(regiondir, "test_regions1.bed.gz") + "," + os.path.join(regiondir, "test_regions2.bed.gz")
args.filter_regions_names = "test1,test2"
retcode = main(args)
assert retcode==0
# Mismatch between region names and regions
args.filter_regions_names = "test1"
retcode = main(args)
assert retcode==1
# Nonexistent regions file
args.filter_regions = os.path.join(regiondir, "test_nonexistent.bed")
retcode = main(args)
assert retcode==1
    # BED file missing its tabix index
args.filter_regions = os.path.join(regiondir, "test_regions3.bed.gz")
assert main(args)==1
    # BED file whose chromosome names lack the 'chr' prefix
args.filter_regions = os.path.join(regiondir, "test_regions4.bed.gz")
assert main(args)==0
args.vcf = os.path.join(testDumpSTRdir, "test_gangstr_nochr.vcf.gz")
assert main(args)==0
def test_InvalidHipstrOptions(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "trio_chr21_hipstr.sorted.vcf.gz")
args.vcf = fname
args.num_records = 10
args.hipstr_max_call_flank_indel = -1
args.vcftype = 'hipstr'
retcode = main(args)
assert retcode==1
args.hipstr_max_call_flank_indel = None
args.hipstr_max_call_flank_indel = 2
retcode = main(args)
assert retcode==1
args.hipstr_max_call_flank_indel = None
args.hipstr_max_call_stutter = -1
retcode = main(args)
assert retcode==1
args.hipstr_max_call_stutter = 2
retcode = main(args)
assert retcode==1
args.hipstr_max_call_stutter = None
args.hipstr_min_supp_reads = -1
retcode = main(args)
assert retcode==1
args.hipstr_min_supp_reads = None
args.hipstr_min_call_DP = -1
assert main(args)==1
args.hipstr_min_call_DP = None
args.hipstr_max_call_DP = -1
assert main(args)==1
args.hipstr_min_call_DP = 5
args.hipstr_max_call_DP = 2
assert main(args)==1
args.hipstr_min_call_DP = None
args.hipstr_max_call_DP = None
args.hipstr_min_call_Q = -1
assert main(args)==1
args.hipstr_min_call_Q = 2
assert main(args)==1
def test_InvalidGangSTROptions(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "test_gangstr.vcf.gz")
args.vcf = fname
args.num_records = 10
args.gangstr_min_call_DP = -1
assert main(args)==1
args.gangstr_min_call_DP = None
args.gangstr_max_call_DP = -1
assert main(args)==1
args.gangstr_min_call_DP = 5
args.gangstr_max_call_DP = 2
assert main(args)==1
args.gangstr_min_call_DP = None
args.gangstr_max_call_DP = None
args.gangstr_min_call_Q = -1
assert main(args)==1
args.gangstr_min_call_Q = 2
assert main(args)==1
args.gangstr_min_call_Q = None
args.gangstr_expansion_prob_het = -1
assert main(args)==1
args.gangstr_expansion_prob_het = 2
assert main(args)==1
args.gangstr_expansion_prob_het = None
args.gangstr_expansion_prob_hom = -1
assert main(args)==1
args.gangstr_expansion_prob_hom = 2
assert main(args)==1
args.gangstr_expansion_prob_hom = None
args.gangstr_expansion_prob_total = -1
assert main(args)==1
args.gangstr_expansion_prob_total = 2
assert main(args)==1
args.gangstr_expansion_prob_total = None
'''
args.gangstr_require_support = -1
assert main(args)==1
args.gangstr_require_support = 2
assert main(args)==1
args.gangstr_readlen = 1
assert main(args)==1
'''
def test_InvalidAdVNTROptions(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "test_advntr.vcf.gz")
args.vcf = fname
args.num_records = 10
args.advntr_min_call_DP = -1
assert main(args)==1
args.advntr_min_call_DP = None
args.advntr_max_call_DP = -1
assert main(args)==1
args.advntr_min_call_DP = 5
args.advntr_max_call_DP = 2
assert main(args)==1
args.advntr_min_call_DP = None
args.advntr_max_call_DP = None
args.advntr_min_ML = -1
assert main(args)==1
args.advntr_min_ML = None
args.advntr_min_flanking = -1
assert main(args)==1
args.advntr_min_spanning = -1
assert main(args)==1
"""
def test_InvalidEHOptions(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "test_ExpansionHunter.vcf")
args.vcf = fname
args.num_records = 10
# TODO add once EH is implemented
"""
def test_InvalidPopSTROptions(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "NA12878_chr21_popstr.sorted.vcf.gz")
args.vcf = fname
args.num_records = 10
args.popstr_min_call_DP = -1
assert main(args)==1
args.popstr_min_call_DP = None
args.popstr_max_call_DP = -1
assert main(args)==1
args.popstr_min_call_DP = 5
args.popstr_max_call_DP = 2
assert main(args)==1
args.popstr_min_call_DP = None
args.popstr_max_call_DP = None
args.popstr_require_support = -1
assert main(args)==1
def test_InvalidGenotyperOptions(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "NA12878_chr21_popstr.sorted.vcf.gz")
args.vcf = fname
args.num_records = 10
args.hipstr_min_call_DP = 10
assert main(args)==1
args.hipstr_min_call_DP = None
args.gangstr_min_call_DP = 10
assert main(args)==1
args.gangstr_min_call_DP = None
fname = os.path.join(testDumpSTRdir, "trio_chr21_hipstr.sorted..vcf.gz")
args.vcf = fname
args.popstr_min_call_DP = 10
assert main(args)==1
args.popstr_min_call_DP = None
args.advntr_min_call_DP = 10
assert main(args)==1
args.advntr_min_call_DP = None
args.eh_min_call_LC = 5
assert main(args)==1
args.eh_min_call_LC = None
def test_InvalidOutput(capsys, args, testDumpSTRdir, tmpdir):
fname = os.path.join(testDumpSTRdir, "NA12878_chr21_popstr.sorted.vcf.gz")
args.vcf = fname
    # Fail when trying to output inside a nonexistent directory
args.out = str(tmpdir / "notadirectory" / "somefilename")
assert main(args) == 1
# To simulate a permissions issue: fail when trying to write a file in a location
# that is already a directory
capsys.readouterr()
(tmpdir / "foo.vcf").mkdir()
args.out = str(tmpdir / "foo")
assert main(args) == 1
# Make sure we produce a meaningful error message for this issue
assert 'is a directory' in str(capsys.readouterr())
def test_TwoDumpSTRRounds(args, testDumpSTRdir, tmpdir):
args.num_records = 10
fname = os.path.join(testDumpSTRdir, "test_gangstr.vcf.gz")
args.vcf = fname
args.min_locus_callrate = 0
args.zip = True
    main(args) # produces tmpdir/test.vcf.gz, since args.zip is set
args.vcf = str(tmpdir / "test.vcf.gz")
args.out = str(tmpdir / "test2")
assert main(args)==0
def test_BrokenVCF(args, testDumpSTRdir):
args.num_records = 10
fname = os.path.join(testDumpSTRdir, "test_broken.vcf.gz")
args.vcf = fname
args.die_on_warning = True
args.verbose = True
assert main(args)==1
"""
These tests run dumpSTR and compare its output
to output that has been generated by a previous version of
dumpSTR and saved in the repo. The results are expected
to be identical.
These tests are too strict and will often break because
dumpSTR output has been intentionally changed
However, the presence of these tests is important because
it should prevent any unexpected changes in output.
If you've reviewed the change in output and find it acceptable,
use trtools/testsupport/sample_vcfs/dumpSTR_vcfs/create_test_files.sh
to regenerate the test files with the new output.
"""
def test_output_locus_filters(args, testDumpSTRdir):
args.vcf = testDumpSTRdir + '/trio_chr21_hipstr.sorted.vcf.gz'
args.min_locus_callrate = 0.5
args.min_locus_hwep = 0.5
args.min_locus_het = 0.05
args.max_locus_het = 0.45
args.filter_regions_names = 'foo_region'
args.filter_regions = testDumpSTRdir + '/sample_region.bed.gz'
args.vcftype = 'hipstr'
assert main(args) == 0
# expect changes in precision for HET and HWEP
# that will make them too much of a pain to compare
# there are also rounding errors with HipSTR field GLDIFF
# that aren't worth worrying about
assert_same_vcf(args.out + '.vcf',
testDumpSTRdir + '/locus_filters.vcf',
info_ignore = {'AC', 'REFAC', 'HET', 'HWEP'},
format_ignore= {'GLDIFF'})
for ext in '.samplog.tab', '.loclog.tab':
assert_same_file(args.out + ext,
testDumpSTRdir + '/locus_filters' + ext,
ext)
# make sure locus level filters produce the same output when
# --drop-filtered is set
def test_output_drop_filtered(args, testDumpSTRdir):
args.vcf = testDumpSTRdir + '/trio_chr21_hipstr.sorted.vcf.gz'
args.min_locus_callrate = 0.5
args.min_locus_hwep = 0.5
args.min_locus_het = 0.05
args.max_locus_het = 0.45
args.filter_regions_names = 'foo_region'
args.filter_regions = testDumpSTRdir + '/sample_region.bed.gz'
args.vcftype = 'hipstr'
args.drop_filtered = True
assert main(args) == 0
# expect changes in precision for HET and HWEP
# that will make them too much of a pain to compare
# there are also rounding errors with HipSTR field GLDIFF
# that aren't worth worrying about
assert_same_vcf(args.out + '.vcf',
testDumpSTRdir + '/drop_filtered.vcf',
info_ignore = {'AC', 'REFAC', 'HET', 'HWEP'},
format_ignore= {'GLDIFF'})
for ext in '.samplog.tab', '.loclog.tab':
assert_same_file(args.out + ext,
testDumpSTRdir + '/locus_filters' + ext,
ext)
# test advntr call level filters
def test_output_advntr_filters(args, testDumpSTRdir):
args.vcf = testDumpSTRdir + '/NA12878_chr21_advntr.sorted.vcf.gz'
args.advntr_min_call_DP = 50
args.advntr_max_call_DP = 2000
args.advntr_min_spanning = 1
args.advntr_min_flanking = 20
args.advntr_min_ML = 0.95
assert main(args) == 0
# expect changes in precision for HET and HWEP
# that will make them too much of a pain to compare
assert_same_vcf(args.out + '.vcf',
testDumpSTRdir + '/advntr_filters.vcf',
info_ignore = {'AC', 'REFAC', 'HET', 'HWEP'})
for ext in '.samplog.tab', '.loclog.tab':
assert_same_file(args.out + ext,
testDumpSTRdir + '/advntr_filters' + ext,
ext)
# test hipstr call and locus level filters
def test_output_hipstr_filters(args, testDumpSTRdir):
args.vcf = testDumpSTRdir + '/trio_chr21_hipstr.sorted.vcf.gz'
args.filter_hrun = True
args.use_length = True
args.max_locus_het = 0.45
args.min_locus_het = 0.05
args.min_locus_hwep = 0.5
args.hipstr_max_call_flank_indel = 0.05
args.hipstr_max_call_stutter = 0.3
args.hipstr_min_supp_reads = 10
args.hipstr_min_call_DP = 30
args.hipstr_max_call_DP = 200
args.hipstr_min_call_Q = 0.9
args.vcftype = 'hipstr'
assert main(args) == 0
# expect changes in precision for HET and HWEP
# that will make them too much of a pain to compare
# there are also rounding errors with HipSTR field GLDIFF
# that aren't worth worrying about
assert_same_vcf(args.out + '.vcf',
testDumpSTRdir + '/hipstr_filters.vcf',
info_ignore = {'AC', 'REFAC', 'HET', 'HWEP'},
format_ignore= {'GLDIFF'})
for ext in '.samplog.tab', '.loclog.tab':
assert_same_file(args.out + ext,
testDumpSTRdir + '/hipstr_filters' + ext,
ext)
# test gangstr call level filters that don't begin
# with 'expansion' - those are tested on another file
def test_output_gangstr_most_filters(args, testDumpSTRdir):
args.vcf = testDumpSTRdir + '/trio_chr21_gangstr.sorted.vcf.gz'
args.gangstr_min_call_DP = 10
args.gangstr_max_call_DP = 100
args.gangstr_min_call_Q = 0.9
args.gangstr_filter_span_only = True
args.gangstr_filter_spanbound_only = True
args.gangstr_filter_badCI = True
# args.gangstr_require_support = 10
# args.gangstr_readlen = 150
assert main(args) == 0
# expect changes in precision for HET and HWEP
# that will make them too much of a pain to compare
assert_same_vcf(args.out + '.vcf',
testDumpSTRdir + '/gangstr_filters_most.vcf',
info_ignore = {'AC', 'REFAC', 'HET', 'HWEP'})
for ext in '.samplog.tab', '.loclog.tab':
assert_same_file(args.out + ext,
testDumpSTRdir + '/gangstr_filters_most' + ext,
ext)
# test gangstr call level filters that begin with
# 'expansion' - the other gangstr call level filters
# are tested on another file
def test_output_gangstr_expansion_filters(args, testDumpSTRdir):
args.vcf = testDumpSTRdir + '/test_gangstr.vcf.gz'
args.gangstr_expansion_prob_het = 0.001
args.gangstr_expansion_prob_hom = 0.0005
args.gangstr_expansion_prob_total = 0.001
assert main(args) == 0
# expect changes in precision for HET and HWEP
# that will make them too much of a pain to compare
assert_same_vcf(args.out + '.vcf',
testDumpSTRdir + '/gangstr_filters_expansion.vcf',
info_ignore = {'AC', 'REFAC', 'HET', 'HWEP'})
for ext in '.samplog.tab', '.loclog.tab':
assert_same_file(args.out + ext,
testDumpSTRdir + '/gangstr_filters_expansion' + ext,
ext)
# test popstr call level filters
def test_output_popstr_filters(args, testDumpSTRdir):
args.vcf = testDumpSTRdir + '/NA12878_chr21_popstr.sorted.vcf.gz'
args.popstr_min_call_DP = 30
args.popstr_max_call_DP = 200
args.popstr_require_support = 15
args.use_length = True
assert main(args) == 0
# expect changes in precision for HET and HWEP
# that will make them too much of a pain to compare
assert_same_vcf(args.out + '.vcf',
testDumpSTRdir + '/popstr_filters.vcf',
info_ignore = {'AC', 'REFAC', 'HET', 'HWEP'})
for ext in '.samplog.tab', '.loclog.tab':
assert_same_file(args.out + ext,
testDumpSTRdir + '/popstr_filters' + ext,
ext)
| 34.038012 | 129 | 0.683489 | 0 | 0 | 0 | 0 | 1,725 | 0.074092 | 0 | 0 | 5,921 | 0.254317 |
f81030a9747b6fbce3be0c3890586bc3da2d99c2 | 27,895 | py | Python | nova/network/ldapdns.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | [
"Apache-2.0"
] | null | null | null | nova/network/ldapdns.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | [
"Apache-2.0"
] | null | null | null | nova/network/ldapdns.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | [
"Apache-2.0"
] | 2 | 2017-07-20T17:31:34.000Z | 2020-07-24T02:42:19.000Z | begin_unit
comment|'# Copyright 2012 Andrew Bogott for the Wikimedia Foundation'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'import'
name|'ldap'
newline|'\n'
dedent|''
name|'except'
name|'ImportError'
op|':'
newline|'\n'
comment|'# This module needs to be importable despite ldap not being a requirement'
nl|'\n'
DECL|variable|ldap
indent|' '
name|'ldap'
op|'='
name|'None'
newline|'\n'
nl|'\n'
dedent|''
name|'import'
name|'time'
newline|'\n'
nl|'\n'
name|'from'
name|'oslo_log'
name|'import'
name|'log'
name|'as'
name|'logging'
newline|'\n'
nl|'\n'
name|'import'
name|'nova'
op|'.'
name|'conf'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'exception'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'i18n'
name|'import'
name|'_'
op|','
name|'_LW'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'network'
name|'import'
name|'dns_driver'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'utils'
newline|'\n'
nl|'\n'
DECL|variable|CONF
name|'CONF'
op|'='
name|'nova'
op|'.'
name|'conf'
op|'.'
name|'CONF'
newline|'\n'
DECL|variable|LOG
name|'LOG'
op|'='
name|'logging'
op|'.'
name|'getLogger'
op|'('
name|'__name__'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
comment|'# Importing ldap.modlist breaks the tests for some reason,'
nl|'\n'
comment|'# so this is an abbreviated version of a function from'
nl|'\n'
comment|'# there.'
nl|'\n'
DECL|function|create_modlist
name|'def'
name|'create_modlist'
op|'('
name|'newattrs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'modlist'
op|'='
op|'['
op|']'
newline|'\n'
name|'for'
name|'attrtype'
name|'in'
name|'newattrs'
op|'.'
name|'keys'
op|'('
op|')'
op|':'
newline|'\n'
indent|' '
name|'utf8_vals'
op|'='
op|'['
op|']'
newline|'\n'
name|'for'
name|'val'
name|'in'
name|'newattrs'
op|'['
name|'attrtype'
op|']'
op|':'
newline|'\n'
indent|' '
name|'utf8_vals'
op|'.'
name|'append'
op|'('
name|'utils'
op|'.'
name|'utf8'
op|'('
name|'val'
op|')'
op|')'
newline|'\n'
dedent|''
name|'newattrs'
op|'['
name|'attrtype'
op|']'
op|'='
name|'utf8_vals'
newline|'\n'
name|'modlist'
op|'.'
name|'append'
op|'('
op|'('
name|'attrtype'
op|','
name|'newattrs'
op|'['
name|'attrtype'
op|']'
op|')'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'modlist'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|DNSEntry
dedent|''
name|'class'
name|'DNSEntry'
op|'('
name|'object'
op|')'
op|':'
newline|'\n'
nl|'\n'
DECL|member|__init__
indent|' '
name|'def'
name|'__init__'
op|'('
name|'self'
op|','
name|'ldap_object'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""ldap_object is an instance of ldap.LDAPObject.\n\n It should already be initialized and bound before\n getting passed in here.\n """'
newline|'\n'
name|'self'
op|'.'
name|'lobj'
op|'='
name|'ldap_object'
newline|'\n'
name|'self'
op|'.'
name|'ldap_tuple'
op|'='
name|'None'
newline|'\n'
name|'self'
op|'.'
name|'qualified_domain'
op|'='
name|'None'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'classmethod'
newline|'\n'
DECL|member|_get_tuple_for_domain
name|'def'
name|'_get_tuple_for_domain'
op|'('
name|'cls'
op|','
name|'lobj'
op|','
name|'domain'
op|')'
op|':'
newline|'\n'
indent|' '
name|'entry'
op|'='
name|'lobj'
op|'.'
name|'search_s'
op|'('
name|'CONF'
op|'.'
name|'ldap_dns_base_dn'
op|','
name|'ldap'
op|'.'
name|'SCOPE_SUBTREE'
op|','
nl|'\n'
string|"'(associatedDomain=%s)'"
op|'%'
name|'utils'
op|'.'
name|'utf8'
op|'('
name|'domain'
op|')'
op|')'
newline|'\n'
name|'if'
name|'not'
name|'entry'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'None'
newline|'\n'
dedent|''
name|'if'
name|'len'
op|'('
name|'entry'
op|')'
op|'>'
number|'1'
op|':'
newline|'\n'
indent|' '
name|'LOG'
op|'.'
name|'warning'
op|'('
name|'_LW'
op|'('
string|'"Found multiple matches for domain "'
nl|'\n'
string|'"%(domain)s.\\n%(entry)s"'
op|')'
op|','
nl|'\n'
name|'domain'
op|','
name|'entry'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'entry'
op|'['
number|'0'
op|']'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'classmethod'
newline|'\n'
DECL|member|_get_all_domains
name|'def'
name|'_get_all_domains'
op|'('
name|'cls'
op|','
name|'lobj'
op|')'
op|':'
newline|'\n'
indent|' '
name|'entries'
op|'='
name|'lobj'
op|'.'
name|'search_s'
op|'('
name|'CONF'
op|'.'
name|'ldap_dns_base_dn'
op|','
nl|'\n'
name|'ldap'
op|'.'
name|'SCOPE_SUBTREE'
op|','
string|"'(sOARecord=*)'"
op|')'
newline|'\n'
name|'domains'
op|'='
op|'['
op|']'
newline|'\n'
name|'for'
name|'entry'
name|'in'
name|'entries'
op|':'
newline|'\n'
indent|' '
name|'domain'
op|'='
name|'entry'
op|'['
number|'1'
op|']'
op|'.'
name|'get'
op|'('
string|"'associatedDomain'"
op|')'
newline|'\n'
name|'if'
name|'domain'
op|':'
newline|'\n'
indent|' '
name|'domains'
op|'.'
name|'append'
op|'('
name|'domain'
op|'['
number|'0'
op|']'
op|')'
newline|'\n'
dedent|''
dedent|''
name|'return'
name|'domains'
newline|'\n'
nl|'\n'
DECL|member|_set_tuple
dedent|''
name|'def'
name|'_set_tuple'
op|'('
name|'self'
op|','
name|'tuple'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'ldap_tuple'
op|'='
name|'tuple'
newline|'\n'
nl|'\n'
DECL|member|_qualify
dedent|''
name|'def'
name|'_qualify'
op|'('
name|'self'
op|','
name|'name'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
string|"'%s.%s'"
op|'%'
op|'('
name|'name'
op|','
name|'self'
op|'.'
name|'qualified_domain'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_dequalify
dedent|''
name|'def'
name|'_dequalify'
op|'('
name|'self'
op|','
name|'name'
op|')'
op|':'
newline|'\n'
indent|' '
name|'z'
op|'='
string|'".%s"'
op|'%'
name|'self'
op|'.'
name|'qualified_domain'
newline|'\n'
name|'if'
name|'name'
op|'.'
name|'endswith'
op|'('
name|'z'
op|')'
op|':'
newline|'\n'
indent|' '
name|'dequalified'
op|'='
name|'name'
op|'['
number|'0'
op|':'
name|'name'
op|'.'
name|'rfind'
op|'('
name|'z'
op|')'
op|']'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'LOG'
op|'.'
name|'warning'
op|'('
name|'_LW'
op|'('
string|'"Unable to dequalify. %(name)s is not in "'
nl|'\n'
string|'"%(domain)s.\\n"'
op|')'
op|','
nl|'\n'
op|'{'
string|"'name'"
op|':'
name|'name'
op|','
nl|'\n'
string|"'domain'"
op|':'
name|'self'
op|'.'
name|'qualified_domain'
op|'}'
op|')'
newline|'\n'
name|'dequalified'
op|'='
name|'None'
newline|'\n'
nl|'\n'
dedent|''
name|'return'
name|'dequalified'
newline|'\n'
nl|'\n'
DECL|member|_dn
dedent|''
name|'def'
name|'_dn'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'self'
op|'.'
name|'ldap_tuple'
op|'['
number|'0'
op|']'
newline|'\n'
DECL|variable|dn
dedent|''
name|'dn'
op|'='
name|'property'
op|'('
name|'_dn'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_rdn
name|'def'
name|'_rdn'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'self'
op|'.'
name|'dn'
op|'.'
name|'partition'
op|'('
string|"','"
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
DECL|variable|rdn
dedent|''
name|'rdn'
op|'='
name|'property'
op|'('
name|'_rdn'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|DomainEntry
dedent|''
name|'class'
name|'DomainEntry'
op|'('
name|'DNSEntry'
op|')'
op|':'
newline|'\n'
nl|'\n'
indent|' '
op|'@'
name|'classmethod'
newline|'\n'
DECL|member|_soa
name|'def'
name|'_soa'
op|'('
name|'cls'
op|')'
op|':'
newline|'\n'
indent|' '
name|'date'
op|'='
name|'time'
op|'.'
name|'strftime'
op|'('
string|"'%Y%m%d%H%M%S'"
op|')'
newline|'\n'
name|'soa'
op|'='
string|"'%s %s %s %s %s %s %s'"
op|'%'
op|'('
nl|'\n'
name|'CONF'
op|'.'
name|'ldap_dns_servers'
op|'['
number|'0'
op|']'
op|','
nl|'\n'
name|'CONF'
op|'.'
name|'ldap_dns_soa_hostmaster'
op|','
nl|'\n'
name|'date'
op|','
nl|'\n'
name|'CONF'
op|'.'
name|'ldap_dns_soa_refresh'
op|','
nl|'\n'
name|'CONF'
op|'.'
name|'ldap_dns_soa_retry'
op|','
nl|'\n'
name|'CONF'
op|'.'
name|'ldap_dns_soa_expiry'
op|','
nl|'\n'
name|'CONF'
op|'.'
name|'ldap_dns_soa_minimum'
op|')'
newline|'\n'
name|'return'
name|'utils'
op|'.'
name|'utf8'
op|'('
name|'soa'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'classmethod'
newline|'\n'
DECL|member|create_domain
name|'def'
name|'create_domain'
op|'('
name|'cls'
op|','
name|'lobj'
op|','
name|'domain'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Create a new domain entry, and return an object that wraps it."""'
newline|'\n'
name|'entry'
op|'='
name|'cls'
op|'.'
name|'_get_tuple_for_domain'
op|'('
name|'lobj'
op|','
name|'domain'
op|')'
newline|'\n'
name|'if'
name|'entry'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'FloatingIpDNSExists'
op|'('
name|'name'
op|'='
name|'domain'
op|','
name|'domain'
op|'='
string|"''"
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'newdn'
op|'='
string|"'dc=%s,%s'"
op|'%'
op|'('
name|'domain'
op|','
name|'CONF'
op|'.'
name|'ldap_dns_base_dn'
op|')'
newline|'\n'
name|'attrs'
op|'='
op|'{'
string|"'objectClass'"
op|':'
op|'['
string|"'domainrelatedobject'"
op|','
string|"'dnsdomain'"
op|','
nl|'\n'
string|"'domain'"
op|','
string|"'dcobject'"
op|','
string|"'top'"
op|']'
op|','
nl|'\n'
string|"'sOARecord'"
op|':'
op|'['
name|'cls'
op|'.'
name|'_soa'
op|'('
op|')'
op|']'
op|','
nl|'\n'
string|"'associatedDomain'"
op|':'
op|'['
name|'domain'
op|']'
op|','
nl|'\n'
string|"'dc'"
op|':'
op|'['
name|'domain'
op|']'
op|'}'
newline|'\n'
name|'lobj'
op|'.'
name|'add_s'
op|'('
name|'newdn'
op|','
name|'create_modlist'
op|'('
name|'attrs'
op|')'
op|')'
newline|'\n'
name|'return'
name|'DomainEntry'
op|'('
name|'lobj'
op|','
name|'domain'
op|')'
newline|'\n'
nl|'\n'
DECL|member|__init__
dedent|''
name|'def'
name|'__init__'
op|'('
name|'self'
op|','
name|'ldap_object'
op|','
name|'domain'
op|')'
op|':'
newline|'\n'
indent|' '
name|'super'
op|'('
name|'DomainEntry'
op|','
name|'self'
op|')'
op|'.'
name|'__init__'
op|'('
name|'ldap_object'
op|')'
newline|'\n'
name|'entry'
op|'='
name|'self'
op|'.'
name|'_get_tuple_for_domain'
op|'('
name|'self'
op|'.'
name|'lobj'
op|','
name|'domain'
op|')'
newline|'\n'
name|'if'
name|'not'
name|'entry'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'NotFound'
op|'('
op|')'
newline|'\n'
dedent|''
name|'self'
op|'.'
name|'_set_tuple'
op|'('
name|'entry'
op|')'
newline|'\n'
name|'assert'
op|'('
name|'entry'
op|'['
number|'1'
op|']'
op|'['
string|"'associatedDomain'"
op|']'
op|'['
number|'0'
op|']'
op|'=='
name|'domain'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'qualified_domain'
op|'='
name|'domain'
newline|'\n'
nl|'\n'
DECL|member|delete
dedent|''
name|'def'
name|'delete'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Delete the domain that this entry refers to."""'
newline|'\n'
name|'entries'
op|'='
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'search_s'
op|'('
name|'self'
op|'.'
name|'dn'
op|','
nl|'\n'
name|'ldap'
op|'.'
name|'SCOPE_SUBTREE'
op|','
nl|'\n'
string|"'(aRecord=*)'"
op|')'
newline|'\n'
name|'for'
name|'entry'
name|'in'
name|'entries'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'delete_s'
op|'('
name|'entry'
op|'['
number|'0'
op|']'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'delete_s'
op|'('
name|'self'
op|'.'
name|'dn'
op|')'
newline|'\n'
nl|'\n'
DECL|member|update_soa
dedent|''
name|'def'
name|'update_soa'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'mlist'
op|'='
op|'['
op|'('
name|'ldap'
op|'.'
name|'MOD_REPLACE'
op|','
string|"'sOARecord'"
op|','
name|'self'
op|'.'
name|'_soa'
op|'('
op|')'
op|')'
op|']'
newline|'\n'
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'modify_s'
op|'('
name|'self'
op|'.'
name|'dn'
op|','
name|'mlist'
op|')'
newline|'\n'
nl|'\n'
DECL|member|subentry_with_name
dedent|''
name|'def'
name|'subentry_with_name'
op|'('
name|'self'
op|','
name|'name'
op|')'
op|':'
newline|'\n'
indent|' '
name|'entry'
op|'='
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'search_s'
op|'('
name|'self'
op|'.'
name|'dn'
op|','
name|'ldap'
op|'.'
name|'SCOPE_SUBTREE'
op|','
nl|'\n'
string|"'(associatedDomain=%s.%s)'"
op|'%'
nl|'\n'
op|'('
name|'utils'
op|'.'
name|'utf8'
op|'('
name|'name'
op|')'
op|','
nl|'\n'
name|'utils'
op|'.'
name|'utf8'
op|'('
name|'self'
op|'.'
name|'qualified_domain'
op|')'
op|')'
op|')'
newline|'\n'
name|'if'
name|'entry'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'HostEntry'
op|'('
name|'self'
op|','
name|'entry'
op|'['
number|'0'
op|']'
op|')'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'None'
newline|'\n'
nl|'\n'
DECL|member|subentries_with_ip
dedent|''
dedent|''
name|'def'
name|'subentries_with_ip'
op|'('
name|'self'
op|','
name|'ip'
op|')'
op|':'
newline|'\n'
indent|' '
name|'entries'
op|'='
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'search_s'
op|'('
name|'self'
op|'.'
name|'dn'
op|','
name|'ldap'
op|'.'
name|'SCOPE_SUBTREE'
op|','
nl|'\n'
string|"'(aRecord=%s)'"
op|'%'
name|'utils'
op|'.'
name|'utf8'
op|'('
name|'ip'
op|')'
op|')'
newline|'\n'
name|'objs'
op|'='
op|'['
op|']'
newline|'\n'
name|'for'
name|'entry'
name|'in'
name|'entries'
op|':'
newline|'\n'
indent|' '
name|'if'
string|"'associatedDomain'"
name|'in'
name|'entry'
op|'['
number|'1'
op|']'
op|':'
newline|'\n'
indent|' '
name|'objs'
op|'.'
name|'append'
op|'('
name|'HostEntry'
op|'('
name|'self'
op|','
name|'entry'
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
name|'return'
name|'objs'
newline|'\n'
nl|'\n'
DECL|member|add_entry
dedent|''
name|'def'
name|'add_entry'
op|'('
name|'self'
op|','
name|'name'
op|','
name|'address'
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'self'
op|'.'
name|'subentry_with_name'
op|'('
name|'name'
op|')'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'FloatingIpDNSExists'
op|'('
name|'name'
op|'='
name|'name'
op|','
nl|'\n'
name|'domain'
op|'='
name|'self'
op|'.'
name|'qualified_domain'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'entries'
op|'='
name|'self'
op|'.'
name|'subentries_with_ip'
op|'('
name|'address'
op|')'
newline|'\n'
name|'if'
name|'entries'
op|':'
newline|'\n'
comment|'# We already have an ldap entry for this IP, so we just'
nl|'\n'
comment|'# need to add the new name.'
nl|'\n'
indent|' '
name|'existingdn'
op|'='
name|'entries'
op|'['
number|'0'
op|']'
op|'.'
name|'dn'
newline|'\n'
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'modify_s'
op|'('
name|'existingdn'
op|','
op|'['
op|'('
name|'ldap'
op|'.'
name|'MOD_ADD'
op|','
nl|'\n'
string|"'associatedDomain'"
op|','
nl|'\n'
name|'utils'
op|'.'
name|'utf8'
op|'('
name|'self'
op|'.'
name|'_qualify'
op|'('
name|'name'
op|')'
op|')'
op|')'
op|']'
op|')'
newline|'\n'
nl|'\n'
name|'return'
name|'self'
op|'.'
name|'subentry_with_name'
op|'('
name|'name'
op|')'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
comment|'# We need to create an entirely new entry.'
nl|'\n'
indent|' '
name|'newdn'
op|'='
string|"'dc=%s,%s'"
op|'%'
op|'('
name|'name'
op|','
name|'self'
op|'.'
name|'dn'
op|')'
newline|'\n'
name|'attrs'
op|'='
op|'{'
string|"'objectClass'"
op|':'
op|'['
string|"'domainrelatedobject'"
op|','
string|"'dnsdomain'"
op|','
nl|'\n'
string|"'domain'"
op|','
string|"'dcobject'"
op|','
string|"'top'"
op|']'
op|','
nl|'\n'
string|"'aRecord'"
op|':'
op|'['
name|'address'
op|']'
op|','
nl|'\n'
string|"'associatedDomain'"
op|':'
op|'['
name|'self'
op|'.'
name|'_qualify'
op|'('
name|'name'
op|')'
op|']'
op|','
nl|'\n'
string|"'dc'"
op|':'
op|'['
name|'name'
op|']'
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'add_s'
op|'('
name|'newdn'
op|','
name|'create_modlist'
op|'('
name|'attrs'
op|')'
op|')'
newline|'\n'
name|'return'
name|'self'
op|'.'
name|'subentry_with_name'
op|'('
name|'name'
op|')'
newline|'\n'
nl|'\n'
DECL|member|remove_entry
dedent|''
dedent|''
name|'def'
name|'remove_entry'
op|'('
name|'self'
op|','
name|'name'
op|')'
op|':'
newline|'\n'
indent|' '
name|'entry'
op|'='
name|'self'
op|'.'
name|'subentry_with_name'
op|'('
name|'name'
op|')'
newline|'\n'
name|'if'
name|'not'
name|'entry'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'NotFound'
op|'('
op|')'
newline|'\n'
dedent|''
name|'entry'
op|'.'
name|'remove_name'
op|'('
name|'name'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'update_soa'
op|'('
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|HostEntry
dedent|''
dedent|''
name|'class'
name|'HostEntry'
op|'('
name|'DNSEntry'
op|')'
op|':'
newline|'\n'
nl|'\n'
DECL|member|__init__
indent|' '
name|'def'
name|'__init__'
op|'('
name|'self'
op|','
name|'parent'
op|','
name|'tuple'
op|')'
op|':'
newline|'\n'
indent|' '
name|'super'
op|'('
name|'HostEntry'
op|','
name|'self'
op|')'
op|'.'
name|'__init__'
op|'('
name|'parent'
op|'.'
name|'lobj'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'parent_entry'
op|'='
name|'parent'
newline|'\n'
name|'self'
op|'.'
name|'_set_tuple'
op|'('
name|'tuple'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'qualified_domain'
op|'='
name|'parent'
op|'.'
name|'qualified_domain'
newline|'\n'
nl|'\n'
DECL|member|remove_name
dedent|''
name|'def'
name|'remove_name'
op|'('
name|'self'
op|','
name|'name'
op|')'
op|':'
newline|'\n'
indent|' '
name|'names'
op|'='
name|'self'
op|'.'
name|'ldap_tuple'
op|'['
number|'1'
op|']'
op|'['
string|"'associatedDomain'"
op|']'
newline|'\n'
name|'if'
name|'not'
name|'names'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'NotFound'
op|'('
op|')'
newline|'\n'
dedent|''
name|'if'
name|'len'
op|'('
name|'names'
op|')'
op|'>'
number|'1'
op|':'
newline|'\n'
comment|'# We just have to remove the requested domain.'
nl|'\n'
indent|' '
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'modify_s'
op|'('
name|'self'
op|'.'
name|'dn'
op|','
op|'['
op|'('
name|'ldap'
op|'.'
name|'MOD_DELETE'
op|','
string|"'associatedDomain'"
op|','
nl|'\n'
name|'self'
op|'.'
name|'_qualify'
op|'('
name|'utils'
op|'.'
name|'utf8'
op|'('
name|'name'
op|')'
op|')'
op|')'
op|']'
op|')'
newline|'\n'
name|'if'
op|'('
name|'self'
op|'.'
name|'rdn'
op|'['
number|'1'
op|']'
op|'=='
name|'name'
op|')'
op|':'
newline|'\n'
comment|'# We just removed the rdn, so we need to move this entry.'
nl|'\n'
indent|' '
name|'names'
op|'.'
name|'remove'
op|'('
name|'self'
op|'.'
name|'_qualify'
op|'('
name|'name'
op|')'
op|')'
newline|'\n'
name|'newrdn'
op|'='
string|"'dc=%s'"
op|'%'
name|'self'
op|'.'
name|'_dequalify'
op|'('
name|'names'
op|'['
number|'0'
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'modrdn_s'
op|'('
name|'self'
op|'.'
name|'dn'
op|','
op|'['
name|'newrdn'
op|']'
op|')'
newline|'\n'
dedent|''
dedent|''
name|'else'
op|':'
newline|'\n'
comment|'# We should delete the entire record.'
nl|'\n'
indent|' '
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'delete_s'
op|'('
name|'self'
op|'.'
name|'dn'
op|')'
newline|'\n'
nl|'\n'
DECL|member|modify_address
dedent|''
dedent|''
name|'def'
name|'modify_address'
op|'('
name|'self'
op|','
name|'name'
op|','
name|'address'
op|')'
op|':'
newline|'\n'
indent|' '
name|'names'
op|'='
name|'self'
op|'.'
name|'ldap_tuple'
op|'['
number|'1'
op|']'
op|'['
string|"'associatedDomain'"
op|']'
newline|'\n'
name|'if'
name|'not'
name|'names'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'NotFound'
op|'('
op|')'
newline|'\n'
dedent|''
name|'if'
name|'len'
op|'('
name|'names'
op|')'
op|'=='
number|'1'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'modify_s'
op|'('
name|'self'
op|'.'
name|'dn'
op|','
op|'['
op|'('
name|'ldap'
op|'.'
name|'MOD_REPLACE'
op|','
string|"'aRecord'"
op|','
nl|'\n'
op|'['
name|'utils'
op|'.'
name|'utf8'
op|'('
name|'address'
op|')'
op|']'
op|')'
op|']'
op|')'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'remove_name'
op|'('
name|'name'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'parent'
op|'.'
name|'add_entry'
op|'('
name|'name'
op|','
name|'address'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_names
dedent|''
dedent|''
name|'def'
name|'_names'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'names'
op|'='
op|'['
op|']'
newline|'\n'
name|'for'
name|'domain'
name|'in'
name|'self'
op|'.'
name|'ldap_tuple'
op|'['
number|'1'
op|']'
op|'['
string|"'associatedDomain'"
op|']'
op|':'
newline|'\n'
indent|' '
name|'names'
op|'.'
name|'append'
op|'('
name|'self'
op|'.'
name|'_dequalify'
op|'('
name|'domain'
op|')'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'names'
newline|'\n'
DECL|variable|names
dedent|''
name|'names'
op|'='
name|'property'
op|'('
name|'_names'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_ip
name|'def'
name|'_ip'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'ip'
op|'='
name|'self'
op|'.'
name|'ldap_tuple'
op|'['
number|'1'
op|']'
op|'['
string|"'aRecord'"
op|']'
op|'['
number|'0'
op|']'
newline|'\n'
name|'return'
name|'ip'
newline|'\n'
DECL|variable|ip
dedent|''
name|'ip'
op|'='
name|'property'
op|'('
name|'_ip'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_parent
name|'def'
name|'_parent'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'self'
op|'.'
name|'parent_entry'
newline|'\n'
DECL|variable|parent
dedent|''
name|'parent'
op|'='
name|'property'
op|'('
name|'_parent'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|LdapDNS
dedent|''
name|'class'
name|'LdapDNS'
op|'('
name|'dns_driver'
op|'.'
name|'DNSDriver'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Driver for PowerDNS using ldap as a back end.\n\n This driver assumes ldap-method=strict, with all domains\n in the top-level, aRecords only.\n """'
newline|'\n'
nl|'\n'
DECL|member|__init__
name|'def'
name|'__init__'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'not'
name|'ldap'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'ImportError'
op|'('
name|'_'
op|'('
string|"'ldap not installed'"
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'lobj'
op|'='
name|'ldap'
op|'.'
name|'initialize'
op|'('
name|'CONF'
op|'.'
name|'ldap_dns_url'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'simple_bind_s'
op|'('
name|'CONF'
op|'.'
name|'ldap_dns_user'
op|','
nl|'\n'
name|'CONF'
op|'.'
name|'ldap_dns_password'
op|')'
newline|'\n'
nl|'\n'
DECL|member|get_domains
dedent|''
name|'def'
name|'get_domains'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'DomainEntry'
op|'.'
name|'_get_all_domains'
op|'('
name|'self'
op|'.'
name|'lobj'
op|')'
newline|'\n'
nl|'\n'
DECL|member|create_entry
dedent|''
name|'def'
name|'create_entry'
op|'('
name|'self'
op|','
name|'name'
op|','
name|'address'
op|','
name|'type'
op|','
name|'domain'
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'type'
op|'.'
name|'lower'
op|'('
op|')'
op|'!='
string|"'a'"
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'InvalidInput'
op|'('
name|'_'
op|'('
string|'"This driver only supports "'
nl|'\n'
string|'"type \'a\' entries."'
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'dEntry'
op|'='
name|'DomainEntry'
op|'('
name|'self'
op|'.'
name|'lobj'
op|','
name|'domain'
op|')'
newline|'\n'
name|'dEntry'
op|'.'
name|'add_entry'
op|'('
name|'name'
op|','
name|'address'
op|')'
newline|'\n'
nl|'\n'
DECL|member|delete_entry
dedent|''
name|'def'
name|'delete_entry'
op|'('
name|'self'
op|','
name|'name'
op|','
name|'domain'
op|')'
op|':'
newline|'\n'
indent|' '
name|'dEntry'
op|'='
name|'DomainEntry'
op|'('
name|'self'
op|'.'
name|'lobj'
op|','
name|'domain'
op|')'
newline|'\n'
name|'dEntry'
op|'.'
name|'remove_entry'
op|'('
name|'name'
op|')'
newline|'\n'
nl|'\n'
DECL|member|get_entries_by_address
dedent|''
name|'def'
name|'get_entries_by_address'
op|'('
name|'self'
op|','
name|'address'
op|','
name|'domain'
op|')'
op|':'
newline|'\n'
indent|' '
name|'try'
op|':'
newline|'\n'
indent|' '
name|'dEntry'
op|'='
name|'DomainEntry'
op|'('
name|'self'
op|'.'
name|'lobj'
op|','
name|'domain'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'exception'
op|'.'
name|'NotFound'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'['
op|']'
newline|'\n'
dedent|''
name|'entries'
op|'='
name|'dEntry'
op|'.'
name|'subentries_with_ip'
op|'('
name|'address'
op|')'
newline|'\n'
name|'names'
op|'='
op|'['
op|']'
newline|'\n'
name|'for'
name|'entry'
name|'in'
name|'entries'
op|':'
newline|'\n'
indent|' '
name|'names'
op|'.'
name|'extend'
op|'('
name|'entry'
op|'.'
name|'names'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'names'
newline|'\n'
nl|'\n'
DECL|member|get_entries_by_name
dedent|''
name|'def'
name|'get_entries_by_name'
op|'('
name|'self'
op|','
name|'name'
op|','
name|'domain'
op|')'
op|':'
newline|'\n'
indent|' '
name|'try'
op|':'
newline|'\n'
indent|' '
name|'dEntry'
op|'='
name|'DomainEntry'
op|'('
name|'self'
op|'.'
name|'lobj'
op|','
name|'domain'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'exception'
op|'.'
name|'NotFound'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'['
op|']'
newline|'\n'
dedent|''
name|'nEntry'
op|'='
name|'dEntry'
op|'.'
name|'subentry_with_name'
op|'('
name|'name'
op|')'
newline|'\n'
name|'if'
name|'nEntry'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'['
name|'nEntry'
op|'.'
name|'ip'
op|']'
newline|'\n'
nl|'\n'
DECL|member|modify_address
dedent|''
dedent|''
name|'def'
name|'modify_address'
op|'('
name|'self'
op|','
name|'name'
op|','
name|'address'
op|','
name|'domain'
op|')'
op|':'
newline|'\n'
indent|' '
name|'dEntry'
op|'='
name|'DomainEntry'
op|'('
name|'self'
op|'.'
name|'lobj'
op|','
name|'domain'
op|')'
newline|'\n'
name|'nEntry'
op|'='
name|'dEntry'
op|'.'
name|'subentry_with_name'
op|'('
name|'name'
op|')'
newline|'\n'
name|'nEntry'
op|'.'
name|'modify_address'
op|'('
name|'name'
op|','
name|'address'
op|')'
newline|'\n'
nl|'\n'
DECL|member|create_domain
dedent|''
name|'def'
name|'create_domain'
op|'('
name|'self'
op|','
name|'domain'
op|')'
op|':'
newline|'\n'
indent|' '
name|'DomainEntry'
op|'.'
name|'create_domain'
op|'('
name|'self'
op|'.'
name|'lobj'
op|','
name|'domain'
op|')'
newline|'\n'
nl|'\n'
DECL|member|delete_domain
dedent|''
name|'def'
name|'delete_domain'
op|'('
name|'self'
op|','
name|'domain'
op|')'
op|':'
newline|'\n'
indent|' '
name|'dEntry'
op|'='
name|'DomainEntry'
op|'('
name|'self'
op|'.'
name|'lobj'
op|','
name|'domain'
op|')'
newline|'\n'
name|'dEntry'
op|'.'
name|'delete'
op|'('
op|')'
newline|'\n'
nl|'\n'
DECL|member|delete_dns_file
dedent|''
name|'def'
name|'delete_dns_file'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'LOG'
op|'.'
name|'warning'
op|'('
name|'_LW'
op|'('
string|'"This shouldn\'t be getting called except during "'
nl|'\n'
string|'"testing."'
op|')'
op|')'
newline|'\n'
name|'pass'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| 12.044473 | 174 | 0.578276 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14,016 | 0.502456 |
f81075d9a768c275f1cbe075abbbe7e3dce2e3c6 | 2,554 | py | Python | src/weekly_contest_251/1946_largest-number-after-mutating-substring.py | dongminlee94/leetcode-practice | 4d33816d66df8ab447087a04b76008f6bec51f23 | [
"MIT"
] | null | null | null | src/weekly_contest_251/1946_largest-number-after-mutating-substring.py | dongminlee94/leetcode-practice | 4d33816d66df8ab447087a04b76008f6bec51f23 | [
"MIT"
] | null | null | null | src/weekly_contest_251/1946_largest-number-after-mutating-substring.py | dongminlee94/leetcode-practice | 4d33816d66df8ab447087a04b76008f6bec51f23 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
1946. Largest Number After Mutating Substring
https://leetcode.com/problems/largest-number-after-mutating-substring/
Example 1:
Input: num = "132", change = [9,8,5,0,3,6,4,2,6,8]
Output: "832"
Explanation: Replace the substring "1":
- 1 maps to change[1] = 8.
Thus, "132" becomes "832".
"832" is the largest number that can be created, so return it.
Example 2:
Input: num = "021", change = [9,4,3,5,7,2,1,9,0,6]
Output: "934"
Explanation: Replace the substring "021":
- 0 maps to change[0] = 9.
- 2 maps to change[2] = 3.
- 1 maps to change[1] = 4.
Thus, "021" becomes "934".
"934" is the largest number that can be created, so return it.
Example 3:
Input: num = "5", change = [1,4,7,5,3,2,5,6,9,4]
Output: "5"
Explanation: "5" is already the largest number that can be created, so return it.
"""
from typing import List
class Solution:
def maximumNumber1(self, num: str, change: List[int]) -> str:
"""
TC: O(N^2) / SC: O(N)
Time Limit Exceeded
"""
max_num = num
for i in range(len(num)):
changed_num = num[:i] + str(change[int(num[i])]) + num[i + 1 :]
if changed_num >= max_num:
max_num = changed_num
for j in range(1, len(num[i + 1 :]) + 1):
changed_num = (
changed_num[: i + j] + str(change[int(num[i + j])]) + changed_num[i + j + 1 :]
)
if changed_num >= max_num:
max_num = changed_num
else:
break
return max_num
def maximumNumber2(self, num: str, change: List[int]) -> str:
"""
TC: O(N) / SC: O(N)
"""
num_list = list(num)
changed = False
for i in range(len(num_list)):
if change[int(num_list[i])] > int(num_list[i]):
num_list[i] = str(change[int(num_list[i])])
changed = True
            elif changed and change[int(num_list[i])] < int(num_list[i]):
break
return "".join(num_list)
def maximumNumber3(self, num: str, change: List[int]) -> str:
"""
TC: O(N^2) / SC: O(N)
"""
changed = False
for i in range(len(list(num))):
if str(change[int(num[i])]) > num[i]:
num = num[:i] + str(change[int(num[i])]) + num[i + 1 :] # TC: O(N)
changed = True
            elif changed and str(change[int(num[i])]) < num[i]:
break
return num
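# Illustrative driver: sanity-checks the solutions against the docstring
# examples above (expected outputs come straight from the problem statement):
if __name__ == "__main__":
    sol = Solution()
    assert sol.maximumNumber2("132", [9, 8, 5, 0, 3, 6, 4, 2, 6, 8]) == "832"
    assert sol.maximumNumber2("021", [9, 4, 3, 5, 7, 2, 1, 9, 0, 6]) == "934"
    assert sol.maximumNumber3("5", [1, 4, 7, 5, 3, 2, 5, 6, 9, 4]) == "5"
    print("all docstring examples pass")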
| 30.047059 | 98 | 0.523101 | 1,694 | 0.663273 | 0 | 0 | 0 | 0 | 0 | 0 | 1,002 | 0.392326 |
f810c90f204568fd67dca941d0e6266424f5517d | 261 | py | Python | test/unit/bot/test_bot.py | kubicki14/BurtTheCardKing | d0854ea08ffdffee687830097b0645069e263d9d | [
"MIT"
] | null | null | null | test/unit/bot/test_bot.py | kubicki14/BurtTheCardKing | d0854ea08ffdffee687830097b0645069e263d9d | [
"MIT"
] | 1 | 2020-03-04T04:39:20.000Z | 2020-03-04T04:39:20.000Z | test/unit/bot/test_bot.py | kubicki14/BurtTheCardKing | d0854ea08ffdffee687830097b0645069e263d9d | [
"MIT"
] | null | null | null | import pytest
from bot.bot import Bot
class TestBotClass:
    # Can't instantiate abstract classes; skipping the test for the abstract class.
def setup(self):
self.bot = 1
def test_init(self):
test_bot = 1
assert test_bot == self.bot
| 20.076923 | 75 | 0.659004 | 220 | 0.842912 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.272031 |
f8125fde876b086f96371a2951d0cf190eba3f48 | 1,437 | py | Python | Hackerrank/30DaysOfCode/Day9-Recursion3.py | eduardormonteiro/PythonPersonalLibrary | 561733bb8305c4e25a08f99c28b60ec77251ad67 | [
"MIT"
] | null | null | null | Hackerrank/30DaysOfCode/Day9-Recursion3.py | eduardormonteiro/PythonPersonalLibrary | 561733bb8305c4e25a08f99c28b60ec77251ad67 | [
"MIT"
] | null | null | null | Hackerrank/30DaysOfCode/Day9-Recursion3.py | eduardormonteiro/PythonPersonalLibrary | 561733bb8305c4e25a08f99c28b60ec77251ad67 | [
"MIT"
] | null | null | null | """
Hackerrank
Day 9: Recursion 3
https://www.hackerrank.com/challenges/30-recursion/problem?h_r=email&unlock_token=bc6d5f3963afb26ed0b2f69c3f4f3ddb1826e1b2&utm_campaign=30_days_of_code_continuous&utm_medium=email&utm_source=daily_reminder
Objective
Today, we are learning about an algorithmic concept called recursion. Check out the Tutorial tab for learning materials and an instructional video.
Recursive Method for Calculating Factorial
Function Description
Complete the factorial function in the editor below. Be sure to use recursion.
factorial has the following parameter:
int n: an integer
Returns
int: the factorial of n
Note: If you fail to use recursion or fail to name your recursive function factorial or Factorial, you will get a score of 0.
Input Format
A single integer, n (the argument to pass to factorial).
Constraints
Your submission must contain a recursive function named factorial.
Sample Input
3
Sample Output
6
Explanation
Consider the following steps. After the recursive calls from step 1 to 3, results are accumulated from step 3 to 1.
"""
import math
import os
import random
import re
import sys
# Complete the factorial function below.
def factorial(n):
if n <= 1:
return 1
return n * factorial(n-1)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
result = factorial(n)
fptr.write(str(result) + '\n')
fptr.close() | 22.809524 | 205 | 0.76618 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,148 | 0.798887 |
f8126dd049c2dae8dffb8bb81f37f683297e9ca7 | 408 | py | Python | todolist/main/models.py | gda2048/TODOlist | cc6c359ab0a8d2f43ed82b19dfc0eb5d640f8b9f | [
"MIT"
] | 1 | 2019-12-19T19:04:02.000Z | 2019-12-19T19:04:02.000Z | todolist/main/models.py | gda2048/TODOlist | cc6c359ab0a8d2f43ed82b19dfc0eb5d640f8b9f | [
"MIT"
] | 5 | 2020-02-12T02:57:13.000Z | 2021-12-13T20:02:16.000Z | todolist/main/models.py | gda2048/TODOlist | cc6c359ab0a8d2f43ed82b19dfc0eb5d640f8b9f | [
"MIT"
] | null | null | null | from django.db import models
class Card(models.Model):
"""
Stores all information about the TODOlist item
"""
id = models.AutoField(primary_key=True)
    name = models.CharField('Название', max_length=50)  # verbose name: "Name"
    description = models.TextField('Описание', max_length=500)  # verbose name: "Description"
    is_archived = models.BooleanField('Архивировано', default=False)  # verbose name: "Archived"
def __str__(self):
return str(self.name) | 29.142857 | 68 | 0.70098 | 405 | 0.928899 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.284404 |
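# Typical query for un-archived items (illustrative; assumes the default manager):
#     active_cards = Card.objects.filter(is_archived=False)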
f812c1ff23e3b82b8ed9c4bca10c6b857649c53a | 2,358 | py | Python | src/qbrobot/util/log.py | jucuguru/crypto-robot-basic | 3addaaff9fb2f41d8e9dcd66bae7ae7f75216704 | [
"BSD-2-Clause"
] | null | null | null | src/qbrobot/util/log.py | jucuguru/crypto-robot-basic | 3addaaff9fb2f41d8e9dcd66bae7ae7f75216704 | [
"BSD-2-Clause"
] | null | null | null | src/qbrobot/util/log.py | jucuguru/crypto-robot-basic | 3addaaff9fb2f41d8e9dcd66bae7ae7f75216704 | [
"BSD-2-Clause"
] | null | null | null | import logging
from qbrobot import qsettings
try:
    from util import send_dingding
except ImportError:
    DINGDING_CANUSE = False
else:
    DINGDING_CANUSE = True
# dingding_robot_id is referenced by the methods below but was never defined in
# this module; we assume it is configured in qsettings (the attribute name here
# is an assumption) and fall back to None.
dingding_robot_id = getattr(qsettings, 'DINGDING_ROBOT_ID', None)
"""
class DingDingLogger
pass all args to logger.method, and call dingding.send_msg()
1. debug message don't send to dingding.
2. only send_msg( message ), can't pass multi args.
"""
class DingDingLogger:
def __init__(self, logger = None ):
self.logger = logger
def debug(self, msg, *args, **kwargs):
self.logger.debug(msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
self.logger.info(msg, *args, **kwargs)
if DINGDING_CANUSE:
send_dingding.send_msg(msg, dingding_robot_id)
def warning(self, msg, *args, **kwargs):
self.logger.warning(msg, *args, **kwargs)
if DINGDING_CANUSE:
send_dingding.send_msg(msg, dingding_robot_id)
def error(self, msg, *args, **kwargs):
self.logger.error(msg, *args, **kwargs)
if DINGDING_CANUSE:
send_dingding.send_msg(msg, dingding_robot_id)
def log(self, lvl, msg, *args, **kwargs):
self.logger.log(lvl, msg, *args, **kwargs)
if DINGDING_CANUSE:
send_dingding.send_msg(msg, dingding_robot_id)
"""
handler = logging.handlers.RotatingFileHandler(str(logFile) + '.LOG', maxBytes = 1024 * 1024 * 500, backupCount = 5)
fmt = '%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(message)s'
formatter = logging.Formatter(fmt)
handler.setFormatter(formatter)
logger = logging.getLogger(str(logFile))
logger.addHandler(handler)
logger.setLevel(logging.INFO)
"""
def setup_custom_logger():
formatter = logging.Formatter(fmt=qsettings.LOG_FORMATTER)
file_name = qsettings.LOG_FILE
#file_name = None
if file_name :
handler = logging.FileHandler( file_name )
else:
handler = logging.StreamHandler()
#handler = logging.StreamHandler()
handler.setFormatter(formatter)
#print('setup_custom_logger', name)
logger = logging.getLogger()
logger.addHandler(handler)
logger.setLevel(qsettings.LOG_LEVEL)
return logger
"""
if DINGDING_CANUSE :
print('setup_custom_logger dingding ')
return DingDingLogger( logger )
else:
return logger
"""
| 25.912088 | 116 | 0.651399 | 909 | 0.385496 | 0 | 0 | 0 | 0 | 0 | 0 | 830 | 0.351993 |
f81309425c4d43dc4fcef12218a6de6d14c72768 | 722 | py | Python | Country cleaning/Chile/PRT/OfflineRB.py | Demonliquid/cars-python-cleaning | 91c516a33c4522114dc024cfaf04f1c1d594f973 | [
"MIT"
] | null | null | null | Country cleaning/Chile/PRT/OfflineRB.py | Demonliquid/cars-python-cleaning | 91c516a33c4522114dc024cfaf04f1c1d594f973 | [
"MIT"
] | null | null | null | Country cleaning/Chile/PRT/OfflineRB.py | Demonliquid/cars-python-cleaning | 91c516a33c4522114dc024cfaf04f1c1d594f973 | [
"MIT"
] | null | null | null | # %%
import os
import pandas as pd
import numpy as np
import datetime
# %% LOAD DATA
path = r'F:\Trabajo\Promotive\Chile\PRT\7\CSV\3'
os.chdir(path)
files = os.listdir(path)
files
# %%
files_xls = [f for f in files if f[-3:] == 'csv']
files_xls
# %%
columnas = ['PPU', 'MARCA', 'MODELO', 'ANO_FABRICACION', 'NUM_MOTOR', 'NUM_CHASIS', 'VIN']
chile = pd.DataFrame(columns=columnas)
# %%
for f in files_xls:
data = pd.read_csv(f, sep=";", encoding="latin-1")
chile = pd.concat([chile , data], ignore_index=True, join='outer')
# %%
chile = chile[columnas]
# %%
chile.drop_duplicates(subset="PPU", inplace=True)
# %%
chile.to_csv(r'F:\Trabajo\Promotive\Chile\PRT\Limpio\OfflineRB3.csv')
# %%
chile
# %%
| 17.609756 | 90 | 0.65651 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 245 | 0.339335 |
f815471c4b7feac192ccd8f44032afcd4c9605be | 3,850 | py | Python | datasets/lfw_crop.py | laoreja/face-identity-transformer | 5569d93017ad9371deae7e2b35564523c64b501e | [
"BSD-3-Clause"
] | 13 | 2020-10-09T07:15:02.000Z | 2022-03-28T20:51:30.000Z | datasets/lfw_crop.py | laoreja/face-identity-transformer | 5569d93017ad9371deae7e2b35564523c64b501e | [
"BSD-3-Clause"
] | 2 | 2021-03-03T15:04:51.000Z | 2021-06-02T03:42:03.000Z | datasets/lfw_crop.py | laoreja/face-identity-transformer | 5569d93017ad9371deae7e2b35564523c64b501e | [
"BSD-3-Clause"
] | 5 | 2021-03-02T11:44:19.000Z | 2021-07-09T16:42:02.000Z | import os.path as osp
import numpy as np
from PIL import Image
import torch.utils.data as data
import torch
__all__ = ['LFW_CROP']
EXTENSION_FACTOR = 2
class LFW_CROP(data.Dataset):
def __init__(self, train, transform, args):
self.root = osp.join(args.data_root, 'lfw')
self.transform = transform
landmark_path = osp.join(args.data_root, 'lfw_landmark.txt')
with open(landmark_path) as fd:
self.raw_annotations = [line.strip().split() for line in fd.readlines()]
for idx in range(len(self.raw_annotations)):
self.raw_annotations[idx] = self.raw_annotations[idx][0:1] + [
float(item) for item in self.raw_annotations[idx][1:]]
if not args.evaluate:
test_id_indices = set(np.random.choice(len(self.raw_annotations), size=args.test_size, replace=False))
self.raw_annotations = [anno for idx, anno in enumerate(self.raw_annotations) if
idx in test_id_indices]
self.anno_dict = {anno[0]: anno for anno in self.raw_annotations}
bbox_path = osp.join(args.data_root, 'lfw_detection.txt')
self.bbox_dict = {}
with open(bbox_path) as fd:
bbox_lines = [bbox_line.strip().split() for bbox_line in fd.readlines()]
for bbox_line in bbox_lines:
if bbox_line[0] not in self.anno_dict:
continue
oleft = float(bbox_line[1])
oup = float(bbox_line[2])
oright = float(bbox_line[3])
odown = float(bbox_line[4])
width = oright - oleft
new_width = width * EXTENSION_FACTOR
x_margin = (new_width - width) / 2
                y_margin = (new_width - (odown - oup)) / 2  # may need changing
box_left = max(int(oleft - x_margin), 0)
box_right = min(int(oright + x_margin), 249)
box_up = max(int(oup - y_margin), 0)
box_down = min(int(odown + y_margin), 249)
new_width = box_right - box_left
new_height = box_down - box_up
for i in range(5):
self.anno_dict[bbox_line[0]][2 * i + 1] = (self.anno_dict[bbox_line[0]][
2 * i + 1] - box_left) / new_width * 250.
self.anno_dict[bbox_line[0]][2 * i + 2] = (self.anno_dict[bbox_line[0]][
2 * i + 2] - box_up) / new_height * 250.
self.bbox_dict[bbox_line[0]] = [box_left,
box_up,
box_right,
box_down]
# extended left, right, up, down
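                # Worked example of the remapping above (numbers are illustrative):
                # a 50 px wide detection at oleft=100 with EXTENSION_FACTOR=2 gives
                # new_width=100 and x_margin=25, so the crop spans x in [75, 175].
                # A landmark at x=120 then maps to (120 - 75) / 100 * 250 = 112.5
                # in the 250x250 coordinate frame used by the annotations.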
def __len__(self):
return len(self.raw_annotations)
def __getitem__(self, index):
anno = self.anno_dict[self.raw_annotations[index][0]]
img_path = osp.join(self.root, anno[0])
label = 0
landmarks = torch.empty((5, 2), dtype=torch.float32)
for i in range(5):
landmarks[i, 0] = anno[2 * i + 1]
landmarks[i, 1] = anno[2 * i + 2]
img = Image.open(img_path).convert("RGB")
bbox = self.bbox_dict[anno[0]]
img = img.crop((bbox[0], bbox[1], bbox[2], bbox[3]))
if self.transform is not None:
img = self.transform(img)
return img, label, landmarks, img_path
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of imgs: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__str__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
| 37.745098 | 114 | 0.540779 | 3,692 | 0.958961 | 0 | 0 | 0 | 0 | 0 | 0 | 222 | 0.057662 |
f816945723bd501f06ebbe8199fa11cd256a3a52 | 1,065 | py | Python | test.py | JFF-Bohdan/pyimei | d881f4a11374d29828867e2de397d1fcc8413d25 | [
"MIT"
] | 1 | 2021-07-29T17:39:34.000Z | 2021-07-29T17:39:34.000Z | test.py | JFF-Bohdan/pyimei | d881f4a11374d29828867e2de397d1fcc8413d25 | [
"MIT"
] | null | null | null | test.py | JFF-Bohdan/pyimei | d881f4a11374d29828867e2de397d1fcc8413d25 | [
"MIT"
] | 3 | 2018-08-07T08:01:01.000Z | 2020-03-24T17:14:31.000Z | from pyimei import ImeiSupport
def checkImeisArray(imeis):
for imei in imeis:
if ImeiSupport.isValid(imei):
print("IMEI: '{}' is valid".format(imei))
else:
print("IMEI '{}' is NOT valid".format(imei))
#testing classes
ImeiSupport.test()
valid_imeis = [
356938035643809,
490154203237518,
"356938035643809"
]
invalid_imeis = [
358065019104263,
"357805023984941",
356938035643801
]
checkImeisArray(valid_imeis)
checkImeisArray(invalid_imeis)
print("Generating independent FAKE imeis...")
RANDOM_IMEIS_QTY = 5
for i in range(RANDOM_IMEIS_QTY):
print("\tfake IMEI[{}] = {}".format(i+1, ImeiSupport.generateNew()))
print("Generating sequental FAKE imeis:")
DEP_RANDOM_IMEIS_QTY = 5
startImei = ImeiSupport.generateNew()
currentImei = startImei
print("start IMEI: {}".format(startImei))
for i in range(DEP_RANDOM_IMEIS_QTY):
currentImei = ImeiSupport.next(currentImei)
print("\tfake IMEI[{}] = {}".format(i+1, currentImei))
print("DONE") | 23.152174 | 73 | 0.66385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 234 | 0.219718 |
f816d939ecc6c4f196c356dcf81afa3b4caf0b94 | 2,175 | py | Python | aioauth/responses.py | grmnz/aioauth | e69c989bc81284d60798599816c39ff91074a24b | [
"MIT"
] | null | null | null | aioauth/responses.py | grmnz/aioauth | e69c989bc81284d60798599816c39ff91074a24b | [
"MIT"
] | null | null | null | aioauth/responses.py | grmnz/aioauth | e69c989bc81284d60798599816c39ff91074a24b | [
"MIT"
] | null | null | null | """
.. code-block:: python
from aioauth import responses
Response objects used throughout the project.
----
"""
from dataclasses import dataclass, field
from http import HTTPStatus
from typing import Dict
from .collections import HTTPHeaderDict
from .constances import default_headers
from .types import ErrorType, TokenType
@dataclass
class ErrorResponse:
"""Response for errors."""
error: ErrorType
description: str
error_uri: str = ""
@dataclass
class AuthorizationCodeResponse:
"""Response for ``authorization_code``.
Used by :py:class:`aioauth.response_type.ResponseTypeAuthorizationCode`.
"""
code: str
scope: str
@dataclass
class NoneResponse:
"""Response for :py:class:`aioauth.response_type.ResponseTypeNone`.
See: `OAuth v2 multiple response types <openid.net/specs/oauth-v2-multiple-response-types-1_0.html#none>`_,
"""
@dataclass
class TokenResponse:
"""Response for valid token.
Used by :py:class:`aioauth.response_type.ResponseTypeToken`.
"""
expires_in: int
refresh_token_expires_in: int
access_token: str
refresh_token: str
scope: str
token_type: str = "Bearer"
@dataclass
class IdTokenResponse:
"""Response for OpenID id_token.
Used by :py:class:`aioauth.response_type.ResponseResponseTypeIdTokenTypeToken`.
"""
id_token: str
@dataclass
class TokenActiveIntrospectionResponse:
"""Response for a valid access token.
Used by :py:meth:`aioauth.server.AuthorizationServer.create_token_introspection_response`.
"""
scope: str
client_id: str
token_type: TokenType
expires_in: int
active: bool = True
@dataclass
class TokenInactiveIntrospectionResponse:
"""For an invalid, revoked or expired token.
Used by :py:meth:`aioauth.server.AuthorizationServer.create_token_introspection_response`.
"""
active: bool = False
@dataclass
class Response:
"""General response class.
Used by :py:class:`aioauth.server.AuthorizationServer`.
"""
content: Dict = field(default_factory=dict)
status_code: HTTPStatus = HTTPStatus.OK
headers: HTTPHeaderDict = default_headers
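# Usage sketch (illustrative; the field values below are placeholders, not
# defaults defined by this module):
#
#     token = TokenResponse(
#         expires_in=3600,
#         refresh_token_expires_in=86400,
#         access_token="...",
#         refresh_token="...",
#         scope="read write",
#     )
#     response = Response(content={"access_token": token.access_token})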
| 20.518868 | 111 | 0.722299 | 1,730 | 0.795402 | 0 | 0 | 1,818 | 0.835862 | 0 | 0 | 1,078 | 0.495632 |
f818d292ca6f1460d6aa1027f16f35e13ba6829c | 5,441 | py | Python | fipomdp/experiments/NYC_experiment.py | xbrlej/FiPOMDP | b7a97aaaf43a43e5ee9b8776c0e7f6d0bb09392f | [
"MIT"
] | null | null | null | fipomdp/experiments/NYC_experiment.py | xbrlej/FiPOMDP | b7a97aaaf43a43e5ee9b8776c0e7f6d0bb09392f | [
"MIT"
] | null | null | null | fipomdp/experiments/NYC_experiment.py | xbrlej/FiPOMDP | b7a97aaaf43a43e5ee9b8776c0e7f6d0bb09392f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import logging
import platform
import time
from functools import partial
from statistics import stdev
from typing import List, Tuple, Dict, Union, Any
import psutil
from joblib import Parallel, delayed
from fimdp.objectives import BUCHI
from fipomdp import ConsPOMDP
from fipomdp.energy_solvers import ConsPOMDPBasicES
from fipomdp.experiments.NYC_environment import NYCPOMDPEnvironment
from fipomdp.experiments.UUV_experiment import simulate_observation
from fipomdp.pomcp import OnlineStrategy
from fipomdp.rollout_functions import basic, grid_manhattan_distance, product, consumption_based
def nyc_experiment(computed_cpomdp: ConsPOMDP, computed_solver: ConsPOMDPBasicES, capacity: int, targets: List[int], random_seed: int, logger) -> \
Tuple[int, bool, List[int], List[int], bool, int]:
logger = logger
if computed_cpomdp.belief_supp_cmdp is None or computed_solver.bs_min_levels[BUCHI] is None:
raise AttributeError(f"Given CPOMDP or its solver is not pre computed!")
# SPECIFY ROLLOUT FUNCTION
# rollout_function = basic
# grid_adjusted = partial(grid_manhattan_distance, grid_size=(20, 20), targets=[3, 12, 15])
rollout_function = consumption_based
#
# rollout_product = partial(product, a=10, b=20)
# rollout_function = rollout_product
# -----
# HYPER PARAMETERS
init_energy = capacity
init_obs = computed_cpomdp.state_with_name('42459137')
init_bel_supp = tuple([computed_cpomdp.state_with_name('42459137')])
exploration = 1
rollout_horizon = 100
max_iterations = 100
    actual_horizon = 1000  # number of actions to take
softmax_on = False
# -----
strategy = OnlineStrategy(
computed_cpomdp,
capacity,
init_energy,
init_obs,
init_bel_supp,
targets,
exploration,
rollout_function,
rollout_horizon=rollout_horizon,
random_seed=random_seed,
recompute=False,
solver=computed_solver,
logger=logger,
softmax_on=softmax_on
)
simulated_state = init_bel_supp[0]
path = [simulated_state]
logger.info(f"\nLAUNCHING with max iterations: {max_iterations}\n")
reward = 0
target_hit = False
decision_times = []
for j in range(actual_horizon):
pre_decision_time = time.time()
action = strategy.next_action(max_iterations)
simulated_state, new_obs = simulate_observation(computed_cpomdp, action, simulated_state)
path.append(simulated_state)
reward -= action.cons
if simulated_state in targets:
reward += 1000
target_hit = True
break
strategy.update_obs(new_obs)
decision_times.append(round(time.time() - pre_decision_time))
logger.info(f"\n--------EXPERIMENT FINISHED---------")
logger.info(f"--------RESULTS--------")
logger.info(f"For max iterations: {max_iterations}, target has been reached {target_hit} times.")
logger.info(f"Path of the agent was: {path}")
logger.info(f"Decision times: {decision_times}")
logger.info(f"Decision time average: {sum(decision_times)/len(decision_times)}, standard deviation: {stdev(decision_times)}")
logger.info(f"Target hit: {target_hit}, reward: {reward}")
return max_iterations, target_hit, path, decision_times, target_hit, reward
def log_experiment_with_seed(cpomdp, env, i, log_file_name, solver, targets):
handler = logging.FileHandler(f"./logs/{log_file_name}{i}.log", 'w')
formatter = logging.Formatter("%(asctime)s %(levelname)-8s %(message)s")
handler.setFormatter(formatter)
logger = logging.getLogger(f"{i}")
    # remove stale handlers without rebinding the newly created `handler`
    for old_handler in logger.handlers[:]:
        logger.removeHandler(old_handler)
    logger.addHandler(handler)
logger.level = logging.INFO
logger.info("START")
uname = platform.uname()
logger.info(f"Node name: {uname.node}")
logger.info(f"System: {uname.system}")
logger.info(f"Release: {uname.release}")
logger.info(f"Version: {uname.version}")
logger.info(f"Machine: {uname.machine}")
logger.info(f"Processor: {uname.processor}")
logger.info(f"RAM: {str(round(psutil.virtual_memory().total / (1024.0 ** 3)))} GB")
return nyc_experiment(cpomdp, solver, env.cmdp_env.capacity, targets, i, logger)
def main():
log_file_name = "NYCExperiments" # Change for your needs
logging_level = logging.INFO
# set to INFO (20) for logging to be active, set to DEBUG (10) for details,
# set to 5 for extreme debug
logging.basicConfig(
filename=f"{log_file_name}.log",
filemode="w", # Erase previous log
format="%(asctime)s %(levelname)-8s %(message)s",
level=logging_level,
datefmt="%Y-%m-%d %H:%M:%S",
)
env = NYCPOMDPEnvironment()
cpomdp, targets = env.get_cpomdp()
preprocessing_start = time.time()
cpomdp.compute_guessing_cmdp_initial_state([cpomdp.state_with_name('42459137')])
solver = ConsPOMDPBasicES(cpomdp, [cpomdp.state_with_name('42459137')], env.cmdp_env.capacity, targets)
solver.compute_buchi()
preprocessing_time = round(time.time() - preprocessing_start)
results = Parallel(n_jobs=10)(
delayed(log_experiment_with_seed)(cpomdp, env, i, log_file_name, solver, targets) for i in range(10))
logging.info(f"RESULTS (): {results}")
print(preprocessing_time)
if __name__ == "__main__":
main()
| 33.58642 | 147 | 0.695093 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,432 | 0.263187 |
# ---- examples/connect_to_wifi.py (flaiming/TechFurMeet-Micropython, MIT) ----
import network
import time
# deactivate AP
ap = network.WLAN(network.AP_IF)
ap.active(False)
# activate station interface (STA)
wlan = network.WLAN(network.STA_IF)
wlan.active(True)
# connect to local WIFI
wlan.connect('TFM-Attendees')
# wait until connected
while not wlan.isconnected():
print('connecting...')
time.sleep(1)
print('Connected!')
print('Current network config:', wlan.ifconfig())
| 19 | 49 | 0.736842 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 152 | 0.380952 |
# ---- hikcamerabot/services/tasks/video.py (CamVipQ/hikvision-camera-bot, MIT) ----
import asyncio
import logging
import os
import time
from addict import Addict
from aiogram.types import Message
from hikcamerabot.config.config import get_result_queue
from hikcamerabot.constants import Event, VideoGifType
from hikcamerabot.utils.utils import format_ts, gen_random_str
class RecordVideoTask:
_video_filename = {
VideoGifType.ALERT: '{0}-alert-{1}-{2}.mp4',
VideoGifType.REGULAR: '{0}-{1}-{2}.mp4',
}
_video_type_to_event = {
VideoGifType.ALERT: Event.ALERT_VIDEO,
VideoGifType.REGULAR: Event.RECORD_VIDEOGIF,
}
FILENAME_TIME_FORMAT = '%Y-%b-%d--%H-%M-%S'
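
    # For illustration (id, timestamp and suffix below are hypothetical):
    # a camera with id "cam1" recording an alert clip would get a filename
    # such as "cam1-alert-2022-Jan-01--12-00-00-<random>.mp4" via
    # _get_filename().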
def __init__(self, ffmpeg_cmd: str, storage_path: str, conf: Addict,
cam, video_type: str, context: Message = None):
self._log = logging.getLogger(self.__class__.__name__)
self._conf = conf
self._cam = cam
self._bot: 'CameraBot' = cam.bot
self._video_type = video_type
self._file_path = os.path.join(storage_path, self._get_filename())
self._ffmpeg_cmd_full = f'{ffmpeg_cmd} {self._file_path}'
self._context = context
self._event = self._video_type_to_event[self._video_type]
async def run(self) -> None:
if await self._record():
await self._send_result()
async def _record(self) -> bool:
"""Start Ffmpeg subprocess and return file path and video type."""
self._log.debug('Recording video gif from %s: %s',
self._conf.description, self._ffmpeg_cmd_full)
await self._start_ffmpeg_subprocess()
validated = await self._validate_file()
if not validated:
err_msg = f'Failed to record {self._file_path}'
self._log.error(err_msg)
await self._bot.send_message(
self._context.chat.id,
text=f'{err_msg}.\nEvent type: {self._event}\nCheck logs.',
reply_to_message_id=self._context.message_id if self._context else None,
)
return validated
async def _start_ffmpeg_subprocess(self) -> None:
proc = await asyncio.create_subprocess_shell(self._ffmpeg_cmd_full)
await proc.wait()
async def _validate_file(self) -> bool:
"""Validate recorded file existence and size."""
try:
is_empty = os.path.getsize(self._file_path) == 0
except FileNotFoundError:
self._log.error('Failed to validate %s: File does not exist',
self._file_path)
return False
except Exception:
self._log.exception('Failed to validate %s', self._file_path)
return False
        if is_empty:
            self._log.error('Failed to validate %s: File is empty',
                            self._file_path)
        return not is_empty
async def _send_result(self):
await get_result_queue().put({
'event': self._event,
'video_path': self._file_path,
'cam': self._cam,
'message': self._context
})
def _get_filename(self) -> str:
return self._video_filename[self._video_type].format(
self._cam.id,
format_ts(time.time(), time_format=self.FILENAME_TIME_FORMAT),
gen_random_str())
| 35.666667 | 88 | 0.621948 | 3,026 | 0.91227 | 0 | 0 | 0 | 0 | 1,857 | 0.559843 | 482 | 0.145312 |
# ---- HyperOXO/hypercube.py (drtjc/Hyper, MIT) ----
""" Provides functionality for working with celled hypercubes.
Hypercubes are extensions of lines, squares and cubes into higher
dimensions. Celled hypercubes can be thought as a grid or lattice
structure. From this point, hypercubes is used to mean celled
hypercubes.
A hypercube can be described by its dimension and the number of
cells in any dimension. We denote this as h(d, n).
For example: h(2, 3) is a 3x3 grid; h(3, 4) is a 4x4x4 lattice.
A hypercube of dimension d may also be referred to as a d-cube.
A cell's position can be specified in coordinate style.
For example, given h(3, 4) and an agreed ordering of dimension
then some valid coordinates are (1,1,1), (2,1,3) and (4,4,4).
The term m-agonal is short for "m-dimensional diagonal" and can be
thought of as a line of contiguous cells that spans m dimensions.
For example, in a 3-cube you would find many 1-agonals, 2-agonals and
3-agonals. A 1-agonal is customarily known as a row, column or pillar.
In another example, if a line of contiguous cells in a 5-cube has the
property that 3 coordinates change, while the others remain constant,
these cells constitute a 3-agonal.
For a given h(d, n) and 1 <= m <= d, an m-agonal always has n cells.
The term line is used to refer to any m-agonal in general.
A cell apppears in multiple lines, which are refered to as the
scope of the cell, or the scoped lines of the cell.
The combination of lines and scopes is referred to as the structure
of the hypercube.
For a given cell, we define its connected cells as those cells that
appear in the scoped lines of the given cell.
We define a slice as a sub-cube of a hypercube. For example,
consider h(2, 3), a 3x3 hypercube. Let the coordinates along the two
dimensions be denoted d1 and d2, respectively, where 1 <= d1, d2 <= 3.
If we consider d1 as rows, and d2 as columns, then the slice that is
the first column is defined by d1 = 1, 2, 3, and d2 = 1. This has the
form h(1, 3).
The slice that is the top left 2x2 corner is defined by d1, d2 = 1, 2.
This has the form h(2, 2).
This module essentially has 2 classes of functions:
1. Those that use a numpy ndarray to implement the underlying
hypercube. These functions have the suffix _np. An array of d dimensions
may be referred to as a d-array.
2. Those that do not implement the underlying hypercube but
provide information as coordinates that can be used with
a user-implementation of the hypercube. These functions have
the suffix _coord.
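
Examples
--------
As a quick illustration, noughts and crosses is played on h(2, 3),
which has 8 winning lines (3 rows, 3 columns and 2 diagonals):

>>> num_lines(2, 3)
8
>>> len(list(get_lines_coord(2, 3)))
8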
########################################################################
Type annotations are used in this module. In addition to the standard
types defined in the typing module, several aliases are also defined
which can be viewed in the source code.
"""
# numpy (and scipy) don't yet have type annotations
import numpy as np # type: ignore
from scipy.special import comb # type: ignore
import itertools as it
import numbers
import re
from typing import List, Callable, Union, Collection, Tuple, Any, Type, Deque
from typing import DefaultDict, TypeVar, Counter, Dict, Iterable, Generator, Sequence
Cell_coord = Tuple[int, ...]
Cube_np = TypeVar('Cube_np', np.ndarray, np.ndarray) # Cube_np should really be a numpy array representing h(d, n)
Line_np = TypeVar('Line_np', np.ndarray, np.ndarray) # Line_np should really be a 1d numpy array with n elements
Line_coord = List[Cell_coord]
Lines_np = List[Line_np]
Lines_enum_np = Dict[int, Line_np]
Lines_coord = List[Line_coord]
Lines_enum_coord = Dict[int, Line_coord]
Scopes_np = DefaultDict[Cell_coord, Lines_np]
Scopes_coord = DefaultDict[Cell_coord, Lines_coord]
Scopes_enum = DefaultDict[Cell_coord, List[int]]
Scopes = Union[Scopes_np, Scopes_coord, Scopes_enum]
Structure_np = Tuple[Cube_np, Lines_np, Scopes_np]
Structure_enum_np = Tuple[Cube_np, Lines_enum_np, Scopes_enum]
Structure_coord = Tuple[Lines_coord, Scopes_coord]
Structure_enum_coord = Tuple[Lines_enum_coord, Scopes_enum]
Connected_cells = DefaultDict[Cell_coord, List[Cell_coord]]
def num_lines_grouped(d: int, n: int) -> Generator[int, None, None]:
"""
num_lines_grouped(d: int, n: int) -> Generator[int, None, None]:
Calculate the number of lines in a hypercube, grouped by the
number of dimensions spanned.
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
Yields
-------
The number of lines in a hypercube, grouped by number of
dimensions spanned.
Notes
-----
Consider a hypercube h(d, n).
Let l be the number of lines, then
l = sum{i=1, i=d} [ dCi * n^(d-i) * (2^i)/2 ]
where dCi is 'd choose i'.
Sketch of proof:
Let l_i be the number of i-agonals (lines that span exactly
i dimensions). For example, consider the following square (2-cube):
[[0, 1],
[2, 3]]
The 1-agonals are [0, 1], [2, 3], [0, 2] and [1, 3] and l_1 = 4.
The 2-agonals are [0, 3] and [1, 2] and l_2 = 2.
Hence l = l_1 + l_2 = 6
It is trivially true that the l is the sum of l_i, i.e.,
l = sum{i=1, i=d} l_i
Next we show how l_i can be calculated. Firstly, we argue
that the distinct number of h(i, n) is dCi * n^(d-i).
The number of ways of choosing i dimensions from d is dCi.
For example if d=3 and i=2, then the 3 combinations of
2 dimensions (squares) are (1, 2), (1, 3) and (2, 3).
Given a fixed set of i dimension, the number of remaining dimensions
is d-i, and the number of cells in these dimensions is n^(d-i).
Any one of these cells could be chosen relative to the
fixed i dimensions.
Hence the distinct number of h(i, n) is dCi * n^(d-i).
Finally, for any h(i, n), the number of i-agonals is (2^i)/2.
This is because an i-cube has 2^i corners and a line has 2 corners.
Hence l_i = dCi * n^(d-i) * (2^i)/2 and thus:
l = sum{i=1, i=d} [ dCi * n^(d-i) * (2^i)/2 ]
Examples
--------
>>> list(num_lines_grouped(2, 3))
[6, 2]
>>> list(num_lines_grouped(3, 4))
[48, 24, 4]
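
    The groups sum to the total number of lines, 76 for h(3, 4):

    >>> sum(num_lines_grouped(3, 4))
    76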
"""
for i in range(1, d + 1):
yield comb(d, i, True) * (n ** (d - i)) * (2 ** (i - 1))
def num_lines(d: int, n: int) -> int:
"""
num_lines(d: int, n: int) -> int:
Calculate the number of lines in a hypercube.
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
Returns
-------
The number of lines in a hypercube.
See Also
--------
num_lines_grouped
Notes
-----
There are two ways to calculate the number of lines:
1. Call the function num_lines_grouped and sum the number of lines
spanning each dimension.
2. Directly, using the formula:
((n+2)**d-n**d)/2
Sketch of proof:
Embed the n**d hypercube in an (n+2)**d hypercube which extends one
cell further in each dimension. Then each winning line in the n**d
hypercube terminates in exactly two "border" cells of the enlarged
hypercube, and these two borders are unique to that line. Moreover,
every border cell is at the end of a line, so that (n+2)**d border
cells are in two-to-one correspondence with the winning lines.
(See Hypercube -Tic-Tac-Toe: Solomon W.Golomb and Alfred W. Hales)
Examples
--------
>>> num_lines(2, 3)
8
>>> num_lines(3, 4)
76
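
    Both approaches described in the Notes agree; for example:

    >>> num_lines(4, 3) == sum(num_lines_grouped(4, 3))
    True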
"""
# return sum(list(num_lines_grouped(d, n)))
return int(((n+2)**d-n**d)/2)
def get_diagonals_np(hc: Cube_np) -> Generator[Line_np, None, None]:
"""
get_diagonals_np(hc: Cube_np) -> Generator[Line_np, None, None]:
Calculate the d-agonals of a d-cube h(d, n).
Parameters
----------
hc
A d-cube whose d-agonals are to be calculated
Yields
-------
numpy.ndarray views of the d-gonals of `hc`.
Notes
-----
The number of corners of `hc` is 2^d. The number of d-agonals
is 2^d / 2 since two connecting corners form a line.
Examples
--------
>>> import numpy as np
>>> hc = np.arange(8).reshape(2, 2, 2)
>>> hc
array([[[0, 1],
[2, 3]],
<BLANKLINE>
[[4, 5],
[6, 7]]])
>>> diagonals = list(get_diagonals_np(hc))
>>> diagonals
[array([0, 7]), array([1, 6]), array([4, 3]), array([5, 2])]
>>> hc[0, 0, 0] = 99
>>> diagonals
[array([99, 7]), array([1, 6]), array([4, 3]), array([5, 2])]
"""
# The function is recursive. How it works is best shown by example.
# 1d: hc = [0, 1] then the diagonal is also [0, 1].
# 2d: hc = [[0, 1],
# [2, 3]]
# The numpy diagonal method gives the main diagonal = [0, 3], a 1d array
# which is recursively passed to the function.
# To get the opposite diagonal we first use the numpy flip function to
# reverse the order of the elements along the given dimension, 0 in this case.
# This gives [[2, 3],
# 0, 1]]
# The numpy diagonal method gives the main diagonal = [2, 1], a 1d array
# which is recursively passed to the function.
# 3d: hc = [[[0, 1],
# [2, 3]],
# [[4, 5],
# [6, 7]]]
# The numpy diagonal method gives the main diagonals in the 3rd dimension
# as rows.
# [[0, 6],
# [1, 7]]
# Note that the diagonals of this array are [0, 7] and [6, 1] which are
# retrieved by a recurive call to the function.
# We now have 2 of the 4 3-agonals of the orginal 3-cube hc.
# To get the opposite 3-agonals we first use the numpy flip function which
# gives
# [[[4, 5],
# [6, 7]],
# [[0, 1],
# [2, 3]]]
# and a call to the numpy diagonal method gives
# [[4, 2],
# [5, 3]]
# The diagonals of this array are [4, 3] and [2, 5]
# We now have all four 3-agonals of the original 3-cube hc.
if hc.ndim == 1:
yield hc
else:
yield from get_diagonals_np(hc.diagonal())
yield from get_diagonals_np(np.flip(hc, 0).diagonal())
def get_lines_grouped_np(hc: Cube_np) -> Generator[Lines_np, None, None]:
"""
get_lines_grouped_np(hc: Cube_np) ->
Generator[Lines_np, None, None]:
Generate the lines of a hypercube, grouped by the number of
dimensions spanned.
Parameters
----------
hc
The hypercube whose lines are to be calculated
Yields
-------
numpy.ndarray views of the lines in `hc`, grouped by the
numbers of dimensions spanned.
See Also
--------
get_lines_i_np
Examples
--------
>>> import numpy as np
>>> hc = np.arange(4).reshape(2, 2)
>>> hc
array([[0, 1],
[2, 3]])
>>> lines = list(get_lines_grouped_np(hc))
>>> lines #doctest: +NORMALIZE_WHITESPACE
[[array([0, 2]), array([1, 3]), array([0, 1]), array([2, 3])],
[array([0, 3]), array([2, 1])]]
>>> hc[0, 0] = 99
>>> lines #doctest: +NORMALIZE_WHITESPACE
[[array([99, 2]), array([1, 3]), array([99, 1]), array([2, 3])],
[array([99, 3]), array([2, 1])]]
"""
for i in range(hc.ndim):
yield from get_lines_i_np(hc, i)
def get_lines_i_np(hc: Cube_np, i: int) -> Generator[Lines_np, None, None]:
"""
get_lines_i_np(hc: Cube_np, i: int) ->
Generator[Lines_np, None, None]:
Generates the lines of a hypercube that span the specified
number of dimensions.
Parameters
----------
hc
The hypercube whose lines are to be calculated
i
The number of dimensions that the returned lines must span
Yields
-------
numpy.ndarray views of the lines in `hc` that span
`i` dimensions.
See Also
--------
num_lines_grouped
Notes
-----
The notes section for the function num_lines_grouped provides a
    sketch of a constructive proof for the number of lines in a
hypercube. This has been used to implement this function.
Examples
--------
>>> import numpy as np
>>> hc = np.arange(4).reshape(2, 2)
>>> hc
array([[0, 1],
[2, 3]])
>>> lines = list(get_lines_i_np(hc, 0))
>>> lines
[[array([0, 2]), array([1, 3]), array([0, 1]), array([2, 3])]]
>>> lines = list(get_lines_i_np(hc, 1))
>>> lines
[[array([0, 3]), array([2, 1])]]
>>> hc[0, 0] = 99
>>> lines
[[array([99, 3]), array([2, 1])]]
"""
d = hc.ndim
n = hc.shape[0]
lines = []
# loop over all possible combinations of i dimensions
for i_comb in it.combinations(range(d), r = i + 1):
# a cell could be in any position in the other dimensions
other_d = set(range(d)) - set(i_comb)
for cell in it.product(range(n), repeat = d - i - 1):
# take a slice of selected i dimensions given a cell
sl = slice_ndarray(hc, other_d, cell)
# get all possible lines from slice
lines.extend(list(get_diagonals_np(sl)))
yield lines
def get_lines_np(hc: Cube_np) -> Generator[Line_np, None, None]:
"""
get_lines_np(hc: Cube_np) -> Generator[Line_np, None, None]:
Returns the lines in a hypercube
Parameters
----------
hc
The hypercube whose lines are to be calculated
Yields
-------
numpy.ndarray views of the lines in `hc`.
See Also
--------
get_lines_grouped_np
Examples
--------
>>> import numpy as np
>>> hc = np.arange(4).reshape(2, 2)
>>> hc
array([[0, 1],
[2, 3]])
>>> lines = list(get_lines_np(hc))
>>> lines #doctest: +NORMALIZE_WHITESPACE
[array([0, 2]), array([1, 3]), array([0, 1]), array([2, 3]),
array([0, 3]), array([2, 1])]
>>> len(lines)
6
>>> hc[0, 0] = 99
>>> lines #doctest: +NORMALIZE_WHITESPACE
[array([99, 2]), array([1, 3]), array([99, 1]), array([2, 3]),
array([99, 3]), array([2, 1])]
"""
grouped = get_lines_grouped_np(hc)
flat = (x for y in grouped for x in y)
    yield from flat  # 'return flat' would also work, but 'yield from' makes the generator explicit
def get_scopes_np(lines: Lines_np, d: int) -> Scopes_np:
"""
get_scopes_np(lines: Lines_np, d: int) -> Scopes_np:
Calculate the scope of each cell in a hypercube
Parameters
----------
lines
        The returned value from get_lines_np(hc) where hc is of the
        form np.arange(n ** d, dtype = np.int32).reshape([n] * d)
        (np.int64 for larger hypercubes).
        That is, hc is populated with the values 0,1,2,...,n^d - 1.
    d
The dimension of the hypercube that was used to
generate `lines`.
Returns
-------
A dictionary with keys equal to the coordinates of each cell in
the hypercube. For each cell key, the value is the cell's
scope - a list of numpy.ndarray views that are lines containing
the cell.
See Also
--------
get_lines_np
Notes
-----
    The implementation of this function uses np.unravel_index, and
    relies upon the lines parameter being generated from an array
    populated with values 0,1,2,...
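
    For instance, with d = 2 and n = 2, the cell holding value 2 sits at
    coordinates (1, 0):

    >>> np.unravel_index(2, [2, 2])
    (1, 0)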
Examples
--------
>>> import numpy as np
>>> from pprint import pprint
>>> hc = np.arange(4).reshape(2, 2)
>>> hc
array([[0, 1],
[2, 3]])
>>> lines = list(get_lines_np(hc))
>>> lines #doctest: +NORMALIZE_WHITESPACE
[array([0, 2]), array([1, 3]), array([0, 1]), array([2, 3]),
array([0, 3]), array([2, 1])]
>>> scopes = get_scopes_np(lines, 2)
>>> pprint(scopes) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [array([0, 2]), array([0, 1]), array([0, 3])],
(0, 1): [array([1, 3]), array([0, 1]), array([2, 1])],
(1, 0): [array([0, 2]), array([2, 3]), array([2, 1])],
(1, 1): [array([1, 3]), array([2, 3]), array([0, 3])]})
>>> sorted(scopes.items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [array([0, 2]), array([0, 1]), array([0, 3])]),
((0, 1), [array([1, 3]), array([0, 1]), array([2, 1])]),
((1, 0), [array([0, 2]), array([2, 3]), array([2, 1])]),
((1, 1), [array([1, 3]), array([2, 3]), array([0, 3])])]
>>> hc[0, 0] = 99
>>> pprint(scopes) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [array([99, 2]), array([99, 1]), array([99, 3])],
(0, 1): [array([1, 3]), array([99, 1]), array([2, 1])],
(1, 0): [array([99, 2]), array([2, 3]), array([2, 1])],
(1, 1): [array([1, 3]), array([2, 3]), array([99, 3])]})
>>> sorted(scopes.items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [array([99, 2]), array([99, 1]), array([99, 3])]),
((0, 1), [array([1, 3]), array([99, 1]), array([2, 1])]),
((1, 0), [array([99, 2]), array([2, 3]), array([2, 1])]),
((1, 1), [array([1, 3]), array([2, 3]), array([99, 3])])]
"""
n = lines[0].size
shape = [n] * d
scopes: Scopes_np = DefaultDict(list)
for line in lines:
for j in range(n):
cell = np.unravel_index(line[j], shape)
scopes[cell].append(line)
return scopes
def structure_np(d: int, n: int, zeros: bool = True, OFFSET: int = 0) -> Structure_np:
"""
structure_np(d: int, n: int, zeros: bool = True, OFFSET: int = 0) ->
Structure_np:
Return a hypercube, its lines, and the scopes of its cells.
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
zeros
If true, all values in array are 0, else they are 0,1,2,...
OFFSET
        The number of cells is n^d. If this is greater than
(2^31 - OFFSET - 1) then we use np.int64 (instead of np.int32)
as the dtype of the numpy array.
Returns
-------
The hypercube (as a numpy array), its lines, and the scopes of
its cells.
See Also
--------
get_lines_np
get_scopes_np
Examples
--------
>>> import numpy as np
>>> from pprint import pprint
>>> struct = structure_np(2, 2)
>>> struct[0]
array([[0, 0],
[0, 0]])
>>> struct[1] #doctest: +NORMALIZE_WHITESPACE
[array([0, 0]), array([0, 0]), array([0, 0]), array([0, 0]),
array([0, 0]), array([0, 0])]
>>> pprint(struct[2]) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [array([0, 0]), array([0, 0]), array([0, 0])],
(0, 1): [array([0, 0]), array([0, 0]), array([0, 0])],
(1, 0): [array([0, 0]), array([0, 0]), array([0, 0])],
(1, 1): [array([0, 0]), array([0, 0]), array([0, 0])]})
>>> sorted(struct[2].items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [array([0, 0]), array([0, 0]), array([0, 0])]),
((0, 1), [array([0, 0]), array([0, 0]), array([0, 0])]),
((1, 0), [array([0, 0]), array([0, 0]), array([0, 0])]),
((1, 1), [array([0, 0]), array([0, 0]), array([0, 0])])]
>>> struct = structure_np(2, 2, False)
>>> struct[0]
array([[0, 1],
[2, 3]])
>>> struct[1] #doctest: +NORMALIZE_WHITESPACE
[array([0, 2]), array([1, 3]), array([0, 1]), array([2, 3]),
array([0, 3]), array([2, 1])]
>>> pprint(struct[2]) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [array([0, 2]), array([0, 1]), array([0, 3])],
(0, 1): [array([1, 3]), array([0, 1]), array([2, 1])],
(1, 0): [array([0, 2]), array([2, 3]), array([2, 1])],
(1, 1): [array([1, 3]), array([2, 3]), array([0, 3])]})
>>> sorted(struct[2].items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [array([0, 2]), array([0, 1]), array([0, 3])]),
((0, 1), [array([1, 3]), array([0, 1]), array([2, 1])]),
((1, 0), [array([0, 2]), array([2, 3]), array([2, 1])]),
((1, 1), [array([1, 3]), array([2, 3]), array([0, 3])])]
"""
    # number of cells is n^d. If this is greater than (2^31 - OFFSET - 1)
    # then we use int64. This is because the get_scopes
    # function populates the arrays with values 0,1,2, ...
dtype = np.int64 if n ** d > 2 ** 31 - OFFSET - 1 else np.int32
hc = np.arange(n ** d, dtype = dtype).reshape([n] * d)
lines = list(get_lines_np(hc))
scopes = get_scopes_np(lines, d)
if zeros:
hc.fill(0)
return (hc, lines, scopes)
def get_lines_enum_np(hc: Cube_np) -> Lines_enum_np:
"""
get_lines_enum_np(hc: Cube_np) -> Lines_enum_np
    Returns the enumerated lines of a hypercube.
Parameters
----------
hc
The hypercube whose lines are to be calculated
Returns
-------
Enumerated numpy.ndarray views of the lines in `hc`.
See Also
--------
get_lines_np
Examples
--------
>>> import numpy as np
>>> from pprint import pprint
>>> hc = np.arange(4).reshape(2, 2)
>>> hc
array([[0, 1],
[2, 3]])
>>> lines = get_lines_enum_np(hc)
>>> pprint(lines) #doctest: +SKIP
{0: array([0, 2]), 1: array([1, 3]), 2: array([0, 1]),
3: array([2, 3]), 4: array([0, 3]), 5: array([2, 1])}
>>> sorted(lines.items()) #doctest: +NORMALIZE_WHITESPACE
[(0, array([0, 2])), (1, array([1, 3])), (2, array([0, 1])),
(3, array([2, 3])), (4, array([0, 3])), (5, array([2, 1]))]
"""
lines: Lines_enum_np = dict()
idx = 0
for line in get_lines_np(hc):
lines[idx] = line
idx += 1
return lines
def get_scopes_enum_np(lines: Lines_enum_np, d: int) -> Scopes_enum:
"""
get_scopes_enum_np(lines: Lines_enum_np, d: int) -> Scopes_enum:
Calculate the scope of each cell in a hypercube
Parameters
----------
lines
        The returned value from get_lines_enum_np(hc) where hc is of the
        form np.arange(n ** d, dtype = np.int32).reshape([n] * d)
        (np.int64 for larger hypercubes).
        That is, hc is populated with the values 0,1,2,...,n^d - 1.
    d
The dimension of the hypercube that was used to
generate `lines`.
Returns
-------
A dictionary with keys equal to each cell coordinates of the
hypercube. For each cell key, the value is the cell's
scope - a list of line enumerations that are lines containing
the cell.
See Also
--------
get_lines_enum_np
Examples
--------
>>> import numpy as np
>>> from pprint import pprint
>>> hc = np.arange(4).reshape(2, 2)
>>> hc
array([[0, 1],
[2, 3]])
>>> lines = get_lines_enum_np(hc)
>>> pprint(lines) #doctest: +SKIP
{0: array([0, 2]), 1: array([1, 3]), 2: array([0, 1]),
3: array([2, 3]), 4: array([0, 3]), 5: array([2, 1])}
>>> sorted(lines.items()) #doctest: +NORMALIZE_WHITESPACE
[(0, array([0, 2])), (1, array([1, 3])), (2, array([0, 1])),
(3, array([2, 3])), (4, array([0, 3])), (5, array([2, 1]))]
>>> scopes = get_scopes_enum_np(lines, 2)
>>> pprint(scopes) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [0, 2, 4],
(0, 1): [1, 2, 5],
(1, 0): [0, 3, 5],
(1, 1): [1, 3, 4]})
>>> sorted(scopes.items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [0, 2, 4]), ((0, 1), [1, 2, 5]),
((1, 0), [0, 3, 5]), ((1, 1), [1, 3, 4])]
"""
n = lines[0].size
shape = [n] * d
scopes: Scopes_enum = DefaultDict(list)
for idx, line in lines.items():
for j in range(n):
cell = np.unravel_index(line[j], shape)
scopes[cell].append(idx)
return scopes
def structure_enum_np(d: int, n: int, zeros: bool = True, OFFSET: int = 0) -> Structure_enum_np:
"""
structure_enum_np(d: int, n: int, zeros: bool = True,
OFFSET: int = 0) ->
Structure_enum_np:
Return a hypercube, its enumerated lines and the scopes of
its cell scopes.
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
zeros
If true, all values in array are 0, else they are 0,1,2,...
    OFFSET
        The number of cells is n^d. If this is greater than
(2^31 - OFFSET - 1) then we use np.int64 (instead of np.int32)
as the dtype of the numpy array.
Returns
-------
A tuple containing the hypercube, its enumerated lines, and the
scopes of its cells.
See Also
--------
get_lines_enum_np
get_scopes_enum_np
Examples
--------
>>> import numpy as np
>>> from pprint import pprint
>>> struct = structure_enum_np(2, 2)
>>> struct[0]
array([[0, 0],
[0, 0]])
>>> pprint(struct[1]) #doctest: +SKIP
{0: array([0, 0]), 1: array([0, 0]), 2: array([0, 0]),
3: array([0, 0]), 4: array([0, 0]), 5: array([0, 0])}
>>> sorted(struct[1].items()) #doctest: +NORMALIZE_WHITESPACE
[(0, array([0, 0])), (1, array([0, 0])), (2, array([0, 0])),
(3, array([0, 0])), (4, array([0, 0])), (5, array([0, 0]))]
>>> pprint(struct[2]) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [0, 2, 4],
(0, 1): [1, 2, 5],
(1, 0): [0, 3, 5],
(1, 1): [1, 3, 4]})
>>> sorted(struct[2].items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [0, 2, 4]), ((0, 1), [1, 2, 5]),
((1, 0), [0, 3, 5]), ((1, 1), [1, 3, 4])]
>>> struct = structure_enum_np(2, 2, False)
>>> struct[0]
array([[0, 1],
[2, 3]])
>>> pprint(struct[1]) #doctest: +SKIP
{0: array([0, 2]), 1: array([1, 3]), 2: array([0, 1]),
3: array([2, 3]), 4: array([0, 3]), 5: array([2, 1])}
>>> sorted(struct[1].items()) #doctest: +NORMALIZE_WHITESPACE
[(0, array([0, 2])), (1, array([1, 3])), (2, array([0, 1])),
(3, array([2, 3])), (4, array([0, 3])), (5, array([2, 1]))]
>>> pprint(struct[2]) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [0, 2, 4],
(0, 1): [1, 2, 5],
(1, 0): [0, 3, 5],
(1, 1): [1, 3, 4]})
>>> sorted(struct[2].items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [0, 2, 4]), ((0, 1), [1, 2, 5]),
((1, 0), [0, 3, 5]), ((1, 1), [1, 3, 4])]
"""
    # number of cells is n^d. If this is greater than (2^31 - OFFSET - 1)
    # then we use int64. This is because the get_scopes
# function populates the arrays with values 0,1,2, ...
dtype = np.int64 if n ** d > 2 ** 31 - OFFSET - 1 else np.int32
hc = np.arange(n ** d, dtype = dtype).reshape([n] * d)
lines = get_lines_enum_np(hc)
scopes = get_scopes_enum_np(lines, d)
if zeros:
hc.fill(0)
return (hc, lines, scopes)
def connected_cells_np(lines: Lines_enum_np, scopes: Scopes_enum, d: int) -> Connected_cells:
"""
connected_cells_np(lines: Lines_enum_np,
scopes: Scopes_enum, d: int) -> Connected_cells:
Calculate the connected cells for a cube.
Parameters
----------
lines
The enumerated lines of the hypercube
    scopes
        The enumerated scopes of the hypercube
    d
        The number of dimensions of the hypercube
Returns
------
    A dictionary with keys being cell coordinates and values the
    connected cell coordinates.
See Also
--------
structure_enum_np
Examples
--------
>>> from pprint import pprint
>>> d = 2
>>> n = 3
>>> struct = structure_enum_np(d, n, False)
>>> struct[1] #doctest: +NORMALIZE_WHITESPACE
{0: array([0, 3, 6]),
1: array([1, 4, 7]),
2: array([2, 5, 8]),
3: array([0, 1, 2]),
4: array([3, 4, 5]),
5: array([6, 7, 8]),
6: array([0, 4, 8]),
7: array([6, 4, 2])}
>>> pprint(struct[2]) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [0, 3, 6],
(0, 1): [1, 3],
(0, 2): [2, 3, 7],
(1, 0): [0, 4],
(1, 1): [1, 4, 6, 7],
(1, 2): [2, 4],
(2, 0): [0, 5, 7],
(2, 1): [1, 5],
(2, 2): [2, 5, 6]})
>>> sorted(struct[2].items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [0, 3, 6]),
((0, 1), [1, 3]),
((0, 2), [2, 3, 7]),
((1, 0), [0, 4]),
((1, 1), [1, 4, 6, 7]),
((1, 2), [2, 4]),
((2, 0), [0, 5, 7]),
((2, 1), [1, 5]),
((2, 2), [2, 5, 6])]
>>> connected_cells = connected_cells_np(struct[1], struct[2], d)
>>> pprint(connected_cells) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [(0, 1), (0, 0), (2, 0), (1, 1), (2, 2),
(1, 0), (0, 2)],
(0, 1): [(0, 1), (0, 0), (2, 1), (1, 1), (0, 2)],
(0, 2): [(1, 2), (0, 1), (0, 0), (2, 0), (1, 1),
(2, 2), (0, 2)],
(1, 0): [(1, 2), (0, 0), (2, 0), (1, 0), (1, 1)],
(1, 1): [(0, 1),
(1, 2),
(0, 0),
(0, 2),
(2, 1),
(2, 0),
(2, 2),
(1, 0),
(1, 1)],
(1, 2): [(1, 2), (0, 2), (2, 2), (1, 0), (1, 1)],
(2, 0): [(0, 0), (0, 2), (2, 1), (2, 0), (2, 2),
(1, 0), (1, 1)],
(2, 1): [(0, 1), (2, 1), (2, 0), (2, 2), (1, 1)],
(2, 2): [(1, 2), (0, 0), (2, 1), (2, 0), (1, 1),
(2, 2), (0, 2)]})
>>> sorted(connected_cells.items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [(0, 1), (0, 0), (2, 0), (1, 1), (2, 2), (1, 0), (0, 2)]),
((0, 1), [(0, 1), (0, 0), (2, 1), (1, 1), (0, 2)]),
((0, 2), [(1, 2), (0, 1), (0, 0), (2, 0), (1, 1), (2, 2), (0, 2)]),
((1, 0), [(1, 2), (0, 0), (2, 0), (1, 0), (1, 1)]),
((1, 1), [(0, 1), (1, 2), (0, 0), (0, 2), (2, 1), (2, 0),
(2, 2), (1, 0), (1, 1)]),
((1, 2), [(1, 2), (0, 2), (2, 2), (1, 0), (1, 1)]),
((2, 0), [(0, 0), (0, 2), (2, 1), (2, 0), (2, 2), (1, 0), (1, 1)]),
((2, 1), [(0, 1), (2, 1), (2, 0), (2, 2), (1, 1)]),
((2, 2), [(1, 2), (0, 0), (2, 1), (2, 0), (1, 1), (2, 2), (0, 2)])]
"""
n = lines[0].size
shape = [n] * d
connected_cells: Connected_cells = DefaultDict(list)
for cell, lines_enums in scopes.items():
for line_enum in lines_enums:
for j in range(n):
cc = np.unravel_index(lines[line_enum][j], shape)
connected_cells[cell].append(cc)
connected_cells[cell] = list(set(connected_cells[cell]))
return connected_cells
def get_diagonals_coord(d: int, n: int) -> Generator[Line_coord, None, None]:
"""
get_diagonals_coord(d: int, n: int) ->
Generator[Line_coord, None, None]:
Calculates the d-agonals coordinates of h(d, n).
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
Yields
-------
d-gonals coordinates of the diagonals in h(d,n).
Notes
-----
The number of corners of h(d, n) is 2^d. The number of d-agonals
is 2^d / 2 since two connecting corners form a line.
Examples
--------
>>> diags = get_diagonals_coord(2, 3)
>>> list(diags)
[[(0, 0), (1, 1), (2, 2)], [(0, 2), (1, 1), (2, 0)]]
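
    The number of d-agonals matches the formula in the Notes:

    >>> len(list(get_diagonals_coord(3, 2))) == 2 ** 3 // 2
    True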
"""
# comments below use an example with h(2, 3)
# get an iterator of all corners. E.g.: (0,0), (0,2), (2,0), (2,2)
corners_all = it.product([0, n - 1], repeat = d)
# restrict to corners with 0 as first coordinate. E.g.: (0,0), (0,2)
corners_0 = [corner for corner in corners_all if corner[0] == 0]
for corner in corners_0:
# create the diagonals for each corner
diagonal: Line_coord = []
diagonal.append(corner) # add corner as first cell in diagonal
# add rest of diagonal
for i in range(1, n):
# find next cell. Start by decrementing coords.
# E.g.: (0,0) -> (-1,-1); (0,2) -> (-1,1)
# E.g.: (0,0) -> (-2,-2); (0,2) -> (-2,0)
tmp = tuple(c - i for c in corner)
# Take absolute values of coords.
# E.g.: (-1,-1) -> (1,1); (-1,1) -> (1,1)
# E.g.: (-2,-2) -> (2,2); (-2,0) -> (2,0)
coords = tuple(abs(t) for t in tmp)
diagonal.append(coords)
yield diagonal
def get_lines_grouped_coord(d: int, n: int) -> Generator[Lines_coord, None, None]:
"""
get_lines_grouped_coord(d: int, n: int) ->
Generator[Lines_coord, None, None]:
Generate the lines of a hypercube, h(d, n), grouped by the
number of dimensions spanned.
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
Yields
-------
lines (as coordinates) in h(d, n).
See Also
--------
get_lines_i_coord
Examples
--------
>>> lines = list(get_lines_grouped_coord(2, 2))
>>> lines #doctest: +NORMALIZE_WHITESPACE
[[[(0, 0), (1, 0)], [(0, 1), (1, 1)], [(0, 0), (0, 1)],
[(1, 0), (1, 1)]], [[(0, 0), (1, 1)], [(0, 1), (1, 0)]]]
"""
for i in range(d):
yield from get_lines_i_coord(d, n, i)
def get_lines_i_coord(d: int, n: int, i: int) -> Generator[Lines_coord, None, None]:
"""
get_lines_i_coord(d: int, n: int, i: int) ->
Generator[Lines_coord, None, None]:
Generates the lines of a hypercube that span the specified number
of dimensions
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
i
The number of dimensions that the returned lines must span
Yields
-------
Lines in h(d, n) that span `i` dimensions.
See Also
--------
num_lines_grouped
Notes
-----
The notes section for the function num_lines_grouped provides a
sketch of a constructive proof for the number of lines in a
hypercube. This has been used to implement this function.
Examples
--------
    >>> lines = list(get_lines_i_coord(2, 2, 0))
    >>> lines #doctest: +NORMALIZE_WHITESPACE
    [[[(0, 0), (1, 0)], [(0, 1), (1, 1)], [(0, 0), (0, 1)],
    [(1, 0), (1, 1)]]]
    >>> lines = list(get_lines_i_coord(2, 2, 1))
    >>> lines
    [[[(0, 0), (1, 1)], [(0, 1), (1, 0)]]]
"""
lines = []
diagonals = list(get_diagonals_coord(i + 1, n))
# loop over all possible combinations of i dimensions
for i_comb in it.combinations(range(d), r = i + 1):
# a cell could be in any position in the other dimensions
other_d = set(range(d)) - set(i_comb)
for cell in it.product(range(n), repeat = d - i - 1):
diags: Lines_coord = []
for diagonal in diagonals:
diag = []
for c in diagonal:
diag.append(insert_into_tuple(c, other_d, cell))
diags.append(diag)
lines.extend(diags)
yield lines
def get_lines_coord(d: int, n: int) -> Generator[Line_coord, None, None]:
"""
get_lines_coord(d: int, n: int) ->
Generator[Line_coord, None, None]:
Returns the lines in a hypercube
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
Yields
-------
Lines in h(d, n).
See Also
--------
get_lines_grouped_coord
Examples
--------
>>> lines = list(get_lines_coord(2, 2))
>>> lines #doctest: +NORMALIZE_WHITESPACE
[[(0, 0), (1, 0)], [(0, 1), (1, 1)], [(0, 0), (0, 1)],
[(1, 0), (1, 1)], [(0, 0), (1, 1)], [(0, 1), (1, 0)]]
>>> len(lines)
6
"""
grouped = get_lines_grouped_coord(d, n)
flat = (x for y in grouped for x in y)
    yield from flat  # 'return flat' would also work, but 'yield from' makes the generator explicit
def get_scopes_coord(lines: Lines_coord, d: int) -> Scopes_coord:
"""
get_scopes_coord(lines: Lines_coord, d: int) -> Scopes_coord:
Calculate the scope of each cell in a hypercube
Parameters
----------
lines
The returned value from get_lines_coord(d, n).
    d
The dimension of the hypercube that was used to
generate `lines`.
Returns
-------
A dictionary with keys equal to the coordinates of each cell in
the hypercube. For each cell key, the value is the cell's
scope - a list of coordinates that are lines containing
the cell.
See Also
--------
get_lines_coord
Examples
--------
>>> from pprint import pprint
>>> lines = list(get_lines_coord(2, 2))
>>> lines #doctest: +NORMALIZE_WHITESPACE
[[(0, 0), (1, 0)],
[(0, 1), (1, 1)],
[(0, 0), (0, 1)],
[(1, 0), (1, 1)],
[(0, 0), (1, 1)],
[(0, 1), (1, 0)]]
>>> scopes = get_scopes_coord(lines, 2)
>>> pprint(scopes) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [[(0, 0), (1, 0)], [(0, 0), (0, 1)], [(0, 0), (1, 1)]],
(0, 1): [[(0, 1), (1, 1)], [(0, 0), (0, 1)], [(0, 1), (1, 0)]],
(1, 0): [[(0, 0), (1, 0)], [(1, 0), (1, 1)], [(0, 1), (1, 0)]],
(1, 1): [[(0, 1), (1, 1)], [(1, 0), (1, 1)], [(0, 0), (1, 1)]]})
>>> sorted(scopes.items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [[(0, 0), (1, 0)], [(0, 0), (0, 1)], [(0, 0), (1, 1)]]),
((0, 1), [[(0, 1), (1, 1)], [(0, 0), (0, 1)], [(0, 1), (1, 0)]]),
((1, 0), [[(0, 0), (1, 0)], [(1, 0), (1, 1)], [(0, 1), (1, 0)]]),
((1, 1), [[(0, 1), (1, 1)], [(1, 0), (1, 1)], [(0, 0), (1, 1)]])]
"""
n = len(lines[0])
scopes: Scopes_coord = DefaultDict(list)
cells = it.product(range(n), repeat = d) # get all possible cells
for cell in cells:
for line in lines:
if cell in line:
scopes[cell].append(line)
return scopes
def structure_coord(d: int, n: int) -> Structure_coord:
"""
structure_coord(d: int, n: int) -> Structure_coord:
Return lines, and the scopes of its cells, for h(d, n)
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
Returns
-------
Lines, and the scopes of its cells, for h(d, n)
See Also
--------
get_lines_coord
get_scopes_coord
Examples
--------
>>> from pprint import pprint
>>> struct = structure_coord(2, 2)
>>> struct[0] #doctest: +NORMALIZE_WHITESPACE
[[(0, 0), (1, 0)], [(0, 1), (1, 1)], [(0, 0), (0, 1)],
[(1, 0), (1, 1)], [(0, 0), (1, 1)], [(0, 1), (1, 0)]]
>>> pprint(struct[1]) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [[(0, 0), (1, 0)], [(0, 0), (0, 1)], [(0, 0), (1, 1)]],
(0, 1): [[(0, 1), (1, 1)], [(0, 0), (0, 1)], [(0, 1), (1, 0)]],
(1, 0): [[(0, 0), (1, 0)], [(1, 0), (1, 1)], [(0, 1), (1, 0)]],
(1, 1): [[(0, 1), (1, 1)], [(1, 0), (1, 1)], [(0, 0), (1, 1)]]})
>>> sorted(struct[1].items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [[(0, 0), (1, 0)], [(0, 0), (0, 1)], [(0, 0), (1, 1)]]),
((0, 1), [[(0, 1), (1, 1)], [(0, 0), (0, 1)], [(0, 1), (1, 0)]]),
((1, 0), [[(0, 0), (1, 0)], [(1, 0), (1, 1)], [(0, 1), (1, 0)]]),
((1, 1), [[(0, 1), (1, 1)], [(1, 0), (1, 1)], [(0, 0), (1, 1)]])]
"""
lines = list(get_lines_coord(d, n))
scopes = get_scopes_coord(lines, d)
return (lines, scopes)
def get_lines_enum_coord(d: int, n: int) -> Lines_enum_coord:
"""
get_lines_enum_coord(d: int, n: int) -> Lines_enum_coord:
Returns enumerated lines of a hypercube
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
Yields
-------
Enumerated lines in h(d, n).
See Also
--------
get_lines_coord
Examples
--------
>>> lines = get_lines_enum_coord(2, 2)
>>> lines #doctest: +NORMALIZE_WHITESPACE
{0: [(0, 0), (1, 0)],
1: [(0, 1), (1, 1)],
2: [(0, 0), (0, 1)],
3: [(1, 0), (1, 1)],
4: [(0, 0), (1, 1)],
5: [(0, 1), (1, 0)]}
"""
lines: Lines_enum_coord = dict()
idx = 0
for line in get_lines_coord(d, n):
lines[idx] = line
idx += 1
return lines
def get_scopes_enum_coord(lines: Lines_enum_coord, d: int) -> Scopes_enum:
"""
get_scopes_enum_coord(lines: Lines_enum_coord, d: int) ->
Scopes_enum:
Calculate the scope of each cell in a hypercube
Parameters
----------
lines
The returned value from get_lines_enum_coord(d, n).
    d
The dimension of the hypercube that was used to
generate `lines`.
Returns
-------
A dictionary with keys equal to each cell coordinates of the
hypercube. For each cell key, the value is the cell's
scope - a list of line enumerations that are lines containing
the cell.
See Also
--------
get_lines_enum_coord
Examples
--------
>>> from pprint import pprint
>>> lines = get_lines_enum_coord(2, 2)
>>> lines #doctest: +NORMALIZE_WHITESPACE
{0: [(0, 0), (1, 0)],
1: [(0, 1), (1, 1)],
2: [(0, 0), (0, 1)],
3: [(1, 0), (1, 1)],
4: [(0, 0), (1, 1)],
5: [(0, 1), (1, 0)]}
>>> scopes = get_scopes_enum_coord(lines, 2)
>>> pprint(scopes) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [0, 2, 4],
(0, 1): [1, 2, 5],
(1, 0): [0, 3, 5],
(1, 1): [1, 3, 4]})
>>> sorted(scopes.items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [0, 2, 4]),
((0, 1), [1, 2, 5]),
((1, 0), [0, 3, 5]),
((1, 1), [1, 3, 4])]
"""
n = len(lines[0])
scopes: Scopes_enum = DefaultDict(list)
cells = it.product(range(n), repeat = d) # get all possible cells
for cell in cells:
for idx, line in lines.items():
if cell in line:
scopes[cell].append(idx)
return scopes
def structure_enum_coord(d: int, n: int) -> Structure_enum_coord:
"""
structure_enum_coord(d: int, n: int) ->
Structure_enum_coord:
Return enumerated lines, and the scopes of its cells, for h(d, n)
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
Returns
-------
Enumerated lines, and the scopes of its cells, for h(d, n)
See Also
--------
get_lines_enum_coord
get_scopes_enum_coord
Examples
--------
>>> from pprint import pprint
>>> struct = structure_enum_coord(2, 2)
>>> struct[0] #doctest: +NORMALIZE_WHITESPACE
{0: [(0, 0), (1, 0)],
1: [(0, 1), (1, 1)],
2: [(0, 0), (0, 1)],
3: [(1, 0), (1, 1)],
4: [(0, 0), (1, 1)],
5: [(0, 1), (1, 0)]}
>>> pprint(struct[1]) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [0, 2, 4],
(0, 1): [1, 2, 5],
(1, 0): [0, 3, 5],
(1, 1): [1, 3, 4]})
>>> sorted(struct[1].items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [0, 2, 4]),
((0, 1), [1, 2, 5]),
((1, 0), [0, 3, 5]),
((1, 1), [1, 3, 4])]
"""
lines = get_lines_enum_coord(d, n)
scopes = get_scopes_enum_coord(lines, d)
return (lines, scopes)
def connected_cells_coord(lines: Lines_enum_coord, scopes: Scopes_enum) -> Connected_cells:
"""
connected_cells_coord(lines: Lines_enum_coord, scopes: Scopes_enum)
-> Connected_cells:
Calculate the connected cells for a cube.
Parameters
----------
lines
The enumerated lines of the hypercube
scopes
The enumerated scopes of the hypercube
Returns
------
    A dictionary with keys being cell coordinates and values the
connected cell coordinates.
See Also
--------
structure_enum_coord
Examples
--------
>>> from pprint import pprint
>>> struct = structure_enum_coord(2, 3)
>>> struct[0] #doctest: +NORMALIZE_WHITESPACE
{0: [(0, 0), (1, 0), (2, 0)],
1: [(0, 1), (1, 1), (2, 1)],
2: [(0, 2), (1, 2), (2, 2)],
3: [(0, 0), (0, 1), (0, 2)],
4: [(1, 0), (1, 1), (1, 2)],
5: [(2, 0), (2, 1), (2, 2)],
6: [(0, 0), (1, 1), (2, 2)],
7: [(0, 2), (1, 1), (2, 0)]}
>>> pprint(struct[1]) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [0, 3, 6],
(0, 1): [1, 3],
(0, 2): [2, 3, 7],
(1, 0): [0, 4],
(1, 1): [1, 4, 6, 7],
(1, 2): [2, 4],
(2, 0): [0, 5, 7],
(2, 1): [1, 5],
(2, 2): [2, 5, 6]})
>>> sorted(struct[1].items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [0, 3, 6]),
((0, 1), [1, 3]),
((0, 2), [2, 3, 7]),
((1, 0), [0, 4]),
((1, 1), [1, 4, 6, 7]),
((1, 2), [2, 4]),
((2, 0), [0, 5, 7]),
((2, 1), [1, 5]),
((2, 2), [2, 5, 6])]
>>> connected_cells = connected_cells_coord(*struct)
>>> pprint(connected_cells) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [(0, 1), (0, 0), (2, 0), (1, 1), (2, 2),
(1, 0), (0, 2)],
(0, 1): [(0, 1), (0, 0), (2, 1), (1, 1), (0, 2)],
(0, 2): [(1, 2), (0, 1), (0, 0), (2, 0), (1, 1),
(2, 2), (0, 2)],
(1, 0): [(1, 2), (0, 0), (2, 0), (1, 0), (1, 1)],
(1, 1): [(0, 1),
(1, 2),
(0, 0),
(0, 2),
(2, 1),
(2, 0),
(2, 2),
(1, 0),
(1, 1)],
(1, 2): [(1, 2), (0, 2), (2, 2), (1, 0), (1, 1)],
(2, 0): [(0, 0), (2, 1), (2, 0), (1, 1), (2, 2),
(1, 0), (0, 2)],
(2, 1): [(0, 1), (2, 1), (2, 0), (2, 2), (1, 1)],
(2, 2): [(1, 2), (0, 0), (2, 1), (2, 0), (1, 1),
(2, 2), (0, 2)]})
>>> sorted(connected_cells.items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [(0, 1), (0, 0), (2, 0), (1, 1), (2, 2), (1, 0), (0, 2)]),
((0, 1), [(0, 1), (0, 0), (2, 1), (1, 1), (0, 2)]),
((0, 2), [(1, 2), (0, 1), (0, 0), (2, 0), (1, 1), (2, 2), (0, 2)]),
((1, 0), [(1, 2), (0, 0), (2, 0), (1, 0), (1, 1)]),
((1, 1), [(0, 1), (1, 2), (0, 0), (0, 2), (2, 1), (2, 0),
(2, 2), (1, 0), (1, 1)]),
((1, 2), [(1, 2), (0, 2), (2, 2), (1, 0), (1, 1)]),
((2, 0), [(0, 0), (2, 1), (2, 0), (1, 1), (2, 2), (1, 0), (0, 2)]),
((2, 1), [(0, 1), (2, 1), (2, 0), (2, 2), (1, 1)]),
((2, 2), [(1, 2), (0, 0), (2, 1), (2, 0), (1, 1), (2, 2), (0, 2)])]
"""
connected_cells: Connected_cells = DefaultDict(list)
for cell, lines_enums in scopes.items():
for line_enum in lines_enums:
connected_cells[cell].extend(lines[line_enum])
connected_cells[cell] = list(set(connected_cells[cell]))
return connected_cells
def get_scope_cell_coord(d: int, n: int, cell: Cell_coord) -> Generator[Line_coord, None, None]:
"""
get_scope_cell_coord(d: int, n: int, cell: Cell_coord) ->
Generator[Line_coord, None, None]:
Calculate the scope for a cell.
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
cell
The cell whose scope is to be calculated
Yields
------
Lines that form the scope of `cell`.
See Also
--------
get_scopes_coord
Notes
-----
The scope for a specific cell can also be found by calling
get_scopes_coord and indexing with the cell. get_scopes_coord
calculates the scope for every cell and stores this in a dictionary.
get_scope_cell_coord only calculates the scope for the
specified cell.
Examples
--------
>>> d = 3
>>> n = 4
>>> list(get_scope_cell_coord(d, n, (1,2,3))) # doctest: +NORMALIZE_WHITESPACE
[[(0, 2, 3), (1, 2, 3), (2, 2, 3), (3, 2, 3)],
[(1, 0, 3), (1, 1, 3), (1, 2, 3), (1, 3, 3)],
[(1, 2, 0), (1, 2, 1), (1, 2, 2), (1, 2, 3)],
[(0, 3, 3), (1, 2, 3), (2, 1, 3), (3, 0, 3)]]
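
    The result agrees (up to ordering) with indexing the full scopes
    dictionary:

    >>> scope = get_scope_cell_coord(2, 3, (0, 0))
    >>> scopes = get_scopes_coord(list(get_lines_coord(2, 3)), 2)
    >>> {frozenset(ln) for ln in scope} == {frozenset(ln) for ln in scopes[(0, 0)]}
    True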
"""
# loop over the numbers of dimensions
for i in range(d):
# for each combination of i dimensions
for i_comb in it.combinations(range(d), r = i + 1):
        # increment cell coordinates along all potential lines
incr = it.product([-1, 1], repeat = i + 1)
seen: Line_coord = []
for j in incr:
# store potential lines. Could use a list but deque
# makes it clear we are moving "up and down" the line
d_line: Deque[Cell_coord] = Deque((cell,))
# since we are moving "up and down" we don't need
# to move "down and up" as well
j_neg = tuple(-x for x in list(j))
if j_neg not in seen:
seen.append(j)
for k in range(1, n):
jk = tuple(x * k for x in list(j)) # size of increments
# record cells positions of increments
d_line.appendleft(increment_cell_coord(cell, i_comb, jk))
d_line.append(increment_cell_coord(cell, i_comb, jk, False))
# some calculated cells will simply not be part of the board
line = remove_invalid_cells_coord(n, list(d_line))
# we only want lines that are winning lines
if len(line) == n:
yield line
def scopes_size(scopes: Scopes) -> Counter:
"""
scopes_size(scopes: Scopes) -> Counter:
Calculate the different scope lengths.
Parameters
----------
scopes
Dictionary of cells (keys) and their scopes
Returns
-------
    Counter of scope lengths (key) and their frequency (values).
See Also
--------
get_scopes_np
get_scopes_coord
Examples
--------
>>> import numpy as np
>>> scopes = structure_np(2, 3)[2]
>>> scopes_size(scopes) == Counter({2: 4, 3: 4, 4: 1})
True
>>> scopes = structure_enum_np(2, 3)[2]
>>> scopes_size(scopes) == Counter({2: 4, 3: 4, 4: 1})
True
>>> scopes = structure_coord(2, 3)[1]
>>> scopes_size(scopes) == Counter({2: 4, 3: 4, 4: 1})
True
>>> scopes = structure_enum_coord(2, 3)[1]
>>> scopes_size(scopes) == Counter({2: 4, 3: 4, 4: 1})
True
"""
return Counter([len(scope) for scope in scopes.values()])
def scopes_size_cell(scopes: Scopes) -> DefaultDict[int, List[Cell_coord]]:
"""
scopes_size_cell(scopes: Scopes) ->
DefaultDict[int, List[Cell_coord]]:
Group cells by length of their scope.
Parameters
----------
scopes
Dictionary of cells (keys) and their scopes
Returns
-------
    Dictionary of scope lengths (key) and the list of cells with
scopes of that length.
See Also
--------
get_scopes_np
get_scopes_coord
get_scopes_enum
Examples
--------
>>> import numpy as np
>>> from pprint import pprint
>>> scopes = structure_np(2, 3)[2]
>>> pprint(scopes_size_cell(scopes)) #doctest: +SKIP
defaultdict(<class 'list'>,
{2: [(1, 0), (0, 1), (2, 1), (1, 2)],
3: [(0, 0), (2, 0), (0, 2), (2, 2)],
4: [(1, 1)]})
>>> sorted(scopes_size_cell(scopes).items()) #doctest: +NORMALIZE_WHITESPACE
[(2, [(1, 0), (0, 1), (2, 1), (1, 2)]),
(3, [(0, 0), (2, 0), (0, 2), (2, 2)]),
(4, [(1, 1)])]
>>> scopes = structure_enum_np(2, 3)[2]
>>> pprint(scopes_size_cell(scopes)) #doctest: +SKIP
defaultdict(<class 'list'>,
{2: [(1, 0), (0, 1), (2, 1), (1, 2)],
3: [(0, 0), (2, 0), (0, 2), (2, 2)],
4: [(1, 1)]})
>>> sorted(scopes_size_cell(scopes).items()) #doctest: +NORMALIZE_WHITESPACE
[(2, [(1, 0), (0, 1), (2, 1), (1, 2)]),
(3, [(0, 0), (2, 0), (0, 2), (2, 2)]),
(4, [(1, 1)])]
>>> scopes = structure_coord(2, 3)[1]
>>> pprint(scopes_size_cell(scopes)) #doctest: +SKIP
defaultdict(<class 'list'>,
{2: [(0, 1), (1, 0), (1, 2), (2, 1)],
3: [(0, 0), (0, 2), (2, 0), (2, 2)],
4: [(1, 1)]})
>>> sorted(scopes_size_cell(scopes).items()) #doctest: +NORMALIZE_WHITESPACE
[(2, [(0, 1), (1, 0), (1, 2), (2, 1)]),
(3, [(0, 0), (0, 2), (2, 0), (2, 2)]),
(4, [(1, 1)])]
>>> scopes = structure_enum_coord(2, 3)[1]
>>> pprint(scopes_size_cell(scopes)) #doctest: +SKIP
defaultdict(<class 'list'>,
{2: [(0, 1), (1, 0), (1, 2), (2, 1)],
3: [(0, 0), (0, 2), (2, 0), (2, 2)],
4: [(1, 1)]})
>>> sorted(scopes_size_cell(scopes).items()) #doctest: +NORMALIZE_WHITESPACE
[(2, [(0, 1), (1, 0), (1, 2), (2, 1)]),
(3, [(0, 0), (0, 2), (2, 0), (2, 2)]),
(4, [(1, 1)])]
"""
scopes_size_cell: DefaultDict[int, List[Cell_coord]] = DefaultDict(list)
for cell, scope in scopes.items():
scopes_size_cell[len(scope)].append(cell)
return scopes_size_cell
####################################################################################################
# The following 3 functions are for displaying a hypercube in a terminal.
# It is assumed that a numpy ndarray has been used to represent the hypercube.
def display_np(hc: Cube_np, display_cell: Callable[[Any], Tuple[str, str, str]] = None, ul = False) -> str:
"""
display_np(hc: Cube_np, display_cell: Callable[[Any],
Tuple[str, str, str]] = None, ul = False) ->
str:
Construct a string to display the hypercube in the terminal.
Parameters
----------
hc
The hypercube to be displayed
display_cell
A callback function called with the value of each cell value.
It returns a tuple of strings - the character/string to be
displayed, and any formatting to be applied (typically ansi
color sequences). See Examples for how colors are specified.
If display_cell is not provided, the cell value is displayed.
ul
display_np calls itself recursively (see Notes). This parameter
is used to track whether a cell is on the bottom row of a
        2-d array. It has no direct impact when the user calls display_np
        unless the array is 1-d, in which case it determines whether cell
        values are underlined when displayed.
Returns
-------
A string that can be printed to the terminal to display the
hypercube.
See Also
--------
underline
join_multiline
Notes
-----
The '|' character is used to represent the board horizontally.
Cell contents are underlined in order to represent the board
vertically. For example, the character 'X' is underlined to
    give 'X̲'. This function is recursive; it starts with the hypercube
    and keeps removing dimensions until it reaches a single cell, which
    can be given a string value. We are trying to display d dimensions
    in two dimensions. To do this, odd dimensions are
shown horizontally; even dimensions are shown vertically.
Examples
--------
>>> import numpy as np
>>> from pprint import pprint
>>> def dc(v: Any) -> Tuple[str, str, str]:
...
... # define colors - could also use colorama module
... # red foreground + yellow background
... pre_fmt = '\033[31;43m'
... post_fmt = '\033[0m' # removes color settings
...
... if v > 0:
... return 'X', pre_fmt, post_fmt
... elif v < 0:
... return 'O', pre_fmt, post_fmt
... else:
... return ' ', '', ''
>>> d = 3
>>> n = 3
>>> hc = np.zeros((n,) * d, dtype = int)
>>> hc[0, 0, 0] = 1
>>> hc[1, 1, 1] = -1
>>> disp = display_np(hc, dc)
>>> print(disp) #doctest: +SKIP
X̲|_|_ _|_|_ _|_|_
_|_|_ _|O̲|_ _|_|_
| | | | | |
"""
if hc.size == 1: # hc is a single cell
if display_cell is None:
s, pre_fmt, post_fmt = str(hc), '', ''
else:
s, pre_fmt, post_fmt = display_cell(hc)
        # underline displayed string (to represent board structure) unless
        # string is in the bottom row of array
if ul:
s = '_' * len(s) if s.isspace() else underline(s)
return pre_fmt + s + post_fmt
# hc is not a single cell
d = hc.ndim
# break the array into sub arrays along the first dimension
sub_hc = [hc[i] for i in range(hc.shape[0])]
# constuct a string for each sub array
sub_hc_str = []
for c, a in enumerate(sub_hc):
if d == 2 and c == len(sub_hc) - 1:
# sub arr is 2-dimensional and last row - don't underline
ul = False
elif d != 1:
ul = True
sub_hc_str.append(display_np(a, display_cell, ul))
# join the sub strings
if d % 2 == 0: # even number of dimensions - display down the screen
if d == 2:
return ''.join('\n'.join(sub_hc_str))
else:
            sp = '\n' + '\n' * (int((d / 2) ** 1.5) - 1) # increase space between higher dimensions
return sp.join(sub_hc_str)
else: # odd number of dimensions - display across the screen
if d == 1:
return '|'.join(sub_hc_str)
else:
return join_multiline(sub_hc_str, ' ' + ' ' * int((d - 2) ** 1.5) + ' ', False)
def underline(s: str, alpha_only = True) -> str:
"""
underline(s: str, alpha_only = True) -> str
Underlines a string.
Parameters
----------
    s
        The string to be underlined
    alpha_only
        If True, only alphabetic characters are underlined
Returns
-------
An underlined string
Notes
-----
The code appears only to work properly with alphabetic characters.
Examples
--------
>>> underline('X')
'X̲'
>>> underline('XX')
'X̲X̲'
>>> underline('1')
'1'
>>> underline('1', False)
'1̲'
"""
    try:
        if alpha_only:
            s_ = ""
            for ch in str(s):
                if ch.isalpha():
                    s_ = s_ + ch + "\u0332"
                else:
                    s_ = s_ + ch
            return s_
        else:
            return ''.join([ch + "\u0332" for ch in str(s)])
    except Exception:
        return s
def join_multiline(iterable: Iterable[str], divider: str = ' ', divide_empty_lines: bool = False,
                   fill_value: str = '_') -> str:
    """
    join_multiline(iterable: Iterable[str], divider: str = ' ',
                   divide_empty_lines: bool = False,
                   fill_value: str = '_') -> str
    Join multiline strings line by line.
    Parameters
    ----------
    iterable
        An iterable of multiline (or single line) strings
    divider
        String used to divide the corresponding lines of each iterable
    divide_empty_lines
        If the corresponding lines in each multiline string are all
        blank, determines whether they are still joined with divider
        or joined with ''.
    fill_value
        If the number of lines in the multiline strings in iterable
        differs, then fill_value is used to fill in values of the
        shorter strings.
Returns
-------
The joined string.
Examples
--------
    >>> # note that newline has to be escaped to work in the doctest
    >>> # examples below.
>>> ml_1 = 'AA\\nMM\\nXX'
>>> ml_2 = 'BB\\nNN\\nYY'
>>> ml_3 = 'CC\\nOO\\nZZ'
>>> ml = join_multiline([ml_1, ml_2, ml_3])
>>> print(ml) #doctest: +NORMALIZE_WHITESPACE
AA BB CC
MM NN OO
XX YY ZZ
>>> ml = join_multiline([ml_1, ml_2, ml_3], divider = '_')
>>> print(ml) #doctest: +NORMALIZE_WHITESPACE
AA_BB_CC
MM_NN_OO
XX_YY_ZZ
>>> ml_3 = 'CC\\nOO'
>>> ml = join_multiline([ml_1, ml_2, ml_3], fill_value = '@')
>>> print(ml) #doctest: +NORMALIZE_WHITESPACE
AA BB CC
MM NN OO
XX YY @
>>> ml_1 = 'AA\\n\\nMM'
>>> ml_2 = 'BB\\n\\nNN'
>>> ml_3 = 'CC\\n\\nZZ'
>>> ml = join_multiline([ml_1, ml_2, ml_3], divider = '_')
>>> print(ml) #doctest: +NORMALIZE_WHITESPACE
AA_BB_CC
<BLANKLINE>
MM_NN_ZZ
>>> ml = join_multiline([ml_1, ml_2, ml_3], '_', True)
>>> print(ml) #doctest: +NORMALIZE_WHITESPACE
AA_BB_CC
__
MM_NN_ZZ
"""
# for each multiline block, split into individual lines
    spl = [x.split('\n') for x in iterable]
# create list of tuples with tuple i containing line i from each multiline block
    tl = list(it.zip_longest(*spl, fillvalue=fill_value))
if divide_empty_lines:
st = [divider.join(t) for t in tl]
else:
st = []
for t in tl:
if all([not x.strip() for x in t]):
st.append('')
else:
st.append(divider.join(t))
# finally, join each string separated by a new line
return '\n'.join(st)
####################################################################################################
####################################################################################################
# The following functions are helper functions
def slice_ndarray(arr: Cube_np, dims: Collection[int], coords: Collection[int]) -> Cube_np:
"""
slice_ndarray(arr: Cube_np, dims: Collection[int],
coords: Collection[int]) ->
Cube_np:
Returns a slice of a hypercube.
Parameters
----------
arr
The hypercube to be sliced
dims
        The dimensions (axes) to slice along
coords
The coordinates corresponding to the dimensions being sliced
Returns
-------
A view of a slice of `arr`.
Raises
------
ValueError
If length of `dims` is not equal to length of `coords`
Examples
--------
>>> import numpy as np
>>> arr = np.arange(8).reshape(2, 2, 2)
>>> arr
array([[[0, 1],
[2, 3]],
<BLANKLINE>
[[4, 5],
[6, 7]]])
>>> slice_ndarray(arr, (0,), (0,))
array([[0, 1],
[2, 3]])
>>> slice_ndarray(arr, (1, 2), (0, 0))
array([0, 4])
"""
# create a list of slice objects, one for each dimension of the array
    # Note: slice(None) is the same as ":". E.g. arr[:, 4] == arr[slice(None), 4]
sl: List[Union[slice, int]] = [slice(None)] * arr.ndim
if len(dims) != len(coords):
raise ValueError("dims and coords must be of the same length")
for dim, coord in zip(dims, coords):
sl[dim] = coord
return arr[tuple(sl)]
def insert_into_tuple(tup: Tuple, pos: Union[int, Collection[int]], val: Any) -> Tuple[int, ...]:
"""
insert_into_tuple(tup: Tuple, pos: Union[int, Collection[int]],
val: Any) ->
Tuple[int, ...]:
Insert values into a tuple.
Parameters
----------
tup
the tuple into which values are to be inserted
pos
The positions into which values are to be inserted
val
The values corresponding to the positions in `pos`
Returns
-------
A copy of `tup` with values inserted.
Raises
------
ValueError
If length of `pos` is not equal to length of `val`
Examples
--------
>>> tup = (0, 1, 2, 3)
>>> pos = (5, 1)
>>> val = (9, 8)
>>> insert_into_tuple(tup, pos, val)
(0, 8, 1, 2, 3, 9)
>>> insert_into_tuple(tup, (), ())
(0, 1, 2, 3)
"""
tl = list(tup)
if isinstance(pos, int):
tl.insert(pos, val)
else:
if len(pos) != len(val):
raise ValueError("pos and val must be of the same length")
if len(pos) == 0:
return tup
        # sort pos from low to high; sort val correspondingly
stl = list(zip(*sorted(zip(pos, val))))
for p, v in zip(stl[0], stl[1]):
tl.insert(p, v)
return tuple(tl)
def increment_cell_coord(cell: Cell_coord, pos: Sequence[int], incr: Sequence[int], add: bool = True) -> Cell_coord:
"""
increment_cell_coord(cell: Cell_coord, pos: Sequence[int],
incr: Sequence[int], add: bool = True) ->
Cell_coord:
Increments coordinates of a cell.
Parameters
----------
cell
the cell which will have coordinates incremented
pos
The coordinates which are to be incremented
incr
The increment values at the specified coordinates
add
        If True, the increments are added, else they are subtracted
Returns
-------
A copy of `cell` with incremented coordinates.
Raises
------
ValueError
        If length of `pos` is not equal to length of `incr`
Examples
--------
>>> cell = (1, 2, 1)
>>> pos = (0, 2)
>>> incr = (1, -1)
>>> increment_cell_coord(cell, pos, incr)
(2, 2, 0)
>>> increment_cell_coord(cell, pos, incr, False)
(0, 2, 2)
"""
if len(pos) != len(incr):
raise ValueError("pos and incr must be of the same length")
if len(pos) == 0:
return cell
cl = list(cell)
for i in range(len(pos)):
if add:
cl[pos[i]] += incr[i]
else:
cl[pos[i]] -= incr[i]
return tuple(cl)
def str_to_tuple(d: int, n: int, cell: str, offset: int = 1) -> Cell_coord:
"""
str_to_tuple(d: int, n: int, cell: str, offset: int = 1) ->
Cell_coord:
    Returns cell coordinates provided as a string as a tuple
of integers.
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
cell
Cell coordinates specified as a string (see Notes).
Will accept a non-string argument which will be cast
to a string.
offset
        Index offset, typically 0 or 1.
Raises
------
ValueError
        1. If digits are not separated and n is greater than 9
2. Incorrect numbers of coordinates provided
3. One or more coordinates is not valid
Notes
-----
    If the string is all digits, it is assumed that each digit is a
    coordinate. If non-digit characters are present, they are assumed
    to separate coordinates.
Returns
-------
A tuple containing the cell coordinates.
Examples
--------
>>> d = 3
>>> n = 3
>>> str_to_tuple(d, n, '123')
(0, 1, 2)
>>> str_to_tuple(d, n, '012', offset = 0)
(0, 1, 2)
>>> str_to_tuple(d, n, '1,2::3')
(0, 1, 2)
>>> str_to_tuple(d, n, 123)
(0, 1, 2)
>>> str_to_tuple(d, n, '12')
Traceback (most recent call last):
...
ValueError: Incorrect number of coordinates provided
>>> str_to_tuple(d, n, '125')
Traceback (most recent call last):
...
ValueError: One or more coordinates are not valid
>>> d = 3
>>> n = 10
>>> str_to_tuple(d, n, '123') #doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: Board is too big for each dimension to be specified
by single digit
"""
cell = str(cell)
# check to see if there are any non-digits
nd = re.findall(r'\D+', cell)
if len(nd) == 0:
if n > 9:
raise ValueError("Board is too big for each dimension to be specified by single digit")
else:
tup = tuple(int(coord) - offset for coord in cell)
else: # there are non-digits, use these as separators
tup = tuple(int(coord) - offset for coord in re.findall(r'\d+', cell))
# check that correct number of coordinates specified
if len(tup) != d:
raise ValueError("Incorrect number of coordinates provided")
# check that each coordinate is valid
if all(t in range(n) for t in tup):
return tup
else:
raise ValueError("One or more coordinates are not valid")
def remove_invalid_cells_coord(n: int, line: Line_coord) -> Line_coord:
    """
    remove_invalid_cells_coord(n: int, line: Line_coord) -> Line_coord
Remove cells that do not have valid coordinates.
Parameters
----------
n
The number of cells in any dimension
line
list of tuples representing cell coordinates (possibly invalid)
Returns
-------
    list of tuples representing valid cell coordinates
Examples
--------
>>> n = 3
>>> line = [(1, 2, 0), (-1, 0, 3), (0, 1, 2), (1, 2, 3)]
>>> remove_invalid_cells_coord(n, line)
[(1, 2, 0), (0, 1, 2)]
"""
rl = []
for cell in line:
if all(coord in range(n) for coord in cell):
rl.append(cell)
return rl
####################################################################################################
# used in internal testing
def _lines_np_coord_check(d: int, n: int) -> bool:
"""
_lines_np_coord_check(d: int, n: int) -> bool
Checks if lines_np and lines_coord give the same lines.
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
Returns
-------
True if lines_np and lines_coord give the same lines.
False otherwise.
See Also
--------
get_lines_np
get_lines_coord
Notes
-----
This function is a private function used in testing.
"""
dtype = np.int64 if n ** d > 2 ** 31 else np.int32
arr = np.arange(n ** d, dtype = dtype).reshape([n] * d)
lines_np = get_lines_np(arr)
lines_coord = get_lines_coord(d, n)
t_np = [tuple(sorted(l.tolist())) for l in lines_np] # type: ignore
t_coord = [tuple(sorted([arr[c] for c in l])) for l in lines_coord]
return set(t_np) == set(t_coord)
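# Illustrative self-check (a sketch, not part of the original module): for a
# standard 3x3x3 board the two line generators should enumerate the same
# winning lines, so this is expected to print True.
if __name__ == '__main__':
    print(_lines_np_coord_check(3, 3))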
| 30.374202 | 116 | 0.510575 | 0 | 0 | 17,170 | 0.240624 | 0 | 0 | 0 | 0 | 57,395 | 0.804347 |
f81ca2ce592e84428e81a66ce38e515a6ee5edcf | 42 | py | Python | firecloud/__about__.py | jnktsj/fiss | 2cfce1f6dc0c43f62c51e8a9296946b9990a76fa | [
"BSD-3-Clause"
] | 20 | 2017-08-05T08:44:51.000Z | 2022-03-24T15:33:48.000Z | firecloud/__about__.py | jnktsj/fiss | 2cfce1f6dc0c43f62c51e8a9296946b9990a76fa | [
"BSD-3-Clause"
] | 117 | 2016-10-26T15:31:48.000Z | 2022-02-16T23:06:33.000Z | firecloud/__about__.py | jnktsj/fiss | 2cfce1f6dc0c43f62c51e8a9296946b9990a76fa | [
"BSD-3-Clause"
] | 21 | 2017-03-13T15:16:03.000Z | 2022-02-25T19:14:36.000Z | # Package version
__version__ = "0.16.31"
| 14 | 23 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.619048 |
f81ce517b53ccd795c4a506f2213bfeafa42c8e0 | 255 | py | Python | django_selectel_storage/exceptions.py | Stuvros/django-selectel-storage | 076f7e3c58d9391e2e7e27feb0526736d101c2b5 | [
"MIT"
] | 27 | 2015-01-28T09:17:09.000Z | 2021-06-21T20:48:01.000Z | django_selectel_storage/exceptions.py | Stuvros/django-selectel-storage | 076f7e3c58d9391e2e7e27feb0526736d101c2b5 | [
"MIT"
] | 9 | 2015-08-07T15:03:00.000Z | 2020-05-01T04:54:02.000Z | django_selectel_storage/exceptions.py | Stuvros/django-selectel-storage | 076f7e3c58d9391e2e7e27feb0526736d101c2b5 | [
"MIT"
] | 19 | 2015-05-20T14:16:25.000Z | 2022-03-31T06:31:59.000Z | class SelectelException(ValueError):
pass
class InvalidSchema(SelectelException):
pass
class EmptyUsername(SelectelException):
pass
class EmptyPassword(SelectelException):
pass
class EmptyContainerName(SelectelException):
pass
| 13.421053 | 44 | 0.772549 | 242 | 0.94902 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f81e6f765fb2c951a1b3a358bc3ab07fe69f4752 | 11,140 | py | Python | simpa_tests/manual_tests/acoustic_forward_models/KWaveAcousticForwardConvenienceFunction.py | IMSY-DKFZ/simpa | b8bddcf43a4bff2564f0ec208dc511b82e49bfb4 | [
"MIT"
] | 3 | 2022-03-14T15:40:09.000Z | 2022-03-20T02:34:25.000Z | simpa_tests/manual_tests/acoustic_forward_models/KWaveAcousticForwardConvenienceFunction.py | jgroehl/simpa | e56f0802e5a8555ee8bb139dd4f776025e7e9267 | [
"MIT"
] | 3 | 2022-03-18T07:19:12.000Z | 2022-03-30T12:15:19.000Z | simpa_tests/manual_tests/acoustic_forward_models/KWaveAcousticForwardConvenienceFunction.py | IMSY-DKFZ/simpa | b8bddcf43a4bff2564f0ec208dc511b82e49bfb4 | [
"MIT"
] | null | null | null | # SPDX-FileCopyrightText: 2021 Division of Intelligent Medical Systems, DKFZ
# SPDX-FileCopyrightText: 2021 Janek Groehl
# SPDX-License-Identifier: MIT
from simpa.core.device_digital_twins import SlitIlluminationGeometry, LinearArrayDetectionGeometry, PhotoacousticDevice
from simpa import perform_k_wave_acoustic_forward_simulation
from simpa.core.simulation_modules.reconstruction_module.reconstruction_module_delay_and_sum_adapter import \
reconstruct_delay_and_sum_pytorch
from simpa import MCXAdapter, ModelBasedVolumeCreationAdapter, \
GaussianNoise
from simpa.utils import Tags, Settings, TISSUE_LIBRARY
from simpa.core.simulation import simulate
from simpa.io_handling import load_data_field
import numpy as np
from simpa.utils.path_manager import PathManager
from simpa_tests.manual_tests import ManualIntegrationTestClass
import matplotlib.pyplot as plt
# FIXME temporary workaround for newest Intel architectures
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
class KWaveAcousticForwardConvenienceFunction(ManualIntegrationTestClass):
"""
    This class tests the convenience function for acoustic forward simulation.
    It first creates a volume and runs an optical forward simulation.
    Then the function is actually tested.
    Lastly, the generated time series data is reconstructed to check whether everything worked.
"""
def setup(self):
"""
Runs a pipeline consisting of volume creation and optical simulation. The resulting hdf5 file of the
simple test volume is saved at SAVE_PATH location defined in the path_config.env file.
"""
self.path_manager = PathManager()
self.VOLUME_TRANSDUCER_DIM_IN_MM = 75
self.VOLUME_PLANAR_DIM_IN_MM = 20
self.VOLUME_HEIGHT_IN_MM = 25
self.SPACING = 0.25
self.RANDOM_SEED = 4711
self.VOLUME_NAME = "TestKWaveAcousticForwardConvenienceFunction_" + str(self.RANDOM_SEED)
np.random.seed(self.RANDOM_SEED)
# These parameters set the general properties of the simulated volume
self.general_settings = {
Tags.RANDOM_SEED: self.RANDOM_SEED,
Tags.VOLUME_NAME: self.VOLUME_NAME,
Tags.SIMULATION_PATH: self.path_manager.get_hdf5_file_save_path(),
Tags.SPACING_MM: self.SPACING,
Tags.DIM_VOLUME_Z_MM: self.VOLUME_HEIGHT_IN_MM,
Tags.DIM_VOLUME_X_MM: self.VOLUME_TRANSDUCER_DIM_IN_MM,
Tags.DIM_VOLUME_Y_MM: self.VOLUME_PLANAR_DIM_IN_MM,
Tags.WAVELENGTHS: [700]
}
self.settings = Settings(self.general_settings)
self.settings.set_volume_creation_settings({
Tags.SIMULATE_DEFORMED_LAYERS: True,
Tags.STRUCTURES: self.create_example_tissue()
})
self.settings.set_optical_settings({
Tags.OPTICAL_MODEL_NUMBER_PHOTONS: 1e7,
Tags.OPTICAL_MODEL_BINARY_PATH: self.path_manager.get_mcx_binary_path(),
Tags.OPTICAL_MODEL: Tags.OPTICAL_MODEL_MCX,
Tags.ILLUMINATION_TYPE: Tags.ILLUMINATION_TYPE_PENCIL,
Tags.LASER_PULSE_ENERGY_IN_MILLIJOULE: 50,
Tags.MCX_ASSUMED_ANISOTROPY: 0.9
})
self.settings["noise_model"] = {
Tags.NOISE_MEAN: 0.0,
Tags.NOISE_STD: 0.4,
Tags.NOISE_MODE: Tags.NOISE_MODE_ADDITIVE,
Tags.DATA_FIELD: Tags.DATA_FIELD_INITIAL_PRESSURE,
Tags.NOISE_NON_NEGATIVITY_CONSTRAINT: True
}
self.device = PhotoacousticDevice(device_position_mm=np.array([self.VOLUME_TRANSDUCER_DIM_IN_MM/2,
self.VOLUME_PLANAR_DIM_IN_MM/2,
0]))
self.device.set_detection_geometry(LinearArrayDetectionGeometry(device_position_mm=
self.device.device_position_mm, pitch_mm=0.25,
number_detector_elements=200))
self.device.add_illumination_geometry(SlitIlluminationGeometry(slit_vector_mm=[100, 0, 0]))
# run pipeline including volume creation and optical mcx simulation
self.pipeline = [
ModelBasedVolumeCreationAdapter(self.settings),
MCXAdapter(self.settings),
GaussianNoise(self.settings, "noise_model")
]
def teardown(self):
os.remove(self.settings[Tags.SIMPA_OUTPUT_PATH])
def perform_test(self):
simulate(self.pipeline, self.settings, self.device)
self.test_convenience_function()
def test_convenience_function(self):
# load initial pressure
initial_pressure = load_data_field(self.path_manager.get_hdf5_file_save_path() + "/" +
self.VOLUME_NAME + ".hdf5",
Tags.DATA_FIELD_INITIAL_PRESSURE, wavelength=700)
image_slice = np.s_[:, 40, :]
self.initial_pressure = np.rot90(initial_pressure[image_slice], -1)
# define acoustic settings and run simulation with convenience function
acoustic_settings = {
Tags.ACOUSTIC_SIMULATION_3D: True,
Tags.ACOUSTIC_MODEL_BINARY_PATH: self.path_manager.get_matlab_binary_path(),
Tags.KWAVE_PROPERTY_ALPHA_POWER: 0.00,
Tags.KWAVE_PROPERTY_SENSOR_RECORD: "p",
Tags.KWAVE_PROPERTY_PMLInside: False,
Tags.KWAVE_PROPERTY_PMLSize: [31, 32],
Tags.KWAVE_PROPERTY_PMLAlpha: 1.5,
Tags.KWAVE_PROPERTY_PlotPML: False,
Tags.RECORDMOVIE: False,
Tags.MOVIENAME: "visualization_log",
Tags.ACOUSTIC_LOG_SCALE: True,
Tags.MODEL_SENSOR_FREQUENCY_RESPONSE: False
}
time_series_data = perform_k_wave_acoustic_forward_simulation(initial_pressure=self.initial_pressure,
detection_geometry=self.device.
get_detection_geometry(),
speed_of_sound=1540, density=1000,
alpha_coeff=0.0)
# reconstruct the time series data to compare it with initial pressure
self.settings.set_reconstruction_settings({
Tags.RECONSTRUCTION_MODE: Tags.RECONSTRUCTION_MODE_PRESSURE,
Tags.RECONSTRUCTION_BMODE_BEFORE_RECONSTRUCTION: True,
Tags.RECONSTRUCTION_BMODE_METHOD: Tags.RECONSTRUCTION_BMODE_METHOD_HILBERT_TRANSFORM,
Tags.DATA_FIELD_SPEED_OF_SOUND: 1540,
Tags.SPACING_MM: 0.25,
Tags.SENSOR_SAMPLING_RATE_MHZ: 40,
})
self.reconstructed = reconstruct_delay_and_sum_pytorch(
time_series_data.copy(), self.device.get_detection_geometry(), self.settings)
def visualise_result(self, show_figure_on_screen=True, save_path=None):
        '''Plot initial pressure and reconstructed image volume for manual comparison.'''
plt.subplot(2, 2, 1)
plt.title("Initial Pressure Pipeline")
plt.imshow(self.initial_pressure)
plt.subplot(2, 2, 2)
plt.title("Reconstructed Image Pipeline")
plt.imshow(np.rot90(self.reconstructed, -1))
plt.tight_layout()
if show_figure_on_screen:
plt.show()
else:
if save_path is None:
save_path = ""
plt.savefig(save_path + f"TestKWaveConvenienceFunction.png")
plt.close()
def create_example_tissue(self):
"""
This is a very simple example script of how to create a tissue definition.
It contains a muscular background, an epidermis layer on top of the muscles
        and two blood vessels.
"""
background_dictionary = Settings()
background_dictionary[Tags.MOLECULE_COMPOSITION] = TISSUE_LIBRARY.constant(1e-10, 1e-10, 1.0)
background_dictionary[Tags.STRUCTURE_TYPE] = Tags.BACKGROUND
muscle_dictionary = Settings()
muscle_dictionary[Tags.PRIORITY] = 1
muscle_dictionary[Tags.STRUCTURE_START_MM] = [0, 0, 0]
muscle_dictionary[Tags.STRUCTURE_END_MM] = [0, 0, 100]
muscle_dictionary[Tags.MOLECULE_COMPOSITION] = TISSUE_LIBRARY.constant(0.05, 100, 0.9)
muscle_dictionary[Tags.CONSIDER_PARTIAL_VOLUME] = True
muscle_dictionary[Tags.ADHERE_TO_DEFORMATION] = True
muscle_dictionary[Tags.STRUCTURE_TYPE] = Tags.HORIZONTAL_LAYER_STRUCTURE
vessel_1_dictionary = Settings()
vessel_1_dictionary[Tags.PRIORITY] = 3
vessel_1_dictionary[Tags.STRUCTURE_START_MM] = [self.VOLUME_TRANSDUCER_DIM_IN_MM/2,
0, 10]
vessel_1_dictionary[Tags.STRUCTURE_END_MM] = [
self.VOLUME_TRANSDUCER_DIM_IN_MM/2, self.VOLUME_PLANAR_DIM_IN_MM, 10]
vessel_1_dictionary[Tags.STRUCTURE_RADIUS_MM] = 3
vessel_1_dictionary[Tags.MOLECULE_COMPOSITION] = TISSUE_LIBRARY.blood()
vessel_1_dictionary[Tags.CONSIDER_PARTIAL_VOLUME] = True
vessel_1_dictionary[Tags.ADHERE_TO_DEFORMATION] = False
vessel_1_dictionary[Tags.STRUCTURE_TYPE] = Tags.CIRCULAR_TUBULAR_STRUCTURE
vessel_2_dictionary = Settings()
vessel_2_dictionary[Tags.PRIORITY] = 3
vessel_2_dictionary[Tags.STRUCTURE_START_MM] = [self.VOLUME_TRANSDUCER_DIM_IN_MM/2 - 10,
0, 5]
vessel_2_dictionary[Tags.STRUCTURE_END_MM] = [
self.VOLUME_TRANSDUCER_DIM_IN_MM/2 - 10, self.VOLUME_PLANAR_DIM_IN_MM, 5]
vessel_2_dictionary[Tags.STRUCTURE_RADIUS_MM] = 2
vessel_2_dictionary[Tags.MOLECULE_COMPOSITION] = TISSUE_LIBRARY.blood()
vessel_2_dictionary[Tags.CONSIDER_PARTIAL_VOLUME] = True
vessel_2_dictionary[Tags.ADHERE_TO_DEFORMATION] = False
vessel_2_dictionary[Tags.STRUCTURE_TYPE] = Tags.CIRCULAR_TUBULAR_STRUCTURE
epidermis_dictionary = Settings()
epidermis_dictionary[Tags.PRIORITY] = 8
epidermis_dictionary[Tags.STRUCTURE_START_MM] = [0, 0, 1]
epidermis_dictionary[Tags.STRUCTURE_END_MM] = [0, 0, 1.1]
epidermis_dictionary[Tags.MOLECULE_COMPOSITION] = TISSUE_LIBRARY.epidermis()
epidermis_dictionary[Tags.CONSIDER_PARTIAL_VOLUME] = True
epidermis_dictionary[Tags.ADHERE_TO_DEFORMATION] = True
epidermis_dictionary[Tags.STRUCTURE_TYPE] = Tags.HORIZONTAL_LAYER_STRUCTURE
tissue_dict = Settings()
tissue_dict[Tags.BACKGROUND] = background_dictionary
tissue_dict["muscle"] = muscle_dictionary
tissue_dict["epidermis"] = epidermis_dictionary
tissue_dict["vessel_1"] = vessel_1_dictionary
tissue_dict["vessel_2"] = vessel_2_dictionary
return tissue_dict
if __name__ == '__main__':
test = KWaveAcousticForwardConvenienceFunction()
test.run_test(show_figure_on_screen=False)
| 48.859649 | 119 | 0.668223 | 10,019 | 0.899372 | 0 | 0 | 0 | 0 | 0 | 0 | 1,589 | 0.142639 |
f81ea939afded2dfd41116deec7708196341c5d1 | 10,881 | py | Python | oc_ocdm/counter_handler/filesystem_counter_handler.py | arcangelo7/oc_ocdm | 128d062ce9d858024aafd26d7d238c7a26cc8914 | [
"0BSD"
] | 1 | 2020-12-17T15:33:01.000Z | 2020-12-17T15:33:01.000Z | oc_ocdm/counter_handler/filesystem_counter_handler.py | arcangelo7/oc_ocdm | 128d062ce9d858024aafd26d7d238c7a26cc8914 | [
"0BSD"
] | 26 | 2021-01-08T08:32:23.000Z | 2022-03-29T10:01:40.000Z | oc_ocdm/counter_handler/filesystem_counter_handler.py | arcangelo7/oc_ocdm | 128d062ce9d858024aafd26d7d238c7a26cc8914 | [
"0BSD"
] | 3 | 2021-04-16T08:44:44.000Z | 2022-02-15T11:09:22.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Silvio Peroni <essepuntato@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright notice
# and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.
from __future__ import annotations
import os
from shutil import copymode, move
from tempfile import mkstemp
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import BinaryIO, Tuple, List, Dict
from oc_ocdm.counter_handler.counter_handler import CounterHandler
class FilesystemCounterHandler(CounterHandler):
initial_line_len: int = 3
trailing_char: str = " "
def __init__(self, info_dir: str) -> None:
if info_dir is None or len(info_dir) <= 0:
raise ValueError("info_dir parameter is required!")
if info_dir[-1] != os.sep:
info_dir += os.sep
self.info_dir: str = info_dir
self.datasets_dir: str = info_dir + 'datasets' + os.sep
self.short_names: List[str] = ["an", "ar", "be", "br", "ci", "de", "id", "pl", "ra", "re", "rp"]
self.metadata_short_names: List[str] = ["di"]
self.info_files: Dict[str, str] = {key: ("info_file_" + key + ".txt")
for key in self.short_names}
self.prov_files: Dict[str, str] = {key: ("prov_file_" + key + ".txt")
for key in self.short_names}
def set_counter(self, new_value: int, entity_short_name: str, prov_short_name: str = "",
identifier: int = 1) -> None:
if new_value < 0:
raise ValueError("new_value must be a non negative integer!")
if prov_short_name == "se":
file_path: str = self.get_prov_path(entity_short_name)
else:
file_path: str = self.get_info_path(entity_short_name)
self._set_number(new_value, file_path, identifier)
def read_counter(self, entity_short_name: str, prov_short_name: str = "", identifier: int = 1) -> int:
if prov_short_name == "se":
file_path: str = self.get_prov_path(entity_short_name)
else:
file_path: str = self.get_info_path(entity_short_name)
return self._read_number(file_path, identifier)[0]
def increment_counter(self, entity_short_name: str, prov_short_name: str = "", identifier: int = 1) -> int:
if prov_short_name == "se":
file_path: str = self.get_prov_path(entity_short_name)
else:
file_path: str = self.get_info_path(entity_short_name)
return self._add_number(file_path, identifier)
def get_info_path(self, short_name: str) -> str:
return self.info_dir + self.info_files[short_name]
def get_prov_path(self, short_name: str) -> str:
return self.info_dir + self.prov_files[short_name]
def get_metadata_path(self, short_name: str, dataset_name: str) -> str:
return self.datasets_dir + dataset_name + os.sep + 'metadata_' + short_name + '.txt'
def __initialize_file_if_not_existing(self, file_path: str):
if not os.path.exists(os.path.dirname(file_path)):
os.makedirs(os.path.dirname(file_path))
if not os.path.isfile(file_path):
with open(file_path, "wb") as file:
first_line: str = self.trailing_char * (self.initial_line_len - 1) + "\n"
file.write(first_line.encode("ascii"))
def _read_number(self, file_path: str, line_number: int) -> Tuple[int, int]:
if line_number <= 0:
raise ValueError("line_number must be a positive non-zero integer number!")
self.__initialize_file_if_not_existing(file_path)
cur_number: int = 0
cur_line_len: int = 0
try:
with open(file_path, "rb") as file:
cur_line_len = self._get_line_len(file)
line_offset = (line_number - 1) * cur_line_len
file.seek(line_offset)
line = file.readline(cur_line_len).decode("ascii")
cur_number = int(line.rstrip(self.trailing_char + "\n"))
except ValueError:
cur_number = 0
except Exception as e:
print(e)
return cur_number, cur_line_len
def _add_number(self, file_path: str, line_number: int = 1) -> int:
if line_number <= 0:
raise ValueError("line_number must be a positive non-zero integer number!")
self.__initialize_file_if_not_existing(file_path)
cur_number, cur_line_len = self._read_number(file_path, line_number)
cur_number += 1
cur_number_len: int = len(str(cur_number)) + 1
if cur_number_len > cur_line_len:
self._increase_line_len(file_path, new_length=cur_number_len)
cur_line_len = cur_number_len
with open(file_path, "r+b") as file:
line_offset: int = (line_number - 1) * cur_line_len
file.seek(line_offset)
line: str = str(cur_number).ljust(cur_line_len - 1, self.trailing_char) + "\n"
file.write(line.encode("ascii"))
file.seek(-cur_line_len, os.SEEK_CUR)
self._fix_previous_lines(file, cur_line_len)
return cur_number
def _set_number(self, new_value: int, file_path: str, line_number: int = 1) -> None:
if new_value < 0:
raise ValueError("new_value must be a non negative integer!")
if line_number <= 0:
raise ValueError("line_number must be a positive non-zero integer number!")
self.__initialize_file_if_not_existing(file_path)
cur_line_len = self._read_number(file_path, line_number)[1]
cur_number_len: int = len(str(new_value)) + 1
if cur_number_len > cur_line_len:
self._increase_line_len(file_path, new_length=cur_number_len)
cur_line_len = cur_number_len
with open(file_path, "r+b") as file:
line_offset: int = (line_number - 1) * cur_line_len
file.seek(line_offset)
line: str = str(new_value).ljust(cur_line_len - 1, self.trailing_char) + "\n"
file.write(line.encode("ascii"))
file.seek(-cur_line_len, os.SEEK_CUR)
self._fix_previous_lines(file, cur_line_len)
@staticmethod
def _get_line_len(file: BinaryIO) -> int:
cur_char: str = file.read(1).decode("ascii")
count: int = 1
while cur_char is not None and len(cur_char) == 1 and cur_char != "\0":
cur_char = file.read(1).decode("ascii")
count += 1
if cur_char == "\n":
break
# Undo I/O pointer updates
file.seek(0)
        if cur_char is None or cur_char == "":  # file.read() returns "" at EOF
raise EOFError("Reached end-of-file without encountering a line separator!")
elif cur_char == "\0":
raise ValueError("Encountered a NULL byte!")
else:
return count
def _increase_line_len(self, file_path: str, new_length: int = 0) -> None:
if new_length <= 0:
raise ValueError("new_length must be a positive non-zero integer number!")
with open(file_path, "rb") as cur_file:
if self._get_line_len(cur_file) >= new_length:
raise ValueError("Current line length is greater than new_length!")
fh, abs_path = mkstemp()
with os.fdopen(fh, "wb") as new_file:
with open(file_path, "rt", encoding="ascii") as old_file:
for line in old_file:
number: str = line.rstrip(self.trailing_char + "\n")
new_line: str = str(number).ljust(new_length - 1, self.trailing_char) + "\n"
new_file.write(new_line.encode("ascii"))
# Copy the file permissions from the old file to the new file
copymode(file_path, abs_path)
# Replace original file
os.remove(file_path)
move(abs_path, file_path)
@staticmethod
def _is_a_valid_line(buf: bytes) -> bool:
string: str = buf.decode("ascii")
return (string[-1] == "\n") and ("\0" not in string[:-1])
def _fix_previous_lines(self, file: BinaryIO, line_len: int) -> None:
if line_len < self.initial_line_len:
raise ValueError("line_len should be at least %d!" % self.initial_line_len)
while file.tell() >= line_len:
file.seek(-line_len, os.SEEK_CUR)
buf: bytes = file.read(line_len)
if self._is_a_valid_line(buf) or len(buf) < line_len:
break
else:
file.seek(-line_len, os.SEEK_CUR)
fixed_line: str = (self.trailing_char * (line_len - 1)) + "\n"
file.write(fixed_line.encode("ascii"))
file.seek(-line_len, os.SEEK_CUR)
def set_metadata_counter(self, new_value: int, entity_short_name: str, dataset_name: str) -> None:
if new_value < 0:
raise ValueError("new_value must be a non negative integer!")
if dataset_name is None:
raise ValueError("dataset_name must be provided!")
if entity_short_name not in self.metadata_short_names:
raise ValueError("entity_short_name is not a known metadata short name!")
file_path: str = self.get_metadata_path(entity_short_name, dataset_name)
return self._set_number(new_value, file_path, 1)
def read_metadata_counter(self, entity_short_name: str, dataset_name: str) -> int:
if dataset_name is None:
raise ValueError("dataset_name must be provided!")
if entity_short_name not in self.metadata_short_names:
raise ValueError("entity_short_name is not a known metadata short name!")
file_path: str = self.get_metadata_path(entity_short_name, dataset_name)
return self._read_number(file_path, 1)[0]
def increment_metadata_counter(self, entity_short_name: str, dataset_name: str) -> int:
if dataset_name is None:
raise ValueError("dataset_name must be provided!")
if entity_short_name not in self.metadata_short_names:
raise ValueError("entity_short_name is not a known metadata short name!")
file_path: str = self.get_metadata_path(entity_short_name, dataset_name)
return self._add_number(file_path, 1)
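# Illustrative usage (a sketch, not part of the original module; the temporary
# directory is an assumption for demonstration). Counters live one per line as
# fixed-width ASCII, and lines are widened on demand as the numbers grow:
if __name__ == '__main__':
    import tempfile
    demo_handler = FilesystemCounterHandler(tempfile.mkdtemp())
    print(demo_handler.increment_counter("br"))  # -> 1, stored in info_file_br.txt
    demo_handler.set_counter(42, "br")
    print(demo_handler.read_counter("br"))       # -> 42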
| 42.503906 | 111 | 0.637074 | 9,776 | 0.898447 | 0 | 0 | 815 | 0.074901 | 0 | 0 | 2,015 | 0.185185 |
f81fb7d0b255f47fb45c7a694f335756c5c2bb24 | 3,823 | py | Python | backend_app/serializers.py | ilveroluca/backend | 91b80b154c4e1e45587797cc41bf2b2b75c23e68 | [
"MIT"
] | null | null | null | backend_app/serializers.py | ilveroluca/backend | 91b80b154c4e1e45587797cc41bf2b2b75c23e68 | [
"MIT"
] | null | null | null | backend_app/serializers.py | ilveroluca/backend | 91b80b154c4e1e45587797cc41bf2b2b75c23e68 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from backend_app import models
class AllowedPropertySerializer(serializers.ModelSerializer):
class Meta:
model = models.AllowedProperty
fields = '__all__'
# exclude = ['id']
class DatasetSerializer(serializers.ModelSerializer):
class Meta:
model = models.Dataset
fields = ['id', 'name', 'path', 'task_id']
write_only_fields = ['name', 'path', 'task_id'] # Only for post
class InferenceSerializer(serializers.ModelSerializer):
project_id = serializers.IntegerField()
class Meta:
model = models.Inference
fields = ['project_id', 'modelweights_id', 'dataset_id']
# exclude = ['stats']
class InferenceSingleSerializer(serializers.ModelSerializer):
project_id = serializers.IntegerField()
image_url = serializers.URLField()
class Meta:
model = models.Inference
exclude = ['stats', 'dataset_id', 'logfile']
# write_only_fields = ['modelweights_id', 'image_url', 'project_id']
class ModelSerializer(serializers.ModelSerializer):
class Meta:
model = models.Model
fields = ['id', 'name', 'location', 'task_id']
class ModelWeightsSerializer(serializers.ModelSerializer):
class Meta:
model = models.ModelWeights
fields = ['id', 'name', 'celery_id', "model_id", "dataset_id", "pretrained_on"]
read_only_fields = ['location', 'celery_id', 'logfile']
write_only_fields = ['id']
class ProjectSerializer(serializers.ModelSerializer):
class Meta:
model = models.Project
fields = '__all__'
# fields = ['id', 'name', 'task_id', 'modelweights_id', 'inference_id']
# exclude = ['task', 'modelweights']
class PropertyListSerializer(serializers.ModelSerializer):
class Meta:
model = models.Property
# fields = ['id', 'name']
fields = '__all__'
class PropertyTrainSerializer(serializers.ModelSerializer):
value = serializers.CharField()
class Meta:
model = models.Property
fields = ['id', 'name', 'value']
class TaskSerializer(serializers.ModelSerializer):
class Meta:
model = models.Task
fields = '__all__'
class TrainSerializer(serializers.Serializer):
dataset_id = serializers.IntegerField()
model_id = serializers.IntegerField()
project_id = serializers.IntegerField()
properties = PropertyTrainSerializer(many=True)
weights_id = serializers.IntegerField(allow_null=True)
class TrainingSettingSerializer(serializers.ModelSerializer):
class Meta:
model = models.TrainingSetting
fields = '__all__'
# exclude = ['id']
class StopProcessSerializer(serializers.Serializer):
process_id = serializers.UUIDField()
# RESPONSES SERIALIZERS
class GeneralResponse(serializers.Serializer):
result = serializers.CharField()
class GeneralErrorResponse(serializers.Serializer):
result = serializers.CharField()
error = serializers.CharField()
class InferenceResponseSerializer(serializers.Serializer):
result = serializers.CharField()
process_id = serializers.UUIDField()
class OutputsResponse(serializers.Serializer):
outputs = serializers.ListField(
child=serializers.ListField(
child=serializers.ListField(child=serializers.Field(), min_length=2, max_length=2)))
class TrainResponse(serializers.Serializer):
result = serializers.CharField()
process_id = serializers.UUIDField()
class StatusStatusResponse(serializers.Serializer):
process_type = serializers.CharField()
process_status = serializers.CharField()
process_data = serializers.CharField()
class StatusResponse(serializers.Serializer):
result = serializers.CharField()
status = StatusStatusResponse()
| 27.905109 | 96 | 0.698666 | 3,667 | 0.959194 | 0 | 0 | 0 | 0 | 0 | 0 | 593 | 0.155114 |
f820475f96913877c23f5aa594fcc87cf676cc00 | 1,296 | py | Python | src/api_status_monitor/consumer/database_connection.py | jjaakola/bang-a-gong | d30f889c18eeaff3d62d47cd02e93516e4d24dd7 | [
"MIT"
] | null | null | null | src/api_status_monitor/consumer/database_connection.py | jjaakola/bang-a-gong | d30f889c18eeaff3d62d47cd02e93516e4d24dd7 | [
"MIT"
] | null | null | null | src/api_status_monitor/consumer/database_connection.py | jjaakola/bang-a-gong | d30f889c18eeaff3d62d47cd02e93516e4d24dd7 | [
"MIT"
] | null | null | null | """The database connection manager.
"""
import logging
import psycopg2
class DatabaseConnection():
"""Database connection manager.
"""
def __init__(self, host, port, user, dbname, password, sslmode):
self._conn = None
self._host = host
self._port = port
self._user = user
self._dbname = dbname
self._password = password
self._sslmode = "require" if sslmode else None
def get_connection(self):
if not self._conn or self._conn.closed:
try:
self._conn = psycopg2.connect(dbname=self._dbname,
user=self._user,
host=self._host,
port=self._port,
password=self._password,
sslmode=self._sslmode)
except Exception:
logging.error("Unable to connect to PostgreSQL database.", exc_info=1)
self._conn = None
return self._conn
def close(self):
try:
if self._conn:
self._conn.close()
except Exception:
logging.warning("Database connection close failed.")
| 31.609756 | 86 | 0.500772 | 1,221 | 0.94213 | 0 | 0 | 0 | 0 | 0 | 0 | 165 | 0.127315 |
f8207cbc88a40509eaabe2f12c2e9fb96d02736a | 1,154 | py | Python | app/cvp.py | ekiminatorn/murmur-rest | 594060264cd6ea594d5c07f40163782946f48eb2 | [
"Unlicense",
"MIT"
] | 73 | 2015-01-08T19:58:36.000Z | 2022-01-25T20:44:07.000Z | app/cvp.py | ekiminatorn/murmur-rest | 594060264cd6ea594d5c07f40163782946f48eb2 | [
"Unlicense",
"MIT"
] | 34 | 2015-01-08T19:52:34.000Z | 2022-03-15T08:36:30.000Z | app/cvp.py | ekiminatorn/murmur-rest | 594060264cd6ea594d5c07f40163782946f48eb2 | [
"Unlicense",
"MIT"
] | 33 | 2015-01-08T19:22:40.000Z | 2022-01-19T06:28:37.000Z | """
cvp.py
Functions for generating CVP feeds.
:copyright: (C) 2014 by github.com/alfg.
:license: MIT, see README for more details.
"""
def cvp_player_to_dict(player):
"""
Convert a player object from a Tree to a CVP-compliant dict.
"""
return {
"session": player.session,
"userid": player.userid,
"name": player.name,
"deaf": player.deaf,
"mute": player.mute,
"selfDeaf": player.selfDeaf,
"selfMute": player.selfMute,
"suppress": player.suppress,
"onlinesecs": player.onlinesecs,
"idlesecs": player.idlesecs
}
def cvp_chan_to_dict(channel):
"""
Convert a channel from a Tree object to a CVP-compliant dict, recursively.
"""
return {
"id": channel.c.id,
"parent": channel.c.parent,
"name": channel.c.name,
"description": channel.c.description,
"channels": [cvp_chan_to_dict(c) for c in channel.children],
"users": [cvp_player_to_dict(p) for p in channel.users],
"position": channel.c.position,
"temporary": channel.c.temporary,
"links": channel.c.links
}
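# Illustrative usage (a sketch, not part of the original module): real channel
# objects come from Murmur's Ice tree; SimpleNamespace only stands in for that
# structure here.
if __name__ == '__main__':
    from types import SimpleNamespace as NS
    root = NS(c=NS(id=0, parent=-1, name="Root", description="",
                   position=0, temporary=False, links=[]),
              children=[], users=[])
    print(cvp_chan_to_dict(root))  # CVP-compliant dict with empty users/channels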
| 26.837209 | 78 | 0.604853 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 467 | 0.404679 |
f82135374f4390dc528fb4356d78faff21f4ca0a | 5,951 | py | Python | Tools/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py | VincentWei/mdolphin-core | 48ffdcf587a48a7bb4345ae469a45c5b64ffad0e | [
"Apache-2.0"
] | 6 | 2017-05-31T01:46:45.000Z | 2018-06-12T10:53:30.000Z | Tools/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py | FMSoftCN/mdolphin-core | 48ffdcf587a48a7bb4345ae469a45c5b64ffad0e | [
"Apache-2.0"
] | null | null | null | Tools/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py | FMSoftCN/mdolphin-core | 48ffdcf587a48a7bb4345ae469a45c5b64ffad0e | [
"Apache-2.0"
] | 2 | 2017-07-17T06:02:42.000Z | 2018-09-19T10:08:38.000Z | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import base
import unittest
import tempfile
from webkitpy.common.system.executive import Executive, ScriptError
from webkitpy.thirdparty.mock import Mock
class PortTest(unittest.TestCase):
def test_format_wdiff_output_as_html(self):
output = "OUTPUT %s %s %s" % (base.Port._WDIFF_DEL, base.Port._WDIFF_ADD, base.Port._WDIFF_END)
html = base.Port()._format_wdiff_output_as_html(output)
expected_html = "<head><style>.del { background: #faa; } .add { background: #afa; }</style></head><pre>OUTPUT <span class=del> <span class=add> </span></pre>"
self.assertEqual(html, expected_html)
def test_wdiff_command(self):
port = base.Port()
port._path_to_wdiff = lambda: "/path/to/wdiff"
command = port._wdiff_command("/actual/path", "/expected/path")
expected_command = [
"/path/to/wdiff",
"--start-delete=##WDIFF_DEL##",
"--end-delete=##WDIFF_END##",
"--start-insert=##WDIFF_ADD##",
"--end-insert=##WDIFF_END##",
"/actual/path",
"/expected/path",
]
self.assertEqual(command, expected_command)
def _file_with_contents(self, contents, encoding="utf-8"):
new_file = tempfile.NamedTemporaryFile()
new_file.write(contents.encode(encoding))
new_file.flush()
return new_file
def test_run_wdiff(self):
executive = Executive()
# This may fail on some systems. We could ask the port
# object for the wdiff path, but since we don't know what
# port object to use, this is sufficient for now.
try:
wdiff_path = executive.run_command(["which", "wdiff"]).rstrip()
except Exception, e:
wdiff_path = None
port = base.Port()
port._path_to_wdiff = lambda: wdiff_path
if wdiff_path:
# "with tempfile.NamedTemporaryFile() as actual" does not seem to work in Python 2.5
actual = self._file_with_contents(u"foo")
expected = self._file_with_contents(u"bar")
wdiff = port._run_wdiff(actual.name, expected.name)
expected_wdiff = "<head><style>.del { background: #faa; } .add { background: #afa; }</style></head><pre><span class=del>foo</span><span class=add>bar</span></pre>"
self.assertEqual(wdiff, expected_wdiff)
# Running the full wdiff_text method should give the same result.
base._wdiff_available = True # In case it's somehow already disabled.
wdiff = port.wdiff_text(actual.name, expected.name)
self.assertEqual(wdiff, expected_wdiff)
# wdiff should still be available after running wdiff_text with a valid diff.
self.assertTrue(base._wdiff_available)
actual.close()
expected.close()
# Bogus paths should raise a script error.
self.assertRaises(ScriptError, port._run_wdiff, "/does/not/exist", "/does/not/exist2")
self.assertRaises(ScriptError, port.wdiff_text, "/does/not/exist", "/does/not/exist2")
# wdiff will still be available after running wdiff_text with invalid paths.
self.assertTrue(base._wdiff_available)
base._wdiff_available = True
# If wdiff does not exist _run_wdiff should throw an OSError.
port._path_to_wdiff = lambda: "/invalid/path/to/wdiff"
self.assertRaises(OSError, port._run_wdiff, "foo", "bar")
# wdiff_text should not throw an error if wdiff does not exist.
self.assertEqual(port.wdiff_text("foo", "bar"), "")
# However wdiff should not be available after running wdiff_text if wdiff is missing.
self.assertFalse(base._wdiff_available)
base._wdiff_available = True
class DriverTest(unittest.TestCase):
def _assert_wrapper(self, wrapper_string, expected_wrapper):
wrapper = base.Driver._command_wrapper(wrapper_string)
self.assertEqual(wrapper, expected_wrapper)
def test_command_wrapper(self):
self._assert_wrapper(None, [])
self._assert_wrapper("valgrind", ["valgrind"])
# Validate that shlex works as expected.
command_with_spaces = "valgrind --smc-check=\"check with spaces!\" --foo"
expected_parse = ["valgrind", "--smc-check=check with spaces!", "--foo"]
self._assert_wrapper(command_with_spaces, expected_parse)
| 46.858268 | 175 | 0.682406 | 4,263 | 0.71635 | 0 | 0 | 0 | 0 | 0 | 0 | 3,075 | 0.51672 |
f8237a8940cd62de0269063bae0eb6296bc0aa2a | 2,796 | py | Python | data/classifier/general_test.py | alexv1/tensorflow_learn | ae936ffdc211a11403d6a06401a2115334b46402 | [
"Apache-2.0"
] | null | null | null | data/classifier/general_test.py | alexv1/tensorflow_learn | ae936ffdc211a11403d6a06401a2115334b46402 | [
"Apache-2.0"
] | null | null | null | data/classifier/general_test.py | alexv1/tensorflow_learn | ae936ffdc211a11403d6a06401a2115334b46402 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from skimage import transform
import tensorflow as tf
import numpy as np
import glob
import face_recognition as FR
import os
import shutil
def read_one_image(image_file, width, height):
# img = io.imread(image_file)
img = FR.load_image_file(image_file)
img = transform.resize(img,(width, height))
# 避免有黑白格式的图片进入
if img.shape != (width, height, 3):
print(image_file, img.shape)
return None
return np.asarray(img)
def judge_category(img_file, tf_session, logits, tensor, category_dict, width, height):
img = FR.load_image_file(img_file)
faces = FR.face_locations(img)
if len(faces) != 1:
return None
# 截取头像
# print(' find face', faces[0], img_file)
top, right, bottom, left = faces[0]
face_image = img[top:bottom, left:right]
# 处理图片的缩放,扩充到100x100
face_array = np.asarray(face_image)
face_array = transform.resize(face_array, (width, height))
# 归一化
face_array = np.asarray(face_array, np.float32)
feed_dict = {tensor: [face_array]}
classification_result = tf_session.run(logits, feed_dict)
# 打印出预测矩阵
print(classification_result)
# 根据索引通过字典对应花的分类
output = tf.argmax(classification_result, 1).eval()
result = ''
for i in range(len(output)):
print(category_dict[output[i]], img_file)
result = category_dict[output[i]]
return result
def collect_results(data, dest_dir):
if not os.path.exists(dest_dir):
os.mkdir(dest_dir)
for d in data:
shutil.copy(d, dest_dir)
def test_classification(root_dir, category_dict, test_dir=None, w=100, h=100):
model_dir = root_dir + '/models'
if test_dir is None:
test_dir = root_dir + '/test_files'
with tf.Session() as sess:
saver = tf.train.import_meta_graph(model_dir + '/model.ckpt.meta')
saver.restore(sess, tf.train.latest_checkpoint(model_dir))
graph = tf.get_default_graph()
tensor = graph.get_tensor_by_name("x:0")
logits = graph.get_tensor_by_name("logits_eval:0")
datas = glob.glob(test_dir + '/*.jpg')
datas.sort()
stats = [0, 0, 0]
smiles = []
unsmiles = []
unknown = []
idx = 0
for im in datas:
idx += 1
if idx < 10000:
continue
if idx % 50 == 0:
print('test process#', idx, '.......')
category = judge_category(im, sess, logits, tensor, category_dict, w, h)
if category is None:
stats[2] += 1
unknown.append(im)
continue
if category == 'smile':
stats[1] += 1
smiles.append(im)
else:
stats[0] += 1
unsmiles.append(im)
print('nosmile#', stats[0])
print('smile#', stats[1])
print('unknown#', stats[2])
print('nosmile', unsmiles)
print('smile', smiles)
collect_dir = root_dir + '/collect'
collect_results(unsmiles, collect_dir + '/nosmile')
collect_results(smiles, collect_dir + '/smile')
| 27.96 | 87 | 0.672031 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 441 | 0.152174 |
f8238013e026edf0a1b82a52242ee8f202d32c83 | 693 | py | Python | func.py | CrownCrafter/School | 488810b223ad746d7d1b396e609ce8f90f25662c | [
"MIT"
] | null | null | null | func.py | CrownCrafter/School | 488810b223ad746d7d1b396e609ce8f90f25662c | [
"MIT"
] | null | null | null | func.py | CrownCrafter/School | 488810b223ad746d7d1b396e609ce8f90f25662c | [
"MIT"
] | 1 | 2021-02-06T04:28:17.000Z | 2021-02-06T04:28:17.000Z | def cyl(h, r):
area_cyl = 2 * 3.14 * r * h
return(area_cyl)
def con(r, l):
area_con = 3.14 * r * l
return(area_con)
def final_price(cost):
tax = 0.18 * cost
re_price = cost + tax
return(re_price)
print("Enter Values of cylindrical part of tent ")
h = float(input("Height : "))
r = float(input("radius : "))
csa_cyl = cyl(h, r)
l = float(input("Enter slant height "))
csa_con = con(r, l)
canvas_area = csa_cyl + csa_con
print("Area of canvas = ", canvas_area, " m^2")
unit_price = float(input("Enter cost of 1 m^2 "))
total_price = unit_price * canvas_area
print("Total cost of canvas before tax ",total_price)
print("Inluding tax"+ str(final_price(total_price)))
| 28.875 | 53 | 0.658009 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 181 | 0.261183 |
f823c6094a403ab6a62faccb2e76b2e2b2d997a0 | 1,282 | py | Python | pymoku/plotly_support.py | manekawije/Liquid | 284991ceca70ec3fcd0cca7e19f4100463600a6c | [
"MIT"
] | null | null | null | pymoku/plotly_support.py | manekawije/Liquid | 284991ceca70ec3fcd0cca7e19f4100463600a6c | [
"MIT"
] | null | null | null | pymoku/plotly_support.py | manekawije/Liquid | 284991ceca70ec3fcd0cca7e19f4100463600a6c | [
"MIT"
] | null | null | null | # Plotly integration for the Moku:Lab Datalogger
# Copyright 2016 Liquid Instruments Pty. Ltd.
from pymoku import InvalidOperationException
def stream_init(moku, uname, api_key, str_id1, str_id2, npoints=100, mode='lines', line={}):
line = ';'.join([ '='.join(i) for i in list(line.items())])
settings = [
('plotly.uname', uname),
('plotly.api_key', api_key),
('plotly.strid1', str_id1),
('plotly.strid2', str_id2),
('plotly.displaysize', str(npoints)),
('plotly.mode', mode),
('plotly.line', line),
]
moku._set_properties(settings)
def stream_url(moku):
return moku._get_property_single('plotly.url')
def plot_frame(dataframe, uname=None, api_key=None, mode='lines', line={}):
try:
import plotly.plotly as ply
import plotly.tools as ptls
from plotly.graph_objs import Scatter, Layout, Data, Figure
except ImportError:
raise InvalidOperationException("Please install the Python plotly bindings")
if uname and api_key:
ply.sign_in(uname, api_key)
c1 = dataframe.ch1
c2 = dataframe.ch2
x = list(range(len(c1)))
t1 = Scatter(x=x, y=c1, mode=mode, line=line)
t2 = Scatter(x=x, y=c2, mode=mode, line=line)
layout = Layout(title="Moku:Lab Frame Grab")
data = Data([t1, t2])
fig = Figure(data=data, layout=layout)
return ply.plot(fig) | 26.163265 | 92 | 0.705148 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 295 | 0.230109 |
f8277c470e26c658915e5f878e41e448502ec2a5 | 1,126 | py | Python | test_publisher.py | cpgillem/markdown_publisher | a8e6bacea95196b9a18ad8fa2f85822c5d9c4e74 | [
"MIT"
] | null | null | null | test_publisher.py | cpgillem/markdown_publisher | a8e6bacea95196b9a18ad8fa2f85822c5d9c4e74 | [
"MIT"
] | 3 | 2015-04-11T08:16:56.000Z | 2015-04-11T08:17:32.000Z | test_publisher.py | cpgillem/markdown-publisher | a8e6bacea95196b9a18ad8fa2f85822c5d9c4e74 | [
"MIT"
] | null | null | null | import publisher
test_pdf_filename = "test/test.pdf"
test_css_filename = "test/test.css"
test_md_filename = "test/test.md"
test_html_filename = "test/test.html"
test_sender = "cpg@yakko.cs.wmich.edu"
test_recipient = "cpgillem@gmail.com"
test_md = "# Test heading\n\n- test item 1\n- test item 2"
def from_html_file():
print publisher.md_to_html(publisher.from_file(test_md_filename))
def md_to_html():
    print publisher.md_to_html(test_md)
def md_and_css_to_html():
html_source = publisher.md_and_css_to_html(publisher.from_file(test_md_filename),
publisher.from_file(test_css_filename))
print html_source
publisher.to_file(html_source, test_html_filename)
def from_md_file_to_pdf_file():
test_html = publisher.md_to_html(publisher.from_file("README.md"))
print publisher.html_to_pdf_file(test_html, test_pdf_filename, [test_css_filename])
def from_md_to_html_email():
test_email = publisher.md_to_html_email(publisher.from_file(test_md_filename),
publisher.from_file(test_css_filename))
print test_email
# The test case currently in use
from_md_to_html_email()
| 32.171429 | 87 | 0.781528 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 195 | 0.173179 |
f82900deb38425b32b0150ae828a4448ba15499c | 24 | py | Python | src/train/__init__.py | gracengu/multinomial_classification | 2346533415aff151d1774d36405360ca236cee3f | [
"MIT"
] | 2 | 2021-11-16T12:52:58.000Z | 2021-12-13T04:00:39.000Z | src/train/__init__.py | gracengu/multinomial_classification | 2346533415aff151d1774d36405360ca236cee3f | [
"MIT"
] | null | null | null | src/train/__init__.py | gracengu/multinomial_classification | 2346533415aff151d1774d36405360ca236cee3f | [
"MIT"
] | null | null | null | from .train import Train | 24 | 24 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f82c17e0d48a8946b94491663089d67afc63ece3 | 1,185 | py | Python | tracpro/msgs/migrations/0005_inboxmessage.py | rapidpro/tracpro | a68a782a7ff9bb0ccee85368132d8847c280fea3 | [
"BSD-3-Clause"
] | 5 | 2015-07-21T15:58:31.000Z | 2019-09-14T22:34:00.000Z | tracpro/msgs/migrations/0005_inboxmessage.py | rapidpro/tracpro | a68a782a7ff9bb0ccee85368132d8847c280fea3 | [
"BSD-3-Clause"
] | 197 | 2015-03-24T15:26:04.000Z | 2017-11-28T19:24:37.000Z | tracpro/msgs/migrations/0005_inboxmessage.py | rapidpro/tracpro | a68a782a7ff9bb0ccee85368132d8847c280fea3 | [
"BSD-3-Clause"
] | 10 | 2015-03-24T12:26:36.000Z | 2017-02-21T13:08:57.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('orgs', '0013_auto_20150715_1831'),
('contacts', '0004_auto_20150324_1024'),
('msgs', '0004_message_pollrun'),
]
operations = [
migrations.CreateModel(
name='InboxMessage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('rapidpro_message_id', models.IntegerField()),
('text', models.CharField(max_length=640, null=True)),
('archived', models.BooleanField(default=False)),
('created_on', models.DateTimeField(null=True)),
('delivered_on', models.DateTimeField(null=True)),
('sent_on', models.DateTimeField(null=True)),
('contact_from', models.ForeignKey(related_name='inbox_messages', to='contacts.Contact')),
('org', models.ForeignKey(related_name='inbox_messages', verbose_name='Organization', to='orgs.Org')),
],
),
]
| 38.225806 | 118 | 0.599156 | 1,076 | 0.908017 | 0 | 0 | 0 | 0 | 0 | 0 | 304 | 0.25654 |
f82d5b036daead0dff75c2761e785f8a14568edb | 191 | py | Python | src/Models/__init__.py | shulip/ShoppingMallSystem | 01e5a04a8353ca319ed2dc002fc358f6e44c33dd | [
"MIT"
] | null | null | null | src/Models/__init__.py | shulip/ShoppingMallSystem | 01e5a04a8353ca319ed2dc002fc358f6e44c33dd | [
"MIT"
] | null | null | null | src/Models/__init__.py | shulip/ShoppingMallSystem | 01e5a04a8353ca319ed2dc002fc358f6e44c33dd | [
"MIT"
] | 1 | 2021-04-22T15:14:21.000Z | 2021-04-22T15:14:21.000Z | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from .Contract import *
from .Receivable import *
from .Receipt import *
from .Shop import *
from .Statement import *
from .Application import * | 21.222222 | 26 | 0.701571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.230366 |
f82d7cf376b5b98be3742039b95afbfff6e6b1f8 | 1,630 | py | Python | description tm.py | jfoerderer/lda-topic-modeling | 998701f87df3a3d034d9208ff60266dcd6dc2b59 | [
"MIT"
] | 2 | 2017-09-02T09:00:24.000Z | 2017-09-08T07:18:38.000Z | description tm.py | jfoerderer/lda-topic-modeling | 998701f87df3a3d034d9208ff60266dcd6dc2b59 | [
"MIT"
] | null | null | null | description tm.py | jfoerderer/lda-topic-modeling | 998701f87df3a3d034d9208ff60266dcd6dc2b59 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import csv
from stop_words import get_stop_words
from nltk.stem.porter import PorterStemmer
from gensim import corpora
import gensim
import os
import re
from nltk.tokenize import RegexpTokenizer
# SET PATH
path = r''
inputname = ""
def remove_html_tags(text):
"""Remove html tags from a string"""
clean = re.compile('<.*?>')
return re.sub(clean, '', text)
# setup
tokenizer = RegexpTokenizer(r'\w+')
en_stop = get_stop_words('en')
p_stemmer = PorterStemmer()
fn = os.path.join(path, inputname)
doc_set = []
with open(fn, encoding="utf8") as f:
    csv_f = csv.reader(f)
    for i, row in enumerate(csv_f):
        if i > 1 and len(row) > 1:
            temp = remove_html_tags(row[1])
            temp = re.sub("[^a-zA-Z ]", "", temp)
            doc_set.append(temp)
texts = []
for i in doc_set:
if i.strip():
raw = i.lower()
tokens = tokenizer.tokenize(raw)
        if len(tokens) > 5:
stopped_tokens = [i for i in tokens if not i in en_stop]
texts.append(stopped_tokens)
dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
lsi = gensim.models.lsimodel.LsiModel(corpus, id2word=dictionary, num_topics=5 )
print (lsi.print_topics(num_topics=3, num_words=3))
ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=20, id2word = dictionary, passes=20)
print(ldamodel.print_topics(num_topics=20, num_words=5))
K = ldamodel.num_topics
topicWordProbMat = ldamodel.print_topics(K)
| 26.721311 | 99 | 0.628834 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.072393 |
f82ef0c0ee2c3fc021e7566fc3d68636a538299f | 596 | py | Python | scripts/load_sample_data.py | thobbs/logsandra | a17abc995dcb0573f3db2f714c1b47d3aff8b20a | [
"MIT"
] | 7 | 2015-05-18T13:00:54.000Z | 2018-08-06T08:27:57.000Z | scripts/load_sample_data.py | thobbs/logsandra | a17abc995dcb0573f3db2f714c1b47d3aff8b20a | [
"MIT"
] | null | null | null | scripts/load_sample_data.py | thobbs/logsandra | a17abc995dcb0573f3db2f714c1b47d3aff8b20a | [
"MIT"
] | 4 | 2015-06-16T11:09:53.000Z | 2020-04-27T19:25:57.000Z | #!/usr/bin/env python
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))
from random import randint
from datetime import datetime, timedelta
from logsandra.model.client import CassandraClient
client = CassandraClient('test', 'localhost', 9160, 3)
keywords = ['foo', 'bar', 'baz']
print "Loading sample data for the following keywords:", ', '.join(keywords)
today = datetime.now()
for i in range(1000):
d = today + timedelta(randint(-7, -1), randint(-3600*24, 3600*24))
client.add_log(d, 'test entry', 'here', [keywords[i % len(keywords)]])
| 28.380952 | 76 | 0.697987 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 143 | 0.239933 |
f830e618925548200af372e7691ce927a36784c1 | 867 | py | Python | registry/setup.py | fjrmoreews/bioshadock_client | 26a1de6e130689b6385144253525c861d2a2199d | [
"Apache-2.0"
] | 1 | 2015-11-25T19:03:58.000Z | 2015-11-25T19:03:58.000Z | registry/setup.py | fjrmoreews/bioshadock_client | 26a1de6e130689b6385144253525c861d2a2199d | [
"Apache-2.0"
] | 2 | 2015-11-24T14:45:44.000Z | 2015-11-26T15:28:30.000Z | registry/setup.py | fjrmoreews/bioshadock_client | 26a1de6e130689b6385144253525c861d2a2199d | [
"Apache-2.0"
] | 1 | 2015-11-27T10:57:15.000Z | 2015-11-27T10:57:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
# name of the lib
name='bioshadock_biotools',
# version
version='1.0.1',
packages=find_packages(),
author="Francois Moreews",
description="Import tool for biotools from Dockerfile",
include_package_data=True,
classifiers=[
"Programming Language :: Python",
"Development Status :: 5 - Production/Stable",
"License :: Apache 2.0",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Topic :: Communications",
],
scripts = [
'parseDockerFile.py',
'registryClient.py'
],
install_requires = [
'lxml',
'requests>=2.7.0'
],
license="Apache 2.0",
)
| 18.847826 | 59 | 0.575548 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 461 | 0.531719 |
f831926e75acbe42ce6d5e5261d3946d9b9dfea1 | 1,176 | py | Python | _example/xor_embedded/make.py | backwardn/go-tflite | 30f5e2a268d2eb053f758636609c5c379a3016b5 | [
"MIT"
] | 3 | 2020-01-09T02:57:30.000Z | 2020-07-17T15:56:50.000Z | _example/xor_embedded/make.py | backwardn/go-tflite | 30f5e2a268d2eb053f758636609c5c379a3016b5 | [
"MIT"
] | null | null | null | _example/xor_embedded/make.py | backwardn/go-tflite | 30f5e2a268d2eb053f758636609c5c379a3016b5 | [
"MIT"
] | null | null | null | import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import RMSprop
from tensorflow.lite.python import lite
X_train = np.array([[0.0, 0.0],
[1.0, 0.0],
[0.0, 1.0],
[1.0, 1.0]])
Y_train = np.array([0.0,
1.0,
1.0,
0.0])
model = Sequential()
output_count_layer0 = 2
model.add(
Dense(
output_count_layer0,
input_shape=(2, ),
activation='sigmoid')) # Need to specify input shape for input layer
output_count_layer1 = 1
model.add(Dense(output_count_layer1, activation='linear'))
model.compile(
loss='mean_squared_error', optimizer=RMSprop(), metrics=['accuracy'])
BATCH_SIZE = 4
history = model.fit(
X_train, Y_train, batch_size=BATCH_SIZE, epochs=3600, verbose=1)
X_test = X_train
Y_test = Y_train
score = model.evaluate(X_test, Y_test, verbose=0)
model.save('xor_model.h5')
converter = lite.TFLiteConverter.from_keras_model_file('xor_model.h5')
tflite_model = converter.convert()
open('public/xor_model.tflite', 'wb').write(tflite_model)
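# Quick sanity check of the converted model (a minimal sketch; assumes the
# tf.lite.Interpreter runtime shipped with the TensorFlow version used above):
import tensorflow as tf
interpreter = tf.lite.Interpreter(model_path='public/xor_model.tflite')
interpreter.allocate_tensors()
input_detail = interpreter.get_input_details()[0]
output_detail = interpreter.get_output_details()[0]
interpreter.set_tensor(input_detail['index'],
                       np.array([[1.0, 0.0]], dtype=np.float32))
interpreter.invoke()
print(interpreter.get_tensor(output_detail['index']))  # should be close to 1.0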
| 30.947368 | 75 | 0.662415 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 149 | 0.126701 |
f835c7244c8f288b00b860e6cef6f64c28c3ea69 | 473 | py | Python | app/sso/user/models.py | ChristianKreuzberger/django-oauth-sso | b019e2e8232ae141b50b8270e79e0617e24f54bb | [
"MIT"
] | null | null | null | app/sso/user/models.py | ChristianKreuzberger/django-oauth-sso | b019e2e8232ae141b50b8270e79e0617e24f54bb | [
"MIT"
] | null | null | null | app/sso/user/models.py | ChristianKreuzberger/django-oauth-sso | b019e2e8232ae141b50b8270e79e0617e24f54bb | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.translation import ugettext_lazy as _
class User(AbstractUser):
"""
Extends the basic django user model with a longer first and last name
"""
first_name = models.CharField(
_("first name"),
max_length=128,
blank=True
)
last_name = models.CharField(
_("last name"),
max_length=128,
blank=True
)
| 21.5 | 73 | 0.649049 | 332 | 0.701903 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.22833 |
f837af8b513ac4ce60f3ce335c72f8849a0bd813 | 1,710 | py | Python | src/fusanet_utils/features/base.py | fusa-project/fusa-net-utils | b8740c67c0c789889b7abce477c894d77c70a20c | [
"MIT"
] | null | null | null | src/fusanet_utils/features/base.py | fusa-project/fusa-net-utils | b8740c67c0c789889b7abce477c894d77c70a20c | [
"MIT"
] | null | null | null | src/fusanet_utils/features/base.py | fusa-project/fusa-net-utils | b8740c67c0c789889b7abce477c894d77c70a20c | [
"MIT"
] | null | null | null | import logging
from abc import ABC, abstractmethod
from os.path import isfile, splitext
import pathlib
import torch
from .waveform import get_waveform
logger = logging.getLogger(__name__)
class Feature(ABC):
def __init__(self, params):
self.params = params
super().__init__()
@abstractmethod
def compute(self, waveform: torch.Tensor):
pass
def create_path(self, waveform_path: pathlib.Path) -> pathlib.Path:
feature_name = type(self).__name__
file_name = waveform_path.stem + "_" + feature_name + ".pt"
for k, part in enumerate(waveform_path.parts[::-1]):
if part == 'datasets':
break
pre_path = pathlib.Path(*waveform_path.parts[:-(k+1)])
pos_path = pathlib.Path(*waveform_path.parts[-k:-1])
(pre_path / "features" / pos_path).mkdir(parents=True, exist_ok=True)
return pre_path / "features" / pos_path / file_name
def write_to_disk(self, waveform_path: str, global_normalizer = None) -> None:
feature_path = self.create_path(pathlib.Path(waveform_path))
if not feature_path.exists() or self.params["overwrite"]:
logger.debug(f"Writing features for {waveform_path}")
waveform = get_waveform(waveform_path, self.params, global_normalizer)
feature = self.compute(waveform)
torch.save(feature, feature_path)
def read_from_disk(self, waveform_path: str) -> torch.Tensor:
feature_path = self.create_path(pathlib.Path(waveform_path))
if feature_path.exists():
return torch.load(feature_path)
else:
raise FileNotFoundError("Feature file not found")
| 35.625 | 83 | 0.657895 | 1,516 | 0.88655 | 0 | 0 | 75 | 0.04386 | 0 | 0 | 112 | 0.065497 |
f837f76576c4f735618a20e51681085aeb556de5 | 251 | py | Python | led/hhh/rta2.py | tushar-tdm/osvi | 813499162b7f487ccafa8c08d3e5bf6d05b074de | [
"CC-BY-3.0"
] | 3 | 2020-02-21T01:16:26.000Z | 2020-07-12T08:06:11.000Z | led/hhh/rta2.py | tushar-tdm/osvi | 813499162b7f487ccafa8c08d3e5bf6d05b074de | [
"CC-BY-3.0"
] | 6 | 2020-02-11T23:27:43.000Z | 2022-03-11T23:34:39.000Z | led/hhh/rta2.py | tushar-tdm/osvi | 813499162b7f487ccafa8c08d3e5bf6d05b074de | [
"CC-BY-3.0"
] | null | null | null | import os
import sys
import serial
import time
import struct
ser = serial.Serial('/dev/ttyACM0',9600)
led = sys.argv[1]
act = sys.argv[2]
l = str(led)
"""a = str(act)"""
time.sleep(5)
ser.write(l.encode())  # send the LED identifier as bytes (the original struct.pack call was incomplete)
| 14.764706 | 40 | 0.661355 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 61 | 0.243028 |
f838fea76677e89d488005a23aab7f853eac184d | 11,397 | py | Python | app.py | KendraObika/Froggit | 3734d74de6b7febabb6c1645b61e42928203cf63 | [
"MIT"
] | null | null | null | app.py | KendraObika/Froggit | 3734d74de6b7febabb6c1645b61e42928203cf63 | [
"MIT"
] | null | null | null | app.py | KendraObika/Froggit | 3734d74de6b7febabb6c1645b61e42928203cf63 | [
"MIT"
] | null | null | null | """
Primary module for Froggit
This module contains the main controller class for the Froggit application. There
is no need for any additional classes in this module. If you need more classes, 99%
of the time they belong in either the lanes module or the models module. If you are
unsure about where a new class should go, post a question on Piazza.
Kendra Obika kao78
December 20 2020
"""
from consts import *
from game2d import *
from level import *
import introcs
from kivy.logger import Logger
# PRIMARY RULE: Froggit can only access attributes in level.py via getters/setters
# Froggit is NOT allowed to access anything in lanes.py or models.py.
class Froggit(GameApp):
"""
The primary controller class for the Froggit application
This class extends GameApp and implements the various methods necessary for
processing the player inputs and starting/running a game.
Method start begins the application.
Method update either changes the state or updates the Level object
Method draw displays the Level object and any other elements on screen
Because of some of the weird ways that Kivy works, you SHOULD NOT create an
initializer __init__ for this class. Any initialization should be done in
the start method instead. This is only for this class. All other classes
behave normally.
Most of the work handling the game is actually provided in the class Level.
Level should be modeled after subcontrollers.py from lecture, and will have
its own update and draw method.
The primary purpose of this class is managing the game state: when is the
game started, paused, completed, etc. It keeps track of that in a hidden
attribute
Attribute view: The game view, used in drawing (see examples from class)
Invariant: view is an instance of GView and is inherited from GameApp
Attribute input: The user input, used to control the frog and change state
Invariant: input is an instance of GInput and is inherited from GameApp
"""
# HIDDEN ATTRIBUTES
# Attribute _state: The current state of the game (taken from consts.py)
# Invariant: _state is one of STATE_INACTIVE, STATE_LOADING, STATE_PAUSED,
# STATE_ACTIVE, STATE_CONTINUE, or STATE_COMPLETE
#
# Attribute _level: The subcontroller for a level, managing the frog and obstacles
# Invariant: _level is a Level object or None if no level is currently active
#
# Attribute _title: The title of the game
# Invariant: _title is a GLabel, or None if there is no title to display
#
# Attribute _text: A message to display to the player
# Invariant: _text is a GLabel, or None if there is no message to display
# LIST MORE ATTRIBUTES (AND THEIR INVARIANTS) HERE IF NECESSARY
# Attribute _cover: A background underneath text to display to the player
# Invariant: _cover is a GLabel, or None if there is no text to display
# DO NOT MAKE A NEW INITIALIZER!
# THREE MAIN GAMEAPP METHODS
def start(self):
"""
Initializes the application.
This method is distinct from the built-in initializer __init__ (which
you should not override or change). This method is called once the
game is running. You should use it to initialize any game specific
attributes.
This method should make sure that all of the attributes satisfy the
given invariants. When done, it sets the _state to STATE_INACTIVE and
creates both the title (in attribute _title) and a message (in attribute
_text) saying that the user should press a key to play a game.
"""
        # no assert statements needed because this method takes no parameters
        # initialize the game-specific attributes
self._level = None
self._title = None
self._text = None
self._cover = None
self._state = STATE_INACTIVE
#invariants of _state
if self._state == STATE_ACTIVE:
self._text = None
if self._state != STATE_INACTIVE:
self._title = None
#when done, setting to inactive, creating title and message
self._state = STATE_INACTIVE
self._title = GLabel(text="FROGGIT",font_name=ALLOY_FONT,font_size=\
ALLOY_LARGE,x=self.width//2,y=self.height//1.75,linecolor="dark green")
self._text = GLabel(text="PRESS 'S' TO START",font_name=ALLOY_FONT,\
font_size=ALLOY_MEDIUM,x=self.width//2,y=self.height//2.5)
def update(self, dt):
"""
Updates the game objects each frame.
It is the method that does most of the work. It is NOT in charge of
playing the game. That is the purpose of the class Level. The primary
purpose of this game is to determine the current state, and -- if the
game is active -- pass the input to the Level object _level to play the
game.
As part of the assignment, you are allowed to add your own states.
However, at a minimum you must support the following states:
STATE_INACTIVE, STATE_LOADING, STATE_ACTIVE, STATE_PAUSED,
STATE_CONTINUE, and STATE_COMPLETE. Each one of these does its own
thing and might even needs its own helper. We describe these below.
STATE_INACTIVE: This is the state when the application first opens.
It is a paused state, waiting for the player to start the game. It
displays the title and a simple message on the screen. The application
remains in this state so long as the player never presses a key.
STATE_LOADING: This is the state that creates a new level and shows it on
the screen. The application switches to this state if the state was
STATE_INACTIVE in the previous frame, and the player pressed a key.
This state only lasts one animation frame (the amount of time to load
the data from the file) before switching to STATE_ACTIVE. One of the
key things about this state is that it resizes the window to match the
level file.
STATE_ACTIVE: This is a session of normal gameplay. The player can
move the frog towards the exit, and the game will move all obstacles
(cars and logs) about the screen. All of this should be handled inside
of class Level (NOT in this class). Hence the Level class should have
an update() method, just like the subcontroller example in lecture.
STATE_PAUSED: Like STATE_INACTIVE, this is a paused state. However,
the game is still visible on the screen.
STATE_CONTINUE: This state restores the frog after it was either killed
or reached safety. The application switches to this state if the state
was STATE_PAUSED in the previous frame, and the player pressed a key.
This state only lasts one animation frame before switching to STATE_ACTIVE.
STATE_COMPLETE: The wave is over (all lives are lost or all frogs are safe),
and is either won or lost.
You are allowed to add more states if you wish. Should you do so, you should
describe them here.
Parameter dt: The time in seconds since last update
Precondition: dt is a number (int or float)
"""
if self._state == STATE_INACTIVE and self.input.is_key_down('s'):
self._title = None
self._text = None
self._state = STATE_LOADING
if self._state == STATE_LOADING:
dic = self.load_json(DEFAULT_LEVEL)
hitdic = self.load_json(OBJECT_DATA)
self._level = Level(dic, hitdic)
self.width = self._level.getWidth()
self.height = self._level.getHeight()
self._state = STATE_ACTIVE
if self._state == STATE_ACTIVE and not self.isPaused():
self._level.update(dt, self.input)
if self._state == STATE_PAUSED:
if self._level.noLives():
self.youLoseText(self._level)
self._state = STATE_COMPLETE
elif self._level.pauseGame():
self.pausedTexts(self._level)
if self.input.is_key_down('c'):
self._state = STATE_CONTINUE
elif self._level.endGame():
self.youWinText(self._level)
self._state = STATE_COMPLETE
if self._state == STATE_CONTINUE:
self._level.resetFrog()
self._state = STATE_ACTIVE
def draw(self):
"""
Draws the game objects to the view.
Every single thing you want to draw in this game is a GObject. To draw a
GObject g, simply use the method g.draw(self.view). It is that easy!
Many of the GObjects (such as the cars, logs, and exits) are attributes
in either Level or Lane. In order to draw them, you either need to add
getters for these attributes or you need to add a draw method to
those two classes. We suggest the latter. See the example subcontroller.py
from the lesson videos.
"""
        if self._text is not None and self._title is not None:
self._title.draw(self.view)
self._text.draw(self.view)
if self._state != STATE_INACTIVE:
self._level.draw(self.view)
if self._state == STATE_PAUSED or self._state == STATE_COMPLETE:
self._cover.draw(self.view)
self._text.draw(self.view)
# HELPER METHODS FOR THE STATES GO HERE
    def isPaused(self):
        """
        Switches the state to STATE_PAUSED when the level requests a pause
        or has ended, and reports whether that happened.
        Returns True when the state was switched to STATE_PAUSED; False otherwise.
        """
        if self._level.pauseGame() or self._level.endGame():
            self._state = STATE_PAUSED
            return True
        return False
def pausedTexts(self, level):
"""
Initializes the messages on the pause screen
Parameter level: Represents a single level of the game
Precondition: level is a Level object
"""
self._text = GLabel(height=GRID_SIZE,x= level.getCenter().x,\
y = level.getCenter().y, text="PRESS 'C' TO CONTINUE",\
font_name=ALLOY_FONT,font_size=ALLOY_SMALL, linecolor = "white")
self._cover = GLabel(width=self.width,height=GRID_SIZE,x=self._text.x,\
y = self._text.y, fillcolor="dark green")
def youLoseText(self, level):
"""
Initializes the messages on the you lose screen
Parameter level: Represents a single level of the game
Precondition: level is a Level object
"""
self._text = GLabel(height=GRID_SIZE,x= level.getCenter().x,\
y = level.getCenter().y, text="YOU LOSE",\
font_name=ALLOY_FONT,font_size=ALLOY_SMALL, linecolor = "white")
self._cover = GLabel(width=self.width,height=GRID_SIZE,x=self._text.x,\
y = self._text.y, fillcolor="dark green")
def youWinText(self, level):
"""
Initializes the messages on the you win screen
Parameter level: Represents a single level of the game
Precondition: level is a Level object
"""
self._text = GLabel(height=GRID_SIZE,x= level.getCenter().x,\
y = level.getCenter().y, text="YOU WIN!",\
font_name=ALLOY_FONT,font_size=ALLOY_SMALL, linecolor = "white")
self._cover = GLabel(width=self.width,height=GRID_SIZE,x=self._text.x,\
y = self._text.y, fillcolor="dark green")
| 41.443636 | 87 | 0.668246 | 10,737 | 0.94209 | 0 | 0 | 0 | 0 | 0 | 0 | 7,690 | 0.674739 |
f83913edc4b000ba4986205d63145c52269b4655 | 1,252 | py | Python | utils.py | rsoorajs/deecubes-telegram-bot | 223710eb117c1333fefcff22bcf473e89e41c769 | [
"MIT"
] | 2 | 2017-10-08T19:02:01.000Z | 2020-05-16T21:55:18.000Z | utils.py | rsoorajs/deecubes-telegram-bot | 223710eb117c1333fefcff22bcf473e89e41c769 | [
"MIT"
] | null | null | null | utils.py | rsoorajs/deecubes-telegram-bot | 223710eb117c1333fefcff22bcf473e89e41c769 | [
"MIT"
] | 3 | 2018-08-05T18:36:58.000Z | 2020-05-16T21:55:19.000Z | import logging
from functools import wraps
from PIL import Image, ImageFont, ImageDraw
from config import LIST_ALLOWED_USERS
def restricted(func):
@wraps(func)
def wrapped(_, bot, update, *args, **kwargs):
user_id = update.effective_user.id
if LIST_ALLOWED_USERS:
if user_id not in LIST_ALLOWED_USERS:
logging.error("Unauthorized access denied for {}.".format(user_id))
return
return func(_, bot, update, *args, **kwargs)
return wrapped
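# Usage sketch (illustrative only; `_DemoHandlers` is a hypothetical class,
# assuming python-telegram-bot style callbacks that match the
# (self, bot, update) signature wrapped above):
class _DemoHandlers:
  @restricted
  def start(self, bot, update):
    update.message.reply_text('Welcome!')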
def text2jpg(text, fullpath, color="#000", bgcolor="#FFF"):
font = ImageFont.load_default()
leftpadding = 3
rightpadding = 3
lines = text.split('\n')
char_width, line_height = font.getsize(text)
# TODO: Workaround. getsize is giving wrong width, so fix it to an approx number for now
char_width = 6
img_height = line_height * (len(lines) + 1)
char_count = 0
for line in lines:
count = len(line)
if count > char_count:
char_count = count
width = leftpadding + (char_width * char_count) + rightpadding
img = Image.new("RGBA", (width, img_height), bgcolor)
draw = ImageDraw.Draw(img)
y = 0
for line in lines:
if line:
draw.text((leftpadding, y), line, color, font=font)
y += line_height
img.save(fullpath)
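# Example call (illustrative): render two lines of text to a JPEG on disk:
#   text2jpg('hello\nworld', '/tmp/hello.jpg', color='#000', bgcolor='#FFF')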
| 25.04 | 90 | 0.683706 | 0 | 0 | 0 | 0 | 310 | 0.247604 | 0 | 0 | 146 | 0.116613 |
f83abdd41d8480514557524b539c95519e6c83ef | 152 | py | Python | __init__.py | cmt-qo/cm-flakes | c11f37b50b088cf5c876ef8a6161b7d8d775e99b | [
"MIT"
] | 6 | 2019-11-04T07:04:24.000Z | 2021-02-10T21:35:00.000Z | __init__.py | cmt-qo/cm-flakes | c11f37b50b088cf5c876ef8a6161b7d8d775e99b | [
"MIT"
] | null | null | null | __init__.py | cmt-qo/cm-flakes | c11f37b50b088cf5c876ef8a6161b7d8d775e99b | [
"MIT"
] | 2 | 2020-08-07T09:29:41.000Z | 2021-02-10T21:35:05.000Z | from .Camera import *
from .GloveBox import *
from .Microscope import *
from .Stage import *
from .UserInterface import *
from .NeuralNetwork import * | 25.333333 | 28 | 0.756579 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f83ba25f5a20e6c46fa842756d48009b7d4b11f6 | 4,444 | py | Python | neural_semigroups/mace4_semigroups_dataset.py | zarebulic/neural-semigroup-experiment | c554acb17d264ba810009f8b86c35ee9f8c4d1f4 | [
"Apache-2.0"
] | 6 | 2020-04-05T23:24:54.000Z | 2021-11-15T11:17:09.000Z | neural_semigroups/mace4_semigroups_dataset.py | zarebulic/neural-semigroup-experiment | c554acb17d264ba810009f8b86c35ee9f8c4d1f4 | [
"Apache-2.0"
] | 23 | 2020-03-15T09:09:54.000Z | 2022-03-29T22:32:23.000Z | neural_semigroups/mace4_semigroups_dataset.py | zarebulic/neural-semigroup-experiment | c554acb17d264ba810009f8b86c35ee9f8c4d1f4 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2019-2021 Boris Shminke
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import sqlite3
from typing import Callable, Optional
import torch
from tqdm import tqdm
from neural_semigroups.semigroups_dataset import SemigroupsDataset
from neural_semigroups.utils import connect_to_db
class Mace4Semigroups(SemigroupsDataset):
"""
a ``torch.util.data.Dataset`` wrapper for the data of ``mace4`` output
stored in a ``sqlite`` database
>>> import shutil
>>> from neural_semigroups.constants import TEST_TEMP_DATA
>>> import os
>>> from neural_semigroups.generate_data_with_mace4 import (
... generate_data_with_mace4)
>>> shutil.rmtree(TEST_TEMP_DATA, ignore_errors=True)
>>> os.mkdir(TEST_TEMP_DATA)
>>> database = os.path.join(TEST_TEMP_DATA,"test.db")
>>> torch.manual_seed(42) # doctest: +ELLIPSIS
<torch...
>>> generate_data_with_mace4([
... "--max_dim", "2",
... "--min_dim", "2",
... "--number_of_tasks", "1",
... "--database_name", database])
>>> mace4_semigroups = Mace4Semigroups(
... root=database,
... cardinality=2,
... transform=lambda x: x
... )
>>> mace4_semigroups[0][0]
tensor([[0, 0],
[0, 0]])
>>> mace4_semigroups.get_table_from_output("not a mace4 output file")
Traceback (most recent call last):
...
ValueError: wrong mace4 output file format!
"""
_where_clause = "WHERE output LIKE '%Process % exit (max_models)%'"
def __init__(
self,
cardinality: int,
root: str,
transform: Optional[Callable] = None,
):
"""
:param root: a full path to an ``sqlite`` database file
which has a table ``mace_output`` with a string column ``output``
:param cardinality: the cardinality of semigroups
:param transform: a function/transform that takes a Cayley table
and returns a transformed version.
"""
super().__init__(root, cardinality, transform)
self.load_data_from_mace_output()
def get_table_from_output(self, output: str) -> torch.Tensor:
"""
gets a Cayley table of a magma from the output of ``mace4``
:param output: output of ``mace4``
:returns: a Cayley table
"""
search_result = re.search(
r".*function\(\*\(_,_\), \[(.*)]\)\..*", output, re.DOTALL
)
if search_result is None:
raise ValueError("wrong mace4 output file format!")
input_lines = search_result.groups()[0]
# pylint: disable=not-callable
cayley_table = torch.tensor(
list(
map(
int,
input_lines.translate(
str.maketrans("", "", " \t\n])")
).split(","),
)
)
).view(self.cardinality, self.cardinality)
return cayley_table
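    # Example (cf. the doctest in the class docstring): an output fragment
    # such as "function(*(_,_), [0,0,0,0])." parses, at cardinality 2, to
    # tensor([[0, 0], [0, 0]]).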
def get_additional_info(self, cursor: sqlite3.Cursor) -> int:
"""
gets some info from an SQLite database with ``mace4`` outputs
:param cursor: an SQLite database cursor
:returns: a total number of rows in a table, a magma dimension
"""
cursor.execute(
f"SELECT COUNT(*) FROM mace_output {self._where_clause}"
)
row_count = cursor.fetchone()[0]
return row_count
def load_data_from_mace_output(self) -> None:
"""loads data generated by ``mace4`` from an ``sqlite`` database"""
cursor = connect_to_db(self.root)
row_count = self.get_additional_info(cursor)
cursor.execute(f"SELECT output FROM mace_output {self._where_clause}")
features = []
for _ in tqdm(range(row_count)):
output = cursor.fetchone()[0]
features.append(self.get_table_from_output(output))
self.tensors = (torch.stack(features),)
| 34.71875 | 78 | 0.617912 | 3,627 | 0.816157 | 0 | 0 | 0 | 0 | 0 | 0 | 2,727 | 0.613636 |
f83bb94361c259b35e4ff208fa028f2496100f01 | 7,501 | py | Python | samples/data_inspect_utils.py | shachargluska/centerpose | 01c2c8bfa9d3ee91807f2ffdcc48728d104265bd | [
"MIT"
] | 245 | 2019-11-29T02:55:25.000Z | 2022-03-30T07:30:18.000Z | samples/data_inspect_utils.py | shachargluska/centerpose | 01c2c8bfa9d3ee91807f2ffdcc48728d104265bd | [
"MIT"
] | 24 | 2019-11-29T10:05:00.000Z | 2022-03-30T07:16:06.000Z | samples/data_inspect_utils.py | FishLiuabc/centerpose | 555d753cd82693476f91f78c53aa4147f5a83015 | [
"MIT"
] | 45 | 2019-11-29T05:12:02.000Z | 2022-03-21T02:20:36.000Z | from __future__ import absolute_import, division, print_function
import cv2
import random
import numpy as np
import colorsys
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.patches import Polygon
from skimage.measure import find_contours
def log(text, array=None):
"""Prints a text message. And, optionally, if a Numpy array is provided it
prints it's shape, min, and max values.
"""
if array is not None:
text = text.ljust(25)
text += ("shape: {:20} ".format(str(array.shape)))
if array.size:
text += ("min: {:10.5f} max: {:10.5f}".format(array.min(),array.max()))
else:
text += ("min: {:10} max: {:10}".format("",""))
text += " {}".format(array.dtype)
print(text)
def random_colors(N, bright=True):
"""
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
"""
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.shuffle(colors)
return colors
def rotate_bound(image, angle):
# grab the dimensions of the image and then determine the
# centre
(h, w) = image.shape[:2]
(cX, cY) = (w // 2, h // 2)
    # grab the rotation matrix (OpenCV treats positive angles as
    # counter-clockwise rotation), then grab the sine and cosine
# (i.e., the rotation components of the matrix)
M = cv2.getRotationMatrix2D((cX, cY), angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
# compute the new bounding dimensions of the image
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# adjust the rotation matrix to take into account translation
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
# perform the actual rotation and return the image
return cv2.warpAffine(image, M, (nW, nH))
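# Example (illustrative): rotated = rotate_bound(image, 30) returns the image
# rotated by 30 degrees on an enlarged (nW x nH) canvas, so no corners are cropped.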
def apply_mask(image, mask, color, alpha=0.5):
"""Apply the given mask to the image.
"""
for c in range(3):
image[:, :, c] = np.where(mask == 1,
image[:, :, c] *
(1 - alpha) + alpha * color[c] * 255,
image[:, :, c])
return image
def apply_keypoint(image, keypoint, num_joints=17):
image = image.astype(np.uint8)
    # joint-index pairs for the 17-keypoint COCO person skeleton
    edges = [[0, 1], [0, 2], [1, 3], [2, 4],
[3, 5], [4, 6], [5, 6],
[5, 7], [7, 9], [6, 8], [8, 10],
[5, 11], [6, 12], [11, 12],
[11, 13], [13, 15], [12, 14], [14, 16]]
for j in range(num_joints):
if keypoint[j][2]>0.:
cv2.circle(image,
(keypoint[j, 0], keypoint[j, 1]), 3, (255,255,255), 2)
stickwidth = 2
for j, e in enumerate(edges):
if keypoint[e[0],2] > 0. and keypoint[e[1],2] > 0.:
centerA = keypoint[e[0],:2]
centerB = keypoint[e[1],:2]
cv2.line(image,(centerA[0], centerA[1]),(centerB[0], centerB[1]),(255, 255,255),2)
return image
def display_instances(image, boxes, masks, keypoints, class_id=1, class_name='person',
scores=None, title="",
figsize=(16, 16), ax=None,
show_mask=True, show_bbox=True,
show_keypoint=True,
colors=None, captions=None):
"""
    boxes: [num_instances, (y1, x1, y2, x2)] in image coordinates.
    masks: [num_instances, height, width]
class_ids: 1 for person
class_name: class name of the dataset
scores: (optional) confidence scores for each box
title: (optional) Figure title
show_mask, show_bbox: To show masks and bounding boxes or not
figsize: (optional) the size of the image
colors: (optional) An array or colors to use with each object
captions: (optional) A list of strings to use as captions for each object
"""
# Number of instances
N = boxes.shape[0]
if not N:
print("\n*** No instances to display *** \n")
else:
assert boxes.shape[0] == masks.shape[0]
# If no axis is passed, create one and automatically call show()
auto_show = False
if not ax:
_, ax = plt.subplots(1, figsize=figsize)
auto_show = True
# Generate random colors
colors = colors or random_colors(N)
# Show area outside image boundaries.
height, width = image.shape[:2]
ax.set_ylim(height + 10, -10)
ax.set_xlim(-10, width + 10)
ax.axis('off')
ax.set_title(title)
masked_image = image.astype(np.uint32).copy()
for i in range(N):
color = colors[i]
# Bounding box
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in image cropping.
continue
y1, x1, y2, x2 = boxes[i]
if show_bbox:
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=0.7, linestyle="dashed",
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Label
if not captions:
score = scores[i] if scores is not None else None
label = class_name
caption = "{} {:.3f}".format(label, score) if score else label
else:
caption = captions[i]
ax.text(x1, y1 + 8, caption,
color='w', size=11, backgroundcolor="none")
# Mask
mask = masks[i, :, :]
keypoint = keypoints[i]
if show_mask:
masked_image = apply_mask(masked_image, mask, color)
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
padded_mask = np.zeros(
(mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
# Subtract the padding and flip (y, x) to (x, y)
verts = np.fliplr(verts) - 1
p = Polygon(verts, facecolor="none", edgecolor=color)
ax.add_patch(p)
if show_keypoint:
masked_image = apply_keypoint(masked_image, keypoint)
ax.imshow(masked_image.astype(np.uint8))
if auto_show:
plt.show()
def extract_bboxes(mask):
"""Compute bounding boxes from masks.
mask: [num_instances, height, width]. Mask pixels are either 1 or 0.
Returns: bbox array [num_instances, (y1, x1, y2, x2)].
"""
boxes = np.zeros([mask.shape[0], 4], dtype=np.int32)
for i in range(mask.shape[0]):
m = mask[i, :, :]
# Bounding box.
horizontal_indicies = np.where(np.any(m, axis=0))[0]
vertical_indicies = np.where(np.any(m, axis=1))[0]
if horizontal_indicies.shape[0]:
x1, x2 = horizontal_indicies[[0, -1]]
y1, y2 = vertical_indicies[[0, -1]]
# x2 and y2 should not be part of the box. Increment by 1.
x2 += 1
y2 += 1
else:
# No mask for this instance. Might happen due to
# resizing or cropping. Set bbox to zeros
x1, x2, y1, y2 = 0, 0, 0, 0
boxes[i] = np.array([y1, x1, y2, x2])
return boxes.astype(np.int32)
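# Example (illustrative): stacking binary masks of shape (height, width) gives
# a (num_instances, height, width) array, from which per-instance boxes follow:
#   boxes = extract_bboxes(np.stack([mask_a, mask_b]))
# Each row is (y1, x1, y2, x2), with x2/y2 one past the mask extent.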
| 35.382075 | 94 | 0.55046 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,108 | 0.281029 |
f83bc822a6f47feb415380dd8f541756419c1e6c | 265 | py | Python | tests/conftest.py | sparkythehuman/sms-service--send-message | 8f095ba181f1d42df3968fe34d5e20f30851e021 | [
"MIT"
] | null | null | null | tests/conftest.py | sparkythehuman/sms-service--send-message | 8f095ba181f1d42df3968fe34d5e20f30851e021 | [
"MIT"
] | null | null | null | tests/conftest.py | sparkythehuman/sms-service--send-message | 8f095ba181f1d42df3968fe34d5e20f30851e021 | [
"MIT"
] | null | null | null | import pytest
@pytest.fixture(autouse=True)
def set_up(monkeypatch):
monkeypatch.setenv('TABLE_NAME', 'test-table')
monkeypatch.setenv('TWILIO_ACCOUNT_SID', 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
monkeypatch.setenv('TWILIO_AUTH_TOKEN', 'my_auth_token') | 33.125 | 82 | 0.784906 | 0 | 0 | 0 | 0 | 249 | 0.939623 | 0 | 0 | 114 | 0.430189 |
f83c3a927ff9df79fe83f0ce7fdfd551b1c6f921 | 7,741 | py | Python | dapy/filters/particle.py | hassaniqbal209/data-assimilation | ec52d655395dbed547edf4b4f3df29f017633f1b | [
"MIT"
] | 11 | 2020-07-29T07:46:39.000Z | 2022-03-17T01:28:07.000Z | dapy/filters/particle.py | hassaniqbal209/data-assimilation | ec52d655395dbed547edf4b4f3df29f017633f1b | [
"MIT"
] | 1 | 2020-07-14T11:49:17.000Z | 2020-07-29T07:43:22.000Z | dapy/filters/particle.py | hassaniqbal209/data-assimilation | ec52d655395dbed547edf4b4f3df29f017633f1b | [
"MIT"
] | 10 | 2020-07-14T11:34:24.000Z | 2022-03-07T09:08:12.000Z | """Particle filters for inference in state space models."""
import abc
from typing import Tuple, Dict, Callable, Any, Optional
import numpy as np
from numpy.random import Generator
from scipy.special import logsumexp
from scipy.sparse import csr_matrix
from dapy.filters.base import AbstractEnsembleFilter
from dapy.models.base import AbstractModel
import dapy.ot as optimal_transport
class AbstractParticleFilter(AbstractEnsembleFilter):
"""Abstract base class for particle filters."""
def _calculate_weights(
self,
model: AbstractModel,
states: np.ndarray,
observation: np.ndarray,
time_index: int,
) -> np.ndarray:
"""Calculate importance weights for particles given observations."""
log_weights = model.log_density_observation_given_state(
observation, states, time_index
)
log_sum_weights = logsumexp(log_weights)
return np.exp(log_weights - log_sum_weights)
@abc.abstractmethod
def _assimilation_transform(
self, rng: Generator, state_particles: np.ndarray, weights: np.ndarray
) -> np.ndarray:
pass
def _assimilation_update(
self,
model: AbstractModel,
rng: Generator,
state_particles: np.ndarray,
observation: np.ndarray,
time_index: int,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
weights = self._calculate_weights(
model, state_particles, observation, time_index
)
state_mean = (weights[:, None] * state_particles).sum(0)
state_std = (
np.sum(weights[:, None] * (state_particles - state_mean) ** 2, axis=0)
** 0.5
)
state_particles = self._assimilation_transform(rng, state_particles, weights)
return state_particles, state_mean, state_std
class BootstrapParticleFilter(AbstractParticleFilter):
"""Bootstrap particle filter (sequential importance resampling).
The filtering distribution at each observation time index is approximated by
alternating propagating an ensemble of state particles forward through time under
the model dynamics and resampling according to weights calculated from the
conditional probability densities of the observations at the current time index
given the state particle values. Here the resampling step uses multinomial
resampling.
References:
1. Gordon, N.J.; Salmond, D.J.; Smith, A.F.M. (1993). Novel approach to
nonlinear / non-Gaussian Bayesian state estimation. Radar and Signal
Processing, IEE Proceedings F. 140 (2): 107--113.
2. Del Moral, Pierre (1996). Non Linear Filtering: Interacting Particle
Solution. Markov Processes and Related Fields. 2 (4): 555--580.
"""
def _assimilation_transform(self, rng, state_particles, weights):
"""Perform multinomial particle resampling given computed weights."""
num_particle = state_particles.shape[0]
resampled_indices = rng.choice(num_particle, num_particle, True, weights)
return state_particles[resampled_indices]
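# Standalone sketch of the multinomial resampling step above (illustrative
# numbers only, not part of the filter API):
#   rng = np.random.default_rng(0)
#   particles = rng.normal(size=(4, 2))
#   weights = np.array([0.7, 0.1, 0.1, 0.1])
#   resampled = particles[rng.choice(4, 4, True, weights)]
# High-weight particles tend to be duplicated; low-weight ones tend to vanish.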
class EnsembleTransformParticleFilter(AbstractParticleFilter):
"""Ensemble transform particle filter.
The filtering distribution at each observation time index is approximated by
alternating propagating an ensemble of state particles forward through time under
the model dynamics and linearly transforming the ensemble with an optimal transport
map computed to transform a uniform empirical distribution at the particle locations
to an empirical distribution at the particle locations weighted according to the
conditional probability densities of the observations at the current time index
given the state particle values [1].
References:
1. Reich, S. (2013). A nonparametric ensemble transform method for
Bayesian inference. SIAM Journal on Scientific Computing, 35(4),
A2013-A2024.
"""
def __init__(
self,
optimal_transport_solver: Callable[
[np.ndarray, np.ndarray, np.ndarray], np.ndarray
] = optimal_transport.solve_optimal_transport_exact,
optimal_transport_solver_kwargs: Optional[Dict[str, Any]] = None,
transport_cost: Callable[
[np.ndarray, np.ndarray], np.ndarray
] = optimal_transport.pairwise_euclidean_distance,
weight_threshold: float = 1e-8,
use_sparse_matrix_multiply: bool = False,
):
"""
Args:
optimal_transport_solver: Optimal transport solver function with signature
transport_matrix = optimal_transport_solver(
source_dist, target_dist, cost_matrix,
**optimal_transport_solver_kwargs)
where `source_dist` and `target_dist` are the source and target
distribution weights respectively as 1D arrays, `cost_matrix` is a 2D
array of the transport costs for each particle pair.
optimal_transport_solver_kwargs: Any additional keyword parameters values
for the optimal transport solver.
transport_cost: Function calculating transport cost matrix with signature
cost_matrix = transport_cost(source_particles, target_particles)
where `source_particles` are the particles values of the source and
target empirical distributions respecitively.
weight_threshold: Threshold below which to set any particle weights to zero
prior to solving the optimal transport problem. Using a small non-zero
value can both improve the numerical stability of the optimal transport
solves, with problems with many small weights sometimes failing to
convergence, and also improve performance as some solvers (including)
the default network simplex based algorithm) are able to exploit
sparsity in the source / target distributions.
use_sparse_matrix_multiply: Whether to conver the optimal transport based
transform matrix used in the assimilation update to a sparse CSR format
before multiplying by the state particle ensemble matrix. This may
improve performance when the computed transport plan is sparse and the
number of particles is large.
"""
self.optimal_transport_solver = optimal_transport_solver
self.optimal_transport_solver_kwargs = (
{}
if optimal_transport_solver_kwargs is None
else optimal_transport_solver_kwargs
)
self.transport_cost = transport_cost
self.weight_threshold = weight_threshold
self.use_sparse_matrix_multiply = use_sparse_matrix_multiply
def _assimilation_transform(self, rng, state_particles, weights):
"""Solve optimal transport problem and transform ensemble."""
num_particle = state_particles.shape[0]
source_dist = np.ones(num_particle) / num_particle
target_dist = weights
if self.weight_threshold > 0:
target_dist[target_dist < self.weight_threshold] = 0
target_dist /= target_dist.sum()
cost_matrix = self.transport_cost(state_particles, state_particles)
transform_matrix = num_particle * self.optimal_transport_solver(
source_dist,
target_dist,
cost_matrix,
**self.optimal_transport_solver_kwargs
)
if self.use_sparse_matrix_multiply:
transform_matrix = csr_matrix(transform_matrix)
return transform_matrix @ state_particles
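# In matrix form the update above is X_new = (N * T) @ X: with uniform source
# weights 1/N, each row of the transport plan T sums to 1/N, so each row of
# N * T sums to one and every transformed particle is a convex combination of
# the current ensemble (cf. Reich, 2013).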
| 44.745665 | 88 | 0.689058 | 7,346 | 0.948973 | 0 | 0 | 165 | 0.021315 | 0 | 0 | 3,951 | 0.510399 |
f83d223baea30c7408f539bf887906161d4b99ea | 1,477 | py | Python | pokemon.py | bran-almeida/Pokemon_Game | 061c9e1b53d8cbaa7366634535288bb2868d6885 | [
"MIT"
] | null | null | null | pokemon.py | bran-almeida/Pokemon_Game | 061c9e1b53d8cbaa7366634535288bb2868d6885 | [
"MIT"
] | null | null | null | pokemon.py | bran-almeida/Pokemon_Game | 061c9e1b53d8cbaa7366634535288bb2868d6885 | [
"MIT"
] | null | null | null | import random
class Pokemon:
    tipo = "Normal"  # default type so __str__ works on the base class; subclasses override it
def __init__(self, especie, level=None, nome=None):
self.especie = especie
if nome:
self.nome = nome
else:
self.nome = especie
if level:
self.level = level
else:
self.level = random.randint(1,100)
self.ataque = self.level * 5
self.vida = self.level * 10
def __str__(self):
return f"Especie: {self.especie} | Level: {self.level} | Tipo: {self.tipo}"
def atacar(self, alvo):
ataque_efetivo = int((self.ataque * random.random() * 1.3))
alvo.vida -= ataque_efetivo
print(f"{alvo.especie} perdeu {ataque_efetivo} pontos de vida")
if alvo.vida <= 0:
print(f"{alvo.especie}, foi derrotado.")
return True
else:
return False
class PokemonEletrico(Pokemon):
tipo = "Elétrico"
def atacar(self, alvo):
print(f"{self.especie} lançou um ataque elétrico em {alvo.especie}")
return super().atacar(alvo)
class PokemonFogo(Pokemon):
tipo = "Fogo"
def atacar(self, alvo):
print(f"{self.especie} lançou um ataque de fogo em {alvo.especie}")
return super().atacar(alvo)
class PokemonAgua(Pokemon):
tipo = "Agua"
def atacar(self, alvo):
print(f"{self.especie} lançou um ataque de agua em {alvo.especie}")
return super().atacar(alvo) | 27.867925 | 83 | 0.564658 | 1,462 | 0.986505 | 0 | 0 | 0 | 0 | 0 | 0 | 365 | 0.246289 |
f83da86dbe71993fb962e0b2187a7e3ca515bae8 | 2,254 | py | Python | recipes/Python/577563_Vectorize_Operation/recipe-577563.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/577563_Vectorize_Operation/recipe-577563.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/577563_Vectorize_Operation/recipe-577563.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | """
Copyright 2011 Shao-Chuan Wang <shaochuan.wang AT gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import operator
from itertools import imap, repeat
import functools
iterable = lambda obj: isinstance(obj, basestring) or hasattr(obj, '__iter__')
def vector_op(op, x, y):
if iterable(x) and iterable(y):
return type(x)(imap(op, x, y))
if not iterable(x):
return type(y)(imap(op, repeat(x), y))
if not iterable(y):
return type(x)(imap(op, x, repeat(y)))
vector_add = functools.partial(vector_op, operator.add)
vector_sub = functools.partial(vector_op, operator.sub)
vector_mul = functools.partial(vector_op, operator.mul)
vector_div = functools.partial(vector_op, operator.div)
vector_and = functools.partial(vector_op, operator.and_)
vector_or = functools.partial(vector_op, operator.or_)
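# For example (Python 2 semantics, matching the imports above):
#   vector_add((1, 2), (3, 4))  -> (4, 6)
#   vector_mul([1, 2, 3], 2)    -> [2, 4, 6]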
def vector_sum(has_len):
if not has_len:
return has_len
return reduce(vector_add, has_len)
def vector_mean(has_len):
vsum = vector_sum(has_len)
return type(vsum)(float(e)/float(len(has_len)) for e in vsum)
if __name__ == '__main__':
positions = [(1,2,1), (3,4,3), (5,6,3)]
print vector_sum(positions)
print vector_mean(positions)
| 40.25 | 81 | 0.732476 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,175 | 0.521295 |