Dataset schema (one row per source file; ranges are observed min..max):

hexsha                                      string    length 40..40
size                                        int64     5..2.06M
ext                                         string    10 distinct values
lang                                        string    1 distinct value
max_stars_repo_path                         string    length 3..248
max_stars_repo_name                         string    length 5..125
max_stars_repo_head_hexsha                  string    length 40..78
max_stars_repo_licenses                     sequence  length 1..10
max_stars_count                             int64     1..191k
max_stars_repo_stars_event_min_datetime     string    length 24..24
max_stars_repo_stars_event_max_datetime     string    length 24..24
max_issues_repo_path                        string    length 3..248
max_issues_repo_name                        string    length 5..125
max_issues_repo_head_hexsha                 string    length 40..78
max_issues_repo_licenses                    sequence  length 1..10
max_issues_count                            int64     1..67k
max_issues_repo_issues_event_min_datetime   string    length 24..24
max_issues_repo_issues_event_max_datetime   string    length 24..24
max_forks_repo_path                         string    length 3..248
max_forks_repo_name                         string    length 5..125
max_forks_repo_head_hexsha                  string    length 40..78
max_forks_repo_licenses                     sequence  length 1..10
max_forks_count                             int64     1..105k
max_forks_repo_forks_event_min_datetime     string    length 24..24
max_forks_repo_forks_event_max_datetime     string    length 24..24
content                                     string    length 5..2.06M
avg_line_length                             float64   1..1.02M
max_line_length                             int64     3..1.03M
alphanum_fraction                           float64   0..1
count_classes                               int64     0..1.6M
score_classes                               float64   0..1
count_generators                            int64     0..651k
score_generators                            float64   0..1
count_decorators                            int64     0..990k
score_decorators                            float64   0..1
count_async_functions                       int64     0..235k
score_async_functions                       float64   0..1
count_documentation                         int64     0..1.04M
score_documentation                         float64   0..1

The records below follow this schema.
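This schema matches a Hugging Face datasets-style dump of Python source files. As orientation, a minimal sketch of loading and filtering such a dataset on its score columns, assuming a recent version of the datasets library; the identifier "<org>/<dataset>" is a placeholder, since the dump does not name the dataset:

from datasets import load_dataset

# "<org>/<dataset>" is a placeholder; the dump does not state the dataset's name.
ds = load_dataset("<org>/<dataset>", split="train", streaming=True)

# Keep well-documented files with reasonable line lengths, using the schema's columns.
filtered = ds.filter(
    lambda row: row["score_documentation"] > 0.3 and row["max_line_length"] <= 120
)

for row in filtered.take(3):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])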
hexsha: 0a61c9cfc48e56723e2d98bba70acd01045f443c
size: 1,357 bytes | ext: py | lang: Python
max_stars:  path=cv_recommender/account/urls.py | repo=hhhameem/CV-Recommender | head=b85d53934f0d888835ab8201be388d7d69f0693d | licenses=["MIT"] | count=1 | events 2021-09-14T17:40:17.000Z .. 2021-09-14T17:40:17.000Z
max_issues: path=cv_recommender/account/urls.py | repo=mjohra/Cv-Recommender-Python-Django | head=d231092f7bd989b513210dd6031fb23e28bd5dfe | licenses=["MIT"] | count=1 | events 2021-03-31T17:45:15.000Z .. 2021-03-31T17:45:15.000Z
max_forks:  path=cv_recommender/account/urls.py | repo=mjohra/Cv-Recommender-Python-Django | head=d231092f7bd989b513210dd6031fb23e28bd5dfe | licenses=["MIT"] | count=1 | events 2021-03-31T16:58:50.000Z .. 2021-03-31T16:58:50.000Z
content:

from django.urls import path
from django.contrib.auth import views as auth_views

from . import views

urlpatterns = [
    path('register/', views.register, name='register'),
    path('login/', views.userlogin, name='login'),
    path('logout/', views.userlogout, name='logout'),
    path('password_change/', auth_views.PasswordChangeView.as_view(), name='password_change'),
    path('password_change/done/', auth_views.PasswordChangeDoneView.as_view(), name='password_change_done'),
    path('password_reset/', auth_views.PasswordResetView.as_view(), name='password_reset'),
    path('password_reset/done/', auth_views.PasswordResetDoneView.as_view(), name='password_reset_done'),
    path('reset/<uidb64>/<token>/', auth_views.PasswordResetConfirmView.as_view(), name='password_reset_confirm'),
    path('reset/done/', auth_views.PasswordResetCompleteView.as_view(), name='password_reset_complete'),
    path('applicantdashboard/', views.applicantdashboard, name='applicantdashboard'),
    path('recruiterdashboard/', views.recruiterdashboard, name='recruiterdashboard'),
    path('applicantdashboard/profile-edit/', views.applicantedit, name='editapplicantprofile'),
    path('recruiterdashboard/profile-edit/', views.recruiteredit, name='editrecruiterprofile'),
]

stats: avg_line_length=45.233333 | max_line_length=82 | alphanum_fraction=0.709654
counts (count/score): classes=0/0 | generators=0/0 | decorators=0/0 | async_functions=0/0 | documentation=490/0.361091
hexsha: 0a62f6ea092332203dc81ebef45e051b04506ddf
size: 12,246 bytes | ext: py | lang: Python
max_stars:  path=Moodle/scripts/edit_conf.py | repo=nii-gakunin-cloud/ocs-templates | head=a2a39bb8824d489488af3c3972007317bb1ef6a2 | licenses=["BSD-3-Clause"] | count=4 | events 2020-05-11T06:30:53.000Z .. 2022-01-26T03:31:55.000Z
max_issues: path=Moodle/scripts/edit_conf.py | repo=nii-gakunin-cloud/ocs-templates | head=a2a39bb8824d489488af3c3972007317bb1ef6a2 | licenses=["BSD-3-Clause"] | count=1 | events 2021-06-17T01:34:27.000Z .. 2021-06-17T01:34:27.000Z
max_forks:  path=Moodle/scripts/edit_conf.py | repo=nii-gakunin-cloud/ocs-templates | head=a2a39bb8824d489488af3c3972007317bb1ef6a2 | licenses=["BSD-3-Clause"] | count=3 | events 2020-09-08T00:57:52.000Z .. 2022-01-18T10:42:22.000Z
content:

from datetime import datetime
from difflib import unified_diff
from logging import basicConfig, getLogger, INFO
import os
from pathlib import Path
import shutil
import subprocess
import sys
import yaml
from urllib.parse import urlparse
from notebook import notebookapp
from IPython.core.display import HTML

WORKDIR = 'edit'
META_YML = '.vcp-meta.yml'
MOODLE_DIR = '/opt/moodle'
CONF_RELATIVE = '/etc'
ENV_INHERIT = ['VAULT_ADDR', 'VAULT_TOKEN', 'PATH', 'REQUESTS_CA_BUNDLE']

logger = getLogger(__name__)
basicConfig(level=INFO, format='%(message)s')


def generate_local_path(host, conf_path, version=None):
    ret = Path(WORKDIR).absolute() / host
    if version is None:
        ret /= datetime.now().strftime("%Y%m%d%H%M%S%f")
    else:
        ret /= version
    ret /= Path(conf_path).name
    return ret


def generate_remote_path(container, conf_path, relative_to=CONF_RELATIVE):
    return (Path(MOODLE_DIR) / container / 'conf' /
            Path(conf_path).relative_to(relative_to))


def get_local_path(host, container, conf_path, version=None):
    if version is None:
        version = find_latest_version(host, container, conf_path)
    return generate_local_path(host, conf_path, version)


def _match_metainfo(parent, container, conf_path):
    p = parent / META_YML
    if not p.exists():
        return False
    with p.open() as f:
        params = yaml.safe_load(f)
    return (
        isinstance(params, dict) and
        'container' in params and
        'container_path' in params and
        params['container'] == container and
        params['container_path'] == conf_path)


def _match_metainfo_by_remote_path(parent, remote_path):
    p = parent / META_YML
    if not p.exists():
        return False
    with p.open() as f:
        params = yaml.safe_load(f)
    return (
        isinstance(params, dict) and
        'remote_path' in params and
        params['remote_path'] == remote_path)


def get_versions(host, *args, match=_match_metainfo):
    pdir = Path(WORKDIR).absolute() / host
    return sorted([
        x.name for x in pdir.glob('*')
        if x.is_dir() and match(x, *args)])


def find_latest_version(host, container, conf_path):
    return get_versions(host, container, conf_path)[-1]


def find_latest_version_by_remote_path(host, remote_path):
    return get_versions(
        host, remote_path, match=_match_metainfo_by_remote_path)[-1]


def download_file(host, remote_path, conf_path=None):
    if conf_path is None:
        conf_path = Path(remote_path).name
    dest = generate_local_path(host, conf_path)
    ansible_arg = f'src={remote_path} dest={dest} flat=yes'
    out = subprocess.check_output(
        ['ansible', host, '-m', 'fetch', '-a', ansible_arg])
    host_1 = out.decode('utf-8').split("\n")[0].split()[0]
    logger.info(f'Downloading {remote_path} from {host_1} to {dest}')
    return dest


def download_conf_file(host, container, conf_path, relative_to=CONF_RELATIVE):
    src = generate_remote_path(container, conf_path, relative_to)
    return download_file(host, src, conf_path)


def create_conf_file(host, conf_path):
    dest = generate_local_path(host, conf_path)
    dest.parent.mkdir(parents=True, exist_ok=True)
    dest.touch()
    return dest


def _to_backup(conf):
    return conf.parent / (conf.name + '.orig')


def make_backup(conf, quiet=False):
    org = _to_backup(conf)
    if not quiet:
        logger.info(f'Copy {conf} {org}')
    shutil.copy2(conf, org)


def make_metainfo(local_path, container, conf_path, relative_to=CONF_RELATIVE):
    params = {
        'container': container,
        'container_path': conf_path,
        'remote_path': str(generate_remote_path(container, conf_path, relative_to)),
        'version': list(local_path.parts)[-2],
    }
    with (local_path.parent / META_YML).open(mode='w') as f:
        yaml.safe_dump(params, stream=f, default_flow_style=False)


def make_simple_metainfo(local_path, remote_path):
    params = {
        'remote_path': remote_path,
        'version': list(local_path.parts)[-2],
    }
    with (local_path.parent / META_YML).open(mode='w') as f:
        yaml.safe_dump(params, stream=f, default_flow_style=False)


def generate_edit_link(conf):
    nb_conf = list(notebookapp.list_running_servers())[0]
    p = (Path(nb_conf['base_url']) / 'edit' /
         conf.absolute().relative_to(nb_conf['notebook_dir']))
    return HTML(f'<a href={p} target="_blank">{p.name}</a>')


def show_diff(path_a, path_b):
    lines_a = []
    lines_b = []
    with path_a.open() as f:
        lines_a = f.readlines()
    with path_b.open() as f:
        lines_b = f.readlines()
    diff = list(unified_diff(
        lines_a, lines_b, fromfile=path_a.name, tofile=path_b.name))
    sys.stdout.writelines(diff)
    return len(diff)


def upload_conf_file(src, host, container, conf_path, relative_to=CONF_RELATIVE):
    dest = generate_remote_path(container, conf_path, relative_to)
    ansible_arg = f'mkdir -p {dest.parent}'
    subprocess.run(['ansible', host, '-a', ansible_arg])
    ansible_arg = f'dest={dest} src={src} backup=yes'
    out = subprocess.check_output(
        ['ansible', host, '-m', 'copy', '-b', '-a', ansible_arg])
    host_1 = out.decode('utf-8').split("\n")[0].split()[0]
    logger.info(f'Uploading {dest} from {src} to {host_1}')


def restart_container(host, container):
    cmd = f'chdir={MOODLE_DIR} docker-compose restart {container}'
    logger.info(f'Restart container {container}')
    subprocess.check_call(['ansible', host, '-a', cmd])


def fetch_conf(host, container, conf_path, relative_to=CONF_RELATIVE,
               create=False):
    local_path = download_conf_file(host, container, conf_path, relative_to)
    make_backup(local_path)
    make_metainfo(local_path, container, conf_path, relative_to)
    return generate_edit_link(local_path)


def create_conf(host, container, conf_path, relative_to=CONF_RELATIVE,
                create=False):
    local_path = create_conf_file(host, conf_path)
    make_backup(local_path, quiet=True)
    make_metainfo(local_path, container, conf_path, relative_to)
    return generate_edit_link(local_path)


def apply_conf(host, container, conf_path, relative_to=CONF_RELATIVE,
               version=None, restart=True):
    diff = show_local_conf_diff(host, container, conf_path, version)
    local_path = get_local_path(host, container, conf_path, version)
    upload_conf_file(local_path, host, container, conf_path, relative_to)
    if restart:
        restart_container(host, container)


def revert_conf(host, container, conf_path, relative_to=CONF_RELATIVE,
                version=None):
    local_path = get_local_path(host, container, conf_path, version)
    backup_path = _to_backup(local_path)
    show_diff(local_path, backup_path)
    upload_conf_file(backup_path, host, container, conf_path, relative_to)
    restart_container(host, container)
    local_path.rename(local_path.parent / (local_path.name + '.revert'))


def show_local_conf(host, container, conf_path, relative_to=CONF_RELATIVE,
                    version=None):
    conf = get_local_path(host, container, conf_path, version)
    with conf.open() as f:
        print(f.read())


def edit_local_conf(host, container, conf_path, relative_to=CONF_RELATIVE,
                    version=None):
    conf = get_local_path(host, container, conf_path, version)
    return generate_edit_link(conf)


def show_local_conf_diff(host, container, conf_path, version=None):
    local_path = get_local_path(host, container, conf_path, version)
    show_diff(_to_backup(local_path), local_path)


def save_shibboleth_part(conf_path):
    with conf_path.open() as f:
        data = yaml.safe_load(f)
    params = {}
    if 'shibboleth' in data['services']:
        params['shibboleth_container'] = yaml.safe_dump(
            data['services']['shibboleth'])
    vars_path = conf_path.parent / 'extra_vars.yml'
    with vars_path.open(mode='w') as f:
        yaml.safe_dump(params, f)
    return vars_path


def init_shibboleth_part(conf_dir, hostname, volumes):
    shibboleth_volumes = ['/sys/fs/cgroup:/sys/fs/cgroup']
    shibboleth_volumes.extend(volumes)
    params = {
        'shibboleth_container': yaml.safe_dump({
            'image': 'harbor.vcloud.nii.ac.jp/vcp/moodle:shibboleth-3.0.4',
            'privileged': True,
            'ports': ['443:443'],
            'volumes': shibboleth_volumes,
            'container_name': 'shibboleth',
            'hostname': hostname,
        }),
    }
    vars_path = conf_dir / 'shibboleth.yml'
    with vars_path.open(mode='w') as f:
        yaml.safe_dump(params, f)
    return vars_path


def setup_shibboleth_part(local_path, **params):
    if params is None or len(params) == 0:
        return save_shibboleth_part(local_path)
    else:
        return init_shibboleth_part(local_path.parent, **params)


def generate_docker_compose(host, conf_path, extra_vars, extra_vars_file):
    template = 'template/docker/compose/docker-compose.yml'
    ansible_arg = f'src={template} dest={conf_path.parent}/'
    env = dict([(x, os.environ[x]) for x in ENV_INHERIT])
    args = ['ansible', host, '-m', 'template', '-c', 'local',
            '-a', ansible_arg]
    for k, v in extra_vars.items():
        args.extend(['-e', f'{k}={v}'])
    for x in extra_vars_file:
        args.extend(['-e', f'@{str(x)}'])
    subprocess.run(args=args, env=env, check=True)


def update_docker_compose(host, extra_vars={}, shibboleth_params={}):
    remote_path = MOODLE_DIR + '/docker-compose.yml'
    local_path = download_file(host, remote_path)
    make_backup(local_path)
    make_simple_metainfo(local_path, remote_path)
    shibboleth_vars = setup_shibboleth_part(local_path, **shibboleth_params)
    generate_docker_compose(host, local_path, extra_vars, [shibboleth_vars])
    show_diff(_to_backup(local_path), local_path)
    return generate_edit_link(local_path)


def append_shibboleth_container(host, moodle_url, volumes=[], extra_vars={}):
    hostname = urlparse(moodle_url).netloc
    return update_docker_compose(
        host, extra_vars,
        shibboleth_params={'hostname': hostname, 'volumes': volumes},
    )


def upload_docker_compose(host, version=None, apply=False):
    remote_path = MOODLE_DIR + '/docker-compose.yml'
    if version is None:
        version = find_latest_version_by_remote_path(host, remote_path)
    local_path = (
        Path(WORKDIR).absolute() / host / version / 'docker-compose.yml')
    ansible_arg = f'dest={remote_path} src={local_path} backup=yes'
    out = subprocess.check_output(
        ['ansible', host, '-m', 'copy', '-b', '-a', ansible_arg])
    host_1 = out.decode('utf-8').split("\n")[0].split()[0]
    logger.info(f'Uploading {remote_path} from {local_path} to {host_1}')
    if not apply:
        return
    ansible_arg = f'chdir=/opt/moodle docker-compose up -d --remove-orphans'
    args = ['ansible', host, '-a', ansible_arg]
    logger.info('Apply the changes in docker-compose.yml.')
    subprocess.run(args=args, check=True)


def generate_proxy_conf(host, conf_path, extra_vars):
    template = 'template/docker/compose/moodle-proxy.conf.template'
    ansible_arg = f'src={template} dest={conf_path.parent}/moodle-proxy.conf'
    env = dict([(x, os.environ[x]) for x in ENV_INHERIT])
    args = [
        'ansible', host, '-m', 'template', '-c', 'local', '-a', ansible_arg]
    for k, v in extra_vars.items():
        args.extend(['-e', f'{k}={v}'])
    subprocess.run(args=args, env=env, check=True)


def update_proxy_conf(host, extra_vars={}):
    conf_path = Path('/usr/local/apache2/conf/moodle-proxy.conf')
    container = 'proxy'
    link = fetch_conf(host, container, str(conf_path), str(conf_path.parent))
    version = find_latest_version(host, container, str(conf_path))
    local_path = generate_local_path(host, conf_path, version)
    generate_proxy_conf(host, local_path, extra_vars)
    show_local_conf_diff(host, container, conf_path, version)
    return link


def apply_proxy_conf(host, version=None, restart=True):
    conf_path = Path('/usr/local/apache2/conf/moodle-proxy.conf')
    apply_conf(host, 'proxy', str(conf_path), str(conf_path.parent),
               version, restart)

stats: avg_line_length=34.59322 | max_line_length=79 | alphanum_fraction=0.682182
counts (count/score): classes=0/0 | generators=0/0 | decorators=0/0 | async_functions=0/0 | documentation=1,823/0.148865
hexsha: 0a63b2be4d7b2116c7bb45a2e0a6f93a06e01c5e
size: 959 bytes | ext: py | lang: Python
max_stars:  path=other/minimum_edit_distance.py | repo=newvicklee/nlp_algorithms | head=d2812398d96d345dcb50970bae6ebbf666ea5380 | licenses=["MIT"] | count=null | events=null
max_issues: path=other/minimum_edit_distance.py | repo=newvicklee/nlp_algorithms | head=d2812398d96d345dcb50970bae6ebbf666ea5380 | licenses=["MIT"] | count=null | events=null
max_forks:  path=other/minimum_edit_distance.py | repo=newvicklee/nlp_algorithms | head=d2812398d96d345dcb50970bae6ebbf666ea5380 | licenses=["MIT"] | count=null | events=null
content:

"""
Minimum edit distance computes the cost it takes to get from one string to another string.
This implementation uses the Levenshtein distance with a cost of 1 for insertions or
deletions and a cost of 2 for substitutions.

Resource: https://en.wikipedia.org/wiki/Edit_distance

For example, getting from "intention" to "execution" is a cost of 8.

minimum_edit_distance("intention", "execution")  # 8
"""

def minimum_edit_distance(source, target):
    n = len(source)
    m = len(target)
    D = {}

    # Initialization
    for i in range(0, n+1):
        D[i, 0] = i
    for j in range(0, m+1):
        D[0, j] = j

    for i in range(1, n+1):
        for j in range(1, m+1):
            if source[i-1] == target[j-1]:
                D[i, j] = D[i-1, j-1]
            else:
                D[i, j] = min(
                    D[i-1, j] + 1,
                    D[i, j-1] + 1,
                    D[i-1, j-1] + 2
                )
    # Bug fix: the distance for the full strings lives at D[n, m]; the
    # original returned D[n-1, m-1], which drops the last row and column.
    return D[n, m]

stats: avg_line_length=28.205882 | max_line_length=129 | alphanum_fraction=0.535975
counts (count/score): classes=0/0 | generators=0/0 | decorators=0/0 | async_functions=0/0 | documentation=423/0.441084
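A few sanity checks for the corrected minimum_edit_distance above (hypothetical inputs, not part of the record):

# One substitution ("b" -> "c") costs 2 under the 1/1/2 cost model.
assert minimum_edit_distance("ab", "ac") == 2
# One insertion costs 1.
assert minimum_edit_distance("cat", "cart") == 1
# The docstring's own example.
assert minimum_edit_distance("intention", "execution") == 8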
hexsha: 0a65447ee836106ce8cee612e580a711dcd38121
size: 7,219 bytes | ext: py | lang: Python
max_stars:  path=varifier/dnadiff.py | repo=iqbal-lab-org/varifier | head=718a787fd8490ea33a79b5095884e66e12106399 | licenses=["MIT"] | count=11 | events 2020-04-06T11:22:50.000Z .. 2021-11-12T18:09:41.000Z
max_issues: path=varifier/dnadiff.py | repo=martinghunt/varifier | head=9f05477b5e48e96264c392fbd14ca98d1ed86e48 | licenses=["MIT"] | count=17 | events 2020-04-01T15:19:55.000Z .. 2021-11-12T05:07:01.000Z
max_forks:  path=varifier/dnadiff.py | repo=martinghunt/varifier | head=9f05477b5e48e96264c392fbd14ca98d1ed86e48 | licenses=["MIT"] | count=3 | events 2020-04-01T10:41:27.000Z .. 2020-08-05T06:27:21.000Z
content:

from operator import attrgetter
import logging
import os
import shutil
import subprocess

import pyfastaq
import pymummer

from cluster_vcf_records import vcf_record

from varifier import utils

# We only want the .snps file from the dnadiff script from MUMmer. From reading
# the docs and inspecting that script, we need to run these commands:
#
# nucmer --maxmatch --delta out.delta ref.fasta query.fasta
# delta-filter -1 out.delta > out.1delta
# show-snps -rlTHC out.1delta > out.snps
#
# This is instead of just running show-snps, which runs several other commands
# in addition to making the snps file.


def _run_dnadiff_one_split(ref_fasta, query_fasta, outfile, threads=1, maxmatch=True):
    delta = f"{outfile}.tmp.delta"
    delta_1 = f"{outfile}.tmp.1delta"
    subprocess.check_output(f"rm -f {delta} {delta_1}", shell=True)
    maxmatch_opt = "--maxmatch" if maxmatch else ""
    commands = [
        f"nucmer --threads {threads} {maxmatch_opt} --delta {delta} {ref_fasta} {query_fasta}",
        f"delta-filter -1 {delta} > {delta_1}",
        f"show-snps -rlTHC {delta_1} > {outfile}",
    ]

    for command in commands:
        logging.info("Start run command: " + command)
        subprocess.check_output(command, shell=True)
        logging.info("Finish run command: " + command)
    os.unlink(delta)
    os.unlink(delta_1)


def _run_dnadiff(
    ref_fasta,
    query_fasta,
    outfile,
    split_query=False,
    debug=False,
    threads=1,
    maxmatch=True,
):
    if not split_query:
        _run_dnadiff_one_split(
            ref_fasta, query_fasta, outfile, threads=threads, maxmatch=maxmatch
        )
    else:
        tmp_snp_files = []
        seq_reader = pyfastaq.sequences.file_reader(query_fasta)
        for seq in seq_reader:
            prefix = f"{outfile}.tmp.split.{len(tmp_snp_files)}"
            tmp_fasta = f"{prefix}.fasta"
            with open(tmp_fasta, "w") as f:
                print(seq, file=f)
            snp_file = f"{prefix}.snps"
            _run_dnadiff_one_split(
                ref_fasta, tmp_fasta, snp_file, threads=threads, maxmatch=maxmatch
            )
            os.unlink(tmp_fasta)
            tmp_snp_files.append(snp_file)

        with open(outfile, "wb") as f_out:
            for snp_file in tmp_snp_files:
                with open(snp_file, "rb") as f_in:
                    shutil.copyfileobj(f_in, f_out)
                if not debug:
                    os.unlink(snp_file)


def _snps_file_to_vcf(snps_file, query_fasta, outfile):
    """Loads the .snps file made by dnadiff.
    query_fasta = fasta file of query sequences.
    Writes a new VCF file of unmerged records."""
    vcf_records = {}
    variants = pymummer.snp_file.get_all_variants(snps_file)
    query_seqs = utils.file_to_dict_of_seqs(query_fasta)

    for variant in variants:
        # If the variant is reversed, it means that either the ref or query had to be
        # reverse complemented when aligned by mummer. Need to do the appropriate
        # reverse (complement) fixes so the VCF has the correct REF and ALT sequences
        if variant.reverse:
            qry_seq = pyfastaq.sequences.Fasta("x", variant.qry_base)
            qry_seq.revcomp()
            variant.qry_base = "".join(reversed(qry_seq.seq))
            ref_seq = pyfastaq.sequences.Fasta("x", variant.ref_base)
            ref_seq.revcomp()
            variant.ref_base = ref_seq.seq

        if variant.var_type == pymummer.variant.SNP:
            new_record = vcf_record.VcfRecord(
                "\t".join(
                    [
                        variant.qry_name,
                        str(variant.qry_start + 1),
                        ".",
                        variant.qry_base,
                        variant.ref_base,
                        ".",
                        ".",
                        "SVTYPE=DNADIFF_SNP",
                        "GT",
                        "1/1",
                    ]
                )
            )
        elif variant.var_type == pymummer.variant.DEL:
            # The query has sequence missing, compared to the
            # reference. We're making VCF records w.r.t. the
            # query, so this is an insertion. So need to
            # get the nucleotide before the insertion as well.
            new_record = vcf_record.VcfRecord(
                "\t".join(
                    [
                        variant.qry_name,
                        str(variant.qry_start + 1),
                        ".",
                        query_seqs[variant.qry_name][variant.qry_start],
                        query_seqs[variant.qry_name][variant.qry_start]
                        + variant.ref_base,
                        ".",
                        ".",
                        "SVTYPE=DNADIFF_INS",
                        "GT",
                        "1/1",
                    ]
                )
            )
        elif variant.var_type == pymummer.variant.INS:
            # The ref has sequence missing, compared to the
            # query. We're making VCF records w.r.t. the
            # query, so this is a deletion. So need to
            # get the nucleotide before the deletion as well.
            new_record = vcf_record.VcfRecord(
                "\t".join(
                    [
                        variant.qry_name,
                        str(variant.qry_start),
                        ".",
                        query_seqs[variant.qry_name][variant.qry_start - 1]
                        + variant.qry_base,
                        query_seqs[variant.qry_name][variant.qry_start - 1],
                        ".",
                        ".",
                        "SVTYPE=DNADIFF_DEL",
                        "GT",
                        "1/1",
                    ]
                )
            )
        else:
            raise Exception("Unknown variant type: " + str(variant))

        assert (
            new_record.REF
            == query_seqs[new_record.CHROM][
                new_record.POS : new_record.POS + len(new_record.REF)
            ]
        )

        if new_record.CHROM not in vcf_records:
            vcf_records[new_record.CHROM] = []
        vcf_records[new_record.CHROM].append(new_record)

    for vcf_list in vcf_records.values():
        vcf_list.sort(key=attrgetter("POS"))

    with open(outfile, "w") as f:
        print("##fileformat=VCFv4.2", file=f)
        for seq in query_seqs.values():
            print(f"##contig=<ID={seq.id},length={len(seq)}>", file=f)
        print("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tsample", file=f)
        for key, vcf_list in sorted(vcf_records.items()):
            for record in vcf_list:
                print(record, file=f)


def make_truth_vcf(
    ref_fasta, truth_fasta, outfile, debug=False, split_ref=False, threads=1, maxmatch=True,
):
    snps_file = f"{outfile}.tmp.snps"
    _run_dnadiff(
        truth_fasta,
        ref_fasta,
        snps_file,
        split_query=split_ref,
        debug=debug,
        threads=threads,
        maxmatch=maxmatch,
    )
    _snps_file_to_vcf(snps_file, ref_fasta, outfile)
    if not debug:
        os.unlink(snps_file)

stats: avg_line_length=34.37619 | max_line_length=95 | alphanum_fraction=0.543289
counts (count/score): classes=0/0 | generators=0/0 | decorators=0/0 | async_functions=0/0 | documentation=1,832/0.253775
hexsha: 0a658f2185402efce42f9a0cf262eb928b7b63f0
size: 1,650 bytes | ext: py | lang: Python
max_stars:  path=modules/models.py | repo=sbj-ss/github-watcher | head=7d7c4d2a0a6a014b93a2168dc6e508b2b867a414 | licenses=["MIT"] | count=null | events=null
max_issues: path=modules/models.py | repo=sbj-ss/github-watcher | head=7d7c4d2a0a6a014b93a2168dc6e508b2b867a414 | licenses=["MIT"] | count=null | events=null
max_forks:  path=modules/models.py | repo=sbj-ss/github-watcher | head=7d7c4d2a0a6a014b93a2168dc6e508b2b867a414 | licenses=["MIT"] | count=null | events=null
content:

from dataclasses import asdict, dataclass
from typing import Any, Dict, List, Type


@dataclass(frozen=True)
class StatsBaseModel:
    """Base model for various reports"""

    @classmethod
    def key(cls: Type) -> str:
        name = cls.__name__
        return name[0].lower() + name[1:]

    def to_table(self) -> List[str]:
        raise NotImplementedError

    def to_dict(self) -> Dict[str, Any]:
        return asdict(self)


@dataclass(frozen=True)
class Contributor:
    name: str
    commit_count: int


@dataclass(frozen=True)
class ContributorStats(StatsBaseModel):
    contributors: List[Contributor]

    def to_table(self) -> List[str]:
        return [
            'Most active contributors:',
            '-------------------------',
            'Name' + (' ' * 20) + 'Commits',
        ] + [f'{c.name.ljust(24)}{c.commit_count}' for c in self.contributors]


@dataclass(frozen=True)
class PullRequestStats(StatsBaseModel):
    open_count: int
    closed_count: int
    old_count: int

    def to_table(self) -> List[str]:
        return [
            'Pull requests:',
            '--------------',
            'Open    Closed  Old',
            f'{str(self.open_count).ljust(8)}{str(self.closed_count).ljust(8)}{str(self.old_count).ljust(8)}'
        ]


@dataclass(frozen=True)
class IssueStats(StatsBaseModel):
    open_count: int
    closed_count: int
    old_count: int

    def to_table(self) -> List[str]:
        return [
            'Issues:',
            '-------',
            'Open    Closed  Old',
            f'{str(self.open_count).ljust(8)}{str(self.closed_count).ljust(8)}{str(self.old_count).ljust(8)}'
        ]

stats: avg_line_length=25 | max_line_length=109 | alphanum_fraction=0.577576
counts (count/score): classes=1,432/0.867879 | generators=0/0 | decorators=1,552/0.940606 | async_functions=0/0 | documentation=431/0.261212
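A brief usage sketch for the report models in modules/models.py above (values are illustrative):

stats = ContributorStats(contributors=[
    Contributor(name="alice", commit_count=42),
    Contributor(name="bob", commit_count=7),
])
print("\n".join(stats.to_table()))  # renders the plain-text report
print(stats.key())                  # "contributorStats": key derived from the class name
print(stats.to_dict())              # nested dicts via dataclasses.asdict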
hexsha: 0a6616a10563e4ebc6f0a75abad1fbf54a72a196
size: 2,776 bytes | ext: py | lang: Python
max_stars:  path=queryfilter/datetimefilter.py | repo=iCHEF/queryfilter | head=0ae4faf525e162d2720d328b96fa179d68277f1e | licenses=["Apache-2.0"] | count=4 | events 2018-05-11T18:07:32.000Z .. 2019-07-30T13:38:49.000Z
max_issues: path=queryfilter/datetimefilter.py | repo=iCHEF/queryfilter | head=0ae4faf525e162d2720d328b96fa179d68277f1e | licenses=["Apache-2.0"] | count=6 | events 2018-02-26T04:46:36.000Z .. 2019-04-10T06:17:12.000Z
max_forks:  path=queryfilter/datetimefilter.py | repo=iCHEF/queryfilter | head=0ae4faf525e162d2720d328b96fa179d68277f1e | licenses=["Apache-2.0"] | count=null | events=null
content:

from __future__ import absolute_import

import datetime

from dateutil import parser
import pytz

from .base import FieldFilter, DictFilterMixin, DjangoQueryFilterMixin
from .queryfilter import QueryFilter

WHOLE_DAY = datetime.timedelta(days=1)
ONE_SECOND = datetime.timedelta(seconds=1)


@QueryFilter.register_type_condition('datetime')
class DatetimeRangeFilter(DjangoQueryFilterMixin, DictFilterMixin,
                          FieldFilter):

    @property
    def start(self):
        return get_start(self.filter_args.get("start"))

    @property
    def end(self):
        end_datetime = get_end(self.filter_args.get("end"))
        if not end_datetime:
            return None
        if _has_no_time_info(end_datetime):
            end_datetime = end_datetime + WHOLE_DAY - ONE_SECOND
        return end_datetime

    def on_dicts(self, dicts):
        def in_range(datum):
            datetime_string = self.get(datum, self.field_name)
            if isinstance(datetime_string, datetime.datetime):
                to_compare = datetime_string
            else:
                to_compare = parse(datetime_string)
            if not self.start and not self.end:
                return False
            if self.start and (to_compare < self.start):
                return False
            if self.end and (self.end < to_compare):
                return False
            return True

        return list(filter(in_range, dicts))

    @property
    def query_params(self):
        if not any((self.start, self.end)):
            return None
        query_params = dict()
        if self.start:
            query_params["{}__gte".format(self.field_name)] = self.start
        if self.end:
            query_params["{}__lte".format(self.field_name)] = self.end
        return query_params

    def _do_django_query(self, queryset):
        query_params = self.query_params
        if query_params:
            return queryset.filter(**query_params)
        else:
            return queryset.none()


min_datetime = datetime.datetime.min.replace(tzinfo=pytz.utc)
max_datetime = datetime.datetime.max.replace(tzinfo=pytz.utc)


def get_start(start_date_str):
    if not start_date_str:
        return None
    return parse(start_date_str)


def get_end(end_date_str):
    if not end_date_str:
        return None
    return parse(end_date_str)


def parse(datetime_string):
    return make_time_aware(parser.parse(datetime_string))


def make_time_aware(datetime_data):
    if not datetime_data.tzinfo:
        datetime_data = datetime_data.replace(tzinfo=pytz.utc)
    return datetime_data


def _has_no_time_info(value):
    return value.hour == 0 and \
        value.minute == 0 and \
        value.second == 0 and \
        value.microsecond == 0

stats: avg_line_length=25.46789 | max_line_length=72 | alphanum_fraction=0.648055
counts (count/score): classes=1,673/0.602666 | generators=0/0 | decorators=1,722/0.620317 | async_functions=0/0 | documentation=40/0.014409
hexsha: 0a6634c8b3d57a247c912406564142afedbbeba0
size: 13,829 bytes | ext: py | lang: Python
max_stars:  path=built-in/TensorFlow/Research/cv/image_classification/Cars_for_TensorFlow/automl/vega/search_space/networks/pytorch/operator/rpn.py | repo=Huawei-Ascend/modelzoo | head=df51ed9c1d6dbde1deef63f2a037a369f8554406 | licenses=["Apache-2.0"] | count=12 | events 2020-12-13T08:34:24.000Z .. 2022-03-20T15:17:17.000Z
max_issues: path=built-in/TensorFlow/Research/cv/image_classification/Cars_for_TensorFlow/automl/vega/search_space/networks/pytorch/operator/rpn.py | repo=Huawei-Ascend/modelzoo | head=df51ed9c1d6dbde1deef63f2a037a369f8554406 | licenses=["Apache-2.0"] | count=3 | events 2021-03-31T20:15:40.000Z .. 2022-02-09T23:50:46.000Z
max_forks:  path=built-in/TensorFlow/Research/cv/image_classification/Darts_for_TensorFlow/automl/vega/search_space/networks/pytorch/operator/rpn.py | repo=Huawei-Ascend/modelzoo | head=df51ed9c1d6dbde1deef63f2a037a369f8554406 | licenses=["Apache-2.0"] | count=2 | events 2021-07-10T12:40:46.000Z .. 2021-12-17T07:55:15.000Z
content:

# -*- coding: utf-8 -*-

# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.

"""Import all torch operators."""
import torch.nn.functional as F
import torch.nn as nn
import torch
from vega.search_space.networks.network_factory import NetworkFactory
from vega.search_space.networks.net_utils import NetTypes
from vega.search_space.networks.pytorch.utils.anchor_utils.anchor_target import AnchorTarget
from vega.search_space.networks.pytorch.utils.bbox_utils.anchor_generator import AnchorGenerator
from vega.core.common.config import Config
from functools import partial
import numpy as np
from six.moves import map, zip
from vega.search_space.networks.pytorch.losses.reduce_loss import weighted_loss


@NetworkFactory.register(NetTypes.Operator)
class RpnClsLossInput(nn.Module):
    """Rpn input."""

    def __init__(self):
        super(RpnClsLossInput, self).__init__()

    def forward(self, x):
        """Get cls score and bbox preds."""
        cls_scores = x[0]
        bbox_preds = x[1]
        return cls_scores, bbox_preds


@NetworkFactory.register(NetTypes.Operator)
class RpnLossInput(nn.Module):
    """Rpn loss input."""

    def __init__(self):
        super(RpnLossInput, self).__init__()

    def forward(self, x):
        """Get cls score."""
        cls_scores = x[2][0]
        bbox_preds = x[2][1]
        gt_bboxes = x[0]['gt_bboxes'].cuda()
        img_metas = [x[0]['img_meta']]
        gt_bboxes_ignore = x[0]['gt_bboxes_ignore'].cuda()
        return cls_scores, bbox_preds, gt_bboxes, img_metas, gt_bboxes_ignore


@NetworkFactory.register(NetTypes.Operator)
class AnchorTargetOp(nn.Module):
    """Anchor Target."""

    def __init__(self, target_means=None, target_stds=None, num_classes=2,
                 use_sigmoid_cls=False, cfg=None, sampling=True):
        self.target_means = target_means or (.0, .0, .0, .0)
        self.target_stds = target_stds or (1.0, 1.0, 1.0, 1.0)
        self.label_channels = num_classes if use_sigmoid_cls else 1
        self.cfg = Config({'assigner': {'name': 'MaxIoUAllNegAssigner',
                                        'pos_iou_thr': 0.7,
                                        'neg_iou_thr': tuple([-1, 0.3]),
                                        'min_pos_iou': 0.3,
                                        'ignore_iof_thr': 0.5},
                           'sampler': {'name': 'RandomSampler',
                                       'num': 256,
                                       'pos_fraction': 0.5,
                                       'neg_pos_ub': -1,
                                       'add_gt_as_proposals': False},
                           'allowed_border': 0,
                           'pos_weight': -1,
                           'debug': False})
        self.sampling = sampling
        super(AnchorTargetOp, self).__init__()

    def forward(self, x):
        """Create X=(anchor_list,valid_flag_list,gt_bboxes,img_metas,)."""
        anchor_list, valid_flag_list, original_anchors, gt_bboxes, img_metas, gt_bboxes_ignore = x
        # out=(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg).
        return AnchorTarget(anchor_list, valid_flag_list, gt_bboxes, img_metas,
                            self.target_means, self.target_stds, self.cfg,
                            gt_bboxes_ignore_list=gt_bboxes_ignore,
                            gt_labels_list=None,
                            label_channels=self.label_channels,
                            sampling=self.sampling)


@NetworkFactory.register(NetTypes.Operator)
class Anchors(nn.Module):
    """Get anchors according to feature map sizes."""

    def __init__(self, anchor_base_sizes_cfg=None, anchor_scales=None,
                 anchor_ratios=None, anchor_strides=None):
        self.anchor_base_sizes_cfg = anchor_base_sizes_cfg
        self.anchor_scales = anchor_scales or [8, 16, 32]
        self.anchor_ratios = anchor_ratios or [0.5, 1.0, 2.0]
        self.anchor_strides = anchor_strides or [4, 8, 16, 32, 64]
        self.anchor_base_sizes = list(
            self.anchor_strides) if self.anchor_base_sizes_cfg is None else self.anchor_base_sizes_cfg
        super(Anchors, self).__init__()

    def forward(self, x):
        """Create anchor."""
        cls_scores, bbox_preds, gt_bboxes, img_metas, gt_bboxes_ignore = x
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        anchor_generators = []
        for anchor_base in self.anchor_base_sizes:
            anchor_generators.append(AnchorGenerator(anchor_base, self.anchor_scales, self.anchor_ratios))
        num_imgs = len(img_metas)
        num_levels = len(featmap_sizes)
        multi_level_anchors = []
        for i in range(num_levels):
            anchors = anchor_generators[i].grid_anchors(featmap_sizes[i], self.anchor_strides[i])
            multi_level_anchors.append(anchors)
        anchor_list = [multi_level_anchors for _ in range(num_imgs)]
        valid_flag_list = []
        for img_id, img_meta in enumerate(img_metas):
            multi_level_flags = []
            for i in range(num_levels):
                anchor_stride = self.anchor_strides[i]
                feat_h, feat_w = featmap_sizes[i]
                h, w, _ = img_meta['pad_shape']
                valid_feat_h = min(int(np.ceil(h / anchor_stride)), feat_h)
                valid_feat_w = min(int(np.ceil(w / anchor_stride)), feat_w)
                flags = anchor_generators[i].valid_flags((feat_h, feat_w), (valid_feat_h, valid_feat_w))
                multi_level_flags.append(flags)
            valid_flag_list.append(multi_level_flags)
        return anchor_list, valid_flag_list, multi_level_anchors, gt_bboxes, img_metas, gt_bboxes_ignore


def multi_apply(func, *args, **kwargs):
    """Multi apply.

    :param func: function
    :param args: args of function
    :return: result
    """
    pfunc = partial(func, **kwargs) if kwargs else func
    map_results = map(pfunc, *args)
    return tuple(map(list, zip(*map_results)))


@NetworkFactory.register(NetTypes.Operator)
class RpnClsLoss(nn.Module):
    """Rpn Class Loss."""

    def __init__(self, out_channels=2):
        super(RpnClsLoss, self).__init__()
        self.loss_cls = CustomCrossEntropyLoss()
        self.loss_bbox = CustomSmoothL1Loss()
        self.out_channels = out_channels

    def forward(self, x):
        """Get x."""
        (cls_score, bbox_pred, labels, label_weights, bbox_targets, bbox_weights,
         num_total_pos, num_total_neg, num_total_samples) = x
        losses_cls, losses_bbox = multi_apply(self.loss, cls_score, bbox_pred, labels,
                                              label_weights, bbox_targets, bbox_weights,
                                              num_total_samples=num_total_samples)
        return losses_cls, losses_bbox

    def loss(self, cls_score, bbox_pred, labels, label_weights, bbox_targets,
             bbox_weights, num_total_samples):
        """Get loss."""
        labels = labels.reshape(-1)
        label_weights = label_weights.reshape(-1)
        cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.out_channels)
        loss_cls = self.loss_cls(cls_score, labels, label_weights, avg_factor=num_total_samples)
        bbox_targets = bbox_targets.reshape(-1, 4)
        bbox_weights = bbox_weights.reshape(-1, 4)
        bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
        loss_bbox = self.loss_bbox(bbox_pred, bbox_targets, bbox_weights, avg_factor=num_total_samples)
        return loss_cls, loss_bbox


@NetworkFactory.register(NetTypes.Operator)
class CustomCrossEntropyLoss(nn.Module):
    """Cross Entropy Loss."""

    def __init__(self, use_sigmoid=False, use_mask=False, reduction='mean', loss_weight=1.0):
        """Init Cross Entropy loss.

        :param desc: config dict
        """
        super(CustomCrossEntropyLoss, self).__init__()
        self.use_sigmoid = use_sigmoid
        self.use_mask = use_mask
        self.reduction = reduction
        self.loss_weight = loss_weight
        if self.use_sigmoid:
            self.loss_function = binary_cross_entropy
        elif self.use_mask:
            self.loss_function = mask_cross_entropy
        else:
            self.loss_function = cross_entropy

    def forward(self, cls_score, label, weight, avg_factor, reduction_override=None, **kwargs):
        """Forward compute."""
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        loss_cls = self.loss_weight * self.loss_function(cls_score, label, weight,
                                                         reduction=reduction,
                                                         avg_factor=avg_factor, **kwargs)
        return loss_cls


@NetworkFactory.register(NetTypes.Operator)
class CustomSmoothL1Loss(nn.Module):
    """Smooth L1 Loss."""

    def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
        """Init smooth l1 loss."""
        super(CustomSmoothL1Loss, self).__init__()
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs):
        """Forward compute.

        :param pred: predict
        :param target: target
        :param weight: weight
        :param avg_factor: avg factor
        :param reduction_override: reduce override
        :return: loss
        """
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if target.numel() > 0:
            loss_bbox = self.loss_weight * smooth_l1_loss(
                pred, target, weight, beta=self.beta, reduction=reduction,
                avg_factor=avg_factor, **kwargs)
            return loss_bbox
        else:
            return torch.FloatTensor([0.0]).cuda()


@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
    """Smooth l1 loss.

    :param pred: predict
    :param target: target
    :param beta: beta
    :return: loss
    """
    assert beta > 0
    assert pred.size() == target.size() and target.numel() > 0
    diff = torch.abs(pred - target)
    loss = torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta)
    return loss


def cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None):
    """Cross entropy losses.

    :param pred: predict result
    :param label: gt label
    :param weight: weight
    :param reduction: reduce function
    :param avg_factor: avg factor
    :return: loss
    """
    loss = F.cross_entropy(pred, label, reduction='none')
    if weight is not None:
        weight = weight.float()
    loss = weight_reduce_loss(loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
    return loss


def _expand_binary_labels(labels, label_weights, label_channels):
    """Expand binary labels.

    :param labels: labels
    :param label_weights: label weights
    :param label_channels: label channels
    :return: binary label and label weights
    """
    bin_labels = labels.new_full((labels.size(0), label_channels), 0)
    inds = torch.nonzero(labels >= 1).squeeze()
    if inds.numel() > 0:
        bin_labels[inds, labels[inds] - 1] = 1
    if label_weights is None:
        bin_label_weights = None
    else:
        bin_label_weights = label_weights.view(-1, 1).expand(label_weights.size(0), label_channels)
    return bin_labels, bin_label_weights


def binary_cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None):
    """Binary cross entropy loss.

    :param pred: predict result
    :param label: gt label
    :param weight: weight
    :param reduction: reduce function
    :param avg_factor: avg factor
    :return: loss
    """
    if pred.dim() != label.dim():
        label, weight = _expand_binary_labels(label, weight, pred.size(-1))
    if weight is not None:
        weight = weight.float()
    loss = F.binary_cross_entropy_with_logits(
        pred, label.float(), weight, reduction='none')
    loss = weight_reduce_loss(loss, reduction=reduction, avg_factor=avg_factor)
    return loss


def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=None):
    """Mask cross entropy loss.

    :param pred: predict result
    :param target: target
    :param label: gt label
    :param reduction: reduce function
    :param avg_factor: avg factor
    :return: loss
    """
    assert reduction == 'mean' and avg_factor is None
    num_rois = pred.size()[0]
    inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
    pred_slice = pred[inds, label].squeeze(1)
    return F.binary_cross_entropy_with_logits(pred_slice, target, reduction='mean')[None]


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Weight reduce loss.

    :param loss: losses
    :param weight: weight
    :param reduction: reduce function
    :param avg_factor: avg factor
    :return: loss
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    else:
        if reduction == 'mean':
            loss = loss.sum() / avg_factor
        elif reduction != 'none':
            raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def reduce_loss(loss, reduction):
    """Reduce loss compute.

    :param loss: losses
    :param reduction: reduce function
    :return: loss
    """
    reduction_function = F._Reduction.get_enum(reduction)
    if reduction_function == 0:
        return loss
    elif reduction_function == 1:
        return loss.mean()
    elif reduction_function == 2:
        return loss.sum()

stats: avg_line_length=37.991758 | max_line_length=116 | alphanum_fraction=0.653048
counts (count/score): classes=8,317/0.601417 | generators=0/0 | decorators=9,022/0.652397 | async_functions=0/0 | documentation=2,982/0.215634
hexsha: 0a6637af877e66a30d055aa9bfab27307de91c10
size: 5,292 bytes | ext: py | lang: Python
max_stars:  path=scrapy/http/request/__init__.py | repo=joybhallaa/scrapy | head=e4750f2fbdacbeb7a20ae7c6b13bba3fb0f7ad54 | licenses=["BSD-3-Clause"] | count=1 | events 2020-04-18T16:48:49.000Z .. 2020-04-18T16:48:49.000Z
max_issues: path=scrapy/http/request/__init__.py | repo=Venfox/scrapy | head=cf39602c3038d576e14c20a2ac22f88006deb63b | licenses=["BSD-3-Clause"] | count=null | events=null
max_forks:  path=scrapy/http/request/__init__.py | repo=Venfox/scrapy | head=cf39602c3038d576e14c20a2ac22f88006deb63b | licenses=["BSD-3-Clause"] | count=null | events=null
content:

"""
This module implements the Request class which is used to represent HTTP
requests in Scrapy.

See documentation in docs/topics/request-response.rst
"""
from w3lib.url import safe_url_string

from scrapy.http.headers import Headers
from scrapy.utils.python import to_bytes
from scrapy.utils.trackref import object_ref
from scrapy.utils.url import escape_ajax
from scrapy.http.common import obsolete_setter
from scrapy.utils.curl import curl_to_request_kwargs


class Request(object_ref):

    def __init__(self, url, callback=None, method='GET', headers=None, body=None,
                 cookies=None, meta=None, encoding='utf-8', priority=0,
                 dont_filter=False, errback=None, flags=None, cb_kwargs=None):

        self._encoding = encoding  # this one has to be set first
        self.method = str(method).upper()
        self._set_url(url)
        self._set_body(body)
        assert isinstance(priority, int), "Request priority not an integer: %r" % priority
        self.priority = priority

        if callback is not None and not callable(callback):
            raise TypeError('callback must be a callable, got %s' % type(callback).__name__)
        if errback is not None and not callable(errback):
            raise TypeError('errback must be a callable, got %s' % type(errback).__name__)
        self.callback = callback
        self.errback = errback

        self.cookies = cookies or {}
        self.headers = Headers(headers or {}, encoding=encoding)
        self.dont_filter = dont_filter

        self._meta = dict(meta) if meta else None
        self._cb_kwargs = dict(cb_kwargs) if cb_kwargs else None
        self.flags = [] if flags is None else list(flags)

    @property
    def cb_kwargs(self):
        if self._cb_kwargs is None:
            self._cb_kwargs = {}
        return self._cb_kwargs

    @property
    def meta(self):
        if self._meta is None:
            self._meta = {}
        return self._meta

    def _get_url(self):
        return self._url

    def _set_url(self, url):
        if not isinstance(url, str):
            raise TypeError('Request url must be str or unicode, got %s:' % type(url).__name__)

        s = safe_url_string(url, self.encoding)
        self._url = escape_ajax(s)

        if ('://' not in self._url) and (not self._url.startswith('data:')):
            raise ValueError('Missing scheme in request url: %s' % self._url)

    url = property(_get_url, obsolete_setter(_set_url, 'url'))

    def _get_body(self):
        return self._body

    def _set_body(self, body):
        if body is None:
            self._body = b''
        else:
            self._body = to_bytes(body, self.encoding)

    body = property(_get_body, obsolete_setter(_set_body, 'body'))

    @property
    def encoding(self):
        return self._encoding

    def __str__(self):
        return "<%s %s>" % (self.method, self.url)

    __repr__ = __str__

    def copy(self):
        """Return a copy of this Request"""
        return self.replace()

    def replace(self, *args, **kwargs):
        """Create a new Request with the same attributes except for those
        given new values.
        """
        for x in ['url', 'method', 'headers', 'body', 'cookies', 'meta', 'flags',
                  'encoding', 'priority', 'dont_filter', 'callback', 'errback', 'cb_kwargs']:
            kwargs.setdefault(x, getattr(self, x))
        cls = kwargs.pop('cls', self.__class__)
        return cls(*args, **kwargs)

    @classmethod
    def from_curl(cls, curl_command, ignore_unknown_options=True, **kwargs):
        """Create a Request object from a string containing a `cURL
        <https://curl.haxx.se/>`_ command. It populates the HTTP method, the
        URL, the headers, the cookies and the body. It accepts the same
        arguments as the :class:`Request` class, taking preference and
        overriding the values of the same arguments contained in the cURL
        command.

        Unrecognized options are ignored by default. To raise an error when
        finding unknown options call this method by passing
        ``ignore_unknown_options=False``.

        .. caution:: Using :meth:`from_curl` from :class:`~scrapy.http.Request`
                     subclasses, such as :class:`~scrapy.http.JSONRequest`, or
                     :class:`~scrapy.http.XmlRpcRequest`, as well as having
                     :ref:`downloader middlewares <topics-downloader-middleware>`
                     and
                     :ref:`spider middlewares <topics-spider-middleware>`
                     enabled, such as
                     :class:`~scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware`,
                     :class:`~scrapy.downloadermiddlewares.useragent.UserAgentMiddleware`,
                     or
                     :class:`~scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware`,
                     may modify the :class:`~scrapy.http.Request` object.

        To translate a cURL command into a Scrapy request, you may use
        `curl2scrapy <https://michael-shub.github.io/curl2scrapy/>`_.
        """
        request_kwargs = curl_to_request_kwargs(curl_command, ignore_unknown_options)
        request_kwargs.update(kwargs)
        return cls(**request_kwargs)

stats: avg_line_length=38.071942 | max_line_length=102 | alphanum_fraction=0.634732
counts (count/score): classes=4,827/0.912132 | generators=0/0 | decorators=2,121/0.400794 | async_functions=0/0 | documentation=2,229/0.421202
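A short usage sketch for the Request class in the file above (URLs are placeholders; requires scrapy installed):

from scrapy import Request  # the class above is exported as scrapy.Request

def parse(response):
    pass

req = Request('https://example.com/page', callback=parse, priority=10)
other = req.replace(url='https://example.com/other')  # copy with one field changed
print(req)        # <GET https://example.com/page>
print(req.meta)   # {}  (created lazily on first access)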
hexsha: 0a6664a131eebc11f4bbd4774aef93f20aa62a4d
size: 7,261 bytes | ext: py | lang: Python
max_stars:  path=game.py | repo=akaeme/BlackJackBot | head=04970107202a24059f8da933233fba7df9f3a0ef | licenses=["MIT"] | count=null | events=null
max_issues: path=game.py | repo=akaeme/BlackJackBot | head=04970107202a24059f8da933233fba7df9f3a0ef | licenses=["MIT"] | count=null | events=null
max_forks:  path=game.py | repo=akaeme/BlackJackBot | head=04970107202a24059f8da933233fba7df9f3a0ef | licenses=["MIT"] | count=null | events=null
content:

#encoding: utf8
__author__ = 'Diogo Gomes'
__email__ = 'dgomes@ua.pt'
__license__ = "GPL"
__version__ = "0.1"

import copy

import card
from shoe import Shoe
from dealer import Dealer
from player import Player

BET_MULTIPLIER = 2


class Game(object):
    class Rules():
        def __init__(self, shoe_size=4, min_bet=1, max_bet=10):
            self.shoe_size = shoe_size
            self.min_bet = min_bet
            self.max_bet = max_bet
            self.bet_multiplier = BET_MULTIPLIER

        def __str__(self):
            return "RULES\tMin bet: {}, Max bet: {}, Shoe size: {}, Bet multiplier: {}".format(self.min_bet, self.max_bet, self.shoe_size, self.bet_multiplier)

    class PlayerState():
        def __init__(self, p):
            self.player = p
            self.bet = 0
            self.hand = []
            self.bust = False
            self.done = False
            self.watch = False

        def copy(self):
            return copy.deepcopy(self)

        def __str__(self):
            if isinstance(self.player, Dealer):
                return "{}".format(self.hand)
            return "{} ({}€)".format(self.hand, self.bet)

        def __repr__(self):
            return "{}".format(self.player.name)

        def hide_card(self):
            h = self.copy()
            h.hand = h.hand[1:]
            return h

        def want_to_play(self, rules):
            return self.player.want_to_play(rules)

        def take_bet(self, state, rules):
            bet = 0
            while (bet != self.bet and self.bet != 0) or not (rules.min_bet <= bet <= rules.max_bet):
                # bets can't be 0 and double down means double down
                bet = self.player.bet(state[0].hide_card(), state[1:])
            self.bet += bet

    def __init__(self, players, shoe_size=4, debug=False, verbose=True, min_bet=1, max_bet=10, shoe=None):
        if verbose:
            # print(chr(27) + "[2J")
            print("-"*80)
        self.verbose = verbose
        self.debug = debug
        self.rules = self.Rules(shoe_size=shoe_size, min_bet=min_bet, max_bet=max_bet)
        self.shoe = Shoe(shoe_size)
        if shoe != None:
            self.shoe = shoe
        self.shoe.shuffle()
        self.state = [self.PlayerState(Dealer())] + [self.PlayerState(p) for p in players]
        self.done = False

    def str_players_hands(self):
        o = ""
        for p in self.state[1:]:
            o += "{!s:^45}".format(p)
        return o

    def str_players_names(self):
        o = ""
        for p in self.state[1:]:
            o += "{!s:^35}".format(p.player)
        return o

    def __str__(self):
        return (
            "{:^30}\n"
            "╔" + "═══════════════════════════════"*(len(self.state)-1) + "╗\n"
            "{!s:^45}\n"
            " \n"
            " \n"
            " \n"
            " \n"
            " \n"
            "{!s}\n"
            "╚" + "═══════════════════════════════"*(len(self.state)-1) + "╝\n"
            "{}\n"
        ).format(self.state[0].player.name,
                 self.state[0].hand if self.done else (["**"] + self.state[0].hide_card().hand if len(self.state[0].hand) else []),
                 self.str_players_hands(),
                 self.str_players_names())

    def deal(self, num):
        return self.shoe.deal_cards(1)

    def take_bets(self):
        if self.debug:
            print(self)
        for p in self.state[1:]:
            if p.want_to_play(self.rules):
                p.take_bet(self.state, self.rules)
            else:
                p.watch = True

    def loop(self):
        # deal initial cards
        self.state[0].hand += self.shoe.deal_cards(2)
        for p in self.state[1:]:
            if not p.watch:
                p.hand += self.shoe.deal_cards(2)
        turn = 0
        if card.blackjack(self.state[0].hand):
            # if the dealer has blackjack there is no point in playing...
            self.done = True
            return [p for p in self.state[1:] if card.blackjack(p.hand)]
        # lets play
        while not self.done:
            turn += 1
            hits = 0
            for p in self.state[::-1]:
                # skip players watching, bust players, players who have double down
                # and players who already have blackjack!
                if p.watch or p.bust or p.done or card.value(p.hand) == 21:
                    continue
                if self.debug:
                    print("TURN {}: {}".format(turn, p.player.name))
                    print(self)
                action = ""
                while action not in ["h", "s", "d", "u"]:
                    if isinstance(p.player, Dealer):
                        action = p.player.play(self.state[0], self.state[1:])
                    else:
                        action = p.player.play(self.state[0].hide_card(), self.state[1:])
                    if action == "d" and turn != 1:
                        print("YOU CAN'T DOUBLE DOWN!!! double down is only available on the 1st turn")
                        action = ""
                if action == "u":
                    p.watch = True
                    continue
                if action == "d":
                    p.take_bet(self.state, self.rules)
                    p.done = True
                if action in ["h", "d"]:
                    p.hand += self.deal(1)
                    hits += 1
                    if card.value(p.hand) >= 21:
                        if card.value(p.hand) > 21:
                            p.bust = True
                        else:
                            p.done = True  # already has blackjack
                        if isinstance(p.player, Dealer):
                            self.done = True  # game is over we already have a blackjack
            if hits == 0:
                self.done = True
        self.done = True
        return [p for p in self.state
                if not isinstance(p.player, Dealer) and  # Dealer is not really a winner
                not card.blackjack(self.state[0].hand) and  # If dealer gets blackjack no one wins
                not p.watch and  # players watching can't win :)
                not p.bust and  # bust players can't win :)
                (card.value(p.hand) >= card.value(self.state[0].hand) or self.state[0].bust)  # winners have more points then the dealer or the dealer has gone bust
                ]

    def show_table(self):
        for p in self.state[1:]:
            p.player.show(self.state)

    def payback(self, winners):
        for p in self.state[1:]:
            if p.watch:  # check if player surrendered
                if p.bet > 0:
                    p.player.payback(-p.bet//2)  # this means the player lost half his bet
                # skip watchers
                continue
            if p in winners and card.value(self.state[0].hand) == card.value(p.hand):
                p.player.payback(0)  # bet is returned
            elif p in winners:
                p.player.payback(-p.bet + p.bet*BET_MULTIPLIER)
            else:
                p.player.payback(-p.bet)  # this means the player lost

    def run(self):
        self.take_bets()
        winners = self.loop()
        self.show_table()
        self.payback(winners)
        if self.verbose:
            print(self)
            print("🏆 Winners: " + str(winners))

stats: avg_line_length=36.305 | max_line_length=208 | alphanum_fraction=0.493596
counts (count/score): classes=7,166/0.96864 | generators=0/0 | decorators=0/0 | async_functions=0/0 | documentation=1,356/0.183293
hexsha: 0a66d37f0e15138eb333c83b7140c80ba5e24e15
size: 284 bytes | ext: py | lang: Python
max_stars:  path=loops/for/for3.py | repo=camipozas/python-exercises | head=c8c02d2b9ff77f21592c99038e10434aba08dbc7 | licenses=["MIT"] | count=null | events=null
max_issues: path=loops/for/for3.py | repo=camipozas/python-exercises | head=c8c02d2b9ff77f21592c99038e10434aba08dbc7 | licenses=["MIT"] | count=null | events=null
max_forks:  path=loops/for/for3.py | repo=camipozas/python-exercises | head=c8c02d2b9ff77f21592c99038e10434aba08dbc7 | licenses=["MIT"] | count=null | events=null
content:

# Write a program that prints the sum of all the multiples of 7 between 0 and 100.
# Summing all the multiples of 7 from 0 to 100.

total = 0
for i in range(101):
    if i % 7 == 0:
        total = total + i
print("Sumatoria de los múltiplos de 7:", total)

stats: avg_line_length=28.4 | max_line_length=110 | alphanum_fraction=0.679577
counts (count/score): classes=0/0 | generators=0/0 | decorators=0/0 | async_functions=0/0 | documentation=193/0.674825
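Since the multiples of 7 can be generated directly with a range stride, the same total (735) can be computed without the modulo test; an equivalent one-liner:

# range(0, 101, 7) yields 0, 7, 14, ..., 98 directly.
print("Sumatoria de los múltiplos de 7:", sum(range(0, 101, 7)))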
hexsha: 0a67bdcc24a12daa838689d0d299113ff13d2c1e
size: 7,044 bytes | ext: py | lang: Python
max_stars:  path=lib/TWCManager/Status/HASSStatus.py | repo=Saftwerk/TWCManager | head=9b17c063ada80fc159db82fe6e3ad8c4ca071a1a | licenses=["Unlicense"] | count=1 | events 2021-12-26T03:41:22.000Z .. 2021-12-26T03:41:22.000Z
max_issues: path=lib/TWCManager/Status/HASSStatus.py | repo=Saftwerk/TWCManager | head=9b17c063ada80fc159db82fe6e3ad8c4ca071a1a | licenses=["Unlicense"] | count=null | events=null
max_forks:  path=lib/TWCManager/Status/HASSStatus.py | repo=Saftwerk/TWCManager | head=9b17c063ada80fc159db82fe6e3ad8c4ca071a1a | licenses=["Unlicense"] | count=null | events=null
content:

# HomeAssistant Status Output
# Publishes the provided sensor key and value pair to a HomeAssistant instance

import logging
import time

from ww import f

logger = logging.getLogger(__name__.rsplit(".")[-1])


class HASSStatus:

    import threading
    import requests

    apiKey = None
    config = None
    configConfig = None
    configHASS = None
    master = None
    msgRateInSeconds = 60
    resendRateInSeconds = 3600
    retryRateInSeconds = 60
    msgQueue = {}
    status = False
    serverIP = None
    serverPort = 8123
    useHttps = False
    timeout = 2
    backgroundTasksLock = threading.Lock()
    backgroundTasksThread = None

    def __init__(self, master):
        self.config = master.config
        self.master = master

        try:
            self.configConfig = self.config["config"]
        except KeyError:
            self.configConfig = {}
        try:
            self.configHASS = self.config["status"]["HASS"]
        except KeyError:
            self.configHASS = {}

        self.status = self.configHASS.get("enabled", False)
        self.serverIP = self.configHASS.get("serverIP", None)
        self.serverPort = self.configHASS.get("serverPort", 8123)
        self.useHttps = self.configHASS.get("useHttps", False)
        self.apiKey = self.configHASS.get("apiKey", None)
        self.msgRateInSeconds = self.configHASS.get("msgRateInSeconds", 60)
        self.resendRateInSeconds = self.configHASS.get("resendRateInSeconds", 3600)
        self.retryRateInSeconds = self.configHASS.get("retryRateInSeconds", 60)

        # Unload if this module is disabled or misconfigured
        if (
            (not self.status)
            or (not self.serverIP)
            or (int(self.serverPort) < 1)
            or (not self.apiKey)
        ):
            self.master.releaseModule("lib.TWCManager.Status", "HASSStatus")
        else:
            self.backgroundTasksThread = self.threading.Thread(
                target=self.background_task_thread, args=()
            )
            self.backgroundTasksThread.daemon = True
            self.backgroundTasksThread.start()

    def getTwident(self, twcid):
        # Format TWCID nicely
        if len(twcid) == 2:
            return "%02X%02X" % (twcid[0], twcid[1])
        else:
            return str(twcid.decode("utf-8"))

    def background_task_thread(self):
        while True:
            time.sleep(self.msgRateInSeconds)
            self.backgroundTasksLock.acquire()
            for msgKey in self.msgQueue:
                msg = self.msgQueue[msgKey]
                if msg.elapsingTime < time.time():
                    self.sendingStatusToHASS(msg)
            self.backgroundTasksLock.release()

    def getSensorName(self, twcid, key_underscore):
        return "sensor.twcmanager_" + str(self.getTwident(twcid)) + "_" + key_underscore

    def setStatus(self, twcid, key_underscore, key_camelcase, value, unit):
        self.backgroundTasksLock.acquire()
        sensor = self.getSensorName(twcid, key_underscore)
        if (sensor not in self.msgQueue) or (self.msgQueue[sensor].value != value):
            self.msgQueue[sensor] = HASSMessage(
                time.time(),
                sensor,
                twcid,
                key_underscore,
                key_camelcase,
                value,
                unit,
            )
        self.backgroundTasksLock.release()

    def sendingStatusToHASS(self, msg):
        http = "http://" if not (self.useHttps) else "https://"
        # str() added: serverPort may be an int (the class default is 8123),
        # and string concatenation would otherwise raise a TypeError.
        url = http + self.serverIP + ":" + str(self.serverPort)
        url = url + "/api/states/" + msg.sensor
        headers = {
            "Authorization": "Bearer " + self.apiKey,
            "content-type": "application/json",
        }
        try:
            logger.log(
                logging.INFO8,
                f(
                    "Sending POST request to HomeAssistant for sensor {msg.sensor} (value {msg.value})."
                ),
            )
            devclass = ""
            if str.upper(msg.unit) in ["W", "A", "V", "KWH"]:
                devclass = "power"
            if len(msg.unit) > 0:
                self.requests.post(
                    url,
                    json={
                        "state": msg.value,
                        "attributes": {
                            "unit_of_measurement": msg.unit,
                            "device_class": devclass,
                            "friendly_name": "TWC "
                            + str(self.getTwident(msg.twcid))
                            + " "
                            + msg.key_camelcase,
                        },
                    },
                    timeout=self.timeout,
                    headers=headers,
                )
            else:
                self.requests.post(
                    url,
                    json={
                        "state": msg.value,
                        "attributes": {
                            "friendly_name": "TWC "
                            + str(self.getTwident(msg.twcid))
                            + " "
                            + msg.key_camelcase
                        },
                    },
                    timeout=self.timeout,
                    headers=headers,
                )
            # Setting elapsing time to now + resendRateInSeconds
            self.msgQueue[msg.sensor].elapsingTime = (
                time.time() + self.resendRateInSeconds
            )
        except self.requests.exceptions.ConnectionError as e:
            logger.log(
                logging.INFO4,
                "Error connecting to HomeAssistant to publish sensor values",
            )
            logger.debug(str(e))
            self.settingRetryRate(msg)
            return False
        except self.requests.exceptions.ReadTimeout as e:
            logger.log(
                logging.INFO4,
                "Error connecting to HomeAssistant to publish sensor values",
            )
            logger.debug(str(e))
            self.settingRetryRate(msg)
            return False
        except Exception as e:
            logger.log(
                logging.INFO4, "Error during publishing HomeAssistant sensor values"
            )
            logger.debug(str(e))
            self.settingRetryRate(msg)
            return False

    def settingRetryRate(self, msg):
        # Setting elapsing time to now + retryRateInSeconds
        self.msgQueue[msg.sensor].elapsingTime = (
            time.time() + self.retryRateInSeconds
        )


class HASSMessage:
    elapsingTime = 0
    sensor = ""
    twcid = ""
    key_underscore = ""
    key_camelcase = ""
    value = None
    unit = ""

    def __init__(
        self, elapsingTime, sensor, twcid, key_underscore, key_camelcase, value, unit
    ):
        self.elapsingTime = elapsingTime
        self.sensor = sensor
        self.twcid = twcid
        self.key_underscore = key_underscore
        self.key_camelcase = key_camelcase
        self.value = value
        self.unit = unit

stats: avg_line_length=33.383886 | max_line_length=104 | alphanum_fraction=0.52598
counts (count/score): classes=6,829/0.969478 | generators=0/0 | decorators=0/0 | async_functions=0/0 | documentation=994/0.141113
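The keys read in HASSStatus.__init__ imply a config layout like the following; only the key names come from the code above, and the values are illustrative:

config = {
    "config": {},
    "status": {
        "HASS": {
            "enabled": True,
            "serverIP": "192.168.1.10",          # illustrative address
            "serverPort": 8123,
            "useHttps": False,
            "apiKey": "<long-lived HomeAssistant access token>",
            "msgRateInSeconds": 60,
            "resendRateInSeconds": 3600,
            "retryRateInSeconds": 60,
        }
    },
}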
hexsha: 0a682a6477a9ae21b7ff09cd8fd4db9201909c6a
size: 809 bytes | ext: py | lang: Python
max_stars:  path=Archive/routes/home_routes.py | repo=taycurran/TwitOff | head=6e2ee13f83fa86c80988a91b3b41ed0958688c3c | licenses=["MIT"] | count=null | events=null
max_issues: path=Archive/routes/home_routes.py | repo=taycurran/TwitOff | head=6e2ee13f83fa86c80988a91b3b41ed0958688c3c | licenses=["MIT"] | count=3 | events 2021-06-08T21:05:06.000Z .. 2022-01-13T02:20:50.000Z
max_forks:  path=Archive/routes/home_routes.py | repo=taycurran/TwitOff | head=6e2ee13f83fa86c80988a91b3b41ed0958688c3c | licenses=["MIT"] | count=null | events=null
content:

from flask import Blueprint, jsonify, request, render_template

# NOTE: User and DB are referenced below but never imported in this file;
# they are expected to come from the app's models module.

home_routes = Blueprint("home_routes", __name__)


@home_routes.route("/")
def index():
    users = User.query.all()
    return render_template('base.html', title='Home', users=users)


@home_routes.route("/about")
def about():
    return "About Me"


@home_routes.route('/reset')
def reset():
    DB.drop_all()
    DB.create_all()
    return render_template('base.html', title='Reset', users=[])


# # Add config for database
# app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
# # stop tracking modifications on sqlalchemy config
# app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# # ? app.config["TWITTER_API_CLIENT"] = twitter

# # Have the database know about the app
# DB.init_app(app)

stats: avg_line_length=25.28125 | max_line_length=64 | alphanum_fraction=0.678616
counts (count/score): classes=0/0 | generators=0/0 | decorators=371/0.458591 | async_functions=0/0 | documentation=380/0.469716
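These routes only take effect once the blueprint is registered on a Flask app and User/DB are supplied; a minimal wiring sketch using the config hints commented at the bottom of the file (the import paths are hypothetical):

from flask import Flask

# Hypothetical import path; the record stores the blueprint at
# Archive/routes/home_routes.py, and DB/User would come from a models module.
from Archive.routes.home_routes import home_routes

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'  # from the file's own comments
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False            # from the file's own comments
app.register_blueprint(home_routes)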
hexsha: 0a6a0fd024fe59393b29eb7bb5c4f5bdd676e60b
size: 8,845 bytes | ext: py | lang: Python
max_stars:  path=intent/scripts/classification/ctn_to_classifier.py | repo=rgeorgi/intent | head=9920798c126f6d354029f7bb0a345e7cdb649f3a | licenses=["MIT"] | count=3 | events 2016-08-05T01:11:57.000Z .. 2017-08-26T15:35:51.000Z
max_issues: path=intent/scripts/classification/ctn_to_classifier.py | repo=rgeorgi/intent | head=9920798c126f6d354029f7bb0a345e7cdb649f3a | licenses=["MIT"] | count=2 | events 2016-03-01T22:41:24.000Z .. 2016-09-14T18:39:25.000Z
max_forks:  path=intent/scripts/classification/ctn_to_classifier.py | repo=rgeorgi/intent | head=9920798c126f6d354029f7bb0a345e7cdb649f3a | licenses=["MIT"] | count=null | events=null
content (truncated in the source dump):

from argparse import ArgumentParser
from collections import defaultdict
import glob
import os
import pickle
from random import shuffle, seed
import sys
from tempfile import mkdtemp
import shutil

import logging

root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
CTN_LOG = logging.getLogger('CTN_CLASS')
CTN_LOG.setLevel(logging.DEBUG)
logging.basicConfig()

from intent.igt.metadata import set_intent_method, get_intent_method
from intent.interfaces.stanford_tagger import StanfordPOSTagger
from intent.pos.TagMap import TagMap
from intent.utils.env import tagger_model, proj_root
from xigt.codecs import xigtxml
from xigt.consts import ALIGNMENT
from intent.eval.pos_eval import poseval
from intent.igt.consts import GLOSS_WORD_ID, POS_TIER_TYPE, LANG_WORD_ID, GLOSS_WORD_TYPE, POS_TIER_ID, \
    INTENT_TOKEN_TYPE, INTENT_POS_PROJ, LANG_WORD_TYPE, TRANS_WORD_TYPE, TRANS_WORD_ID, MANUAL_POS, INTENT_POS_CLASS
from intent.igt.rgxigt import RGCorpus, strip_pos, RGIgt, RGTokenTier, RGTier, gen_tier_id, RGToken, \
    ProjectionTransGlossException, word_align
from intent.interfaces.mallet_maxent import MalletMaxent
from intent.scripts.classification.xigt_to_classifier import instances_to_classifier
from intent.utils.token import POSToken, GoldTagPOSToken
from intent.igt.igtutils import rgp

__author__ = 'rgeorgi'

"""
The purpose of this module is to evaluate the POS-line classifiers trained on
"""


def eval_classifier(c, inst_list, context_feats=False, posdict=None):
    """
    :param c: The classifier
    :param inst_list: A list of Igt instances to test against. Must already have POS tags.
    """
    gold_sents = []
    eval_sents = []

    to_dump = RGCorpus()

    for inst in inst_list:
        to_tag = inst.copy()
        strip_pos(to_tag)

        # Do the classification.
        to_tag.classify_gloss_pos(c, lowercase=True,
                                  feat_next_gram=context_feats,
                                  feat_prev_gram=context_feats,
                                  posdict=posdict)
        to_dump.append(to_tag)
        # Fix the tags...
        # fix_ctn_gloss_line(to_tag, tag_method=INTENT_POS_CLASS)

        # Now, retrieve eval/gold.
        eval_tags = [v.value() for v in to_tag.get_pos_tags(GLOSS_WORD_ID, tag_method=INTENT_POS_CLASS)]
        gold_tags = [v.value() for v in inst.get_pos_tags(GLOSS_WORD_ID, tag_method=MANUAL_POS)]

        tag_tokens = [POSToken('a', label=l) for l in eval_tags]
        gold_tokens = [POSToken('a', label=l) for l in gold_tags]

        if not len(tag_tokens) == len(gold_tokens):
            print("LENGTH OF SEQUENCE IS MISMATCHED")
            continue

        gold_sents.append(gold_tokens)
        eval_sents.append(tag_tokens)

    xigtxml.dump(open('./enriched_ctn_dev.xml', 'w'), to_dump)

    return poseval(eval_sents, gold_sents, details=True, csv=True, matrix=True)


def eval_proj(xc):
    prj_sents = []
    sup_sents = []

    for inst in xc:
        fix_ctn_gloss_line(inst, tag_method=INTENT_POS_PROJ)

        # Do the projection comparison
        sup = inst.get_pos_tags(GLOSS_WORD_ID, tag_method=MANUAL_POS)
        prj = inst.get_pos_tags(GLOSS_WORD_ID, tag_method=INTENT_POS_PROJ)

        sup_tags = []
        prj_tags = []

        for s in sup:
            sup_tags.append(POSToken(s.value(), label=s.value()))
            # If the same tag occurs in the projections...
            if not prj:
                prj_tags.append(POSToken('UNALIGNED', label='UNALIGNED'))
                continue
            proj_tag = prj.find(alignment=s.attributes[ALIGNMENT])
            if proj_tag:
                prj_tags.append(POSToken(proj_tag.value(), label=proj_tag.value()))
            else:
                prj_tags.append(POSToken('UNALIGNED', label='UNALIGNED'))

        sup_sents.append(sup_tags)
        prj_sents.append(prj_tags)

    poseval(prj_sents, sup_sents, details=True)


def fix_ctn_gloss_line(inst, tag_method=None):
    """
    Given a CTN gloss line, do some specific fixes to attempt to fix the CTN tag mapping.
:param inst: :type inst:RGIgt """ gpos_tier = inst.get_pos_tags(GLOSS_WORD_ID, tag_method=tag_method) # Get the gloss words for gw in inst.gloss: new_tag = None if gw.value().lower() in ['foc','top','seq','add','emph','cit','rep']: new_tag = 'PRT' elif gw.value().lower() in ['but','and','or']: new_tag = 'CONJ' elif 'dem' in gw.value().lower(): new_tag = 'PRON' elif gw.value().lower() in ['for','in']: new_tag = 'ADP' elif gw.value().lower() in ['the']: new_tag = 'DET' if new_tag: gpos = gpos_tier.find(alignment=gw.id) if not gpos: gpt = RGToken(id=gpos_tier.askItemId(), alignment=gw.id, text=new_tag) gpos_tier.add(gpt) else: gpos.text = new_tag if __name__ == '__main__': ctn_train = './data/xml-files/ctn/ctn_train.xml' ctn_dev = './data/xml-files/ctn/ctn_dev.xml' ctn_dev_processed = './data/xml-files/ctn/ctn_dev_processed.xml' ctn_train_processed = './data/xml-files/ctn/ctn_train_processed.xml' posdict = pickle.load(open('./data/dictionaries/CTN.dict', 'rb')) # print("Loading CTN Dev Corpus...", end=" ", flush=True) # dev_xc = RGCorpus.load(ctn_dev) # print("Done.") # # print("Loading CTN Train corpus...", end=" ", flush=True) # train_xc = RGCorpus.load(ctn_train) # print("Done.") print("Initializing tagger...", end=" ", flush=True) tagger = StanfordPOSTagger(tagger_model) print("Done.") # ============================================================================= # 1) Start by projecting the language line to the gloss line in the dev set, # remapping it from the CTN tagset to the universal tagset along the way. # ============================================================================= # # print("Processing DEV corpus...", end=' ', flush=True) # for inst in dev_xc: # word_align(inst.gloss, inst.lang) # inst.project_lang_to_gloss(tagmap = './data/tagset_mappings/ctn.txt') # fix_ctn_gloss_line(inst, tag_method=MANUAL_POS) # inst.tag_trans_pos(tagger) # inst.heur_align() # Align trans/gloss lines heuristically # inst.project_trans_to_gloss() # Now, project heuristically. # print('done.') # # xigtxml.dump(open(ctn_dev_processed, 'w', encoding='utf-8'), dev_xc) # # # print("Processing TRAIN Corpus...", end=' ', flush=True) # # Get the language line words projected onto the gloss... # for inst in train_xc: # word_align(inst.gloss, inst.lang) # inst.project_lang_to_gloss(tagmap = './data/tagset_mappings/ctn.txt') # inst.tag_trans_pos(tagger) # inst.heur_align() # inst.project_trans_to_gloss() # fix_ctn_gloss_line(inst, tag_method=INTENT_POS_PROJ) # # print("Done.") # # xigtxml.dump(open(ctn_train_processed, 'w', encoding='utf-8'), train_xc) # sys.exit() print("Loading Processed CTN Train corpus...", end=" ", flush=True) train_xc = RGCorpus.load(ctn_train_processed) print("Done.") print("Loading Processed CTN Dev corpus...", end=" ", flush=True) dev_xc = RGCorpus.load(ctn_dev_processed) print("Done.") # # # ============================================================================= # # 2) Train a classifier based on the projected gloss line. 
# # ============================================================================= # index_list = [35,70,106,141,284,569,854,1139,1424,1708,1993,7120] for train_stop_index in index_list: train_instances = list(train_xc)[0:train_stop_index] print('* '*50) tokens = 0 for inst in train_instances: tokens += len(inst.gloss) print("Now training with {} tokens, {} instances.".format(tokens, train_stop_index)) print("Training Classifier...", end=" ", flush=True) c = instances_to_classifier(train_instances, './ctn-train.class', tag_method=MANUAL_POS, posdict=posdict, context_feats=True, feat_path='./ctn-train_feats.txt') print("Done.") # c = MalletMaxent('/Users/rgeorgi/Documents/code/dissertation/gc.classifier') # c = MalletMaxent('./ctn_class.class.classifier') print("Evaluating classifier...", end=" ", flush=True) eval_classifier(c, dev_xc, posdict=posdict, context_feats=True) print("Done.") # eval_proj(dev_xc)
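
# --- Hedged aside (not part of the original script) ---------------------------
# The if/elif chain in fix_ctn_gloss_line() expresses a small word-to-tag map;
# the same fixes could be driven by a lookup table like the one below (the
# 'dem' substring test would still need separate handling).
CTN_GLOSS_TAG_FIXES = {
    **{w: 'PRT' for w in ('foc', 'top', 'seq', 'add', 'emph', 'cit', 'rep')},
    **{w: 'CONJ' for w in ('but', 'and', 'or')},
    **{w: 'ADP' for w in ('for', 'in')},
    'the': 'DET',
}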
33.25188
116
0.614019
0
0
0
0
0
0
0
0
3,223
0.364387
0a6abcccf806b40379eafeffa1d5d6385d6c8a7c
1,358
py
Python
watchdog/back-end/v0.3.0/watchdog/app/resource/video.py
Havana3351/Low-cost-remote-monitor
9f86a62b8515c0f9fddda31f25548680f0ad8e2f
[ "MIT" ]
18
2021-12-03T13:18:07.000Z
2022-03-30T20:20:17.000Z
watchdog/back-end/v1.0.0/watchdogV1-3/app/resource/video.py
Fairywyt/Low-cost-remote-monitor
263b98d969251d2dbef5fb5e4d42a58075e744fa
[ "MIT" ]
null
null
null
watchdog/back-end/v1.0.0/watchdogV1-3/app/resource/video.py
Fairywyt/Low-cost-remote-monitor
263b98d969251d2dbef5fb5e4d42a58075e744fa
[ "MIT" ]
4
2022-03-22T09:58:00.000Z
2022-03-28T08:57:17.000Z
from flask_restful import Resource
from flask import Response
import os
import cv2

picturecounter = 1  # marker used to avoid accumulating too many records


class Video(Resource):

    # Called when the request method is GET
    def get(self):
        global picturecounter

        # username = (request.get_json())['username']
        # db = pymysql.connect("rm-2ze61i7u6d7a3fwp9yo.mysql.rds.aliyuncs.com", "team", "Aaa5225975", "pidata")
        # cursor = db.cursor()
        #
        # sql = "select rpiname from user where username=\'" + username + "\'"  # possible type issue here
        # cursor.execute(sql)
        # row = cursor.fetchone()
        #
        # if not row:
        #     rpiname = None
        # rpiname = str(row[0])

        # Hard-coded value (overrides the lookup above)
        rpiname = 'raspberrypi'

        # Read the current frame pointer and assign it to the counter
        path = r'/root/video/realtime/%s' % (rpiname)
        picnames = []
        for filenames in os.walk(path):
            picnames = filenames
        print(picnames)
        pointer = int(((picnames[2])[0].split('.'))[0])
        picturecounter = pointer

        picpath = r'/root/video/realtime/%s/%s.jpg' % (rpiname, picturecounter)
        image = cv2.imread(picpath)
        bs = cv2.imencode(".jpg", image)[1].tobytes()

        picturecounter += 1
        if(picturecounter > 5):
            picturecounter = 1

        return Response(bs, mimetype='image/jpeg')

    def post(self):
        print("post")
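
# --- Hedged usage sketch (not part of the original resource) ------------------
# Fetching one frame from this endpoint; the /video route and the port are
# assumptions, since the Api registration lives elsewhere in the app.
if __name__ == "__main__":
    import requests

    r = requests.get("http://localhost:5000/video", timeout=5)
    if r.status_code == 200:
        with open("frame.jpg", "wb") as fh:
            fh.write(r.content)  # raw JPEG bytes produced by get() above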
26.115385
111
0.563328
1,298
0.90516
0
0
0
0
0
0
571
0.398187
0a6b4ad6f031ba8193614f726faf3a710def3c48
22,385
py
Python
codes/ambfix.py
valgur/LEOGPS
f289f279ef55980a0e3fd82b3b3686e41c474a2e
[ "MIT" ]
null
null
null
codes/ambfix.py
valgur/LEOGPS
f289f279ef55980a0e3fd82b3b3686e41c474a2e
[ "MIT" ]
null
null
null
codes/ambfix.py
valgur/LEOGPS
f289f279ef55980a0e3fd82b3b3686e41c474a2e
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 ''' ############################################################################### ############################################################################### ## ## ## _ ___ ___ ___ ___ ___ ## ## | | | __ / \ / __| _ | __| ## ## | |__| __ ( ) | (_ | _|__ \ ## ## |____|___ \___/ \___|_| \___/ ## ## v 1.0 (Stable) ## ## ## ## FILE DESCRIPTION: ## ## ## ## This is the classical LAMBDA method that was originally authored by ## ## Teunissen, Jonge, and Tiberius (1993). The code was later written in ## ## MATLAB by Dr Sandra Verhagen and Dr Bofeng Li. It takes in a vector of ## ## float ambiguities to the integer least-squares problem, and covariance ## ## of the float ambiguities. It then runs the LAMBDA's ILS search-&-shrink ## ## and spits out the ambiguity integers. The other 5 methods in original ## ## LAMBDA MATLAB code are not supported here (feel free to edit the code ## ## and implement it youself!). The default ncands = 2, as per original code. ## ## All support functions from the original MATLAB code (decorrel, ldldecom) ## ## have been nested within the main function as sub functions. ## ## ## ## INPUTS: ## ## ## ## - ahat : numpy array of float ambiguities ## ## - Qahat : numpy covariance matrix for float ambiguities ## ## - ncands : number of candidates (optional parameter, default = 2) ## ## ## ## OUTPUT: ## ## ## ## - afixed : Array of size (n x ncands) with the estimated integer ## ## candidates, sorted according to the corresponding squared ## ## norms, best candidate first. ## ## - sqnorm : Distance between integer candidate and float ambiguity ## ## vectors in the metric of the variance-covariance matrix. ## ## ## ## REMARKS: ## ## ## ## Besides above changes, mostly syntax changes to this Python version: ## ## - Everything is identical EXCEPT MATLAB is ones-based indexing. ## ## - Python is zeros-based indexing, and range function does not ## ## include the upper limit index. Thus, only indices have changed. ## ## - Example in MATLAB: for i = 1:5 => {1,2,3,4,5} ## ## - Equivalently in Python: for i in range(0,5) => {0,1,2,3,4} ## ## - Indices are thus updated accordingly. ## ## ## ## DEVELOPER: Professor Peter Teunissen (TU Delft) ## ## ORIGINAL AUTHOR: Sandra Verhagen and Bofeng Li (TU Delft) ## ## AUTHOR MODIFIED: 26-07-2019, by Samuel Y.W. Low, with permissions. ## ## ## ############################################################################### ############################################################################### ''' import numpy as np def LAMBDA( ahat, Qahat, ncands = 2 ): ########################################################################### ########################################################################### # [afixed, sqnorm] = LAMBDA( ahat, Qahat, ncands ) # # This is the main routine of the LAMBDA software package. By default the # ILS method will be used for integer estimation based on the provided # float ambiguity vector ahat and associated variance-covariance matrix # Qahat. In this Pythonic version (modified by Samuel Low, 2019), only # the ILS method is implemented. For other techniques: integer rounding, # bootstrapping or Partial Ambiguity Resolution (PAR), the user is free # to modify this code and adapt it to their own needs. # # NOTE 1: LAMBDA always first applies a decorrelation before the integer # estimation (for ILS this is required to guarantee an efficient search, # for rounding and bootstrapping it is required in order to get higher # success rates). 
    #
    # INPUTS:
    #
    #     ahat: Float ambiguities (must be a column!)
    #     Qahat: Variance/covariance matrix of ambiguities
    #     ncands: number of candidates (optional parameter, default = 2)
    #
    # OUTPUTS:
    #
    #     afixed: Array of size (n x ncands) with the estimated integer
    #             candidates, sorted according to the corresponding squared
    #             norms, best candidate first.
    #     sqnorm: Distance between integer candidate and float ambiguity vectors
    #             in the metric of the variance-covariance matrix Qahat.
    #             Only available for ILS.
    #
    # -------------------------------------------------------------------------
    # Release date : 1-SEPT-2012
    # Authors      : Bofeng LI and Sandra VERHAGEN
    #
    # GNSS Research Centre, Curtin University
    # Mathematical Geodesy and Positioning, Delft University of Technology
    # -------------------------------------------------------------------------
    #
    # REFERENCES:
    #  1. LAMBDA Software Package: Matlab implementation, Version 3.0.
    #     Documentation provided with this software package.
    #  2. Teunissen P (1993) Least-squares estimation of the integer GPS
    #     ambiguities. In: Invited lecture, section IV theory and methodology,
    #     IAG General Meeting, Beijing, China
    #  3. Teunissen P (1995) The least-squares ambiguity decorrelation
    #     adjustment: a method for fast GPS ambiguity estimation. J Geod
    #     70:651-7
    #  4. De Jonge P, Tiberius C (1996) The LAMBDA method of integer ambiguity
    #     estimation: implementation aspects.
    #  5. Chang X, Yang X, Zhou T (2005) MLAMBDA: a modified LAMBDA method for
    #     integer least-squares estimation

    ###########################################################################
    ###########################################################################

    ''' A function for obtaining the decimals only from float arrays '''

    def floatrem( fltarray ):

        # This function is NECESSARY because of the differences between:
        # MATLAB's rem function
        # (computes the true mathematical remainder)
        # And Python's modulo % operator
        # (computes remainder complementary to the floor_divide function)

        fltarray = np.array(fltarray)
        fltarray = fltarray + 0.000001
        intarray = fltarray.astype(int)
        decarray = fltarray - intarray
        return decarray, intarray

    ###########################################################################
    ###########################################################################

    ''' A function to perform LtDL decomposition of the covariance matrix '''

    def ldldecom( Qahat1 ):

        # This routine finds the LtDL decomposition of a given variance or
        # covariance matrix.
        #
        # Input arguments:
        #    Qahat: Symmetric n by n matrix to be factored
        #
        # Output arguments:
        #    L: n by n factor matrix (strict lower triangular)
        #    D: Diagonal n-vector
        # ------------------------------------------------------------------
        # File.....: ldldecom
        # Date.....: 19-MAY-1999
        # Author...: Peter Joosten
        #            Mathematical Geodesy and Positioning
        #            Delft University of Technology
        # ------------------------------------------------------------------

        Qahat2 = Qahat1.copy()
        # If we do not use copy, we will overwrite the original Qahat...
        # ... even the one outside the function! This doesn't occur in MATLAB.
n = len(Qahat2) D = np.zeros((n)) L = np.zeros((n,n)) for i in range(n-1,-1,-1): D[i] = Qahat2[i][i] L[i,0:i+1] = Qahat2[i,0:i+1] / ((Qahat2[i][i])**0.5) for j in range(0,i): Qahat2[j,0:j+1] = Qahat2[j,0:j+1] - L[i,0:j+1]*L[i][j] L[i,0:i+1] = L[i,0:i+1] / L[i][i] return L,D ########################################################################### ########################################################################### ''' Decorrelation function for LAMBDA ''' def decorrel( ahat, Qahat ): # function [Qzhat,Z,L,D,zhat,iZt] = decorrel (Qahat,ahat) # DECORREL: Decorrelate a (co)variance matrix of ambiguities # # [Qzhat,Z,L,D,zhat] = decorrel (Qahat,ahat) # # This routine creates a decorrelated Q-matrix, by finding the # Z-matrix and performing the corresponding transformation. # # The method is described in: # The routine is based on Fortran routines written by Paul de Jonge # and on Matlab-routines written by Kai Borre. # The resulting Z-matrix can be used as follows: # zhat = Zt * ahat; \hat(z) = Z' * \hat(a); # Q_\hat(z) = Z' * Q_\hat(a) * Z # # Input arguments: # Qahat: Variance-covariance matrix of ambiguities (original) # ahat: Original ambiguities (optional) # # Output arguments: # Qzhat: Variance-covariance matrix of decorrelated ambiguities # Z: Z-transformation matrix # L: L matrix (from LtDL-decomposition of Qzhat) # D: D matrix (from LtDL-decomposition of Qzhat) # zhat: Transformed ambiguities (optional) # iZt: inv(Z')-transformation matrix # # ------------------------------------------------------------------ # Function.: decorrel # Date.....: 19-MAY-1999 / modified 12-APRIL-2012 # Author...: Peter Joosten / Sandra Verhagen # Mathematical Geodesy and Positioning # Delft University of Technology # Modified.: Samuel Low, July 2019, DSO National Laboratories # ------------------------------------------------------------------ # Initialisations n = len(Qahat) iZt = np.identity(n) i1 = n - 1 sw = True # LtDL decomposition L, D = ldldecom(Qahat) while sw == 1: i = n # Loop for column from n to 1 sw = 0 while sw == 0 and i > 1: i = i - 1 # The i-th column if i <= i1: for j in range(i,n): # We have to do some manual coding here, as python's # rounding for .5's are different from MATLAB's mu = L[j,i-1] # Get the float mu mu_dec = mu%1 # Get the decimal float of mu if mu_dec == 0.5: mu += 0.01 # Just to make it round up properly. mu = round(mu) if mu != 0.0: L[j:n,i-1] = L[j:n,i-1] - mu * L[j:n,j] iZt[:,j] = iZt[:,j] + mu * iZt[:,i-1] delta = D[i-1] + (L[i,i-1]**2) * D[i] if delta < D[i]: lam = D[i] * L[i,i-1] / delta eta = D[i-1] / delta D[i-1] = eta * D[i] D[i] = delta mult1 = np.array([-1*L[i,i-1], 1]) mult2 = np.array([eta,lam]) mult3 = np.stack((mult1,mult2)) L[i-1:i+1,0:i-1] = np.matmul(mult3,L[i-1:i+1,0:i-1]) L[i,i-1] = lam # Flip rows i and i+1 L[i+1:n,i-1:i+1] = np.flip(L[i+1:n,i-1:i+1], axis=0) iZt[:,i-1:i+1] = np.flip(iZt[:,i-1:i+1], axis=0) i1 = i sw = 1 iZt = iZt + 0.000001 # Resolves Python 3's rounding definition Z = np.round(np.linalg.inv(iZt.transpose())) Qzhat = np.matmul( Qahat, Z ) Qzhat = np.matmul( Z.transpose(), Qzhat ) zhat = np.matmul(Z.transpose(),ahat) iZt = np.round(iZt) return Qzhat, Z, L, D, zhat, iZt ########################################################################### ########################################################################### def ssearch( ahat, L, D, ncands): #------------------------------------------------------------------| # # Integer ambiguity vector search via search-and-shrink technique. 
# # INPUTS: # # ahat : Float ambiguities (should be decorrelated for # computational efficiency) # L,D : LtDL-decomposition of the variance-covariance matrix # of the float ambiguities ahat # ncands: Number of requested candidates # # OUTPUTS: # # afixed: estimated integers (n, x, ncands) # sqnorm: corresponding squared norms (n-vector, ascending order) # #------------------------------------------------------------------| # Date : 02-SEPT-2010 | # Author : Bofeng LI | # GNSS Research Center, Department of Spatial Sciences | # Curtin University of Technology | # E-mail : bofeng.li@curtin.edu.au | #------------------------------------------------------------------| # First, check that float ambiguity and D have same length if len(ahat) != len(D): print('Error! Float ambiguity vector must be a column vector!') print('It must also have the same dimension as D') return None # Initialising outputs n = len(ahat) afixed = np.zeros((n, ncands)) sqnorm = np.zeros(ncands) # Initializing the variables for searching Chi2 = 1.0e+18 # Start search with an infinite chi-square dist = np.zeros(n) # MATLAB distance function endsearch = False # Search trigger count = 0 # Count the number of candidates acond = np.zeros(n) acond[n-1] = ahat[n-1] zcond = np.zeros(n) zcond[n-1] = np.round(acond[n-1]+0.000001) left = acond[n-1] - zcond[n-1] step = np.zeros(n) step[n-1] = np.sign(left) if step[n-1] == 0: step[n-1] = 1 # Give a positive step. imax = ncands - 1 # Initially, the maximum F(z) is at ncands S = np.zeros((n,n)) # Used to compute conditional ambiguities k = n # Now we start the main search loop. while endsearch == False: newdist = dist[k-1] + (left**2) / D[k-1] if newdist < Chi2: if k != 1: # Case 1: move down k -= 1 dist[k-1] = newdist S[k-1,0:k] = S[k,0:k] + (zcond[k] - acond[k])*L[k,0:k] acond[k-1] = ahat[k-1] + S[k-1,k-1] zcond[k-1] = np.round(acond[k-1]+0.000001) left = acond[k-1] - zcond[k-1] step[k-1] = np.sign(left) if step[k-1] == 0: # Very rarely would this happen... step[k-1] = 1 # ... but just in case, you know. else: # Case 2: store the found candidate and try the next. if count < (ncands - 1): # Store the 1st ncands-1 initial points as candidates count += 1 afixed[:,count-1] = zcond[0:n]; sqnorm[count-1] = newdist # Store F(zcond) else: afixed[:,imax] = zcond[0:n] sqnorm[imax] = newdist Chi2 = max(sqnorm) imax = np.argmax(sqnorm) # No need to add '-1' to imax zcond[0] = zcond[0] + step[0] left = acond[0] - zcond[0] step[0] = -1*step[0] - np.sign(step[0]) else: # Case 3: exit or move up if k == n: endsearch = True else: k += 1 # Move up zcond[k-1] = zcond[k-1] + step[k-1] left = acond[k-1] - zcond[k-1] step[k-1] = -1*step[k-1] - np.sign(step[k-1]) order = np.argsort(sqnorm) # Get an array of INDICES for a sort. sqnormf = np.sort(sqnorm) # Get an array of ACTUAL SORTS for sqnorm. afixedf = np.copy(afixed) for k in range(0,len(order)): afixedf[:,k] = afixed[:,order[k]] return afixedf, sqnormf ########################################################################### ########################################################################### ''' Initialisation and some initial sanity checks... ''' # Initialise all output variables sqnorm = np.array([]) # Test inputs: Is the Q-matrix symmetric? if np.array_equal(Qahat,Qahat.transpose()) == False: print('Variance-covariance matrix is not symmetric!') return None # Test inputs: Is the Q-matrix positive-definite? 
    if np.sum(np.linalg.eig(Qahat)[0] > 0.0) != len(Qahat):
        print('Variance-covariance matrix is not positive definite!')
        return None

    # Test inputs: Do the Q-matrix and ambiguity vector have identical dimensions?
    if len(ahat) != len(Qahat):
        print('Variance-covariance matrix and vector of ambiguities...')
        print('... do not have identical dimensions!')
        return None

    ###########################################################################
    ###########################################################################

    ''' Begin least-squares ambiguity decorrelation adjustment! '''

    # Remove integer numbers from float solution, so that all values are
    # between -1 and 1 (for computational convenience only)
    ahat, incr = floatrem( ahat )

    # Compute Z matrix based on the decomposition Q=L^T*D*L;
    Qzhat, Z, L, D, zhat, iZt = decorrel( ahat, Qahat )

    # Integer ambiguity vector search via search-and-shrink
    zfixedff, sqnormff = ssearch( zhat, L, D, ncands )

    # Perform the back-transformation and add the increments
    afixed = np.matmul(iZt,zfixedff)
    repmat = np.repeat(np.array([incr]),ncands,axis=0)
    repmat = repmat.transpose()
    afixed = afixed + repmat
    afixed = afixed.transpose()

    # Keep the squared norms produced by the search, so the caller actually
    # receives them (previously the empty placeholder initialised above was
    # returned instead).
    sqnorm = sqnormff

    ###########################################################################
    ###########################################################################

    ''' Returns best amb-fix, second best amb-fix, and the square norm '''

    return afixed, sqnorm

###########################################################################
###########################################################################
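
# --- Hedged usage sketch (not part of the original module) --------------------
# The small three-ambiguity test case from the LAMBDA documentation; each row
# of afixed is one integer candidate (best first) and sqnorm holds the
# corresponding squared norms in ascending order.
if __name__ == "__main__":
    ahat_demo = np.array([5.45, 3.10, 2.97])
    Qahat_demo = np.array([[6.290, 5.978, 0.544],
                           [5.978, 6.292, 2.340],
                           [0.544, 2.340, 6.288]])
    afixed_demo, sqnorm_demo = LAMBDA(ahat_demo, Qahat_demo, ncands=2)
    print(afixed_demo)
    print(sqnorm_demo)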
42.638095
104
0.3979
0
0
0
0
0
0
0
0
13,564
0.605941
0a6bdfe36df3fc3c2674d86fa755f854cc5eacf6
133
py
Python
summarizer/test_summarizer.py
bmcilw1/text-summary
f594fd4f41279a6e11262ac859cfbdad6aaf1703
[ "MIT" ]
null
null
null
summarizer/test_summarizer.py
bmcilw1/text-summary
f594fd4f41279a6e11262ac859cfbdad6aaf1703
[ "MIT" ]
null
null
null
summarizer/test_summarizer.py
bmcilw1/text-summary
f594fd4f41279a6e11262ac859cfbdad6aaf1703
[ "MIT" ]
null
null
null
from summarizer.summarizer import summarize def test_summarize_whenPassedEmptyString_ReturnsEmpty(): assert summarize("") == ""
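
# --- Hedged companion test (not in the original file) --------------------------
# Assumes summarize() returns a string for ordinary text input; drop or adapt
# this if the implementation's contract differs.
def test_summarize_whenPassedText_ReturnsString():
    assert isinstance(summarize("First sentence. Second sentence."), str)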
26.6
56
0.796992
0
0
0
0
0
0
0
0
4
0.030075
0a6d2f3733dce67a2fafd219a662c5c458e102f9
1,774
py
Python
XORCipher/XOREncrypt.py
KarthikGandrala/DataEncryption
6ed4dffead345bc9f7010ac2ea9afbff958c85af
[ "MIT" ]
1
2021-07-12T06:05:45.000Z
2021-07-12T06:05:45.000Z
XORCipher/XOREncrypt.py
KarthikGandrala/Encrypt-Your-Data
6ed4dffead345bc9f7010ac2ea9afbff958c85af
[ "MIT" ]
null
null
null
XORCipher/XOREncrypt.py
KarthikGandrala/Encrypt-Your-Data
6ed4dffead345bc9f7010ac2ea9afbff958c85af
[ "MIT" ]
null
null
null
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Function to encrypt message using key is defined


def encrypt(msg, key):

    # Defining empty strings and counters
    hexadecimal = ''
    iteration = 0

    # XOR each message character with the corresponding key character
    for i in range(len(msg)):
        temp = ord(msg[i]) ^ ord(key[iteration])

        # zfill will pad a single letter hex with 0, to make it a two-letter pair
        hexadecimal += hex(temp)[2:].zfill(2)

        # Advance the key index
        iteration += 1
        if iteration >= len(key):
            # once all of the key's letters are used, repeat the key
            iteration = 0

    # Returning the final value
    return hexadecimal


def decrypt(msg, key):

    # Defining hex to uni string to store
    hex_to_uni = ''

    # Walk the hex string two characters (one encoded byte) at a time
    for i in range(0, len(msg), 2):

        # Decoding each individual byte from hex
        hex_to_uni += bytes.fromhex(msg[i:i + 2]).decode('utf-8')

    decryp_text = ''
    iteration = 0

    # For loop running for the length of the hex to unicode string
    for i in range(len(hex_to_uni)):

        # XOR each decoded character with the corresponding key character
        temp = ord(hex_to_uni[i]) ^ ord(key[iteration])

        # chr() turns the XORed code point back into a character
        decryp_text += chr(temp)

        iteration += 1

        if iteration >= len(key):
            # once all of the key's letters are used, repeat the key
            iteration = 0

    # Finally return the decrypted text string
    return decryp_text
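
# --- Hedged round-trip demo (not part of the original module) ------------------
# decrypt(encrypt(m, k), k) == m for ASCII input, since (c XOR k) XOR k == c.
if __name__ == "__main__":
    ciphertext = encrypt("attack at dawn", "key")
    print(ciphertext)  # two hex digits per plaintext character
    assert decrypt(ciphertext, "key") == "attack at dawn"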
23.653333
79
0.558061
0
0
0
0
0
0
0
0
783
0.441375
0a6e68db8c94071ad8a29d0149ef1ef93e54c4c1
634
py
Python
02-Use-functions/21-Opening_a_file/secret_message.py
francisrod01/udacity_python_foundations
2a384cf35ce7eff547c88097cdc45cc4e8fc6041
[ "MIT" ]
null
null
null
02-Use-functions/21-Opening_a_file/secret_message.py
francisrod01/udacity_python_foundations
2a384cf35ce7eff547c88097cdc45cc4e8fc6041
[ "MIT" ]
null
null
null
02-Use-functions/21-Opening_a_file/secret_message.py
francisrod01/udacity_python_foundations
2a384cf35ce7eff547c88097cdc45cc4e8fc6041
[ "MIT" ]
null
null
null
#!/usr/bin/python3

import os
import random


def rename_files(path):
    file_list = os.listdir(path)
    print(file_list)

    for file_name in file_list:
        # Remove numbers from filename (Python 3 spelling uses str.translate):
        # new_file_name = file_name.translate(str.maketrans("", "", "0123456789"))

        # Add random numbers to beginning of filename.
        new_file_name = str(random.randint(1, 99)) + file_name
        print("Renaming " + file_name + " to " + new_file_name)

        os.rename(os.path.join(path, file_name), os.path.join(path, new_file_name))


print("# Python program - Adding random numbers to beginning of filename.")
rename_files("./prank")
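
# --- Hedged aside (not part of the original script) ----------------------------
# What the digit-stripping variant noted above would do to a prank filename:
if __name__ == "__main__":
    assert "12back4.jpg".translate(str.maketrans("", "", "0123456789")) == "back.jpg"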
25.36
83
0.679811
0
0
0
0
0
0
0
0
246
0.388013
0a6eef44c90456b4e29cb5273e1126093472758f
101,780
py
Python
xarray/core/variable.py
timgates42/xarray
bf0fe2caca1d2ebc4f1298f019758baa12f68b94
[ "Apache-2.0" ]
null
null
null
xarray/core/variable.py
timgates42/xarray
bf0fe2caca1d2ebc4f1298f019758baa12f68b94
[ "Apache-2.0" ]
null
null
null
xarray/core/variable.py
timgates42/xarray
bf0fe2caca1d2ebc4f1298f019758baa12f68b94
[ "Apache-2.0" ]
1
2021-07-13T07:06:10.000Z
2021-07-13T07:06:10.000Z
import copy import functools import itertools import numbers import warnings from collections import defaultdict from datetime import timedelta from distutils.version import LooseVersion from typing import ( Any, Dict, Hashable, Mapping, Optional, Sequence, Tuple, TypeVar, Union, ) import numpy as np import pandas as pd import xarray as xr # only for Dataset and DataArray from . import arithmetic, common, dtypes, duck_array_ops, indexing, nputils, ops, utils from .indexing import ( BasicIndexer, OuterIndexer, PandasIndexAdapter, VectorizedIndexer, as_indexable, ) from .npcompat import IS_NEP18_ACTIVE from .options import _get_keep_attrs from .pycompat import ( cupy_array_type, dask_array_type, integer_types, is_duck_dask_array, ) from .utils import ( OrderedSet, _default, decode_numpy_dict_values, drop_dims_from_indexers, either_dict_or_kwargs, ensure_us_time_resolution, infix_dims, is_duck_array, ) NON_NUMPY_SUPPORTED_ARRAY_TYPES = ( ( indexing.ExplicitlyIndexed, pd.Index, ) + dask_array_type + cupy_array_type ) # https://github.com/python/mypy/issues/224 BASIC_INDEXING_TYPES = integer_types + (slice,) # type: ignore VariableType = TypeVar("VariableType", bound="Variable") """Type annotation to be used when methods of Variable return self or a copy of self. When called from an instance of a subclass, e.g. IndexVariable, mypy identifies the output as an instance of the subclass. Usage:: class Variable: def f(self: VariableType, ...) -> VariableType: ... """ class MissingDimensionsError(ValueError): """Error class used when we can't safely guess a dimension name.""" # inherits from ValueError for backward compatibility # TODO: move this to an xarray.exceptions module? def as_variable(obj, name=None) -> "Union[Variable, IndexVariable]": """Convert an object into a Variable. Parameters ---------- obj : object Object to convert into a Variable. - If the object is already a Variable, return a shallow copy. - Otherwise, if the object has 'dims' and 'data' attributes, convert it into a new Variable. - If all else fails, attempt to convert the object into a Variable by unpacking it into the arguments for creating a new Variable. name : str, optional If provided: - `obj` can be a 1D array, which is assumed to label coordinate values along a dimension of this given name. - Variables with name matching one of their dimensions are converted into `IndexVariable` objects. Returns ------- var : Variable The newly created variable. """ from .dataarray import DataArray # TODO: consider extending this method to automatically handle Iris and if isinstance(obj, DataArray): # extract the primary Variable from DataArrays obj = obj.variable if isinstance(obj, Variable): obj = obj.copy(deep=False) elif isinstance(obj, tuple): try: obj = Variable(*obj) except (TypeError, ValueError) as error: # use .format() instead of % because it handles tuples consistently raise error.__class__( "Could not convert tuple of form " "(dims, data[, attrs, encoding]): " "{} to Variable.".format(obj) ) elif utils.is_scalar(obj): obj = Variable([], obj) elif isinstance(obj, (pd.Index, IndexVariable)) and obj.name is not None: obj = Variable(obj.name, obj) elif isinstance(obj, (set, dict)): raise TypeError("variable {!r} has invalid type {!r}".format(name, type(obj))) elif name is not None: data = as_compatible_data(obj) if data.ndim != 1: raise MissingDimensionsError( "cannot set variable %r with %r-dimensional data " "without explicit dimension names. Pass a tuple of " "(dims, data) instead." 
% (name, data.ndim) ) obj = Variable(name, data, fastpath=True) else: raise TypeError( "unable to convert object into a variable without an " "explicit list of dimensions: %r" % obj ) if name is not None and name in obj.dims: # convert the Variable into an Index if obj.ndim != 1: raise MissingDimensionsError( "%r has more than 1-dimension and the same name as one of its " "dimensions %r. xarray disallows such variables because they " "conflict with the coordinates used to label " "dimensions." % (name, obj.dims) ) obj = obj.to_index_variable() return obj def _maybe_wrap_data(data): """ Put pandas.Index and numpy.ndarray arguments in adapter objects to ensure they can be indexed properly. NumpyArrayAdapter, PandasIndexAdapter and LazilyOuterIndexedArray should all pass through unmodified. """ if isinstance(data, pd.Index): return PandasIndexAdapter(data) return data def _possibly_convert_objects(values): """Convert arrays of datetime.datetime and datetime.timedelta objects into datetime64 and timedelta64, according to the pandas convention. Also used for validating that datetime64 and timedelta64 objects are within the valid date range for ns precision, as pandas will raise an error if they are not. """ return np.asarray(pd.Series(values.ravel())).reshape(values.shape) def as_compatible_data(data, fastpath=False): """Prepare and wrap data to put in a Variable. - If data does not have the necessary attributes, convert it to ndarray. - If data has dtype=datetime64, ensure that it has ns precision. If it's a pandas.Timestamp, convert it to datetime64. - If data is already a pandas or xarray object (other than an Index), just use the values. Finally, wrap it up with an adapter if necessary. """ if fastpath and getattr(data, "ndim", 0) > 0: # can't use fastpath (yet) for scalars return _maybe_wrap_data(data) if isinstance(data, Variable): return data.data if isinstance(data, NON_NUMPY_SUPPORTED_ARRAY_TYPES): return _maybe_wrap_data(data) if isinstance(data, tuple): data = utils.to_0d_object_array(data) if isinstance(data, pd.Timestamp): # TODO: convert, handle datetime objects, too data = np.datetime64(data.value, "ns") if isinstance(data, timedelta): data = np.timedelta64(getattr(data, "value", data), "ns") # we don't want nested self-described arrays data = getattr(data, "values", data) if isinstance(data, np.ma.MaskedArray): mask = np.ma.getmaskarray(data) if mask.any(): dtype, fill_value = dtypes.maybe_promote(data.dtype) data = np.asarray(data, dtype=dtype) data[mask] = fill_value else: data = np.asarray(data) if not isinstance(data, np.ndarray): if hasattr(data, "__array_function__"): if IS_NEP18_ACTIVE: return data else: raise TypeError( "Got an NumPy-like array type providing the " "__array_function__ protocol but NEP18 is not enabled. " "Check that numpy >= v1.16 and that the environment " 'variable "NUMPY_EXPERIMENTAL_ARRAY_FUNCTION" is set to ' '"1"' ) # validate whether the data is valid data types. data = np.asarray(data) if isinstance(data, np.ndarray): if data.dtype.kind == "O": data = _possibly_convert_objects(data) elif data.dtype.kind == "M": data = _possibly_convert_objects(data) elif data.dtype.kind == "m": data = _possibly_convert_objects(data) return _maybe_wrap_data(data) def _as_array_or_item(data): """Return the given values as a numpy array, or as an individual item if it's a 0d datetime64 or timedelta64 array. Importantly, this function does not copy data if it is already an ndarray - otherwise, it will not be possible to update Variable values in place. 
    This function mostly exists because 0-dimensional ndarrays with
    dtype=datetime64 are broken :(
    https://github.com/numpy/numpy/issues/4337
    https://github.com/numpy/numpy/issues/7619

    TODO: remove this (replace with np.asarray) once these issues are fixed
    """
    if isinstance(data, cupy_array_type):
        data = data.get()
    else:
        data = np.asarray(data)
    if data.ndim == 0:
        if data.dtype.kind == "M":
            data = np.datetime64(data, "ns")
        elif data.dtype.kind == "m":
            data = np.timedelta64(data, "ns")
    return data


class Variable(
    common.AbstractArray, arithmetic.SupportsArithmetic, utils.NdimSizeLenMixin
):
    """A netcdf-like variable consisting of dimensions, data and attributes
    which describe a single Array. A single Variable object is not fully
    described outside the context of its parent Dataset (if you want such a
    fully described object, use a DataArray instead).

    The main functional difference between Variables and numpy arrays is that
    numerical operations on Variables implement array broadcasting by
    dimension name. For example, adding a Variable with dimensions `('time',)`
    to another Variable with dimensions `('space',)` results in a new Variable
    with dimensions `('time', 'space')`. Furthermore, numpy reduce operations
    like ``mean`` or ``sum`` are overwritten to take a "dimension" argument
    instead of an "axis".

    Variables are light-weight objects used as the building block for datasets.
    They are more primitive objects, so operations with them provide marginally
    higher performance than using DataArrays. However, manipulating data in the
    form of a Dataset or DataArray should almost always be preferred, because
    they can use more complete metadata in context of coordinate labels.
    """

    __slots__ = ("_dims", "_data", "_attrs", "_encoding")

    def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):
        """
        Parameters
        ----------
        dims : str or sequence of str
            Name(s) of the data dimension(s). Must be either a string (only
            for 1D data) or a sequence of strings with length equal to the
            number of dimensions.
        data : array_like
            Data array which supports numpy-like data access.
        attrs : dict_like or None, optional
            Attributes to assign to the new variable. If None (default), an
            empty attribute dictionary is initialized.
        encoding : dict_like or None, optional
            Dictionary specifying how to encode this array's data into a
            serialized format like netCDF4. Currently used keys (for netCDF)
            include '_FillValue', 'scale_factor', 'add_offset' and 'dtype'.
            Well-behaved code to serialize a Variable should ignore
            unrecognized encoding items.
        """
        self._data = as_compatible_data(data, fastpath=fastpath)
        self._dims = self._parse_dimensions(dims)
        self._attrs = None
        self._encoding = None
        if attrs is not None:
            self.attrs = attrs
        if encoding is not None:
            self.encoding = encoding

    @property
    def dtype(self):
        return self._data.dtype

    @property
    def shape(self):
        return self._data.shape

    @property
    def nbytes(self):
        return self.size * self.dtype.itemsize

    @property
    def _in_memory(self):
        return isinstance(self._data, (np.ndarray, np.number, PandasIndexAdapter)) or (
            isinstance(self._data, indexing.MemoryCachedArray)
            and isinstance(self._data.array, indexing.NumpyIndexingAdapter)
        )

    @property
    def data(self):
        if is_duck_array(self._data):
            return self._data
        else:
            return self.values

    @data.setter
    def data(self, data):
        data = as_compatible_data(data)
        if data.shape != self.shape:
            raise ValueError(
                f"replacement data must match the Variable's shape. 
" f"replacement data has shape {data.shape}; Variable has shape {self.shape}" ) self._data = data def astype( self: VariableType, dtype, *, order=None, casting=None, subok=None, copy=None, keep_attrs=True, ) -> VariableType: """ Copy of the Variable object, with data cast to a specified type. Parameters ---------- dtype : str or dtype Typecode or data-type to which the array is cast. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout order of the result. ‘C’ means C order, ‘F’ means Fortran order, ‘A’ means ‘F’ order if all the arrays are Fortran contiguous, ‘C’ order otherwise, and ‘K’ means as close to the order the array elements appear in memory as possible. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur. * 'no' means the data types should not be cast at all. * 'equiv' means only byte-order changes are allowed. * 'safe' means only casts which can preserve values are allowed. * 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. * 'unsafe' means any data conversions may be done. subok : bool, optional If True, then sub-classes will be passed-through, otherwise the returned array will be forced to be a base-class array. copy : bool, optional By default, astype always returns a newly allocated array. If this is set to False and the `dtype` requirement is satisfied, the input array is returned instead of a copy. keep_attrs : bool, optional By default, astype keeps attributes. Set to False to remove attributes in the returned object. Returns ------- out : same as object New object with data cast to the specified type. Notes ----- The ``order``, ``casting``, ``subok`` and ``copy`` arguments are only passed through to the ``astype`` method of the underlying array when a value different than ``None`` is supplied. Make sure to only supply these arguments if the underlying array class supports them. See also -------- numpy.ndarray.astype dask.array.Array.astype sparse.COO.astype """ from .computation import apply_ufunc kwargs = dict(order=order, casting=casting, subok=subok, copy=copy) kwargs = {k: v for k, v in kwargs.items() if v is not None} return apply_ufunc( duck_array_ops.astype, self, dtype, kwargs=kwargs, keep_attrs=keep_attrs, dask="allowed", ) def load(self, **kwargs): """Manually trigger loading of this variable's data from disk or a remote source into memory and return this variable. Normally, it should not be necessary to call this method in user code, because all xarray functions should either work on deferred data or load data automatically. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.array.compute``. See Also -------- dask.array.compute """ if is_duck_dask_array(self._data): self._data = as_compatible_data(self._data.compute(**kwargs)) elif not is_duck_array(self._data): self._data = np.asarray(self._data) return self def compute(self, **kwargs): """Manually trigger loading of this variable's data from disk or a remote source into memory and return a new variable. The original is left unaltered. Normally, it should not be necessary to call this method in user code, because all xarray functions should either work on deferred data or load data automatically. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.array.compute``. 
See Also -------- dask.array.compute """ new = self.copy(deep=False) return new.load(**kwargs) def __dask_tokenize__(self): # Use v.data, instead of v._data, in order to cope with the wrappers # around NetCDF and the like from dask.base import normalize_token return normalize_token((type(self), self._dims, self.data, self._attrs)) def __dask_graph__(self): if is_duck_dask_array(self._data): return self._data.__dask_graph__() else: return None def __dask_keys__(self): return self._data.__dask_keys__() def __dask_layers__(self): return self._data.__dask_layers__() @property def __dask_optimize__(self): return self._data.__dask_optimize__ @property def __dask_scheduler__(self): return self._data.__dask_scheduler__ def __dask_postcompute__(self): array_func, array_args = self._data.__dask_postcompute__() return ( self._dask_finalize, (array_func, array_args, self._dims, self._attrs, self._encoding), ) def __dask_postpersist__(self): array_func, array_args = self._data.__dask_postpersist__() return ( self._dask_finalize, (array_func, array_args, self._dims, self._attrs, self._encoding), ) @staticmethod def _dask_finalize(results, array_func, array_args, dims, attrs, encoding): data = array_func(results, *array_args) return Variable(dims, data, attrs=attrs, encoding=encoding) @property def values(self): """The variable's data as a numpy.ndarray""" return _as_array_or_item(self._data) @values.setter def values(self, values): self.data = values def to_base_variable(self): """Return this variable as a base xarray.Variable""" return Variable( self.dims, self._data, self._attrs, encoding=self._encoding, fastpath=True ) to_variable = utils.alias(to_base_variable, "to_variable") def to_index_variable(self): """Return this variable as an xarray.IndexVariable""" return IndexVariable( self.dims, self._data, self._attrs, encoding=self._encoding, fastpath=True ) to_coord = utils.alias(to_index_variable, "to_coord") def to_index(self): """Convert this variable to a pandas.Index""" return self.to_index_variable().to_index() def to_dict(self, data=True): """Dictionary representation of variable.""" item = {"dims": self.dims, "attrs": decode_numpy_dict_values(self.attrs)} if data: item["data"] = ensure_us_time_resolution(self.values).tolist() else: item.update({"dtype": str(self.dtype), "shape": self.shape}) return item @property def dims(self): """Tuple of dimension names with which this variable is associated.""" return self._dims @dims.setter def dims(self, value): self._dims = self._parse_dimensions(value) def _parse_dimensions(self, dims): if isinstance(dims, str): dims = (dims,) dims = tuple(dims) if len(dims) != self.ndim: raise ValueError( "dimensions %s must have the same length as the " "number of data dimensions, ndim=%s" % (dims, self.ndim) ) return dims def _item_key_to_tuple(self, key): if utils.is_dict_like(key): return tuple(key.get(dim, slice(None)) for dim in self.dims) else: return key def _broadcast_indexes(self, key): """Prepare an indexing key for an indexing operation. Parameters ----------- key: int, slice, array-like, dict or tuple of integer, slice and array-like Any valid input for indexing. Returns ------- dims : tuple Dimension of the resultant variable. indexers : IndexingTuple subclass Tuple of integer, array-like, or slices to use when indexing self._data. The type of this argument indicates the type of indexing to perform, either basic, outer or vectorized. new_order : Optional[Sequence[int]] Optional reordering to do on the result of indexing. 
            If not None, the first len(new_order) indexing should be
            moved to these positions.
        """
        key = self._item_key_to_tuple(key)  # key is a tuple
        # key is a tuple of full size
        key = indexing.expanded_indexer(key, self.ndim)
        # Convert a scalar Variable to an integer
        key = tuple(
            k.data.item() if isinstance(k, Variable) and k.ndim == 0 else k for k in key
        )
        # Convert a 0d-array to an integer
        key = tuple(
            k.item() if isinstance(k, np.ndarray) and k.ndim == 0 else k for k in key
        )
        if all(isinstance(k, BASIC_INDEXING_TYPES) for k in key):
            return self._broadcast_indexes_basic(key)

        self._validate_indexers(key)
        # Detect it can be mapped as an outer indexer
        # If all key is unlabeled, or
        # key can be mapped as an OuterIndexer.
        if all(not isinstance(k, Variable) for k in key):
            return self._broadcast_indexes_outer(key)

        # If all key is 1-dimensional and there are no duplicate labels,
        # key can be mapped as an OuterIndexer.
        dims = []
        for k, d in zip(key, self.dims):
            if isinstance(k, Variable):
                if len(k.dims) > 1:
                    return self._broadcast_indexes_vectorized(key)
                dims.append(k.dims[0])
            elif not isinstance(k, integer_types):
                dims.append(d)
        if len(set(dims)) == len(dims):
            return self._broadcast_indexes_outer(key)

        return self._broadcast_indexes_vectorized(key)

    def _broadcast_indexes_basic(self, key):
        dims = tuple(
            dim for k, dim in zip(key, self.dims) if not isinstance(k, integer_types)
        )
        return dims, BasicIndexer(key), None

    def _validate_indexers(self, key):
        """ Make sanity checks """
        for dim, k in zip(self.dims, key):
            if isinstance(k, BASIC_INDEXING_TYPES):
                pass
            else:
                if not isinstance(k, Variable):
                    k = np.asarray(k)
                    if k.ndim > 1:
                        raise IndexError(
                            "Unlabeled multi-dimensional array cannot be "
                            "used for indexing: {}".format(k)
                        )
                if k.dtype.kind == "b":
                    if self.shape[self.get_axis_num(dim)] != len(k):
                        raise IndexError(
                            "Boolean array size {:d} is used to index array "
                            "with shape {:s}.".format(len(k), str(self.shape))
                        )
                    if k.ndim > 1:
                        raise IndexError(
                            "{}-dimensional boolean indexing is "
                            "not supported. ".format(k.ndim)
                        )
                    if getattr(k, "dims", (dim,)) != (dim,):
                        raise IndexError(
                            "Boolean indexer should be unlabeled or on the "
                            "same dimension to the indexed array. Indexer is "
                            "on {:s} but the target dimension is {:s}.".format(
                                str(k.dims), dim
                            )
                        )

    def _broadcast_indexes_outer(self, key):
        dims = tuple(
            k.dims[0] if isinstance(k, Variable) else dim
            for k, dim in zip(key, self.dims)
            if not isinstance(k, integer_types)
        )

        new_key = []
        for k in key:
            if isinstance(k, Variable):
                k = k.data
            if not isinstance(k, BASIC_INDEXING_TYPES):
                k = np.asarray(k)
                if k.size == 0:
                    # Slice by empty list; numpy could not infer the dtype
                    k = k.astype(int)
                elif k.dtype.kind == "b":
                    (k,) = np.nonzero(k)
            new_key.append(k)

        return dims, OuterIndexer(tuple(new_key)), None

    def _nonzero(self):
        """ Equivalent to numpy's nonzero but returns a tuple of Variables. """
        # TODO we should replace dask's native nonzero
        # after https://github.com/dask/dask/issues/1076 is implemented.
        nonzeros = np.nonzero(self.data)
        return tuple(Variable((dim), nz) for nz, dim in zip(nonzeros, self.dims))

    def _broadcast_indexes_vectorized(self, key):
        variables = []
        out_dims_set = OrderedSet()
        for dim, value in zip(self.dims, key):
            if isinstance(value, slice):
                out_dims_set.add(dim)
            else:
                variable = (
                    value
                    if isinstance(value, Variable)
                    else as_variable(value, name=dim)
                )
                if variable.dtype.kind == "b":  # boolean indexing case
                    (variable,) = variable._nonzero()

                variables.append(variable)
                out_dims_set.update(variable.dims)

        variable_dims = set()
        for variable in variables:
            variable_dims.update(variable.dims)

        slices = []
        for i, (dim, value) in enumerate(zip(self.dims, key)):
            if isinstance(value, slice):
                if dim in variable_dims:
                    # We only convert slice objects to variables if they share
                    # a dimension with at least one other variable. Otherwise,
                    # we can equivalently leave them as slices and transpose
                    # the result. This is significantly faster/more efficient
                    # for most array backends.
                    values = np.arange(*value.indices(self.sizes[dim]))
                    variables.insert(i - len(slices), Variable((dim,), values))
                else:
                    slices.append((i, value))

        try:
            variables = _broadcast_compat_variables(*variables)
        except ValueError:
            raise IndexError(f"Dimensions of indexers mismatch: {key}")

        out_key = [variable.data for variable in variables]
        out_dims = tuple(out_dims_set)
        slice_positions = set()
        for i, value in slices:
            out_key.insert(i, value)
            new_position = out_dims.index(self.dims[i])
            slice_positions.add(new_position)

        if slice_positions:
            new_order = [i for i in range(len(out_dims)) if i not in slice_positions]
        else:
            new_order = None

        return out_dims, VectorizedIndexer(tuple(out_key)), new_order

    def __getitem__(self: VariableType, key) -> VariableType:
        """Return a new Variable object whose contents are consistent with
        getting the provided key from the underlying data.

        NB. __getitem__ and __setitem__ implement xarray-style indexing,
        where if keys are unlabeled arrays, we index the array orthogonally
        with them. If keys are labeled arrays (such as Variables), they are
        broadcasted with our usual scheme and then the array is indexed with
        the broadcasted key, like numpy's fancy indexing.

        If you really want to do indexing like `x[x > 0]`, manipulate the numpy
        array `x.values` directly.
        """
        dims, indexer, new_order = self._broadcast_indexes(key)
        data = as_indexable(self._data)[indexer]
        if new_order:
            data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order)
        return self._finalize_indexing_result(dims, data)

    def _finalize_indexing_result(self: VariableType, dims, data) -> VariableType:
        """Used by IndexVariable to return IndexVariable objects when possible."""
        return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)

    def _getitem_with_mask(self, key, fill_value=dtypes.NA):
        """Index this Variable with -1 remapped to fill_value."""
        # TODO(shoyer): expose this method in public API somewhere (isel?) and
        # use it for reindex.
        # TODO(shoyer): add a sanity check that all other integers are
        # non-negative
        # TODO(shoyer): add an optimization, remapping -1 to an adjacent value
        # that is actually indexed rather than mapping it to the last value
        # along each axis.
if fill_value is dtypes.NA: fill_value = dtypes.get_fill_value(self.dtype) dims, indexer, new_order = self._broadcast_indexes(key) if self.size: if is_duck_dask_array(self._data): # dask's indexing is faster this way; also vindex does not # support negative indices yet: # https://github.com/dask/dask/pull/2967 actual_indexer = indexing.posify_mask_indexer(indexer) else: actual_indexer = indexer data = as_indexable(self._data)[actual_indexer] mask = indexing.create_mask(indexer, self.shape, data) # we need to invert the mask in order to pass data first. This helps # pint to choose the correct unit # TODO: revert after https://github.com/hgrecco/pint/issues/1019 is fixed data = duck_array_ops.where(np.logical_not(mask), data, fill_value) else: # array cannot be indexed along dimensions of size 0, so just # build the mask directly instead. mask = indexing.create_mask(indexer, self.shape) data = np.broadcast_to(fill_value, getattr(mask, "shape", ())) if new_order: data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order) return self._finalize_indexing_result(dims, data) def __setitem__(self, key, value): """__setitem__ is overloaded to access the underlying numpy values with orthogonal indexing. See __getitem__ for more details. """ dims, index_tuple, new_order = self._broadcast_indexes(key) if not isinstance(value, Variable): value = as_compatible_data(value) if value.ndim > len(dims): raise ValueError( "shape mismatch: value array of shape %s could not be " "broadcast to indexing result with %s dimensions" % (value.shape, len(dims)) ) if value.ndim == 0: value = Variable((), value) else: value = Variable(dims[-value.ndim :], value) # broadcast to become assignable value = value.set_dims(dims).data if new_order: value = duck_array_ops.asarray(value) value = value[(len(dims) - value.ndim) * (np.newaxis,) + (Ellipsis,)] value = duck_array_ops.moveaxis(value, new_order, range(len(new_order))) indexable = as_indexable(self._data) indexable[index_tuple] = value @property def attrs(self) -> Dict[Hashable, Any]: """Dictionary of local attributes on this variable.""" if self._attrs is None: self._attrs = {} return self._attrs @attrs.setter def attrs(self, value: Mapping[Hashable, Any]) -> None: self._attrs = dict(value) @property def encoding(self): """Dictionary of encodings on this variable.""" if self._encoding is None: self._encoding = {} return self._encoding @encoding.setter def encoding(self, value): try: self._encoding = dict(value) except ValueError: raise ValueError("encoding must be castable to a dictionary") def copy(self, deep=True, data=None): """Returns a copy of this object. If `deep=True`, the data array is loaded into memory and copied onto the new object. Dimensions, attributes and encodings are always copied. Use `data` to create a new object with the same structure as original but entirely new data. Parameters ---------- deep : bool, optional Whether the data array is loaded into memory and copied onto the new object. Default is True. data : array_like, optional Data to use in the new object. Must have same shape as original. When `data` is used, `deep` is ignored. Returns ------- object : Variable New object with dimensions, attributes, encodings, and optionally data copied from original. 
        Examples
        --------
        Shallow copy versus deep copy

        >>> var = xr.Variable(data=[1, 2, 3], dims="x")
        >>> var.copy()
        <xarray.Variable (x: 3)>
        array([1, 2, 3])
        >>> var_0 = var.copy(deep=False)
        >>> var_0[0] = 7
        >>> var_0
        <xarray.Variable (x: 3)>
        array([7, 2, 3])
        >>> var
        <xarray.Variable (x: 3)>
        array([7, 2, 3])

        Changing the data using the ``data`` argument maintains the
        structure of the original object, but with the new data. Original
        object is unaffected.

        >>> var.copy(data=[0.1, 0.2, 0.3])
        <xarray.Variable (x: 3)>
        array([0.1, 0.2, 0.3])
        >>> var
        <xarray.Variable (x: 3)>
        array([7, 2, 3])

        See Also
        --------
        pandas.DataFrame.copy
        """
        if data is None:
            data = self._data

            if isinstance(data, indexing.MemoryCachedArray):
                # don't share caching between copies
                data = indexing.MemoryCachedArray(data.array)

            if deep:
                data = copy.deepcopy(data)

        else:
            data = as_compatible_data(data)
            if self.shape != data.shape:
                raise ValueError(
                    "Data shape {} must match shape of object {}".format(
                        data.shape, self.shape
                    )
                )

        # note:
        # dims is already an immutable tuple
        # attributes and encoding will be copied when the new Array is created
        return self._replace(data=data)

    def _replace(
        self, dims=_default, data=_default, attrs=_default, encoding=_default
    ) -> "Variable":
        if dims is _default:
            dims = copy.copy(self._dims)
        if data is _default:
            data = copy.copy(self.data)
        if attrs is _default:
            attrs = copy.copy(self._attrs)
        if encoding is _default:
            encoding = copy.copy(self._encoding)
        return type(self)(dims, data, attrs, encoding, fastpath=True)

    def __copy__(self):
        return self.copy(deep=False)

    def __deepcopy__(self, memo=None):
        # memo does nothing but is required for compatibility with
        # copy.deepcopy
        return self.copy(deep=True)

    # mutable objects should not be hashable
    # https://github.com/python/mypy/issues/4266
    __hash__ = None  # type: ignore

    @property
    def chunks(self):
        """Block dimensions for this array's data or None if it's not a dask
        array.
        """
        return getattr(self._data, "chunks", None)

    _array_counter = itertools.count()

    def chunk(self, chunks={}, name=None, lock=False):
        """Coerce this array's data into a dask array with the given chunks.

        If this variable is a non-dask array, it will be converted to a dask
        array. If it's a dask array, it will be rechunked to the given chunk
        sizes.

        If chunks are not provided for one or more dimensions, chunk
        sizes along those dimensions will not be updated; non-dask arrays will be
        converted into dask arrays with a single block.

        Parameters
        ----------
        chunks : int, tuple or dict, optional
            Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
            ``{'x': 5, 'y': 5}``.
        name : str, optional
            Used to generate the name for this array in the internal dask
            graph. Does not need to be unique.
        lock : optional
            Passed on to :py:func:`dask.array.from_array`, if the array is not
            already a dask array.

        Returns
        -------
        chunked : xarray.Variable
        """
        import dask
        import dask.array as da

        if chunks is None:
            warnings.warn(
                "None value for 'chunks' is deprecated. "
                "It will raise an error in the future. Use instead '{}'",
                category=FutureWarning,
            )
            chunks = {}

        if utils.is_dict_like(chunks):
            chunks = {self.get_axis_num(dim): chunk for dim, chunk in chunks.items()}

        data = self._data
        if is_duck_dask_array(data):
            data = data.rechunk(chunks)
        else:
            if isinstance(data, indexing.ExplicitlyIndexed):
                # Unambiguously handle array storage backends (like NetCDF4 and h5py)
                # that can't handle general array indexing.
For example, in netCDF4 you # can do "outer" indexing along two dimensions independently, which works # differently from how NumPy handles it. # da.from_array works by using lazy indexing with a tuple of slices. # Using OuterIndexer is a pragmatic choice: dask does not yet handle # different indexing types in an explicit way: # https://github.com/dask/dask/issues/2883 data = indexing.ImplicitToExplicitIndexingAdapter( data, indexing.OuterIndexer ) if LooseVersion(dask.__version__) < "2.0.0": kwargs = {} else: # All of our lazily loaded backend array classes should use NumPy # array operations. kwargs = {"meta": np.ndarray} else: kwargs = {} if utils.is_dict_like(chunks): chunks = tuple(chunks.get(n, s) for n, s in enumerate(self.shape)) data = da.from_array(data, chunks, name=name, lock=lock, **kwargs) return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True) def _as_sparse(self, sparse_format=_default, fill_value=dtypes.NA): """ use sparse-array as backend. """ import sparse # TODO: what to do if dask-backed? if fill_value is dtypes.NA: dtype, fill_value = dtypes.maybe_promote(self.dtype) else: dtype = dtypes.result_type(self.dtype, fill_value) if sparse_format is _default: sparse_format = "coo" try: as_sparse = getattr(sparse, f"as_{sparse_format.lower()}") except AttributeError: raise ValueError(f"{sparse_format} is not a valid sparse format") data = as_sparse(self.data.astype(dtype), fill_value=fill_value) return self._replace(data=data) def _to_dense(self): """ Change backend from sparse to np.array """ if hasattr(self._data, "todense"): return self._replace(data=self._data.todense()) return self.copy(deep=False) def isel( self: VariableType, indexers: Mapping[Hashable, Any] = None, missing_dims: str = "raise", **indexers_kwargs: Any, ) -> VariableType: """Return a new array indexed along the specified dimension(s). Parameters ---------- **indexers : {dim: indexer, ...} Keyword arguments with names matching dimensions and values given by integers, slice objects or arrays. missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the DataArray: - "raise": raise an exception - "warn": raise a warning, and ignore the missing dimensions - "ignore": ignore the missing dimensions Returns ------- obj : Array object A new Array with the selected data and dimensions. In general, the new variable's data will be a view of this variable's data, unless numpy fancy indexing was triggered by using an array indexer, in which case the data will be a copy. """ indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "isel") indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims) key = tuple(indexers.get(dim, slice(None)) for dim in self.dims) return self[key] def squeeze(self, dim=None): """Return a new object with squeezed data. Parameters ---------- dim : None or str or tuple of str, optional Selects a subset of the length one dimensions. If a dimension is selected with length greater than one, an error is raised. If None, all length one dimensions are squeezed. Returns ------- squeezed : same type as caller This object, but with all or a subset of the dimensions of length 1 removed.
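Examples -------- A minimal illustrative sketch (an addition, not from the upstream docstring), squeezing the length-1 ``x`` dimension: >>> v = Variable(("x", "y"), [[0, 1, 2]]) >>> v.squeeze() <xarray.Variable (y: 3)> array([0, 1, 2])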
See Also -------- numpy.squeeze """ dims = common.get_squeeze_dims(self, dim) return self.isel({d: 0 for d in dims}) def _shift_one_dim(self, dim, count, fill_value=dtypes.NA): axis = self.get_axis_num(dim) if count > 0: keep = slice(None, -count) elif count < 0: keep = slice(-count, None) else: keep = slice(None) trimmed_data = self[(slice(None),) * axis + (keep,)].data if fill_value is dtypes.NA: dtype, fill_value = dtypes.maybe_promote(self.dtype) else: dtype = self.dtype width = min(abs(count), self.shape[axis]) dim_pad = (width, 0) if count >= 0 else (0, width) pads = [(0, 0) if d != dim else dim_pad for d in self.dims] data = duck_array_ops.pad( trimmed_data.astype(dtype), pads, mode="constant", constant_values=fill_value, ) if is_duck_dask_array(data): # chunked data should come out with the same chunks; this makes # it feasible to combine shifted and unshifted data # TODO: remove this once dask.array automatically aligns chunks data = data.rechunk(self.data.chunks) return type(self)(self.dims, data, self._attrs, fastpath=True) def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs): """ Return a new Variable with shifted data. Parameters ---------- shifts : mapping of the form {dim: offset} Integer offset to shift along each of the given dimensions. Positive offsets shift to the right; negative offsets shift to the left. fill_value: scalar, optional Value to use for newly missing values **shifts_kwargs The keyword arguments form of ``shifts``. One of shifts or shifts_kwargs must be provided. Returns ------- shifted : Variable Variable with the same dimensions and attributes but shifted data. """ shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "shift") result = self for dim, count in shifts.items(): result = result._shift_one_dim(dim, count, fill_value=fill_value) return result def _pad_options_dim_to_index( self, pad_option: Mapping[Hashable, Union[int, Tuple[int, int]]], fill_with_shape=False, ): if fill_with_shape: return [ (n, n) if d not in pad_option else pad_option[d] for d, n in zip(self.dims, self.data.shape) ] return [(0, 0) if d not in pad_option else pad_option[d] for d in self.dims] def pad( self, pad_width: Mapping[Hashable, Union[int, Tuple[int, int]]] = None, mode: str = "constant", stat_length: Union[ int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]] ] = None, constant_values: Union[ int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]] ] = None, end_values: Union[ int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]] ] = None, reflect_type: str = None, **pad_width_kwargs: Any, ): """ Return a new Variable with padded data. Parameters ---------- pad_width : mapping of hashable to tuple of int Mapping with the form of {dim: (pad_before, pad_after)} describing the number of values padded along each dimension. {dim: pad} is a shortcut for pad_before = pad_after = pad mode : str, default: "constant" See numpy / Dask docs stat_length : int, tuple or mapping of hashable to tuple Used in 'maximum', 'mean', 'median', and 'minimum'. Number of values at edge of each axis used to calculate the statistic value. constant_values : scalar, tuple or mapping of hashable to tuple Used in 'constant'. The values to set the padded values for each axis. end_values : scalar, tuple or mapping of hashable to tuple Used in 'linear_ramp'. The values used for the ending value of the linear_ramp and that will form the edge of the padded array. reflect_type : {"even", "odd"}, optional Used in "reflect", and "symmetric". 
The "even" style is the default with an unaltered reflection around the edge value. For the "odd" style, the extended part of the array is created by subtracting the reflected values from two times the edge value. **pad_width_kwargs One of pad_width or pad_width_kwargs must be provided. Returns ------- padded : Variable Variable with the same dimensions and attributes but padded data. """ pad_width = either_dict_or_kwargs(pad_width, pad_width_kwargs, "pad") # change default behaviour of pad with mode constant if mode == "constant" and ( constant_values is None or constant_values is dtypes.NA ): dtype, constant_values = dtypes.maybe_promote(self.dtype) else: dtype = self.dtype # create pad_options_kwargs, numpy requires only relevant kwargs to be nonempty if isinstance(stat_length, dict): stat_length = self._pad_options_dim_to_index( stat_length, fill_with_shape=True ) if isinstance(constant_values, dict): constant_values = self._pad_options_dim_to_index(constant_values) if isinstance(end_values, dict): end_values = self._pad_options_dim_to_index(end_values) # workaround for bug in Dask's default value of stat_length https://github.com/dask/dask/issues/5303 if stat_length is None and mode in ["maximum", "mean", "median", "minimum"]: stat_length = [(n, n) for n in self.data.shape] # type: ignore # change integer values to a tuple of two of those values and change pad_width to index for k, v in pad_width.items(): if isinstance(v, numbers.Number): pad_width[k] = (v, v) pad_width_by_index = self._pad_options_dim_to_index(pad_width) # create pad_options_kwargs, numpy/dask requires only relevant kwargs to be nonempty pad_option_kwargs = {} if stat_length is not None: pad_option_kwargs["stat_length"] = stat_length if constant_values is not None: pad_option_kwargs["constant_values"] = constant_values if end_values is not None: pad_option_kwargs["end_values"] = end_values if reflect_type is not None: pad_option_kwargs["reflect_type"] = reflect_type # type: ignore array = duck_array_ops.pad( self.data.astype(dtype, copy=False), pad_width_by_index, mode=mode, **pad_option_kwargs, ) return type(self)(self.dims, array) def _roll_one_dim(self, dim, count): axis = self.get_axis_num(dim) count %= self.shape[axis] if count != 0: indices = [slice(-count, None), slice(None, -count)] else: indices = [slice(None)] arrays = [self[(slice(None),) * axis + (idx,)].data for idx in indices] data = duck_array_ops.concatenate(arrays, axis) if is_duck_dask_array(data): # chunked data should come out with the same chunks; this makes # it feasible to combine shifted and unshifted data # TODO: remove this once dask.array automatically aligns chunks data = data.rechunk(self.data.chunks) return type(self)(self.dims, data, self._attrs, fastpath=True) def roll(self, shifts=None, **shifts_kwargs): """ Return a new Variable with rolld data. Parameters ---------- shifts : mapping of hashable to int Integer offset to roll along each of the given dimensions. Positive offsets roll to the right; negative offsets roll to the left. **shifts_kwargs The keyword arguments form of ``shifts``. One of shifts or shifts_kwargs must be provided. Returns ------- shifted : Variable Variable with the same dimensions and attributes but rolled data. """ shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "roll") result = self for dim, count in shifts.items(): result = result._roll_one_dim(dim, count) return result def transpose(self, *dims) -> "Variable": """Return a new Variable object with transposed dimensions. 
Parameters ---------- *dims : str, optional By default, reverse the dimensions. Otherwise, reorder the dimensions to this order. Returns ------- transposed : Variable The returned object has transposed data and dimensions with the same attributes as the original. Notes ----- This operation returns a view of this variable's data. It is lazy for dask-backed Variables but not for numpy-backed Variables. See Also -------- numpy.transpose """ if len(dims) == 0: dims = self.dims[::-1] dims = tuple(infix_dims(dims, self.dims)) axes = self.get_axis_num(dims) if len(dims) < 2 or dims == self.dims: # no need to transpose if only one dimension # or dims are in same order return self.copy(deep=False) data = as_indexable(self._data).transpose(axes) return type(self)(dims, data, self._attrs, self._encoding, fastpath=True) @property def T(self) -> "Variable": return self.transpose() def set_dims(self, dims, shape=None): """Return a new variable with given set of dimensions. This method might be used to attach new dimension(s) to variable. When possible, this operation does not copy this variable's data. Parameters ---------- dims : str or sequence of str or dict Dimensions to include on the new variable. If a dict, values are used to provide the sizes of new dimensions; otherwise, new dimensions are inserted with length 1. Returns ------- Variable """ if isinstance(dims, str): dims = [dims] if shape is None and utils.is_dict_like(dims): shape = dims.values() missing_dims = set(self.dims) - set(dims) if missing_dims: raise ValueError( "new dimensions %r must be a superset of " "existing dimensions %r" % (dims, self.dims) ) self_dims = set(self.dims) expanded_dims = tuple(d for d in dims if d not in self_dims) + self.dims if self.dims == expanded_dims: # don't use broadcast_to unless necessary so the result remains # writeable if possible expanded_data = self.data elif shape is not None: dims_map = dict(zip(dims, shape)) tmp_shape = tuple(dims_map[d] for d in expanded_dims) expanded_data = duck_array_ops.broadcast_to(self.data, tmp_shape) else: expanded_data = self.data[(None,) * (len(expanded_dims) - self.ndim)] expanded_var = Variable( expanded_dims, expanded_data, self._attrs, self._encoding, fastpath=True ) return expanded_var.transpose(*dims) def _stack_once(self, dims, new_dim): if not set(dims) <= set(self.dims): raise ValueError("invalid existing dimensions: %s" % dims) if new_dim in self.dims: raise ValueError( "cannot create a new dimension with the same " "name as an existing dimension" ) if len(dims) == 0: # don't stack return self.copy(deep=False) other_dims = [d for d in self.dims if d not in dims] dim_order = other_dims + list(dims) reordered = self.transpose(*dim_order) new_shape = reordered.shape[: len(other_dims)] + (-1,) new_data = reordered.data.reshape(new_shape) new_dims = reordered.dims[: len(other_dims)] + (new_dim,) return Variable(new_dims, new_data, self._attrs, self._encoding, fastpath=True) def stack(self, dimensions=None, **dimensions_kwargs): """ Stack any number of existing dimensions into a single new dimension. New dimensions will be added at the end, and the order of the data along each new dimension will be in contiguous (C) order. Parameters ---------- dimensions : mapping of hashable to tuple of hashable Mapping of form new_name=(dim1, dim2, ...) describing the names of new dimensions, and the existing dimensions that they replace. **dimensions_kwargs The keyword arguments form of ``dimensions``. One of dimensions or dimensions_kwargs must be provided. 
Returns ------- stacked : Variable Variable with the same attributes but stacked data. See also -------- Variable.unstack """ dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, "stack") result = self for new_dim, dims in dimensions.items(): result = result._stack_once(dims, new_dim) return result def _unstack_once(self, dims, old_dim): new_dim_names = tuple(dims.keys()) new_dim_sizes = tuple(dims.values()) if old_dim not in self.dims: raise ValueError("invalid existing dimension: %s" % old_dim) if set(new_dim_names).intersection(self.dims): raise ValueError( "cannot create a new dimension with the same " "name as an existing dimension" ) if np.prod(new_dim_sizes) != self.sizes[old_dim]: raise ValueError( "the product of the new dimension sizes must " "equal the size of the old dimension" ) other_dims = [d for d in self.dims if d != old_dim] dim_order = other_dims + [old_dim] reordered = self.transpose(*dim_order) new_shape = reordered.shape[: len(other_dims)] + new_dim_sizes new_data = reordered.data.reshape(new_shape) new_dims = reordered.dims[: len(other_dims)] + new_dim_names return Variable(new_dims, new_data, self._attrs, self._encoding, fastpath=True) def unstack(self, dimensions=None, **dimensions_kwargs): """ Unstack an existing dimension into multiple new dimensions. New dimensions will be added at the end, and the order of the data along each new dimension will be in contiguous (C) order. Parameters ---------- dimensions : mapping of hashable to mapping of hashable to int Mapping of the form old_dim={dim1: size1, ...} describing the names of existing dimensions, and the new dimensions and sizes that they map to. **dimensions_kwargs The keyword arguments form of ``dimensions``. One of dimensions or dimensions_kwargs must be provided. Returns ------- unstacked : Variable Variable with the same attributes but unstacked data. See also -------- Variable.stack """ dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, "unstack") result = self for old_dim, dims in dimensions.items(): result = result._unstack_once(dims, old_dim) return result def fillna(self, value): return ops.fillna(self, value) def where(self, cond, other=dtypes.NA): return ops.where_method(self, cond, other) def reduce( self, func, dim=None, axis=None, keep_attrs=None, keepdims=False, **kwargs, ): """Reduce this array by applying `func` along some dimension(s). Parameters ---------- func : callable Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of reducing an np.ndarray over an integer valued axis. dim : str or sequence of str, optional Dimension(s) over which to apply `func`. axis : int or sequence of int, optional Axis(es) over which to apply `func`. Only one of the 'dim' and 'axis' arguments can be supplied. If neither are supplied, then the reduction is calculated over the flattened array (by calling `func(x)` without an axis argument). keep_attrs : bool, optional If True, the variable's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. keepdims : bool, default: False If True, the dimensions which are reduced are left in the result as dimensions of size one **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : Array Array with summarized data and the indicated dimension(s) removed. 
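Examples -------- A minimal illustrative sketch (an addition, not from the upstream docstring), reducing with ``np.sum`` along one dimension: >>> v = Variable(("x", "y"), np.arange(6).reshape(2, 3)) >>> v.reduce(np.sum, dim="y") <xarray.Variable (x: 2)> array([ 3, 12])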
""" if dim == ...: dim = None if dim is not None and axis is not None: raise ValueError("cannot supply both 'axis' and 'dim' arguments") if dim is not None: axis = self.get_axis_num(dim) with warnings.catch_warnings(): warnings.filterwarnings( "ignore", r"Mean of empty slice", category=RuntimeWarning ) if axis is not None: data = func(self.data, axis=axis, **kwargs) else: data = func(self.data, **kwargs) if getattr(data, "shape", ()) == self.shape: dims = self.dims else: removed_axes = ( range(self.ndim) if axis is None else np.atleast_1d(axis) % self.ndim ) if keepdims: # Insert np.newaxis for removed dims slices = tuple( np.newaxis if i in removed_axes else slice(None, None) for i in range(self.ndim) ) if getattr(data, "shape", None) is None: # Reduce has produced a scalar value, not an array-like data = np.asanyarray(data)[slices] else: data = data[slices] dims = self.dims else: dims = [ adim for n, adim in enumerate(self.dims) if n not in removed_axes ] if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) attrs = self._attrs if keep_attrs else None return Variable(dims, data, attrs=attrs) @classmethod def concat(cls, variables, dim="concat_dim", positions=None, shortcut=False): """Concatenate variables along a new or existing dimension. Parameters ---------- variables : iterable of Variable Arrays to stack together. Each variable is expected to have matching dimensions and shape except for along the stacked dimension. dim : str or DataArray, optional Name of the dimension to stack along. This can either be a new dimension name, in which case it is added along axis=0, or an existing dimension name, in which case the location of the dimension is unchanged. Where to insert the new dimension is determined by the first variable. positions : None or list of array-like, optional List of integer arrays which specifies the integer positions to which to assign each dataset along the concatenated dimension. If not supplied, objects are concatenated in the provided order. shortcut : bool, optional This option is used internally to speed-up groupby operations. If `shortcut` is True, some checks of internal consistency between arrays to concatenate are skipped. Returns ------- stacked : Variable Concatenated Variable formed by stacking all the supplied variables along the given dimension. """ if not isinstance(dim, str): (dim,) = dim.dims # can't do this lazily: we need to loop through variables at least # twice variables = list(variables) first_var = variables[0] arrays = [v.data for v in variables] if dim in first_var.dims: axis = first_var.get_axis_num(dim) dims = first_var.dims data = duck_array_ops.concatenate(arrays, axis=axis) if positions is not None: # TODO: deprecate this option -- we don't need it for groupby # any more. indices = nputils.inverse_permutation(np.concatenate(positions)) data = duck_array_ops.take(data, indices, axis=axis) else: axis = 0 dims = (dim,) + first_var.dims data = duck_array_ops.stack(arrays, axis=axis) attrs = dict(first_var.attrs) encoding = dict(first_var.encoding) if not shortcut: for var in variables: if var.dims != first_var.dims: raise ValueError( f"Variable has dimensions {list(var.dims)} but first Variable has dimensions {list(first_var.dims)}" ) return cls(dims, data, attrs, encoding) def equals(self, other, equiv=duck_array_ops.array_equiv): """True if two Variables have the same dimensions and values; otherwise False. Variables can still be equal (like pandas objects) if they have NaN values in the same locations. 
This method is necessary because `v1 == v2` for Variables does element-wise comparisons (like numpy.ndarrays). """ other = getattr(other, "variable", other) try: return self.dims == other.dims and ( self._data is other._data or equiv(self.data, other.data) ) except (TypeError, AttributeError): return False def broadcast_equals(self, other, equiv=duck_array_ops.array_equiv): """True if two Variables have the same values after being broadcast against each other; otherwise False. Variables can still be equal (like pandas objects) if they have NaN values in the same locations. """ try: self, other = broadcast_variables(self, other) except (ValueError, AttributeError): return False return self.equals(other, equiv=equiv) def identical(self, other, equiv=duck_array_ops.array_equiv): """Like equals, but also checks attributes.""" try: return utils.dict_equiv(self.attrs, other.attrs) and self.equals( other, equiv=equiv ) except (TypeError, AttributeError): return False def no_conflicts(self, other, equiv=duck_array_ops.array_notnull_equiv): """True if the intersection of two Variables' non-null data is equal; otherwise False. Variables can thus still be equal if there are locations where either, or both, contain NaN values. """ return self.broadcast_equals(other, equiv=equiv) def quantile( self, q, dim=None, interpolation="linear", keep_attrs=None, skipna=True ): """Compute the qth quantile of the data along the specified dimension. Returns the qth quantile(s) of the array elements. Parameters ---------- q : float or sequence of float Quantile to compute, which must be between 0 and 1 inclusive. dim : str or sequence of str, optional Dimension(s) over which to apply quantile. interpolation : {"linear", "lower", "higher", "midpoint", "nearest"}, default: "linear" This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points ``i < j``: * linear: ``i + (j - i) * fraction``, where ``fraction`` is the fractional part of the index surrounded by ``i`` and ``j``. * lower: ``i``. * higher: ``j``. * nearest: ``i`` or ``j``, whichever is nearest. * midpoint: ``(i + j) / 2``. keep_attrs : bool, optional If True, the variable's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. Returns ------- quantiles : Variable If `q` is a single quantile, then the result is a scalar. If multiple quantiles are given, first axis of the result corresponds to the quantile and a quantile dimension is added to the return array. The other dimensions are the dimensions that remain after the reduction of the array. See Also -------- numpy.nanquantile, pandas.Series.quantile, Dataset.quantile, DataArray.quantile """ from .computation import apply_ufunc _quantile_func = np.nanquantile if skipna else np.quantile if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) scalar = utils.is_scalar(q) q = np.atleast_1d(np.asarray(q, dtype=np.float64)) if dim is None: dim = self.dims if utils.is_scalar(dim): dim = [dim] def _wrapper(npa, **kwargs): # move quantile axis to end.
required for apply_ufunc return np.moveaxis(_quantile_func(npa, **kwargs), 0, -1) axis = np.arange(-1, -1 * len(dim) - 1, -1) result = apply_ufunc( _wrapper, self, input_core_dims=[dim], exclude_dims=set(dim), output_core_dims=[["quantile"]], output_dtypes=[np.float64], dask_gufunc_kwargs=dict(output_sizes={"quantile": len(q)}), dask="parallelized", kwargs={"q": q, "axis": axis, "interpolation": interpolation}, ) # for backward compatibility result = result.transpose("quantile", ...) if scalar: result = result.squeeze("quantile") if keep_attrs: result.attrs = self._attrs return result def rank(self, dim, pct=False): """Ranks the data. Equal values are assigned a rank that is the average of the ranks that would have been otherwise assigned to all of the values within that set. Ranks begin at 1, not 0. If `pct`, computes percentage ranks. NaNs in the input array are returned as NaNs. The `bottleneck` library is required. Parameters ---------- dim : str Dimension over which to compute rank. pct : bool, optional If True, compute percentage ranks, otherwise compute integer ranks. Returns ------- ranked : Variable See Also -------- Dataset.rank, DataArray.rank """ import bottleneck as bn data = self.data if is_duck_dask_array(data): raise TypeError( "rank does not work for arrays stored as dask " "arrays. Load the data via .compute() or .load() " "prior to calling this method." ) elif not isinstance(data, np.ndarray): raise TypeError( "rank is not implemented for {} objects.".format(type(data)) ) axis = self.get_axis_num(dim) func = bn.nanrankdata if self.dtype.kind == "f" else bn.rankdata ranked = func(data, axis=axis) if pct: count = np.sum(~np.isnan(data), axis=axis, keepdims=True) ranked /= count return Variable(self.dims, ranked) def rolling_window( self, dim, window, window_dim, center=False, fill_value=dtypes.NA ): """ Make a rolling_window along dim and add a new_dim to the last place. Parameters ---------- dim : str Dimension over which to compute rolling_window. For nd-rolling, should be list of dimensions. window : int Window size of the rolling window. For nd-rolling, should be a list of integers. window_dim : str New name of the window dimension. For nd-rolling, should be a list of strings. center : bool, default: False If True, pad fill_value for both ends. Otherwise, pad in the head of the axis. fill_value Value to be filled. Returns ------- Variable that is a view of the original array with an added dimension of size w.
The return dim: self.dims + (window_dim, ) The return shape: self.shape + (window, ) Examples -------- >>> v = Variable(("a", "b"), np.arange(8).reshape((2, 4))) >>> v.rolling_window("b", 3, "window_dim") <xarray.Variable (a: 2, b: 4, window_dim: 3)> array([[[nan, nan, 0.], [nan, 0., 1.], [ 0., 1., 2.], [ 1., 2., 3.]], <BLANKLINE> [[nan, nan, 4.], [nan, 4., 5.], [ 4., 5., 6.], [ 5., 6., 7.]]]) >>> v.rolling_window("b", 3, "window_dim", center=True) <xarray.Variable (a: 2, b: 4, window_dim: 3)> array([[[nan, 0., 1.], [ 0., 1., 2.], [ 1., 2., 3.], [ 2., 3., nan]], <BLANKLINE> [[nan, 4., 5.], [ 4., 5., 6.], [ 5., 6., 7.], [ 6., 7., nan]]]) """ if fill_value is dtypes.NA: # np.nan is passed dtype, fill_value = dtypes.maybe_promote(self.dtype) array = self.astype(dtype, copy=False).data else: dtype = self.dtype array = self.data if isinstance(dim, list): assert len(dim) == len(window) assert len(dim) == len(window_dim) assert len(dim) == len(center) else: dim = [dim] window = [window] window_dim = [window_dim] center = [center] axis = [self.get_axis_num(d) for d in dim] new_dims = self.dims + tuple(window_dim) return Variable( new_dims, duck_array_ops.rolling_window( array, axis=axis, window=window, center=center, fill_value=fill_value ), ) def coarsen( self, windows, func, boundary="exact", side="left", keep_attrs=None, **kwargs ): """ Apply reduction function. """ windows = {k: v for k, v in windows.items() if k in self.dims} if not windows: return self.copy() if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) if keep_attrs: _attrs = self.attrs else: _attrs = None reshaped, axes = self._coarsen_reshape(windows, boundary, side) if isinstance(func, str): name = func func = getattr(duck_array_ops, name, None) if func is None: raise NameError(f"{name} is not a valid method.") return self._replace(data=func(reshaped, axis=axes, **kwargs), attrs=_attrs) def _coarsen_reshape(self, windows, boundary, side): """ Construct a reshaped-array for coarsen """ if not utils.is_dict_like(boundary): boundary = {d: boundary for d in windows.keys()} if not utils.is_dict_like(side): side = {d: side for d in windows.keys()} # remove unrelated dimensions boundary = {k: v for k, v in boundary.items() if k in windows} side = {k: v for k, v in side.items() if k in windows} for d, window in windows.items(): if window <= 0: raise ValueError(f"window must be > 0. Given {window}") variable = self for d, window in windows.items(): # trim or pad the object size = variable.shape[self._get_axis_num(d)] n = int(size / window) if boundary[d] == "exact": if n * window != size: raise ValueError( "Could not coarsen a dimension of size {} with " "window {}".format(size, window) ) elif boundary[d] == "trim": if side[d] == "left": variable = variable.isel({d: slice(0, window * n)}) else: excess = size - window * n variable = variable.isel({d: slice(excess, None)}) elif boundary[d] == "pad": # pad pad = window * n - size if pad < 0: pad += window if side[d] == "left": pad_width = {d: (0, pad)} else: pad_width = {d: (pad, 0)} variable = variable.pad(pad_width, mode="constant") else: raise TypeError( "{} is invalid for boundary. 
Valid options are 'exact', " "'trim' and 'pad'".format(boundary[d]) ) shape = [] axes = [] axis_count = 0 for i, d in enumerate(variable.dims): if d in windows: size = variable.shape[i] shape.append(int(size / windows[d])) shape.append(windows[d]) axis_count += 1 axes.append(i + axis_count) else: shape.append(variable.shape[i]) return variable.data.reshape(shape), tuple(axes) def isnull(self, keep_attrs: bool = None): """Test each value in the array for whether it is a missing value. Returns ------- isnull : Variable Same type and shape as object, but the dtype of the data is bool. See Also -------- pandas.isnull Examples -------- >>> var = xr.Variable("x", [1, np.nan, 3]) >>> var <xarray.Variable (x: 3)> array([ 1., nan, 3.]) >>> var.isnull() <xarray.Variable (x: 3)> array([False, True, False]) """ from .computation import apply_ufunc if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) return apply_ufunc( duck_array_ops.isnull, self, dask="allowed", keep_attrs=keep_attrs, ) def notnull(self, keep_attrs: bool = None): """Test each value in the array for whether it is not a missing value. Returns ------- notnull : Variable Same type and shape as object, but the dtype of the data is bool. See Also -------- pandas.notnull Examples -------- >>> var = xr.Variable("x", [1, np.nan, 3]) >>> var <xarray.Variable (x: 3)> array([ 1., nan, 3.]) >>> var.notnull() <xarray.Variable (x: 3)> array([ True, False, True]) """ from .computation import apply_ufunc if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) return apply_ufunc( duck_array_ops.notnull, self, dask="allowed", keep_attrs=keep_attrs, ) @property def real(self): return type(self)(self.dims, self.data.real, self._attrs) @property def imag(self): return type(self)(self.dims, self.data.imag, self._attrs) def __array_wrap__(self, obj, context=None): return Variable(self.dims, obj) @staticmethod def _unary_op(f): @functools.wraps(f) def func(self, *args, **kwargs): keep_attrs = kwargs.pop("keep_attrs", None) if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) with np.errstate(all="ignore"): result = self.__array_wrap__(f(self.data, *args, **kwargs)) if keep_attrs: result.attrs = self.attrs return result return func @staticmethod def _binary_op(f, reflexive=False, **ignored_kwargs): @functools.wraps(f) def func(self, other): if isinstance(other, (xr.DataArray, xr.Dataset)): return NotImplemented self_data, other_data, dims = _broadcast_compat_data(self, other) keep_attrs = _get_keep_attrs(default=False) attrs = self._attrs if keep_attrs else None with np.errstate(all="ignore"): new_data = ( f(self_data, other_data) if not reflexive else f(other_data, self_data) ) result = Variable(dims, new_data, attrs=attrs) return result return func @staticmethod def _inplace_binary_op(f): @functools.wraps(f) def func(self, other): if isinstance(other, xr.Dataset): raise TypeError("cannot add a Dataset to a Variable in-place") self_data, other_data, dims = _broadcast_compat_data(self, other) if dims != self.dims: raise ValueError("dimensions cannot change for in-place operations") with np.errstate(all="ignore"): self.values = f(self_data, other_data) return self return func def _to_numeric(self, offset=None, datetime_unit=None, dtype=float): """A (private) method to convert datetime array to numeric dtype See duck_array_ops.datetime_to_numeric """ numeric_array = duck_array_ops.datetime_to_numeric( self.data, offset, datetime_unit, dtype ) return type(self)(self.dims, numeric_array, self._attrs) def _unravel_argminmax(
self, argminmax: str, dim: Union[Hashable, Sequence[Hashable], None], axis: Union[int, None], keep_attrs: Optional[bool], skipna: Optional[bool], ) -> Union["Variable", Dict[Hashable, "Variable"]]: """Apply argmin or argmax over one or more dimensions, returning the result as a dict of DataArray that can be passed directly to isel. """ if dim is None and axis is None: warnings.warn( "Behaviour of argmin/argmax with neither dim nor axis argument will " "change to return a dict of indices of each dimension. To get a " "single, flat index, please use np.argmin(da.data) or " "np.argmax(da.data) instead of da.argmin() or da.argmax().", DeprecationWarning, stacklevel=3, ) argminmax_func = getattr(duck_array_ops, argminmax) if dim is ...: # In future, should do this also when (dim is None and axis is None) dim = self.dims if ( dim is None or axis is not None or not isinstance(dim, Sequence) or isinstance(dim, str) ): # Return int index if single dimension is passed, and is not part of a # sequence return self.reduce( argminmax_func, dim=dim, axis=axis, keep_attrs=keep_attrs, skipna=skipna ) # Get a name for the new dimension that does not conflict with any existing # dimension newdimname = "_unravel_argminmax_dim_0" count = 1 while newdimname in self.dims: newdimname = f"_unravel_argminmax_dim_{count}" count += 1 stacked = self.stack({newdimname: dim}) result_dims = stacked.dims[:-1] reduce_shape = tuple(self.sizes[d] for d in dim) result_flat_indices = stacked.reduce(argminmax_func, axis=-1, skipna=skipna) result_unravelled_indices = duck_array_ops.unravel_index( result_flat_indices.data, reduce_shape ) result = { d: Variable(dims=result_dims, data=i) for d, i in zip(dim, result_unravelled_indices) } if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) if keep_attrs: for v in result.values(): v.attrs = self.attrs return result def argmin( self, dim: Union[Hashable, Sequence[Hashable]] = None, axis: int = None, keep_attrs: bool = None, skipna: bool = None, ) -> Union["Variable", Dict[Hashable, "Variable"]]: """Index or indices of the minimum of the Variable over one or more dimensions. If a sequence is passed to 'dim', then result returned as dict of Variables, which can be passed directly to isel(). If a single str is passed to 'dim' then returns a Variable with dtype int. If there are multiple minima, the indices of the first one found will be returned. Parameters ---------- dim : hashable, sequence of hashable or ..., optional The dimensions over which to find the minimum. By default, finds minimum over all dimensions - for now returning an int for backward compatibility, but this is deprecated, in future will return a dict with indices for all dimensions; to return a dict with all dimensions now, pass '...'. axis : int, optional Axis over which to apply `argmin`. Only one of the 'dim' and 'axis' arguments can be supplied. keep_attrs : bool, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. skipna : bool, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). 
Returns ------- result : Variable or dict of Variable See also -------- DataArray.argmin, DataArray.idxmin """ return self._unravel_argminmax("argmin", dim, axis, keep_attrs, skipna) def argmax( self, dim: Union[Hashable, Sequence[Hashable]] = None, axis: int = None, keep_attrs: bool = None, skipna: bool = None, ) -> Union["Variable", Dict[Hashable, "Variable"]]: """Index or indices of the maximum of the Variable over one or more dimensions. If a sequence is passed to 'dim', then result returned as dict of Variables, which can be passed directly to isel(). If a single str is passed to 'dim' then returns a Variable with dtype int. If there are multiple maxima, the indices of the first one found will be returned. Parameters ---------- dim : hashable, sequence of hashable or ..., optional The dimensions over which to find the maximum. By default, finds maximum over all dimensions - for now returning an int for backward compatibility, but this is deprecated, in future will return a dict with indices for all dimensions; to return a dict with all dimensions now, pass '...'. axis : int, optional Axis over which to apply `argmax`. Only one of the 'dim' and 'axis' arguments can be supplied. keep_attrs : bool, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. skipna : bool, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). Returns ------- result : Variable or dict of Variable See also -------- DataArray.argmax, DataArray.idxmax """ return self._unravel_argminmax("argmax", dim, axis, keep_attrs, skipna) ops.inject_all_ops_and_reduce_methods(Variable) class IndexVariable(Variable): """Wrapper for accommodating a pandas.Index in an xarray.Variable. IndexVariable preserves loaded values in the form of a pandas.Index instead of a NumPy array. Hence, their values are immutable and must always be one-dimensional. They also have a name property, which is the name of their sole dimension unless another name is given. """ __slots__ = () def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False): super().__init__(dims, data, attrs, encoding, fastpath) if self.ndim != 1: raise ValueError("%s objects must be 1-dimensional" % type(self).__name__) # Unlike in Variable, always eagerly load values into memory if not isinstance(self._data, PandasIndexAdapter): self._data = PandasIndexAdapter(self._data) def __dask_tokenize__(self): from dask.base import normalize_token # Don't waste time converting pd.Index to np.ndarray return normalize_token((type(self), self._dims, self._data.array, self._attrs)) def load(self): # data is already loaded into memory for IndexVariable return self # https://github.com/python/mypy/issues/1465 @Variable.data.setter # type: ignore def data(self, data): raise ValueError( f"Cannot assign to the .data attribute of dimension coordinate a.k.a IndexVariable {self.name!r}. " f"Please use DataArray.assign_coords, Dataset.assign_coords or Dataset.assign as appropriate." ) @Variable.values.setter # type: ignore def values(self, values): raise ValueError( f"Cannot assign to the .values attribute of dimension coordinate a.k.a IndexVariable {self.name!r}. " f"Please use DataArray.assign_coords, Dataset.assign_coords or Dataset.assign as appropriate."
) def chunk(self, chunks={}, name=None, lock=False): # Dummy - do not chunk. This method is invoked e.g. by Dataset.chunk() return self.copy(deep=False) def _as_sparse(self, sparse_format=_default, fill_value=_default): # Dummy return self.copy(deep=False) def _to_dense(self): # Dummy return self.copy(deep=False) def _finalize_indexing_result(self, dims, data): if getattr(data, "ndim", 0) != 1: # returns Variable rather than IndexVariable if multi-dimensional return Variable(dims, data, self._attrs, self._encoding) else: return type(self)(dims, data, self._attrs, self._encoding, fastpath=True) def __setitem__(self, key, value): raise TypeError("%s values cannot be modified" % type(self).__name__) @classmethod def concat(cls, variables, dim="concat_dim", positions=None, shortcut=False): """Specialized version of Variable.concat for IndexVariable objects. This exists because we want to avoid converting Index objects to NumPy arrays, if possible. """ if not isinstance(dim, str): (dim,) = dim.dims variables = list(variables) first_var = variables[0] if any(not isinstance(v, cls) for v in variables): raise TypeError( "IndexVariable.concat requires that all input " "variables be IndexVariable objects" ) indexes = [v._data.array for v in variables] if not indexes: data = [] else: data = indexes[0].append(indexes[1:]) if positions is not None: indices = nputils.inverse_permutation(np.concatenate(positions)) data = data.take(indices) attrs = dict(first_var.attrs) if not shortcut: for var in variables: if var.dims != first_var.dims: raise ValueError("inconsistent dimensions") utils.remove_incompatible_items(attrs, var.attrs) return cls(first_var.dims, data, attrs) def copy(self, deep=True, data=None): """Returns a copy of this object. `deep` is ignored since data is stored in the form of pandas.Index, which is already immutable. Dimensions, attributes and encodings are always copied. Use `data` to create a new object with the same structure as original but entirely new data. Parameters ---------- deep : bool, optional Deep is ignored when data is given. Whether the data array is loaded into memory and copied onto the new object. Default is True. data : array_like, optional Data to use in the new object. Must have same shape as original. Returns ------- object : Variable New object with dimensions, attributes, encodings, and optionally data copied from original. """ if data is None: data = self._data.copy(deep=deep) else: data = as_compatible_data(data) if self.shape != data.shape: raise ValueError( "Data shape {} must match shape of object {}".format( data.shape, self.shape ) ) return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True) def equals(self, other, equiv=None): # if equiv is specified, super up if equiv is not None: return super().equals(other, equiv) # otherwise use the native index equals, rather than looking at _data other = getattr(other, "variable", other) try: return self.dims == other.dims and self._data_equals(other) except (TypeError, AttributeError): return False def _data_equals(self, other): return self.to_index().equals(other.to_index()) def to_index_variable(self): """Return this variable as an xarray.IndexVariable""" return self to_coord = utils.alias(to_index_variable, "to_coord") def to_index(self): """Convert this variable to a pandas.Index""" # n.b. 
creating a new pandas.Index from an old pandas.Index is # basically free as pandas.Index objects are immutable assert self.ndim == 1 index = self._data.array if isinstance(index, pd.MultiIndex): # set default names for multi-index unnamed levels so that # we can safely rename dimension / coordinate later valid_level_names = [ name or "{}_level_{}".format(self.dims[0], i) for i, name in enumerate(index.names) ] index = index.set_names(valid_level_names) else: index = index.set_names(self.name) return index @property def level_names(self): """Return MultiIndex level names or None if this IndexVariable has no MultiIndex. """ index = self.to_index() if isinstance(index, pd.MultiIndex): return index.names else: return None def get_level_variable(self, level): """Return a new IndexVariable from a given MultiIndex level.""" if self.level_names is None: raise ValueError("IndexVariable %r has no MultiIndex" % self.name) index = self.to_index() return type(self)(self.dims, index.get_level_values(level)) @property def name(self): return self.dims[0] @name.setter def name(self, value): raise AttributeError("cannot modify name of IndexVariable in-place") # for backwards compatibility Coordinate = utils.alias(IndexVariable, "Coordinate") def _unified_dims(variables): # validate dimensions all_dims = {} for var in variables: var_dims = var.dims if len(set(var_dims)) < len(var_dims): raise ValueError( "broadcasting cannot handle duplicate " "dimensions: %r" % list(var_dims) ) for d, s in zip(var_dims, var.shape): if d not in all_dims: all_dims[d] = s elif all_dims[d] != s: raise ValueError( "operands cannot be broadcast together " "with mismatched lengths for dimension %r: %s" % (d, (all_dims[d], s)) ) return all_dims def _broadcast_compat_variables(*variables): """Create broadcast compatible variables, with the same dimensions. Unlike the result of broadcast_variables(), some variables may have dimensions of size 1 instead of the size of the broadcast dimension. """ dims = tuple(_unified_dims(variables)) return tuple(var.set_dims(dims) if var.dims != dims else var for var in variables) def broadcast_variables(*variables): """Given any number of variables, return variables with matching dimensions and broadcast data. The data on the returned variables will be a view of the data on the corresponding original arrays, but dimensions will be reordered and inserted so that both broadcast arrays have the same dimensions. The new dimensions are sorted in order of appearance in the first variable's dimensions followed by the second variable's dimensions. """ dims_map = _unified_dims(variables) dims_tuple = tuple(dims_map) return tuple( var.set_dims(dims_map) if var.dims != dims_tuple else var for var in variables ) def _broadcast_compat_data(self, other): if all(hasattr(other, attr) for attr in ["dims", "data", "shape", "encoding"]): # `other` satisfies the necessary Variable API for broadcast_variables new_self, new_other = _broadcast_compat_variables(self, other) self_data = new_self.data other_data = new_other.data dims = new_self.dims else: # rely on numpy broadcasting rules self_data = self.data other_data = other dims = self.dims return self_data, other_data, dims def concat(variables, dim="concat_dim", positions=None, shortcut=False): """Concatenate variables along a new or existing dimension. Parameters ---------- variables : iterable of Variable Arrays to stack together. Each variable is expected to have matching dimensions and shape except for along the stacked dimension.
dim : str or DataArray, optional Name of the dimension to stack along. This can either be a new dimension name, in which case it is added along axis=0, or an existing dimension name, in which case the location of the dimension is unchanged. Where to insert the new dimension is determined by the first variable. positions : None or list of array-like, optional List of integer arrays which specifies the integer positions to which to assign each dataset along the concatenated dimension. If not supplied, objects are concatenated in the provided order. shortcut : bool, optional This option is used internally to speed-up groupby operations. If `shortcut` is True, some checks of internal consistency between arrays to concatenate are skipped. Returns ------- stacked : Variable Concatenated Variable formed by stacking all the supplied variables along the given dimension. """ variables = list(variables) if all(isinstance(v, IndexVariable) for v in variables): return IndexVariable.concat(variables, dim, positions, shortcut) else: return Variable.concat(variables, dim, positions, shortcut) def assert_unique_multiindex_level_names(variables): """Check for uniqueness of MultiIndex level names in all given variables. Not public API. Used for checking consistency of DataArray and Dataset objects. """ level_names = defaultdict(list) all_level_names = set() for var_name, var in variables.items(): if isinstance(var._data, PandasIndexAdapter): idx_level_names = var.to_index_variable().level_names if idx_level_names is not None: for n in idx_level_names: level_names[n].append(f"{n!r} ({var_name})") if idx_level_names: all_level_names.update(idx_level_names) for k, v in level_names.items(): if k in variables: v.append("(%s)" % k) duplicate_names = [v for v in level_names.values() if len(v) > 1] if duplicate_names: conflict_str = "\n".join(", ".join(v) for v in duplicate_names) raise ValueError("conflicting MultiIndex level name(s):\n%s" % conflict_str) # Check for conflicts between level names and dimensions GH:2299 for k, v in variables.items(): for d in v.dims: if d in all_level_names: raise ValueError( "conflicting level / dimension names. {} " "already exists as a level name.".format(d) )
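# An illustrative usage sketch (an addition, not part of the original module): broadcast_variables aligns Variables onto shared dimensions. # >>> a = Variable(("x",), [1, 2, 3]) # >>> b = Variable(("y",), [10, 20]) # >>> a2, b2 = broadcast_variables(a, b) # >>> a2.dims, a2.shape # (('x', 'y'), (3, 2)) # >>> b2.dims, b2.shape # (('x', 'y'), (3, 2))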
36.664265
124
0.589674
87,504
0.859534
0
0
9,954
0.097776
0
0
47,897
0.470482
0a6f4ad99174d6090d3cbdb6c9bcc1d787eae3b4
223
py
Python
codeforces.com/1186A/solution.py
zubtsov/competitive-programming
919d63130144347d7f6eddcf8f5bc2afb85fddf3
[ "MIT" ]
null
null
null
codeforces.com/1186A/solution.py
zubtsov/competitive-programming
919d63130144347d7f6eddcf8f5bc2afb85fddf3
[ "MIT" ]
null
null
null
codeforces.com/1186A/solution.py
zubtsov/competitive-programming
919d63130144347d7f6eddcf8f5bc2afb85fddf3
[ "MIT" ]
null
null
null
# Each participant needs one pen and one notebook, so the answer is 'Yes' exactly when both supplies cover the number of participants. number_of_participants, number_of_pens, number_of_notebooks = map(int, input().split()) if number_of_pens >= number_of_participants and number_of_notebooks >= number_of_participants: print('Yes') else: print('No')
31.857143
94
0.7713
0
0
0
0
0
0
0
0
9
0.040359
0a7052f7029ee061d74d603abefe9574ef7b3461
114
py
Python
DLA/__main__.py
StanczakDominik/DLA
bf63592a5ac96ffef639e7a0c80d7d52ff776322
[ "MIT" ]
null
null
null
DLA/__main__.py
StanczakDominik/DLA
bf63592a5ac96ffef639e7a0c80d7d52ff776322
[ "MIT" ]
null
null
null
DLA/__main__.py
StanczakDominik/DLA
bf63592a5ac96ffef639e7a0c80d7d52ff776322
[ "MIT" ]
null
null
null
# Run a single DLA (diffusion-limited aggregation) simulation; judging from the names, gotosize appears to set intermediate cluster-size checkpoints, and the results are then plotted. from DLA import main_single d = main_single(1, gotosize=[1e4, 5e4]) d.plot_particles() d.plot_mass_distribution()
22.8
39
0.780702
0
0
0
0
0
0
0
0
0
0
0a70ca1b5958248a2b51b4d49a2d791ec9ec77e7
36,386
py
Python
pyamf/tests/test_util.py
bulutistan/Py3AMF
3de53095b52fe2bf82b69ba5ad0b894b53045f7e
[ "MIT" ]
42
2017-04-17T11:40:25.000Z
2021-09-19T09:59:31.000Z
pyamf/tests/test_util.py
bulutistan/Py3AMF
3de53095b52fe2bf82b69ba5ad0b894b53045f7e
[ "MIT" ]
8
2017-07-27T07:39:30.000Z
2021-10-19T09:49:09.000Z
pyamf/tests/test_util.py
bulutistan/Py3AMF
3de53095b52fe2bf82b69ba5ad0b894b53045f7e
[ "MIT" ]
15
2017-05-16T12:46:33.000Z
2021-09-20T02:30:57.000Z
# -*- coding: utf-8 -*- # # Copyright (c) The PyAMF Project. # See LICENSE.txt for details. """ Tests for AMF utilities. @since: 0.1.0 """ import unittest from datetime import datetime from io import BytesIO import pyamf from pyamf import util from pyamf.tests.util import replace_dict PosInf = 1e300000 NegInf = -1e300000 NaN = PosInf / PosInf def isNaN(val): return str(float(val)) == str(NaN) def isPosInf(val): return str(float(val)) == str(PosInf) def isNegInf(val): return str(float(val)) == str(NegInf) class TimestampTestCase(unittest.TestCase): """ Test UTC timestamps. """ def test_get_timestamp(self): self.assertEqual( util.get_timestamp(datetime(2007, 11, 12)), 1194825600 ) def test_get_datetime(self): self.assertEqual(util.get_datetime(1194825600), datetime(2007, 11, 12)) def test_get_negative_datetime(self): self.assertEqual(util.get_datetime(-31536000), datetime(1969, 1, 1)) def test_preserved_microseconds(self): dt = datetime(2009, 3, 8, 23, 30, 47, 770122) ts = util.get_timestamp(dt) self.assertEqual(util.get_datetime(ts), dt) class StringIOTestCase(unittest.TestCase): def test_create(self): sp = util.BufferedByteStream() self.assertEqual(sp.tell(), 0) self.assertEqual(sp.getvalue(), b'') self.assertEqual(len(sp), 0) self.assertEqual(sp.getvalue(), b'') sp = util.BufferedByteStream(None) self.assertEqual(sp.tell(), 0) self.assertEqual(sp.getvalue(), b'') self.assertEqual(len(sp), 0) sp = util.BufferedByteStream('') self.assertEqual(sp.tell(), 0) self.assertEqual(sp.getvalue(), b'') self.assertEqual(len(sp), 0) sp = util.BufferedByteStream('spam') self.assertEqual(sp.tell(), 0) self.assertEqual(sp.getvalue(), b'spam') self.assertEqual(len(sp), 4) sp = util.BufferedByteStream(BytesIO('this is a test'.encode())) self.assertEqual(sp.tell(), 0) self.assertEqual(sp.getvalue(), b'this is a test') self.assertEqual(len(sp), 14) self.assertRaises(TypeError, util.BufferedByteStream, self) def test_getvalue(self): sp = util.BufferedByteStream() sp.write('asdfasdf') self.assertEqual(sp.getvalue(), b'asdfasdf') sp.write('spam') self.assertEqual(sp.getvalue(), b'asdfasdfspam') def test_read(self): sp = util.BufferedByteStream('this is a test') self.assertEqual(len(sp), 14) self.assertEqual(sp.read(1), b't') self.assertEqual(sp.getvalue(), b'this is a test') self.assertEqual(len(sp), 14) self.assertEqual(sp.read(10), b'his is a t') self.assertEqual(sp.read(), b'est') def test_seek(self): sp = util.BufferedByteStream('abcdefghijklmnopqrstuvwxyz') self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz') self.assertEqual(sp.tell(), 0) # Relative to the beginning of the stream sp.seek(0, 0) self.assertEqual(sp.tell(), 0) self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz') self.assertEqual(sp.read(1), b'a') self.assertEqual(len(sp), 26) sp.seek(10, 0) self.assertEqual(sp.tell(), 10) self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz') self.assertEqual(sp.read(1), b'k') self.assertEqual(len(sp), 26) sp.seek(-5, 1) self.assertEqual(sp.tell(), 6) self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz') self.assertEqual(sp.read(1), b'g') self.assertEqual(len(sp), 26) sp.seek(-3, 2) self.assertEqual(sp.tell(), 23) self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz') self.assertEqual(sp.read(1), b'x') self.assertEqual(len(sp), 26) def test_tell(self): sp = util.BufferedByteStream('abcdefghijklmnopqrstuvwxyz') self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz') self.assertEqual(len(sp), 26) self.assertEqual(sp.tell(), 0) sp.read(1) 
self.assertEqual(sp.tell(), 1) self.assertEqual(sp.getvalue(), b'abcdefghijklmnopqrstuvwxyz') self.assertEqual(len(sp), 26) sp.read(5) self.assertEqual(sp.tell(), 6) def test_truncate(self): sp = util.BufferedByteStream('abcdef') self.assertEqual(sp.getvalue(), b'abcdef') self.assertEqual(len(sp), 6) sp.truncate() self.assertEqual(sp.getvalue(), b'') self.assertEqual(len(sp), 0) sp = util.BufferedByteStream('hello') self.assertEqual(sp.getvalue(), b'hello') self.assertEqual(len(sp), 5) sp.truncate(3) self.assertEqual(sp.getvalue(), b'hel') self.assertEqual(len(sp), 3) def test_write(self): sp = util.BufferedByteStream() self.assertEqual(sp.getvalue(), b'') self.assertEqual(len(sp), 0) self.assertEqual(sp.tell(), 0) sp.write('hello') self.assertEqual(sp.getvalue(), b'hello') self.assertEqual(len(sp), 5) self.assertEqual(sp.tell(), 5) sp = util.BufferedByteStream(b'xyz') self.assertEqual(sp.getvalue(), b'xyz') self.assertEqual(len(sp), 3) self.assertEqual(sp.tell(), 0) sp.write('abc') self.assertEqual(sp.getvalue(), b'abc') self.assertEqual(len(sp), 3) self.assertEqual(sp.tell(), 3) def test_len(self): sp = util.BufferedByteStream() self.assertEqual(sp.getvalue(), b'') self.assertEqual(len(sp), 0) self.assertEqual(sp.tell(), 0) sp.write('xyz') self.assertEqual(len(sp), 3) sp = util.BufferedByteStream('foo') self.assertEqual(len(sp), 3) sp.seek(0, 2) sp.write('xyz') self.assertEqual(len(sp), 6) def test_consume(self): sp = util.BufferedByteStream() self.assertEqual(sp.getvalue(), b'') self.assertEqual(sp.tell(), 0) sp.consume() self.assertEqual(sp.getvalue(), b'') self.assertEqual(sp.tell(), 0) sp = util.BufferedByteStream('foobar') self.assertEqual(sp.getvalue(), b'foobar') self.assertEqual(sp.tell(), 0) sp.seek(3) self.assertEqual(sp.tell(), 3) sp.consume() self.assertEqual(sp.getvalue(), b'bar') self.assertEqual(sp.tell(), 0) # from ticket 451 - http://pyamf.org/ticket/451 sp = util.BufferedByteStream('abcdef') # move the stream pos to the end sp.read() self.assertEqual(len(sp), 6) sp.consume() self.assertEqual(len(sp), 0) sp = util.BufferedByteStream('abcdef') sp.seek(6) sp.consume() self.assertEqual(sp.getvalue(), b'') class DataTypeMixInTestCase(unittest.TestCase): endians = ('>', '<') # big, little def _write_endian(self, obj, func, args, expected): old_endian = obj.endian for x in range(2): obj.truncate() obj.endian = self.endians[x] func(*args) self.assertEqual(obj.getvalue(), expected[x]) obj.endian = old_endian def _read_endian(self, data, func, args, expected): for x in range(2): obj = util.BufferedByteStream(data[x]) obj.endian = self.endians[x] result = getattr(obj, func)(*args) self.assertEqual(result, expected) def test_read_uchar(self): x = util.BufferedByteStream(b'\x00\xff') self.assertEqual(x.read_uchar(), 0) self.assertEqual(x.read_uchar(), 255) def test_write_uchar(self): x = util.BufferedByteStream() x.write_uchar(0) self.assertEqual(x.getvalue(), b'\x00') x.write_uchar(255) self.assertEqual(x.getvalue(), b'\x00\xff') self.assertRaises(OverflowError, x.write_uchar, 256) self.assertRaises(OverflowError, x.write_uchar, -1) self.assertRaises(TypeError, x.write_uchar, 'f') def test_read_char(self): x = util.BufferedByteStream(b'\x00\x7f\xff\x80') self.assertEqual(x.read_char(), 0) self.assertEqual(x.read_char(), 127) self.assertEqual(x.read_char(), -1) self.assertEqual(x.read_char(), -128) def test_write_char(self): x = util.BufferedByteStream() x.write_char(0) x.write_char(-128) x.write_char(127) self.assertEqual(x.getvalue(), b'\x00\x80\x7f') 
self.assertRaises(OverflowError, x.write_char, 128)
        self.assertRaises(OverflowError, x.write_char, -129)
        self.assertRaises(TypeError, x.write_char, 'f')

    def test_write_ushort(self):
        x = util.BufferedByteStream()

        self._write_endian(x, x.write_ushort, (0,), (b'\x00\x00', b'\x00\x00'))
        self._write_endian(x, x.write_ushort, (12345,), (b'09', b'90'))
        self._write_endian(
            x, x.write_ushort, (65535,), (b'\xff\xff', b'\xff\xff')
        )

        self.assertRaises(OverflowError, x.write_ushort, 65536)
        self.assertRaises(OverflowError, x.write_ushort, -1)
        self.assertRaises(TypeError, x.write_ushort, 'aa')

    def test_read_ushort(self):
        self._read_endian([b'\x00\x00', b'\x00\x00'], 'read_ushort', (), 0)
        self._read_endian([b'09', b'90'], 'read_ushort', (), 12345)
        self._read_endian([b'\xff\xff', b'\xff\xff'], 'read_ushort', (), 65535)

    def test_write_short(self):
        x = util.BufferedByteStream()

        self._write_endian(
            x, x.write_short, (-5673,), (b'\xe9\xd7', b'\xd7\xe9')
        )
        self._write_endian(
            x, x.write_short, (32767,), (b'\x7f\xff', b'\xff\x7f')
        )
        self.assertRaises(OverflowError, x.write_short, 32768)
        self.assertRaises(OverflowError, x.write_short, -32769)
        self.assertRaises(TypeError, x.write_short, '\x00\x00')

    def test_read_short(self):
        self._read_endian([b'\xe9\xd7', b'\xd7\xe9'], 'read_short', (), -5673)
        self._read_endian([b'\x7f\xff', b'\xff\x7f'], 'read_short', (), 32767)

    def test_write_ulong(self):
        x = util.BufferedByteStream()

        self._write_endian(
            x, x.write_ulong, (0,), (b'\x00\x00\x00\x00', b'\x00\x00\x00\x00')
        )
        self._write_endian(
            x, x.write_ulong, (16810049,), (b'\x01\x00\x80A', b'A\x80\x00\x01')
        )
        self._write_endian(
            x, x.write_ulong, (4294967295,),
            (b'\xff\xff\xff\xff', b'\xff\xff\xff\xff')
        )

        self.assertRaises(OverflowError, x.write_ulong, 4294967296)
        self.assertRaises(OverflowError, x.write_ulong, -1)
        self.assertRaises(TypeError, x.write_ulong, '\x00\x00\x00\x00')

    def test_read_ulong(self):
        self._read_endian(
            [b'\x00\x00\x00\x00', b'\x00\x00\x00\x00'], 'read_ulong', (), 0
        )
        self._read_endian(
            [b'\x01\x00\x80A', b'A\x80\x00\x01'], 'read_ulong', (), 16810049
        )
        self._read_endian(
            [b'\xff\xff\xff\xff', b'\xff\xff\xff\xff'], 'read_ulong', (),
            4294967295
        )

    def test_write_long(self):
        x = util.BufferedByteStream()

        self._write_endian(
            x, x.write_long, (0,), (b'\x00\x00\x00\x00', b'\x00\x00\x00\x00')
        )
        self._write_endian(
            x, x.write_long, (16810049,), (b'\x01\x00\x80A', b'A\x80\x00\x01')
        )
        self._write_endian(
            x, x.write_long, (2147483647,),
            (b'\x7f\xff\xff\xff', b'\xff\xff\xff\x7f')
        )
        self._write_endian(
            x, x.write_long, (-2147483648,),
            (b'\x80\x00\x00\x00', b'\x00\x00\x00\x80')
        )

        self.assertRaises(OverflowError, x.write_long, 2147483648)
        self.assertRaises(OverflowError, x.write_long, -2147483649)
        self.assertRaises(TypeError, x.write_long, '\x00\x00\x00\x00')

    def test_read_long(self):
        self._read_endian(
            [b'\xff\xff\xcf\xc7', b'\xc7\xcf\xff\xff'], 'read_long', (),
            -12345
        )
        self._read_endian(
            [b'\x00\x00\x00\x00', b'\x00\x00\x00\x00'], 'read_long', (), 0
        )
        self._read_endian(
            [b'\x01\x00\x80A', b'A\x80\x00\x01'], 'read_long', (), 16810049
        )
        self._read_endian(
            [b'\x7f\xff\xff\xff', b'\xff\xff\xff\x7f'], 'read_long', (),
            2147483647
        )

    def test_write_u24bit(self):
        x = util.BufferedByteStream()

        self._write_endian(
            x, x.write_24bit_uint, (0,), (b'\x00\x00\x00', b'\x00\x00\x00')
        )
        self._write_endian(
            x, x.write_24bit_uint, (4292609,), (b'A\x80\x01', b'\x01\x80A')
        )
        self._write_endian(
            x, x.write_24bit_uint, (16777215,),
            (b'\xff\xff\xff', b'\xff\xff\xff')
        )

        self.assertRaises(OverflowError, x.write_24bit_uint, 16777216)
self.assertRaises(OverflowError, x.write_24bit_uint, -1) self.assertRaises(TypeError, x.write_24bit_uint, '\x00\x00\x00') def test_read_u24bit(self): self._read_endian( [b'\x00\x00\x00', b'\x00\x00\x00'], 'read_24bit_uint', (), 0 ) self._read_endian( [b'\x00\x00\x80', b'\x80\x00\x00'], 'read_24bit_uint', (), 128 ) self._read_endian( [b'\x80\x00\x00', b'\x00\x00\x80'], 'read_24bit_uint', (), 8388608 ) self._read_endian( [b'\xff\xff\x7f', b'\x7f\xff\xff'], 'read_24bit_uint', (), 16777087 ) self._read_endian( [b'\x7f\xff\xff', b'\xff\xff\x7f'], 'read_24bit_uint', (), 8388607 ) def test_write_24bit(self): x = util.BufferedByteStream() self._write_endian( x, x.write_24bit_int, (0,), (b'\x00\x00\x00', b'\x00\x00\x00') ) self._write_endian( x, x.write_24bit_int, (128,), (b'\x00\x00\x80', b'\x80\x00\x00') ) self._write_endian( x, x.write_24bit_int, (8388607,), (b'\x7f\xff\xff', b'\xff\xff\x7f') ) self._write_endian( x, x.write_24bit_int, (-1,), (b'\xff\xff\xff', b'\xff\xff\xff') ) self._write_endian( x, x.write_24bit_int, (-8388608,), (b'\x80\x00\x00', b'\x00\x00\x80') ) self.assertRaises(OverflowError, x.write_24bit_int, 8388608) self.assertRaises(OverflowError, x.write_24bit_int, -8388609) self.assertRaises(TypeError, x.write_24bit_int, '\x00\x00\x00') def test_read_24bit(self): self._read_endian( [b'\x00\x00\x00', b'\x00\x00\x00'], 'read_24bit_int', (), 0 ) self._read_endian( [b'\x00\x00\x80', b'\x80\x00\x00'], 'read_24bit_int', (), 128 ) self._read_endian( [b'\x80\x00\x00', b'\x00\x00\x80'], 'read_24bit_int', (), -8388608 ) self._read_endian( [b'\xff\xff\x7f', b'\x7f\xff\xff'], 'read_24bit_int', (), -129 ) self._read_endian( [b'\x7f\xff\xff', b'\xff\xff\x7f'], 'read_24bit_int', (), 8388607 ) def test_write_float(self): x = util.BufferedByteStream() self._write_endian( x, x.write_float, (0.2,), (b'>L\xcc\xcd', b'\xcd\xccL>') ) self.assertRaises(TypeError, x.write_float, 'foo') def test_read_float(self): self._read_endian( [b'?\x00\x00\x00', b'\x00\x00\x00?'], 'read_float', (), 0.5 ) def test_write_double(self): x = util.BufferedByteStream() self._write_endian( x, x.write_double, (0.2,), (b'?\xc9\x99\x99\x99\x99\x99\x9a', b'\x9a\x99\x99\x99\x99\x99\xc9?') ) self.assertRaises(TypeError, x.write_double, 'foo') def test_read_double(self): self._read_endian( [b'?\xc9\x99\x99\x99\x99\x99\x9a', b'\x9a\x99\x99\x99\x99\x99\xc9?'], 'read_double', (), 0.2 ) def test_write_utf8_string(self): x = util.BufferedByteStream() self._write_endian( x, x.write_utf8_string, (u'ᚠᛇᚻ',), [b'\xe1\x9a\xa0\xe1\x9b\x87\xe1\x9a\xbb'] * 2 ) self.assertRaises(TypeError, x.write_utf8_string, 1) self.assertRaises(TypeError, x.write_utf8_string, 1.0) self.assertRaises(TypeError, x.write_utf8_string, object()) x.write_utf8_string('\xff') def test_read_utf8_string(self): self._read_endian( [b'\xe1\x9a\xa0\xe1\x9b\x87\xe1\x9a\xbb'] * 2, 'read_utf8_string', (9,), u'ᚠᛇᚻ' ) def test_nan(self): x = util.BufferedByteStream(b'\xff\xf8\x00\x00\x00\x00\x00\x00') self.assertTrue(isNaN(x.read_double())) x = util.BufferedByteStream(b'\xff\xf0\x00\x00\x00\x00\x00\x00') self.assertTrue(isNegInf(x.read_double())) x = util.BufferedByteStream(b'\x7f\xf0\x00\x00\x00\x00\x00\x00') self.assertTrue(isPosInf(x.read_double())) # now test little endian x = util.BufferedByteStream(b'\x00\x00\x00\x00\x00\x00\xf8\xff') x.endian = '<' self.assertTrue(isNaN(x.read_double())) x = util.BufferedByteStream(b'\x00\x00\x00\x00\x00\x00\xf0\xff') x.endian = '<' self.assertTrue(isNegInf(x.read_double())) x = util.BufferedByteStream(b'\x00\x00\x00\x00\x00\x00\xf0\x7f') 
x.endian = '<' self.assertTrue(isPosInf(x.read_double())) def test_write_infinites(self): x = util.BufferedByteStream() self._write_endian(x, x.write_double, (NaN,), ( b'\xff\xf8\x00\x00\x00\x00\x00\x00', b'\x00\x00\x00\x00\x00\x00\xf8\xff' )) self._write_endian(x, x.write_double, (PosInf,), ( b'\x7f\xf0\x00\x00\x00\x00\x00\x00', b'\x00\x00\x00\x00\x00\x00\xf0\x7f' )) self._write_endian(x, x.write_double, (NegInf,), ( b'\xff\xf0\x00\x00\x00\x00\x00\x00', b'\x00\x00\x00\x00\x00\x00\xf0\xff' )) class BufferedByteStreamTestCase(unittest.TestCase): """ Tests for L{BufferedByteStream<util.BufferedByteStream>} """ def test_create(self): x = util.BufferedByteStream() self.assertEqual(x.getvalue(), b'') self.assertEqual(x.tell(), 0) x = util.BufferedByteStream('abc') self.assertEqual(x.getvalue(), b'abc') self.assertEqual(x.tell(), 0) def test_read(self): x = util.BufferedByteStream() self.assertEqual(x.tell(), 0) self.assertEqual(len(x), 0) self.assertRaises(IOError, x.read) self.assertRaises(IOError, x.read, 10) x.write('hello') x.seek(0) self.assertRaises(IOError, x.read, 10) self.assertEqual(x.read(), b'hello') def test_read_negative(self): """ @see: #799 """ x = util.BufferedByteStream() x.write('*' * 6000) x.seek(100) self.assertRaises(IOError, x.read, -345) def test_peek(self): x = util.BufferedByteStream('abcdefghijklmnopqrstuvwxyz') self.assertEqual(x.tell(), 0) self.assertEqual(x.peek(), b'a') self.assertEqual(x.peek(5), b'abcde') self.assertEqual(x.peek(-1), b'abcdefghijklmnopqrstuvwxyz') x.seek(10) self.assertEqual(x.peek(50), b'klmnopqrstuvwxyz') def test_eof(self): x = util.BufferedByteStream() self.assertTrue(x.at_eof()) x.write('hello') x.seek(0) self.assertFalse(x.at_eof()) x.seek(0, 2) self.assertTrue(x.at_eof()) def test_remaining(self): x = util.BufferedByteStream('spameggs') self.assertEqual(x.tell(), 0) self.assertEqual(x.remaining(), 8) x.seek(2) self.assertEqual(x.tell(), 2) self.assertEqual(x.remaining(), 6) def test_add(self): a = util.BufferedByteStream('a') b = util.BufferedByteStream('b') c = a + b self.assertTrue(isinstance(c, util.BufferedByteStream)) self.assertEqual(c.getvalue(), b'ab') self.assertEqual(c.tell(), 0) def test_add_pos(self): a = util.BufferedByteStream(b'abc') b = util.BufferedByteStream(b'def') a.seek(1) b.seek(0, 2) self.assertEqual(a.tell(), 1) self.assertEqual(b.tell(), 3) self.assertEqual(a.tell(), 1) self.assertEqual(b.tell(), 3) def test_append_types(self): # test non string types a = util.BufferedByteStream() self.assertRaises(TypeError, a.append, 234234) self.assertRaises(TypeError, a.append, 234.0) self.assertRaises(TypeError, a.append, 234234) self.assertRaises(TypeError, a.append, []) self.assertRaises(TypeError, a.append, {}) self.assertRaises(TypeError, a.append, lambda _: None) self.assertRaises(TypeError, a.append, ()) self.assertRaises(TypeError, a.append, object()) def test_append_string(self): """ Test L{util.BufferedByteStream.append} with C{str} objects. 
""" # test empty a = util.BufferedByteStream() self.assertEqual(a.getvalue(), b'') self.assertEqual(a.tell(), 0) self.assertEqual(len(a), 0) a.append('foo') self.assertEqual(a.getvalue(), b'foo') self.assertEqual(a.tell(), 0) # <-- pointer hasn't moved self.assertEqual(len(a), 3) # test pointer beginning, some data a = util.BufferedByteStream('bar') self.assertEqual(a.getvalue(), b'bar') self.assertEqual(a.tell(), 0) self.assertEqual(len(a), 3) a.append('gak') self.assertEqual(a.getvalue(), b'bargak') self.assertEqual(a.tell(), 0) # <-- pointer hasn't moved self.assertEqual(len(a), 6) # test pointer middle, some data a = util.BufferedByteStream('bar') a.seek(2) self.assertEqual(a.getvalue(), b'bar') self.assertEqual(a.tell(), 2) self.assertEqual(len(a), 3) a.append('gak') self.assertEqual(a.getvalue(), b'bargak') self.assertEqual(a.tell(), 2) # <-- pointer hasn't moved self.assertEqual(len(a), 6) # test pointer end, some data a = util.BufferedByteStream('bar') a.seek(0, 2) self.assertEqual(a.getvalue(), b'bar') self.assertEqual(a.tell(), 3) self.assertEqual(len(a), 3) a.append('gak') self.assertEqual(a.getvalue(), b'bargak') self.assertEqual(a.tell(), 3) # <-- pointer hasn't moved self.assertEqual(len(a), 6) class Foo(object): def getvalue(self): return b'foo' def __str__(self): raise AttributeError() a = util.BufferedByteStream() self.assertEqual(a.getvalue(), b'') self.assertEqual(a.tell(), 0) self.assertEqual(len(a), 0) a.append(Foo()) self.assertEqual(a.getvalue(), b'foo') self.assertEqual(a.tell(), 0) self.assertEqual(len(a), 3) def test_append_unicode(self): """ Test L{util.BufferedByteStream.append} with C{unicode} objects. """ # test empty a = util.BufferedByteStream() self.assertEqual(a.getvalue(), b'') self.assertEqual(a.tell(), 0) self.assertEqual(len(a), 0) a.append('foo') self.assertEqual(a.getvalue(), b'foo') self.assertEqual(a.tell(), 0) # <-- pointer hasn't moved self.assertEqual(len(a), 3) # test pointer beginning, some data a = util.BufferedByteStream('bar') self.assertEqual(a.getvalue(), b'bar') self.assertEqual(a.tell(), 0) self.assertEqual(len(a), 3) a.append('gak') self.assertEqual(a.getvalue(), b'bargak') self.assertEqual(a.tell(), 0) # <-- pointer hasn't moved self.assertEqual(len(a), 6) # test pointer middle, some data a = util.BufferedByteStream('bar') a.seek(2) self.assertEqual(a.getvalue(), b'bar') self.assertEqual(a.tell(), 2) self.assertEqual(len(a), 3) a.append('gak') self.assertEqual(a.getvalue(), b'bargak') self.assertEqual(a.tell(), 2) # <-- pointer hasn't moved self.assertEqual(len(a), 6) # test pointer end, some data a = util.BufferedByteStream('bar') a.seek(0, 2) self.assertEqual(a.getvalue(), b'bar') self.assertEqual(a.tell(), 3) self.assertEqual(len(a), 3) a.append('gak') self.assertEqual(a.getvalue(), b'bargak') self.assertEqual(a.tell(), 3) # <-- pointer hasn't moved self.assertEqual(len(a), 6) class Foo(object): def getvalue(self): return u'foo' def __str__(self): raise AttributeError() a = util.BufferedByteStream() self.assertEqual(a.getvalue(), b'') self.assertEqual(a.tell(), 0) self.assertEqual(len(a), 0) a.append(Foo()) self.assertEqual(a.getvalue(), b'foo') self.assertEqual(a.tell(), 0) self.assertEqual(len(a), 3) class DummyAlias(pyamf.ClassAlias): pass class AnotherDummyAlias(pyamf.ClassAlias): pass class YADummyAlias(pyamf.ClassAlias): pass class ClassAliasTestCase(unittest.TestCase): def setUp(self): self.old_aliases = pyamf.ALIAS_TYPES.copy() def tearDown(self): replace_dict(self.old_aliases, pyamf.ALIAS_TYPES) def test_simple(self): 
class A(object): pass pyamf.register_alias_type(DummyAlias, A) self.assertEqual(util.get_class_alias(A), DummyAlias) def test_nested(self): class A(object): pass class B(object): pass class C(object): pass pyamf.register_alias_type(DummyAlias, A, B, C) self.assertEqual(util.get_class_alias(B), DummyAlias) def test_multiple(self): class A(object): pass class B(object): pass class C(object): pass pyamf.register_alias_type(DummyAlias, A) pyamf.register_alias_type(AnotherDummyAlias, B) pyamf.register_alias_type(YADummyAlias, C) self.assertEqual(util.get_class_alias(B), AnotherDummyAlias) self.assertEqual(util.get_class_alias(C), YADummyAlias) self.assertEqual(util.get_class_alias(A), DummyAlias) def test_none_existant(self): self.assertEqual(util.get_class_alias(self.__class__), None) def test_subclass(self): class A(object): pass class B(A): pass pyamf.register_alias_type(DummyAlias, A) self.assertEqual(util.get_class_alias(B), DummyAlias) class IsClassSealedTestCase(unittest.TestCase): """ Tests for L{util.is_class_sealed} """ def test_new_mixed(self): class A(object): __slots__ = ['foo', 'bar'] class B(A): pass class C(B): __slots__ = ('spam', 'eggs') self.assertTrue(util.is_class_sealed(A)) self.assertFalse(util.is_class_sealed(B)) self.assertFalse(util.is_class_sealed(C)) def test_deep(self): class A(object): __slots__ = ['foo', 'bar'] class B(A): __slots__ = ('gak',) class C(B): pass self.assertTrue(util.is_class_sealed(A)) self.assertTrue(util.is_class_sealed(B)) self.assertFalse(util.is_class_sealed(C)) class GetClassMetaTestCase(unittest.TestCase): """ Tests for L{util.get_class_meta} """ def test_types(self): class A: pass class B(object): pass for t in ['', u'', 1, 1.0, 1, [], {}, object, object(), A(), B()]: self.assertRaises(TypeError, util.get_class_meta, t) def test_no_meta(self): class A: pass class B(object): pass empty = { 'readonly_attrs': None, 'static_attrs': None, 'synonym_attrs': None, 'proxy_attrs': None, 'dynamic': None, 'alias': None, 'amf3': None, 'exclude_attrs': None, 'proxy_attrs': None, 'external': None } self.assertEqual(util.get_class_meta(A), empty) self.assertEqual(util.get_class_meta(B), empty) def test_alias(self): class A: class __amf__: alias = 'foo.bar.Spam' class B(object): class __amf__: alias = 'foo.bar.Spam' meta = { 'readonly_attrs': None, 'static_attrs': None, 'synonym_attrs': None, 'proxy_attrs': None, 'dynamic': None, 'alias': 'foo.bar.Spam', 'amf3': None, 'proxy_attrs': None, 'exclude_attrs': None, 'external': None } self.assertEqual(util.get_class_meta(A), meta) self.assertEqual(util.get_class_meta(B), meta) def test_static(self): class A: class __amf__: static = ['foo', 'bar'] class B(object): class __amf__: static = ['foo', 'bar'] meta = { 'readonly_attrs': None, 'static_attrs': ['foo', 'bar'], 'synonym_attrs': None, 'proxy_attrs': None, 'dynamic': None, 'alias': None, 'amf3': None, 'exclude_attrs': None, 'external': None } self.assertEqual(util.get_class_meta(A), meta) self.assertEqual(util.get_class_meta(B), meta) def test_exclude(self): class A: class __amf__: exclude = ['foo', 'bar'] class B(object): class __amf__: exclude = ['foo', 'bar'] meta = { 'readonly_attrs': None, 'exclude_attrs': ['foo', 'bar'], 'synonym_attrs': None, 'proxy_attrs': None, 'dynamic': None, 'alias': None, 'amf3': None, 'static_attrs': None, 'proxy_attrs': None, 'external': None } self.assertEqual(util.get_class_meta(A), meta) self.assertEqual(util.get_class_meta(B), meta) def test_readonly(self): class A: class __amf__: readonly = ['foo', 'bar'] class B(object): 
class __amf__: readonly = ['foo', 'bar'] meta = { 'exclude_attrs': None, 'readonly_attrs': ['foo', 'bar'], 'synonym_attrs': None, 'proxy_attrs': None, 'dynamic': None, 'alias': None, 'amf3': None, 'static_attrs': None, 'external': None, 'proxy_attrs': None, } self.assertEqual(util.get_class_meta(A), meta) self.assertEqual(util.get_class_meta(B), meta) def test_amf3(self): class A: class __amf__: amf3 = True class B(object): class __amf__: amf3 = True meta = { 'exclude_attrs': None, 'proxy_attrs': None, 'synonym_attrs': None, 'readonly_attrs': None, 'proxy_attrs': None, 'dynamic': None, 'alias': None, 'amf3': True, 'static_attrs': None, 'external': None } self.assertEqual(util.get_class_meta(A), meta) self.assertEqual(util.get_class_meta(B), meta) def test_dynamic(self): class A: class __amf__: dynamic = False class B(object): class __amf__: dynamic = False meta = { 'exclude_attrs': None, 'proxy_attrs': None, 'synonym_attrs': None, 'readonly_attrs': None, 'proxy_attrs': None, 'dynamic': False, 'alias': None, 'amf3': None, 'static_attrs': None, 'external': None } self.assertEqual(util.get_class_meta(A), meta) self.assertEqual(util.get_class_meta(B), meta) def test_external(self): class A: class __amf__: external = True class B(object): class __amf__: external = True meta = { 'exclude_attrs': None, 'proxy_attrs': None, 'synonym_attrs': None, 'readonly_attrs': None, 'proxy_attrs': None, 'dynamic': None, 'alias': None, 'amf3': None, 'static_attrs': None, 'external': True } self.assertEqual(util.get_class_meta(A), meta) self.assertEqual(util.get_class_meta(B), meta) def test_dict(self): meta = { 'exclude': ['foo'], 'readonly': ['bar'], 'dynamic': False, 'alias': 'spam.eggs', 'proxy_attrs': None, 'synonym_attrs': None, 'amf3': True, 'static': ['baz'], 'external': True } class A: __amf__ = meta class B(object): __amf__ = meta ret = { 'readonly_attrs': ['bar'], 'static_attrs': ['baz'], 'proxy_attrs': None, 'dynamic': False, 'alias': 'spam.eggs', 'amf3': True, 'exclude_attrs': ['foo'], 'synonym_attrs': None, 'proxy_attrs': None, 'external': True } self.assertEqual(util.get_class_meta(A), ret) self.assertEqual(util.get_class_meta(B), ret) def test_proxy(self): class A: class __amf__: proxy = ['foo', 'bar'] class B(object): class __amf__: proxy = ['foo', 'bar'] meta = { 'exclude_attrs': None, 'readonly_attrs': None, 'proxy_attrs': ['foo', 'bar'], 'synonym_attrs': None, 'dynamic': None, 'alias': None, 'amf3': None, 'static_attrs': None, 'external': None } self.assertEqual(util.get_class_meta(A), meta) self.assertEqual(util.get_class_meta(B), meta) def test_synonym(self): class A: class __amf__: synonym = {'foo': 'bar'} class B(object): class __amf__: synonym = {'foo': 'bar'} meta = { 'exclude_attrs': None, 'readonly_attrs': None, 'proxy_attrs': None, 'synonym_attrs': {'foo': 'bar'}, 'dynamic': None, 'alias': None, 'amf3': None, 'static_attrs': None, 'external': None } self.assertEqual(util.get_class_meta(A), meta) self.assertEqual(util.get_class_meta(B), meta)
27.461132
81
0.542516
35,834
0.984505
0
0
0
0
0
0
6,202
0.170394
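The tests in the row above all revolve around endian-sensitive byte packing. As a minimal standalone sketch, the same byte layouts can be reproduced with the standard library's struct module, with no pyamf install required; the values mirror test_write_ushort and test_read_short:

import struct

# '>' is big-endian, '<' is little-endian; 'H' is an unsigned and
# 'h' a signed 16-bit short.
assert struct.pack('>H', 12345) == b'09'              # as in test_write_ushort
assert struct.pack('<H', 12345) == b'90'
assert struct.unpack('>h', b'\xe9\xd7')[0] == -5673   # as in test_read_short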
0a71715157a2be752f2c46cd1b41f44aab6ece59
3,087
py
Python
e-valuator.py
keocol/e-valuator
c2bab22e3debf08263fef57ee4135312a2bb2b0d
[ "MIT" ]
null
null
null
e-valuator.py
keocol/e-valuator
c2bab22e3debf08263fef57ee4135312a2bb2b0d
[ "MIT" ]
null
null
null
e-valuator.py
keocol/e-valuator
c2bab22e3debf08263fef57ee4135312a2bb2b0d
[ "MIT" ]
null
null
null
import dns.resolver
import sys
import platform
import re
from colorama import init, Fore

# pip install -r requirements.txt (colorama)

os_name = platform.platform()
if 'Windows' in os_name:
    init(convert=True)

print("""
███████╗░░░░░░██╗░░░██╗░█████╗░██╗░░░░░██╗░░░██╗░█████╗░████████╗░█████╗░██████╗░
██╔════╝░░░░░░██║░░░██║██╔══██╗██║░░░░░██║░░░██║██╔══██╗╚══██╔══╝██╔══██╗██╔══██╗
█████╗░░█████╗╚██╗░██╔╝███████║██║░░░░░██║░░░██║███████║░░░██║░░░██║░░██║██████╔╝
██╔══╝░░╚════╝░╚████╔╝░██╔══██║██║░░░░░██║░░░██║██╔══██║░░░██║░░░██║░░██║██╔══██╗
███████╗░░░░░░░░╚██╔╝░░██║░░██║███████╗╚██████╔╝██║░░██║░░░██║░░░╚█████╔╝██║░░██║
╚══════╝░░░░░░░░░╚═╝░░░╚═╝░░╚═╝╚══════╝░╚═════╝░╚═╝░░╚═╝░░░╚═╝░░░░╚════╝░╚═╝░░╚═╝

\x1B[3mSimple Python3 Script for Checking SPF & DMARC Records.\x1B[0m
""" + '\n')

Domain = input('Domain: ')

# Checking SPF
print('\n[+] Checking SPF Record...')
try:
    obj_answer = dns.resolver.resolve(Domain, 'TXT')
except Exception:
    sys.exit(Fore.RED + "\n[+] Domain can't be resolved! Check the domain name and try again..")
answer = str(obj_answer.response)

spf_pos = answer.find("v=spf")
if spf_pos != -1:
    print('[+] SPF Record Found!')
    spf_end_tmp = (answer[spf_pos:].find("\n")) - 1
    spf_end = answer[spf_pos:spf_pos + spf_end_tmp]
    print(Fore.GREEN + '[+] Domain: ' + Domain)
    print(Fore.GREEN + '[+] SPF Record: ' + spf_end)

    neutral_check = answer.find('?all')
    fail_check = answer.find('-all')
    soft_check = answer.find('~all')
    pass_check = answer.find('+all')

    if neutral_check != -1:
        print(Fore.RED + '[+] Result: ?all IS FOUND!! Domain emails can be spoofed!')
    elif fail_check != -1:
        print(Fore.GREEN + '[+] Result: -all is found. SPF is correctly configured.')
    elif soft_check != -1:
        print(Fore.GREEN + '[+] Result: ~all is found. SPF is correctly configured.')
    elif pass_check != -1:
        print(Fore.RED + '[+] Result: +all DOMAIN IS VERY BADLY CONFIGURED! Domain emails can be spoofed!')
    else:
        print(Fore.RED + '[+] Result: No condition is set for "all"! Domain emails can be spoofed!')
else:
    print(Fore.RED + '[+] No SPF Record Found!!')

# Checking DMARC
print(Fore.WHITE + '\n\n[+] Checking DMARC Policy..')
try:
    obj2_answer = dns.resolver.resolve('_dmarc.' + Domain, 'TXT')
except Exception:
    sys.exit(Fore.RED + "[+] The domain doesn't have DMARC policy configured!")
answer2 = str(obj2_answer.response)
print(Fore.WHITE + '[+] DMARC Policy Found!')

none_check = re.search(r"[;\s]p=none;", answer2)
reject_check = re.search(r"[;\s]p=reject;", answer2)
quarantine_check = re.search(r"[;\s]p=quarantine;", answer2)

if none_check:
    print(Fore.RED + '[+] Result: DMARC Policy is set as none! Domain emails can be spoofed!')
if reject_check:
    print(Fore.GREEN + '[+] Result: DMARC Policy is set as reject! Domain emails are safe from spoofing.')
if quarantine_check:
    print(Fore.GREEN + '[+] Result: DMARC Policy is set as quarantine! Domain emails are safe from spoofing.')
32.15625
108
0.547781
0
0
0
0
0
0
0
0
2,642
0.650899
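The script in the row above parses str(obj_answer.response), which stringifies the entire DNS message rather than just the TXT data. A sketch of a more robust lookup walks the TXT rdata directly (example.com is a placeholder domain):

import dns.resolver

for rdata in dns.resolver.resolve('example.com', 'TXT'):
    # Each TXT rdata carries one or more byte strings; join and decode them.
    txt = b''.join(rdata.strings).decode()
    if txt.startswith('v=spf1'):
        print('SPF:', txt)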
6a50e8edb03f4c7852b3cc7809ccd49216f25af1
2,655
py
Python
api/server.py
qh73xe/HowAboutNatume
8d994a1e16e2153dc200097d8f8b43713d76a3d5
[ "MIT" ]
null
null
null
api/server.py
qh73xe/HowAboutNatume
8d994a1e16e2153dc200097d8f8b43713d76a3d5
[ "MIT" ]
7
2020-03-24T15:37:48.000Z
2021-06-01T22:01:22.000Z
api/server.py
qh73xe/HowAboutNatume
8d994a1e16e2153dc200097d8f8b43713d76a3d5
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""Create an ask API using Tornado."""
from json import dumps

from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.options import parse_command_line
from tornado.web import Application, RequestHandler
from tornado.options import define, options

from tokenizer import get_entity
from logger import getLogger

LOGGER = getLogger('API_MODULE')
define("port", default=8000, help="run on the given port", type=int)


class AskHandler(RequestHandler):
    """Return words closely related to the sentence passed as `question`."""

    def get(self):
        """Answer the question."""
        from ask import ask
        author = self.get_argument('author')
        question = self.get_argument('question')
        answers = {
            'answers': ask(author, get_entity(question))
        }
        self.finish(
            dumps(
                answers,
                ensure_ascii=False,
                indent=4,
                sort_keys=True,
                separators=(',', ': ')
            )
        )

    def post(self):
        """Webhook response for Actions on Google."""
        from ask import ask
        import json
        data = json.loads(self.request.body)
        LOGGER.info('input: {data}'.format(data=data))
        author = data.get('author', '夏目漱石')
        question = data.get('question')
        answers = ask(author, get_entity(question))
        if answers:
            adjective = answers.get('adjective', None)
            nouns = answers.get('nouns')
            if adjective:
                speech = '。'.join([
                    'それは {adjective} 質問ですね'.format(adjective=adjective[0]),
                    'きっと, {0} や {1} あるいは {2} のことです'.format(*nouns)
                ])
            else:
                speech = 'それはきっと, {0} や {1} あるいは {2} のことです'.format(*nouns)
        else:
            speech = '。'.join([
                '{q} についてですか'.format(q=question),
                '難しいことを聞きますね',
                '私にはわからないです'
            ])
        displayText = speech
        response = {
            'speech': speech,
            'displayText': displayText,
            'data': answers,
            'contextOut': [answers],
            'source': 'how-about-natume'
        }
        self.finish(
            dumps(
                response,
                ensure_ascii=False,
                indent=4,
                sort_keys=True,
                separators=(',', ': ')
            )
        )


if __name__ == "__main__":
    parse_command_line()
    app = Application(handlers=[(r"/", AskHandler)])
    http_server = HTTPServer(app)
    http_server.listen(options.port)
    IOLoop.instance().start()
27.947368
75
0.529567
2,187
0.754919
0
0
0
0
0
0
745
0.257163
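Assuming the Tornado server above is running locally on its default port (8000), the GET endpoint can be exercised like this; the author and question values are illustrative only:

import json
from urllib.parse import urlencode
from urllib.request import urlopen

params = urlencode({'author': '夏目漱石', 'question': '猫'})
with urlopen('http://localhost:8000/?' + params) as resp:
    print(json.loads(resp.read()))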
6a5258097a7cb4af2ef28cde1153d8db7884fd80
3,012
py
Python
proxy/http/chunk_parser.py
GDGSNF/proxy.py
3ee2824217286df3c108beadf3185eee35c28b49
[ "BSD-3-Clause" ]
null
null
null
proxy/http/chunk_parser.py
GDGSNF/proxy.py
3ee2824217286df3c108beadf3185eee35c28b49
[ "BSD-3-Clause" ]
null
null
null
proxy/http/chunk_parser.py
GDGSNF/proxy.py
3ee2824217286df3c108beadf3185eee35c28b49
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- """ proxy.py ~~~~~~~~ ⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on Network monitoring, controls & Application development, testing, debugging. :copyright: (c) 2013-present by Abhinav Singh and contributors. :license: BSD, see LICENSE for more details. """ from typing import NamedTuple, Tuple, List, Optional from ..common.utils import bytes_, find_http_line from ..common.constants import CRLF, DEFAULT_BUFFER_SIZE ChunkParserStates = NamedTuple( 'ChunkParserStates', [ ('WAITING_FOR_SIZE', int), ('WAITING_FOR_DATA', int), ('COMPLETE', int), ], ) chunkParserStates = ChunkParserStates(1, 2, 3) class ChunkParser: """HTTP chunked encoding response parser.""" def __init__(self) -> None: self.state = chunkParserStates.WAITING_FOR_SIZE self.body: bytes = b'' # Parsed chunks self.chunk: bytes = b'' # Partial chunk received # Expected size of next following chunk self.size: Optional[int] = None def parse(self, raw: bytes) -> bytes: more = len(raw) > 0 while more and self.state != chunkParserStates.COMPLETE: more, raw = self.process(raw) return raw def process(self, raw: bytes) -> Tuple[bool, bytes]: if self.state == chunkParserStates.WAITING_FOR_SIZE: # Consume prior chunk in buffer # in case chunk size without CRLF was received raw = self.chunk + raw self.chunk = b'' # Extract following chunk data size line, raw = find_http_line(raw) # CRLF not received or Blank line was received. if line is None or line.strip() == b'': self.chunk = raw raw = b'' else: self.size = int(line, 16) self.state = chunkParserStates.WAITING_FOR_DATA elif self.state == chunkParserStates.WAITING_FOR_DATA: assert self.size is not None remaining = self.size - len(self.chunk) self.chunk += raw[:remaining] raw = raw[remaining:] if len(self.chunk) == self.size: raw = raw[len(CRLF):] self.body += self.chunk if self.size == 0: self.state = chunkParserStates.COMPLETE else: self.state = chunkParserStates.WAITING_FOR_SIZE self.chunk = b'' self.size = None return len(raw) > 0, raw @staticmethod def to_chunks(raw: bytes, chunk_size: int = DEFAULT_BUFFER_SIZE) -> bytes: chunks: List[bytes] = [] for i in range(0, len(raw), chunk_size): chunk = raw[i: i + chunk_size] chunks.append(bytes_('{:x}'.format(len(chunk)))) chunks.append(chunk) chunks.append(bytes_('{:x}'.format(0))) chunks.append(b'') return CRLF.join(chunks) + CRLF
35.857143
86
0.581009
2,291
0.759112
0
0
426
0.141153
0
0
726
0.240557
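A quick round-trip check of the chunked-encoding parser above: encode a payload with to_chunks() and feed it back through parse(). The import assumes proxy.py is installed as the proxy package:

from proxy.http.chunk_parser import ChunkParser

parser = ChunkParser()
# Split the payload into 4-byte chunks, terminated by a zero-length chunk.
encoded = ChunkParser.to_chunks(b'hello world', chunk_size=4)
leftover = parser.parse(encoded)
assert parser.body == b'hello world' and leftover == b''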
6a53c43c787fb87b95985049d6273d36fc7dbdab
31,240
py
Python
nova/pci/stats.py
10088/nova
972c06c608f0b00e9066d7f581fd81197065cf49
[ "Apache-2.0" ]
null
null
null
nova/pci/stats.py
10088/nova
972c06c608f0b00e9066d7f581fd81197065cf49
[ "Apache-2.0" ]
null
null
null
nova/pci/stats.py
10088/nova
972c06c608f0b00e9066d7f581fd81197065cf49
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2013 Intel, Inc. # Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import typing as ty from oslo_config import cfg from oslo_log import log as logging from oslo_utils import strutils from nova import exception from nova import objects from nova.objects import fields from nova.objects import pci_device_pool from nova.pci.request import PCI_REMOTE_MANAGED_TAG from nova.pci import utils from nova.pci import whitelist CONF = cfg.CONF LOG = logging.getLogger(__name__) # TODO(stephenfin): We might want to use TypedDict here. Refer to # https://mypy.readthedocs.io/en/latest/kinds_of_types.html#typeddict for # more information. Pool = ty.Dict[str, ty.Any] class PciDeviceStats(object): """PCI devices summary information. According to the PCI SR-IOV spec, a PCI physical function can have up to 256 PCI virtual functions, thus the number of assignable PCI functions in a cloud can be big. The scheduler needs to know all device availability information in order to determine which compute hosts can support a PCI request. Passing individual virtual device information to the scheduler does not scale, so we provide summary information. Usually the virtual functions provided by a host PCI device have the same value for most properties, like vendor_id, product_id and class type. The PCI stats class summarizes this information for the scheduler. The pci stats information is maintained exclusively by compute node resource tracker and updated to database. The scheduler fetches the information and selects the compute node accordingly. If a compute node is selected, the resource tracker allocates the devices to the instance and updates the pci stats information. This summary information will be helpful for cloud management also. """ pool_keys = ['product_id', 'vendor_id', 'numa_node', 'dev_type'] def __init__( self, numa_topology: 'objects.NUMATopology', stats: 'objects.PCIDevicePoolList' = None, dev_filter: whitelist.Whitelist = None, ) -> None: self.numa_topology = numa_topology self.pools = ( [pci_pool.to_dict() for pci_pool in stats] if stats else [] ) self.pools.sort(key=lambda item: len(item)) self.dev_filter = dev_filter or whitelist.Whitelist( CONF.pci.passthrough_whitelist) def _equal_properties( self, dev: Pool, entry: Pool, matching_keys: ty.List[str], ) -> bool: return all(dev.get(prop) == entry.get(prop) for prop in matching_keys) def _find_pool(self, dev_pool: Pool) -> ty.Optional[Pool]: """Return the first pool that matches dev.""" for pool in self.pools: pool_keys = pool.copy() del pool_keys['count'] del pool_keys['devices'] if (len(pool_keys.keys()) == len(dev_pool.keys()) and self._equal_properties(dev_pool, pool_keys, list(dev_pool))): return pool return None @staticmethod def _ensure_remote_managed_tag( dev: 'objects.PciDevice', pool: Pool): """Add a remote_managed tag depending on a device type if needed. Network devices may be managed remotely, e.g. by a SmartNIC DPU. 
If a tag has not been explicitly provided, populate it by assuming that a device is not remote managed by default. """ if dev.dev_type not in (fields.PciDeviceType.SRIOV_VF, fields.PciDeviceType.SRIOV_PF, fields.PciDeviceType.VDPA): return # A tag is added here rather than at the client side to avoid an # issue with having objects without this tag specified during an # upgrade to the first version that supports handling this tag. if pool.get(PCI_REMOTE_MANAGED_TAG) is None: # NOTE: tags are compared as strings case-insensitively, see # pci_device_prop_match in nova/pci/utils.py. pool[PCI_REMOTE_MANAGED_TAG] = 'false' def _create_pool_keys_from_dev( self, dev: 'objects.PciDevice', ) -> ty.Optional[Pool]: """Create a stats pool dict that this dev is supposed to be part of Note that this pool dict contains the stats pool's keys and their values. 'count' and 'devices' are not included. """ # Don't add a device that doesn't have a matching device spec. # This can happen during initial sync up with the controller devspec = self.dev_filter.get_devspec(dev) if not devspec: return None tags = devspec.get_tags() pool = {k: getattr(dev, k) for k in self.pool_keys} if tags: pool.update(tags) # NOTE(gibi): parent_ifname acts like a tag during pci claim but # not provided as part of the whitelist spec as it is auto detected # by the virt driver. # This key is used for match InstancePciRequest backed by neutron ports # that has resource_request and therefore that has resource allocation # already in placement. if dev.extra_info.get('parent_ifname'): pool['parent_ifname'] = dev.extra_info['parent_ifname'] self._ensure_remote_managed_tag(dev, pool) return pool def _get_pool_with_device_type_mismatch( self, dev: 'objects.PciDevice', ) -> ty.Optional[ty.Tuple[Pool, 'objects.PciDevice']]: """Check for device type mismatch in the pools for a given device. Return (pool, device) if device type does not match or a single None if the device type matches. """ for pool in self.pools: for device in pool['devices']: if device.address == dev.address: if dev.dev_type != pool["dev_type"]: return pool, device return None return None def update_device(self, dev: 'objects.PciDevice') -> None: """Update a device to its matching pool.""" pool_device_info = self._get_pool_with_device_type_mismatch(dev) if pool_device_info is None: return None pool, device = pool_device_info pool['devices'].remove(device) self._decrease_pool_count(self.pools, pool) self.add_device(dev) def add_device(self, dev: 'objects.PciDevice') -> None: """Add a device to its matching pool.""" dev_pool = self._create_pool_keys_from_dev(dev) if dev_pool: pool = self._find_pool(dev_pool) if not pool: dev_pool['count'] = 0 dev_pool['devices'] = [] self.pools.append(dev_pool) self.pools.sort(key=lambda item: len(item)) pool = dev_pool pool['count'] += 1 pool['devices'].append(dev) @staticmethod def _decrease_pool_count( pool_list: ty.List[Pool], pool: Pool, count: int = 1, ) -> int: """Decrement pool's size by count. If pool becomes empty, remove pool from pool_list. 
""" if pool['count'] > count: pool['count'] -= count count = 0 else: count -= pool['count'] pool_list.remove(pool) return count def remove_device(self, dev: 'objects.PciDevice') -> None: """Remove one device from the first pool that it matches.""" dev_pool = self._create_pool_keys_from_dev(dev) if dev_pool: pool = self._find_pool(dev_pool) if not pool: raise exception.PciDevicePoolEmpty( compute_node_id=dev.compute_node_id, address=dev.address) pool['devices'].remove(dev) self._decrease_pool_count(self.pools, pool) def get_free_devs(self) -> ty.List['objects.PciDevice']: free_devs: ty.List[objects.PciDevice] = [] for pool in self.pools: free_devs.extend(pool['devices']) return free_devs def consume_requests( self, pci_requests: 'objects.InstancePCIRequests', numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None, ) -> ty.Optional[ty.List['objects.PciDevice']]: alloc_devices: ty.List[objects.PciDevice] = [] for request in pci_requests: count = request.count pools = self._filter_pools(self.pools, request, numa_cells) # Failed to allocate the required number of devices. Return the # devices already allocated during previous iterations back to # their pools if not pools: LOG.error("Failed to allocate PCI devices for instance. " "Unassigning devices back to pools. " "This should not happen, since the scheduler " "should have accurate information, and allocation " "during claims is controlled via a hold " "on the compute node semaphore.") for d in range(len(alloc_devices)): self.add_device(alloc_devices.pop()) return None for pool in pools: if pool['count'] >= count: num_alloc = count else: num_alloc = pool['count'] count -= num_alloc pool['count'] -= num_alloc for d in range(num_alloc): pci_dev = pool['devices'].pop() self._handle_device_dependents(pci_dev) pci_dev.request_id = request.request_id alloc_devices.append(pci_dev) if count == 0: break return alloc_devices def _handle_device_dependents(self, pci_dev: 'objects.PciDevice') -> None: """Remove device dependents or a parent from pools. In case the device is a PF, all of it's dependent VFs should be removed from pools count, if these are present. When the device is a VF, or a VDPA device, it's parent PF pool count should be decreased, unless it is no longer in a pool. """ if pci_dev.dev_type == fields.PciDeviceType.SRIOV_PF: vfs_list = pci_dev.child_devices if vfs_list: free_devs = self.get_free_devs() for vf in vfs_list: # NOTE(gibi): do not try to remove a device that are # already removed if vf in free_devs: self.remove_device(vf) elif pci_dev.dev_type in ( fields.PciDeviceType.SRIOV_VF, fields.PciDeviceType.VDPA, ): try: parent = pci_dev.parent_device # Make sure not to decrease PF pool count if this parent has # been already removed from pools if parent in self.get_free_devs(): self.remove_device(parent) except exception.PciDeviceNotFound: return def _filter_pools_for_spec( self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest', ) -> ty.List[Pool]: """Filter out pools that don't match the request's device spec. Exclude pools that do not match the specified ``vendor_id``, ``product_id`` and/or ``device_type`` field, or any of the other arbitrary tags such as ``physical_network``, specified in the request. :param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :returns: A list of pools that can be used to support the request if this is possible. 
""" request_specs = request.spec return [ pool for pool in pools if utils.pci_device_prop_match(pool, request_specs) ] def _filter_pools_for_numa_cells( self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest', numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']], ) -> ty.List[Pool]: """Filter out pools with the wrong NUMA affinity, if required. Exclude pools that do not have *suitable* PCI NUMA affinity. ``numa_policy`` determines what *suitable* means, being one of PREFERRED (nice-to-have), LEGACY (must-have-if-available) and REQUIRED (must-have). We iterate through the various policies in order of strictness. This means that even if we only *prefer* PCI-NUMA affinity, we will still attempt to provide it if possible. :param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :param numa_cells: A list of InstanceNUMACell objects whose ``id`` corresponds to the ``id`` of host NUMACells. :returns: A list of pools that can, together, provide at least ``requested_count`` PCI devices with the level of NUMA affinity required by ``numa_policy``, else all pools that can satisfy this policy even if it's not enough. """ if not numa_cells: return pools # we default to the 'legacy' policy for...of course...legacy reasons requested_policy = fields.PCINUMAAffinityPolicy.LEGACY if 'numa_policy' in request: requested_policy = request.numa_policy or requested_policy requested_count = request.count numa_cell_ids = [cell.id for cell in numa_cells] # filter out pools which numa_node is not included in numa_cell_ids filtered_pools = [ pool for pool in pools if any(utils.pci_device_prop_match( pool, [{'numa_node': cell}]) for cell in numa_cell_ids)] # we can't apply a less strict policy than the one requested, so we # need to return if we've demanded a NUMA affinity of REQUIRED. # However, NUMA affinity is a good thing. If we can get enough devices # with the stricter policy then we will use them. if requested_policy == fields.PCINUMAAffinityPolicy.REQUIRED or sum( pool['count'] for pool in filtered_pools) >= requested_count: return filtered_pools # the SOCKET policy is a bit of a special case. It's less strict than # REQUIRED (so REQUIRED will automatically fulfil SOCKET, at least # with our assumption of never having multiple sockets per NUMA node), # but not always more strict than LEGACY: a PCI device with no NUMA # affinity will fulfil LEGACY but not SOCKET. If we have SOCKET, # process it here and don't continue. if requested_policy == fields.PCINUMAAffinityPolicy.SOCKET: return self._filter_pools_for_socket_affinity(pools, numa_cells) # some systems don't report NUMA node info for PCI devices, in which # case None is reported in 'pci_device.numa_node'. The LEGACY policy # allows us to use these devices so we include None in the list of # suitable NUMA cells. numa_cell_ids.append(None) # filter out pools which numa_node is not included in numa_cell_ids filtered_pools = [ pool for pool in pools if any(utils.pci_device_prop_match( pool, [{'numa_node': cell}]) for cell in numa_cell_ids)] # once again, we can't apply a less strict policy than the one # requested, so we need to return if we've demanded a NUMA affinity of # LEGACY. Similarly, we will also return if we have enough devices to # satisfy this somewhat strict policy. 
if requested_policy == fields.PCINUMAAffinityPolicy.LEGACY or sum( pool['count'] for pool in filtered_pools) >= requested_count: return filtered_pools # if we've got here, we're using the PREFERRED policy and weren't able # to provide anything with stricter affinity. Use whatever devices you # can, folks. return sorted( pools, key=lambda pool: pool.get('numa_node') not in numa_cell_ids) def _filter_pools_for_socket_affinity( self, pools: ty.List[Pool], numa_cells: ty.List['objects.InstanceNUMACell'], ) -> ty.List[Pool]: host_cells = self.numa_topology.cells # bail early if we don't have socket information for all host_cells. # This could happen if we're running on an weird older system with # multiple sockets per NUMA node, which is a configuration that we # explicitly chose not to support. if any(cell.socket is None for cell in host_cells): LOG.debug('No socket information in host NUMA cell(s).') return [] # get a set of host sockets that the guest cells are in. Since guest # cell IDs map to host cell IDs, we can just lookup the latter's # socket. socket_ids = set() for guest_cell in numa_cells: for host_cell in host_cells: if guest_cell.id == host_cell.id: socket_ids.add(host_cell.socket) # now get a set of host NUMA nodes that are in the above sockets allowed_numa_nodes = set() for host_cell in host_cells: if host_cell.socket in socket_ids: allowed_numa_nodes.add(host_cell.id) # filter out pools that are not in one of the correct host NUMA nodes. return [ pool for pool in pools if any( utils.pci_device_prop_match(pool, [{'numa_node': numa_node}]) for numa_node in allowed_numa_nodes ) ] def _filter_pools_for_unrequested_pfs( self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest', ) -> ty.List[Pool]: """Filter out pools with PFs, unless these are required. This is necessary in cases where PFs and VFs have the same product_id and generally useful elsewhere. :param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :returns: A list of pools that can be used to support the request if this is possible. """ if all( spec.get('dev_type') != fields.PciDeviceType.SRIOV_PF for spec in request.spec ): pools = [ pool for pool in pools if not pool.get('dev_type') == fields.PciDeviceType.SRIOV_PF ] return pools def _filter_pools_for_unrequested_vdpa_devices( self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest', ) -> ty.List[Pool]: """Filter out pools with VDPA devices, unless these are required. This is necessary as vdpa devices require special handling and should not be allocated to generic pci device requests. :param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :returns: A list of pools that can be used to support the request if this is possible. """ if all( spec.get('dev_type') != fields.PciDeviceType.VDPA for spec in request.spec ): pools = [ pool for pool in pools if not pool.get('dev_type') == fields.PciDeviceType.VDPA ] return pools def _filter_pools_for_unrequested_remote_managed_devices( self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest', ) -> ty.List[Pool]: """Filter out pools with remote_managed devices, unless requested. Remote-managed devices are not usable for legacy SR-IOV or hardware offload scenarios and must be excluded from allocation. 
:param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :returns: A list of pools that can be used to support the request if this is possible. """ if all(not strutils.bool_from_string(spec.get(PCI_REMOTE_MANAGED_TAG)) for spec in request.spec): pools = [pool for pool in pools if not strutils.bool_from_string( pool.get(PCI_REMOTE_MANAGED_TAG))] return pools def _filter_pools( self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest', numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']], ) -> ty.Optional[ty.List[Pool]]: """Determine if an individual PCI request can be met. Filter pools, which are collections of devices with similar traits, to identify those that can support the provided PCI request. If ``numa_cells`` is provided then NUMA locality may be taken into account, depending on the value of ``request.numa_policy``. :param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :param numa_cells: A list of InstanceNUMACell objects whose ``id`` corresponds to the ``id`` of host NUMACell objects. :returns: A list of pools that can be used to support the request if this is possible, else None. """ # NOTE(vladikr): This code may be open to race conditions. # Two concurrent requests may succeed when called support_requests # because this method does not remove related devices from the pools # Firstly, let's exclude all devices that don't match our spec (e.g. # they've got different PCI IDs or something) before_count = sum([pool['count'] for pool in pools]) pools = self._filter_pools_for_spec(pools, request) after_count = sum([pool['count'] for pool in pools]) if after_count < before_count: LOG.debug( 'Dropped %d device(s) due to mismatched PCI attribute(s)', before_count - after_count ) if after_count < request.count: LOG.debug('Not enough PCI devices left to satisfy request') return None # Next, let's exclude all devices that aren't on the correct NUMA node # or socket, *assuming* we have devices and care about that, as # determined by policy before_count = after_count pools = self._filter_pools_for_numa_cells(pools, request, numa_cells) after_count = sum([pool['count'] for pool in pools]) if after_count < before_count: LOG.debug( 'Dropped %d device(s) as they are on the wrong NUMA node(s)', before_count - after_count ) if after_count < request.count: LOG.debug('Not enough PCI devices left to satisfy request') return None # If we're not requesting PFs then we should not use these. # Exclude them. before_count = after_count pools = self._filter_pools_for_unrequested_pfs(pools, request) after_count = sum([pool['count'] for pool in pools]) if after_count < before_count: LOG.debug( 'Dropped %d device(s) as they are PFs which we have not ' 'requested', before_count - after_count ) if after_count < request.count: LOG.debug('Not enough PCI devices left to satisfy request') return None # If we're not requesting VDPA devices then we should not use these # either. Exclude them. before_count = after_count pools = self._filter_pools_for_unrequested_vdpa_devices(pools, request) after_count = sum([pool['count'] for pool in pools]) if after_count < before_count: LOG.debug( 'Dropped %d device(s) as they are VDPA devices which we have ' 'not requested', before_count - after_count ) # If we're not requesting remote_managed devices then we should not # use these either. 
# Exclude them.
        before_count = after_count
        pools = self._filter_pools_for_unrequested_remote_managed_devices(
            pools, request)
        after_count = sum([pool['count'] for pool in pools])

        if after_count < before_count:
            LOG.debug(
                'Dropped %d device(s) as they are remote-managed devices '
                'which we have not requested', before_count - after_count
            )

        if after_count < request.count:
            LOG.debug('Not enough PCI devices left to satisfy request')
            return None

        return pools

    def support_requests(
        self,
        requests: ty.List['objects.InstancePCIRequest'],
        numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
    ) -> bool:
        """Determine if the PCI requests can be met.

        Determine, based on a compute node's PCI stats, if an instance can be
        scheduled on the node. **Support does not mean real allocation**.

        If ``numa_cells`` is provided then NUMA locality may be taken into
        account, depending on the value of ``numa_policy``.

        :param requests: A list of InstancePCIRequest objects describing the
            types, quantities and required NUMA affinities of devices we want.
        :type requests: nova.objects.InstancePCIRequests
        :param numa_cells: A list of InstanceNUMACell objects whose ``id``
            corresponds to the ``id`` of host NUMACells, or None.
        :returns: Whether this compute node can satisfy the given request.
        """
        # NOTE(yjiang5): this function has a high possibility of failing,
        # so no exceptions should be triggered for performance reasons.
        return all(
            self._filter_pools(self.pools, r, numa_cells) for r in requests
        )

    def _apply_request(
        self,
        pools: ty.List[Pool],
        request: 'objects.InstancePCIRequest',
        numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
    ) -> bool:
        """Apply an individual PCI request.

        Apply a PCI request against a given set of PCI device pools, which
        are collections of devices with similar traits.

        If ``numa_cells`` is provided then NUMA locality may be taken into
        account, depending on the value of ``request.numa_policy``.

        :param pools: A list of PCI device pool dicts
        :param request: An InstancePCIRequest object describing the type,
            quantity and required NUMA affinity of device(s) we want.
        :param numa_cells: A list of InstanceNUMACell objects whose ``id``
            corresponds to the ``id`` of host NUMACell objects.
        :returns: True if the request was applied against the provided pools
            successfully, else False.
        """
        # NOTE(vladikr): This code may be open to race conditions.
        # Two concurrent requests may succeed when called support_requests
        # because this method does not remove related devices from the pools

        filtered_pools = self._filter_pools(pools, request, numa_cells)

        if not filtered_pools:
            return False

        count = request.count
        for pool in filtered_pools:
            count = self._decrease_pool_count(pools, pool, count)
            if not count:
                break

        return True

    def apply_requests(
        self,
        requests: ty.List['objects.InstancePCIRequest'],
        numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
    ) -> None:
        """Apply PCI requests to the PCI stats.

        This is used in multiple instance creation, when the scheduler has to
        maintain how the resources are consumed by the instances.

        If ``numa_cells`` is provided then NUMA locality may be taken into
        account, depending on the value of ``numa_policy``.

        :param requests: A list of InstancePCIRequest objects describing the
            types, quantities and required NUMA affinities of devices we want.
        :type requests: nova.objects.InstancePCIRequests
        :param numa_cells: A list of InstanceNUMACell objects whose ``id``
            corresponds to the ``id`` of host NUMACells, or None.
:raises: exception.PciDeviceRequestFailed if this compute node cannot satisfy the given request. """ if not all( self._apply_request(self.pools, r, numa_cells) for r in requests ): raise exception.PciDeviceRequestFailed(requests=requests) def __iter__(self) -> ty.Iterator[Pool]: pools: ty.List[Pool] = [] for pool in self.pools: pool = copy.deepcopy(pool) # 'devices' shouldn't be part of stats if 'devices' in pool: del pool['devices'] pools.append(pool) return iter(pools) def clear(self) -> None: """Clear all the stats maintained.""" self.pools = [] def __eq__(self, other: object) -> bool: if not isinstance(other, PciDeviceStats): return NotImplemented return self.pools == other.pools def to_device_pools_obj(self) -> 'objects.PciDevicePoolList': """Return the contents of the pools as a PciDevicePoolList object.""" stats = [x for x in self] return pci_device_pool.from_pci_stats(stats) def has_remote_managed_device_pools(self) -> bool: """Determine whether remote managed device pools are present on a host. The check is pool-based, not free device-based and is NUMA cell agnostic. """ dummy_req = objects.InstancePCIRequest( count=0, spec=[{'remote_managed': True}] ) pools = self._filter_pools_for_spec(self.pools, dummy_req) return bool(pools)
41.708945
79
0.628073
29,958
0.958963
0
0
1,480
0.047375
0
0
15,984
0.511652
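Stripped of the nova object model, the pool-filtering idea above reduces to matching every key/value of a request spec against each pool dict. A toy, dependency-free sketch follows; the real code defers to utils.pci_device_prop_match, which additionally handles lists of specs and case-insensitive tag values:

def filter_pools(pools, spec):
    # Keep only pools whose attributes match every key/value in the spec.
    return [p for p in pools if all(p.get(k) == v for k, v in spec.items())]

pools = [
    {'vendor_id': '8086', 'product_id': '1520', 'count': 2},
    {'vendor_id': '15b3', 'product_id': '1018', 'count': 4},
]
assert filter_pools(pools, {'vendor_id': '15b3'})[0]['count'] == 4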
6a54175f824a3a8a92a61c38d426dd45948b4848
364
py
Python
Use.py
Codingprivacy/Multiple-Rename
486289e8158487dad058cd8f781ac27bc9a5fc02
[ "MIT" ]
2
2018-04-01T06:16:33.000Z
2018-05-04T18:57:50.000Z
Use.py
codingprivacy/Multiple-Rename
486289e8158487dad058cd8f781ac27bc9a5fc02
[ "MIT" ]
null
null
null
Use.py
codingprivacy/Multiple-Rename
486289e8158487dad058cd8f781ac27bc9a5fc02
[ "MIT" ]
null
null
null
import multiple

multiple.rename("C:/Users/Username/Desktop", 'new_name', 33, '.exe')

"""The line above renames all of the files in the Desktop folder to
'new_name', with the counter starting from 33 (pass 1 to start from 1)
and the extension set to '.exe'. The files are therefore renamed like:

1. new_name33.exe
2. new_name34.exe

and so on.
"""
28
81
0.739011
0
0
0
0
0
0
0
0
323
0.887363
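The multiple module itself is not included in this row; a plausible implementation of rename(folder, new_name, start, extension) consistent with the docstring above might look like the following sketch (an assumption, not the package's actual code):

import os

def rename(folder, new_name, start, extension):
    # Rename every regular file in the folder to new_name<counter><extension>,
    # counting up from `start`.
    files = sorted(
        f for f in os.listdir(folder)
        if os.path.isfile(os.path.join(folder, f))
    )
    for i, name in enumerate(files, start=start):
        os.rename(os.path.join(folder, name),
                  os.path.join(folder, '{}{}{}'.format(new_name, i, extension)))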
6a55c2af9ac7243f141edb694902ca98eb95a939
278
py
Python
ReadSymLink.py
ohel/pyorbital-gizmod-tweaks
4c02783d1c6287df508351467a5c203a11430b07
[ "Unlicense" ]
null
null
null
ReadSymLink.py
ohel/pyorbital-gizmod-tweaks
4c02783d1c6287df508351467a5c203a11430b07
[ "Unlicense" ]
null
null
null
ReadSymLink.py
ohel/pyorbital-gizmod-tweaks
4c02783d1c6287df508351467a5c203a11430b07
[ "Unlicense" ]
null
null
null
import os


def readlinkabs(l):
    """Return an absolute path for the destination of a symlink."""
    if not os.path.islink(l):
        return None
    p = os.readlink(l)
    if os.path.isabs(p):
        return p
    return os.path.join(os.path.dirname(l), p)
18.533333
48
0.582734
0
0
0
0
0
0
0
0
77
0.276978
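A small self-contained exercise of readlinkabs using a temporary directory and a relative symlink (symlink creation may require extra privileges on Windows):

import os
import tempfile

d = tempfile.mkdtemp()
open(os.path.join(d, 'target.txt'), 'w').close()
link = os.path.join(d, 'link.txt')
os.symlink('target.txt', link)   # relative link target
print(readlinkabs(link))         # -> <tmpdir>/target.txt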
6a55f8c89efdf9367ae5e51c6555c781fae366b6
1,368
py
Python
examples/capture_circular.py
IanTBlack/picamera2
4d31a56cdb0d8360e71927e754fc6bef50bec360
[ "BSD-2-Clause" ]
71
2022-02-15T14:24:34.000Z
2022-03-29T16:36:46.000Z
examples/capture_circular.py
IanTBlack/picamera2
4d31a56cdb0d8360e71927e754fc6bef50bec360
[ "BSD-2-Clause" ]
37
2022-02-16T12:35:45.000Z
2022-03-31T13:18:42.000Z
examples/capture_circular.py
IanTBlack/picamera2
4d31a56cdb0d8360e71927e754fc6bef50bec360
[ "BSD-2-Clause" ]
15
2022-02-16T12:12:57.000Z
2022-03-31T15:17:58.000Z
#!/usr/bin/python3
import time

import numpy as np

from picamera2.encoders import H264Encoder
from picamera2.outputs import CircularOutput
from picamera2 import Picamera2

lsize = (320, 240)
picam2 = Picamera2()
video_config = picam2.video_configuration(main={"size": (1280, 720), "format": "RGB888"},
                                          lores={"size": lsize, "format": "YUV420"})
picam2.configure(video_config)
picam2.start_preview()
encoder = H264Encoder(1000000, repeat=True)
encoder.output = CircularOutput()
picam2.encoder = encoder
picam2.start()
picam2.start_encoder()

w, h = lsize
prev = None
encoding = False
ltime = 0

while True:
    cur = picam2.capture_buffer("lores")
    cur = cur[:w * h].reshape(h, w)
    if prev is not None:
        # Measure pixel differences between the current and previous frame.
        # Subtract in a signed dtype so uint8 differences can't wrap around;
        # the motion threshold may need retuning for a particular scene.
        mse = np.square(np.subtract(cur, prev, dtype=np.int32)).mean()
        if mse > 7:
            if not encoding:
                epoch = int(time.time())
                encoder.output.fileoutput = "{}.h264".format(epoch)
                encoder.output.start()
                encoding = True
                print("New Motion", mse)
            ltime = time.time()
        else:
            if encoding and time.time() - ltime > 5.0:
                encoder.output.stop()
                encoding = False
    prev = cur

picam2.stop_encoder()
27.918367
89
0.604532
0
0
0
0
0
0
0
0
154
0.112573
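The picamera2 example above triggers recording when the mean squared error between consecutive low-resolution luma frames exceeds a threshold. The frame-differencing core, isolated as a sketch on synthetic numpy frames (the threshold 7 mirrors the example; the cast to float is added here so uint8 subtraction cannot wrap):

import numpy as np

def motion_detected(prev, cur, threshold=7.0):
    """Return (triggered, mse) for two equal-shaped grayscale frames."""
    mse = np.square(cur.astype(float) - prev.astype(float)).mean()
    return mse > threshold, mse

rng = np.random.default_rng(0)
prev = rng.integers(0, 256, (240, 320), dtype=np.uint8)
cur = prev.copy()
cur[100:140, 100:140] = 255          # simulate a bright object entering
print(motion_detected(prev, cur))    # (True, mse well above 7)
print(motion_detected(prev, prev))   # (False, 0.0)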
6a561a673ebb04da901d20e99ce9c86e3955a26e
8,933
py
Python
Bio/NeuralNetwork/Gene/Pattern.py
barendt/biopython
391bcdbee7f821bff3e12b75c635a06bc1b2dcea
[ "PostgreSQL" ]
3
2017-10-23T21:53:57.000Z
2019-09-23T05:14:12.000Z
Bio/NeuralNetwork/Gene/Pattern.py
eoc21/biopython
c0f8db8f55a506837c320459957a0ce99b0618b6
[ "PostgreSQL" ]
null
null
null
Bio/NeuralNetwork/Gene/Pattern.py
eoc21/biopython
c0f8db8f55a506837c320459957a0ce99b0618b6
[ "PostgreSQL" ]
6
2020-02-26T16:34:20.000Z
2020-03-04T15:34:00.000Z
"""Generic functionality useful for all gene representations. This module contains classes which can be used for all the different types of patterns available for representing gene information (ie. motifs, signatures and schemas). These are the general classes which should be handle any of the different specific patterns. """ # standard library import random # biopython from Bio import utils from Bio.Seq import Seq, MutableSeq class PatternIO: """Allow reading and writing of patterns to files. This just defines a simple persistance class for patterns, making it easy to write them to a file and read 'em back. """ def __init__(self, alphabet = None): """Intialize the reader and writer class. Arguments: o alphabet - An optional argument specifying the alphabet which patterns should follow. If an alphabet is set it'll be used to verify that all patterns follow it. Attributes: o separator - A character to use in separating items in a signature when it is written to a file and read back. This character should not be in the possible alphabet of the sequences, or there will be trouble. """ self._alphabet = alphabet self.separator = ";" def write(self, pattern_list, output_handle): """Write a list of patterns to the given handle. """ for pattern in pattern_list: # deal with signatures, concatentate them with the separator if (type(pattern) == type([]) or type(pattern) == type(tuple([]))): string_pattern = self.separator.join(pattern) # deal with the normal cases else: string_pattern = pattern output_handle.write("%s\n" % string_pattern) def write_seq(self, seq_pattern_list, output_handle): """Convenience function to write Seq objects to a file. This can take Seqs and MutableSeqs, and write them to a file as strings. """ # convert the seq patterns into just string patterns all_patterns = [] for seq_pattern in seq_pattern_list: if isinstance(seq_pattern, MutableSeq): seq = seq_pattern.toseq() all_patterns.append(seq.data) elif isinstance(seq_pattern, Seq): all_patterns.append(seq_pattern.data) else: raise ValueError("Unexpected pattern type %r" % seq_pattern) self.write(all_patterns, output_handle) def read(self, input_handle): """Read patterns from the specified handle. """ all_patterns = [] while 1: cur_line = input_handle.readline() if not(cur_line): break cur_pattern = cur_line.rstrip() # split up signatures if cur_pattern.find(self.separator) >= 0: cur_pattern = tuple(cur_pattern.split(self.separator)) if self._alphabet is not None: # make single patterns (not signatures) into lists, so we # can check signatures and single patterns the same if type(cur_pattern) != type(tuple([])): test_pattern = [cur_pattern] else: test_pattern = cur_pattern for pattern_item in test_pattern: pattern_seq = Seq(pattern_item, self._alphabet) if not(utils.verify_alphabet(pattern_seq)): raise ValueError("Pattern %s not matching alphabet %s" % (cur_pattern, self._alphabet)) all_patterns.append(cur_pattern) return all_patterns class PatternRepository: """This holds a list of specific patterns found in sequences. This is designed to be a general holder for a set of patterns and should be subclassed for specific implementations (ie. holding Motifs or Signatures. """ def __init__(self, pattern_info): """Initialize a repository with patterns, Arguments: o pattern_info - A representation of all of the patterns found in a *Finder search. This should be a dictionary, where the keys are patterns, and the values are the number of times a pattern is found. 
The patterns are represented internally as a list of two-tuples, where the first element is the number of times a pattern occurs, and the second is the pattern itself. This makes it easy to sort the list and return the top N patterns. """ self._pattern_dict = pattern_info # create the list representation self._pattern_list = [] for pattern_name in self._pattern_dict.keys(): self._pattern_list.append((self._pattern_dict[pattern_name], pattern_name)) self._pattern_list.sort() self._pattern_list.reverse() def get_all(self): """Retrieve all of the patterns in the repository. """ patterns = [] for pattern_info in self._pattern_list: patterns.append(pattern_info[1]) return patterns def get_random(self, num_patterns): """Retrieve the specified number of patterns randomly. Randomly selects patterns from the list and returns them. Arguments: o num_patterns - The total number of patterns to return. """ all_patterns = [] while len(all_patterns) < num_patterns: # pick a pattern, and only add it if it is not already present new_pattern_info = random.choice(self._pattern_list) if new_pattern_info[1] not in all_patterns: all_patterns.append(new_pattern_info[1]) return all_patterns def get_top_percentage(self, percent): """Return a percentage of the patterns. This returns the top 'percent' percentage of the patterns in the repository. """ all_patterns = self.get_all() num_to_return = int(len(all_patterns) * percent) return all_patterns[:num_to_return] def get_top(self, num_patterns): """Return the specified number of most frequently occurring patterns Arguments: o num_patterns - The number of patterns to return. """ all_patterns = [] for pattern_info in self._pattern_list[:num_patterns]: all_patterns.append(pattern_info[1]) return all_patterns def get_differing(self, top_num, bottom_num): """Retrieve patterns that are at the extreme ranges. This returns both patterns at the top of the list (ie. the same as returned by get_top) and at the bottom of the list. This is especially useful for patterns that are the differences between two sets of patterns. Arguments: o top_num - The number of patterns to take from the top of the list. o bottom_num - The number of patterns to take from the bottom of the list. """ all_patterns = [] # first get from the top of the list for pattern_info in self._pattern_list[:top_num]: all_patterns.append(pattern_info[1]) # then from the bottom for pattern_info in self._pattern_list[-bottom_num:]: all_patterns.append(pattern_info[1]) return all_patterns def remove_polyA(self, at_percentage = .9): """Remove patterns which are likely due to polyA tails from the lists. This is just a helper function to remove patterns which are likely just due to polyA tails, and thus are not really great motifs. This will also get rid of stuff like ATATAT, which might be a useful motif, so use at your own discretion. XXX Could we write a more general function, based on info content or something like that? Arguments: o at_percentage - The percentage of A and T residues in a pattern that qualifies it for being removed. """ remove_list = [] # find all of the really AT rich patterns for pattern_info in self._pattern_list: pattern_at = float(pattern_info[1].count('A') + pattern_info[1].count('T')) / len(pattern_info[1]) if pattern_at > at_percentage: remove_list.append(pattern_info) # now remove them from the master list for to_remove in remove_list: self._pattern_list.remove(to_remove) def count(self, pattern): """Return the number of times the specified pattern is found. 
""" try: return self._pattern_dict[pattern] except KeyError: return 0
35.169291
110
0.622187
8,496
0.95108
0
0
0
0
0
0
4,491
0.502743
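PatternRepository above stores (count, pattern) two-tuples sorted most-frequent first, which makes top-N retrieval a simple slice. The core idea as a standalone sketch (toy counts, not biopython code):

pattern_info = {'GATC': 12, 'AAAA': 30, 'CGCG': 5}

# Build (count, pattern) tuples and sort descending, as the class does.
pattern_list = sorted(((count, name) for name, count in pattern_info.items()),
                      reverse=True)

top_2 = [name for _, name in pattern_list[:2]]
print(top_2)  # ['AAAA', 'GATC']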
6a565a6b3597c1dbb9a2e86bdaf31bd17e76951c
58
py
Python
neslter/parsing/nut/__init__.py
WHOIGit/nes-lter-ims
d4cc96c10da56ca33286af84d669625b67170522
[ "MIT" ]
3
2019-01-24T16:32:50.000Z
2021-11-05T02:18:12.000Z
neslter/parsing/nut/__init__.py
WHOIGit/nes-lter-ims
d4cc96c10da56ca33286af84d669625b67170522
[ "MIT" ]
45
2019-05-23T15:15:32.000Z
2022-03-15T14:09:20.000Z
neslter/parsing/nut/__init__.py
WHOIGit/nes-lter-ims
d4cc96c10da56ca33286af84d669625b67170522
[ "MIT" ]
null
null
null
from .nut import parse_nut, format_nut, merge_nut_bottles
29
57
0.844828
0
0
0
0
0
0
0
0
0
0
6a57cefd47f3150e0a9d0bbdcd3affcfe90d72c9
15,520
py
Python
legtool/tabs/servo_tab.py
jpieper/legtool
ab3946051bd16817b61d3073ce7be8bd27af90d0
[ "Apache-2.0" ]
10
2015-09-23T19:28:06.000Z
2021-04-27T02:32:27.000Z
legtool/tabs/servo_tab.py
jpieper/legtool
ab3946051bd16817b61d3073ce7be8bd27af90d0
[ "Apache-2.0" ]
null
null
null
legtool/tabs/servo_tab.py
jpieper/legtool
ab3946051bd16817b61d3073ce7be8bd27af90d0
[ "Apache-2.0" ]
9
2015-10-16T07:26:18.000Z
2021-01-13T07:18:35.000Z
# Copyright 2014 Josh Pieper, jjp@pobox.com. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools import traceback import trollius as asyncio from trollius import Task, From, Return import PySide.QtCore as QtCore import PySide.QtGui as QtGui from ..servo import selector from .common import BoolContext from . import gazebo_config_dialog def spawn(callback): def start(): Task(callback()) return start class ServoTab(object): def __init__(self, ui, status): self.ui = ui self.status = status self.servo_controls = [] self.monitor_thread = None self.servo_model = '' self.servo_name_map = {} self.ui.statusText.setText('not connected') self.ui.connectButton.clicked.connect( spawn(self.handle_connect_clicked)) self.ui.typeCombo.currentIndexChanged.connect(self.handle_type_change) self.handle_type_change() self.ui.configureGazeboButton.clicked.connect( self.handle_configure_gazebo) servo_layout = QtGui.QVBoxLayout() servo_layout.setSpacing(0) servo_layout.setContentsMargins(0, 0, 0, 0) self.ui.scrollContents.setLayout(servo_layout) self.ui.servoCountSpin.valueChanged.connect(self.handle_servo_count) self.handle_servo_count() self.ui.powerCombo.currentIndexChanged.connect( spawn(self.handle_power)) self.ui.captureCurrentButton.clicked.connect( spawn(self.handle_capture_current)) self.update_connected(False) self.ui.addPoseButton.clicked.connect(self.handle_add_pose) self.ui.removePoseButton.clicked.connect(self.handle_remove_pose) self.ui.moveToPoseButton.clicked.connect( spawn(self.handle_move_to_pose)) self.ui.updatePoseButton.clicked.connect(self.handle_update_pose) self.ui.poseList.currentItemChanged.connect( self.handle_poselist_current_changed) self.controller = None self.servo_update = BoolContext() def resizeEvent(self, event): pass def poses(self): result = [] for i in range(self.ui.poseList.count()): result.append(self.ui.poseList.item(i).text()) return result def pose(self, name): for i in range(self.ui.poseList.count()): if self.ui.poseList.item(i).text() == name: return self.ui.poseList.item(i).data(QtCore.Qt.UserRole) return dict([(i, 0.0) for i in range(self.ui.servoCountSpin.value())]) @asyncio.coroutine def handle_connect_clicked(self): val = self.ui.typeCombo.currentText().lower() try: self.controller = yield From( selector.select_servo( val, serial_port=self.ui.serialPortCombo.currentText(), model_name=self.servo_model, servo_name_map=self.servo_name_map)) self.ui.statusText.setText('connected') self.update_connected(True) except Exception as e: self.ui.statusText.setText('error: %s' % str(e)) self.update_connected(False) def handle_type_change(self): val = self.ui.typeCombo.currentText().lower() self.ui.serialPortCombo.setEnabled(val == 'herkulex') self.ui.configureGazeboButton.setEnabled(val == 'gazebo') def handle_configure_gazebo(self): servo_name_map = self.servo_name_map.copy() for x in range(self.ui.servoCountSpin.value()): if x not in servo_name_map: servo_name_map[x] = '' dialog = gazebo_config_dialog.GazeboConfigDialog( self.servo_model, servo_name_map) dialog.setModal(True) result = 
dialog.exec_() if result == QtGui.QDialog.Rejected: return self.servo_model = dialog.model_name() self.servo_name_map = dialog.servo_name_map() def handle_servo_count(self): count = self.ui.servoCountSpin.value() while len(self.servo_controls) > count: # Remove the last one last = self.servo_controls[-1] widget = last['widget'] self.ui.scrollContents.layout().removeWidget(widget) widget.deleteLater() self.servo_controls = self.servo_controls[:-1] while len(self.servo_controls) < count: # Add a new one. servo_id = len(self.servo_controls) label = QtGui.QLabel() label.setText('ID %d:' % servo_id) slider = QtGui.QSlider(QtCore.Qt.Horizontal) slider.setRange(-180, 180) doublespin = QtGui.QDoubleSpinBox() doublespin.setRange(-180, 180) doublespin.setDecimals(1) save = QtGui.QPushButton() save.setText("Save") move = QtGui.QPushButton() move.setText("Move") current = QtGui.QLabel() current.setText('N/A') current.setMinimumWidth(60) widget = QtGui.QWidget() layout = QtGui.QHBoxLayout(widget) layout.addWidget(label) layout.addWidget(slider) layout.addWidget(doublespin) layout.addWidget(save) layout.addWidget(move) layout.addWidget(current) slider.valueChanged.connect( functools.partial(self.handle_servo_slider, servo_id)) doublespin.valueChanged.connect( functools.partial(self.handle_servo_spin, servo_id)) save.clicked.connect( functools.partial(self.handle_servo_save, servo_id)) move.clicked.connect( functools.partial(self.handle_servo_move, servo_id)) self.ui.scrollContents.layout().addWidget(widget) self.servo_controls.append({ 'widget': widget, 'label': label, 'slider': slider, 'doublespin': doublespin, 'save': save, 'move': move, 'current': current}) @asyncio.coroutine def handle_power(self): text = self.ui.powerCombo.currentText().lower() value = None if text == 'free': value = selector.POWER_FREE elif text == 'brake': value = selector.POWER_BRAKE elif text == 'drive': value = selector.POWER_ENABLE else: raise NotImplementedError() yield From(self.controller.enable_power(value)) def update_connected(self, value): self.ui.controlGroup.setEnabled(value) self.ui.posesGroup.setEnabled(value) if self.monitor_thread is not None: self.monitor_thread.cancel() self.monitor_thread = None if value: Task(self.handle_power()) self.monitor_thread = Task(self.monitor_status()) @asyncio.coroutine def monitor_status(self): voltages = {} temperatures = {} ident = 0 while True: if (self.controller is not None and hasattr(self.controller, 'get_voltage')): try: ident = (ident + 1) % len(self.servo_controls) this_voltage = yield From( self.controller.get_voltage([ident])) voltages.update(this_voltage) # Get all temperatures. 
this_temp = yield From( self.controller.get_temperature([ident])) temperatures.update(this_temp) def non_None(value): return [x for x in value if x is not None] message = "Servo status: " if len(non_None(voltages.values())): message += "%.1f/%.1fV" % ( min(non_None(voltages.values())), max(non_None(voltages.values()))) if len(non_None(temperatures.values())): message += " %.1f/%.1fC" % ( min(non_None(temperatures.values())), max(non_None(temperatures.values()))) self.status.showMessage(message, 10000) except Exception as e: traceback.print_exc() print "Error reading servo:", type(e), e yield From(asyncio.sleep(2.0)) @asyncio.coroutine def set_single_pose(self, servo_id, value): yield From( self.controller.set_single_pose(servo_id, value, pose_time=0.2)) def handle_servo_slider(self, servo_id, event): if self.servo_update.value: return with self.servo_update: control = self.servo_controls[servo_id] value = control['slider'].value() control['doublespin'].setValue(value) Task(self.set_single_pose(servo_id, value)) def handle_servo_spin(self, servo_id, event): if self.servo_update.value: return with self.servo_update: control = self.servo_controls[servo_id] value = control['doublespin'].value() control['slider'].setSliderPosition(int(value)) Task(self.set_single_pose(servo_id, value)) def handle_servo_save(self, servo_id): if self.ui.poseList.currentRow() < 0: return current_data = self.ui.poseList.currentItem().data( QtCore.Qt.UserRole) current_data[servo_id] = ( self.servo_controls[servo_id]['doublespin'].value()) self.ui.poseList.currentItem().setData( QtCore.Qt.UserRole, current_data) self.handle_poselist_current_changed(None, None) def handle_servo_move(self, servo_id): if self.ui.poseList.currentRow() < 0: return data = self.ui.poseList.currentItem().data(QtCore.Qt.UserRole) self.servo_controls[servo_id]['doublespin'].setValue(data[servo_id]) @asyncio.coroutine def handle_capture_current(self): with self.servo_update: results = yield From( self.controller.get_pose(range(len(self.servo_controls)))) for ident, angle in results.iteritems(): if angle is None: continue control = self.servo_controls[ident] control['slider'].setSliderPosition(int(angle)) control['doublespin'].setValue(angle) def add_list_pose(self, name): self.ui.poseList.addItem(name) item = self.ui.poseList.item(self.ui.poseList.count() - 1) item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEditable) return item def get_new_pose_name(self): poses = set([self.ui.poseList.item(x).text() for x in range(self.ui.poseList.count())]) count = 0 while True: name = 'new_pose_%d' % count if name not in poses: return name count += 1 def generate_pose_data(self): return dict( [ (i, control['doublespin'].value()) for i, control in enumerate(self.servo_controls) ]) def handle_add_pose(self): pose_name = self.get_new_pose_name() item = self.add_list_pose(pose_name) item.setData(QtCore.Qt.UserRole, self.generate_pose_data()) self.ui.poseList.editItem(item) def handle_remove_pose(self): if self.ui.poseList.currentRow() < 0: return self.ui.poseList.takeItem(self.ui.poseList.currentRow()) @asyncio.coroutine def handle_move_to_pose(self): if self.ui.poseList.currentRow() < 0: return values = self.ui.poseList.currentItem().data(QtCore.Qt.UserRole) yield From(self.controller.set_pose(values, pose_time=1.0)) with self.servo_update: for ident, angle_deg in values.iteritems(): control = self.servo_controls[ident] 
control['slider'].setSliderPosition(int(angle_deg)) control['doublespin'].setValue(angle_deg) def handle_update_pose(self): if self.ui.poseList.currentRow() < 0: return self.ui.poseList.currentItem().setData( QtCore.Qt.UserRole, self.generate_pose_data()) self.handle_poselist_current_changed(None, None) def handle_poselist_current_changed(self, current, previous): if self.ui.poseList.currentRow() < 0: return data = self.ui.poseList.currentItem().data(QtCore.Qt.UserRole) for i, control in enumerate(self.servo_controls): control['current'].setText('%.1f' % data[i]) def read_settings(self, config): if not config.has_section('servo'): return self.ui.typeCombo.setCurrentIndex(config.getint('servo', 'type')) self.ui.serialPortCombo.setEditText(config.get('servo', 'port')) self.ui.servoCountSpin.setValue(config.getint('servo', 'count')) self.servo_model = config.get('servo', 'model') if config.has_section('servo.names'): self.servo_name_map = {} for name, value in config.items('servo.names'): self.servo_name_map[int(name)] = value if config.has_section('servo.poses'): for name, value in config.items('servo.poses'): this_data = {} for element in value.split(','): ident, angle_deg = element.split('=') this_data[int(ident)] = float(angle_deg) item = self.add_list_pose(name) item.setData(QtCore.Qt.UserRole, this_data) def write_settings(self, config): config.add_section('servo') config.add_section('servo.poses') config.add_section('servo.names') config.set('servo', 'type', self.ui.typeCombo.currentIndex()) config.set('servo', 'port', self.ui.serialPortCombo.currentText()) config.set('servo', 'count', self.ui.servoCountSpin.value()) config.set('servo', 'model', self.servo_model) for key, value in self.servo_name_map.iteritems(): config.set('servo.names', str(key), value) for row in range(self.ui.poseList.count()): item = self.ui.poseList.item(row) pose_name = item.text() values = item.data(QtCore.Qt.UserRole) config.set( 'servo.poses', pose_name, ','.join(['%d=%.2f' % (ident, angle_deg) for ident, angle_deg in values.iteritems()]))
35.514874
78
0.593814
14,600
0.940722
3,690
0.237758
3,828
0.246649
0
0
1,270
0.08183
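The spawn helper in servo_tab.py above adapts a trollius coroutine into a plain callable that Qt signals can invoke, by wrapping the call in a Task. The same adapter pattern on modern asyncio, as a sketch (names are illustrative; the original targets Python 2 / trollius):

import asyncio

def spawn(coro_func):
    """Wrap an async callable so a plain callback can fire-and-forget it."""
    def start():
        asyncio.ensure_future(coro_func())
    return start

async def handle_click():
    await asyncio.sleep(0.1)
    print('clicked')

async def main():
    spawn(handle_click)()      # a Qt signal would call this slot
    await asyncio.sleep(0.2)   # let the spawned task finish

asyncio.run(main())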
6a588636dc362efae84b790a87924f429a4e4039
33,745
py
Python
epsilon/juice.py
twisted/epsilon
783910e1829688e95719a7d3151ec3e2cbb101fd
[ "MIT" ]
4
2017-09-01T18:49:11.000Z
2020-04-21T10:11:33.000Z
epsilon/juice.py
twisted/epsilon
783910e1829688e95719a7d3151ec3e2cbb101fd
[ "MIT" ]
35
2015-01-16T22:12:44.000Z
2021-07-11T11:28:58.000Z
epsilon/juice.py
twisted/epsilon
783910e1829688e95719a7d3151ec3e2cbb101fd
[ "MIT" ]
8
2015-01-24T17:43:58.000Z
2019-09-01T12:38:41.000Z
# -*- test-case-name: epsilon.test.test_juice -*- # Copyright 2005 Divmod, Inc. See LICENSE file for details import warnings, pprint import keyword import io import six from twisted.internet.main import CONNECTION_LOST from twisted.internet.defer import Deferred, maybeDeferred, fail from twisted.internet.protocol import ServerFactory, ClientFactory from twisted.internet.ssl import Certificate from twisted.python.failure import Failure from twisted.python import log, filepath from epsilon.liner import LineReceiver from epsilon.compat import long from epsilon import extime ASK = '_ask' ANSWER = '_answer' COMMAND = '_command' ERROR = '_error' ERROR_CODE = '_error_code' ERROR_DESCRIPTION = '_error_description' LENGTH = '_length' BODY = 'body' debug = False class JuiceBox(dict): """ I am a packet in the JUICE protocol. """ def __init__(self, __body='', **kw): self.update(kw) if __body: assert isinstance(__body, str), "body must be a string: %r" % ( repr(__body),) self['body'] = __body def body(): def get(self): warnings.warn("body attribute of boxes is now just a regular field", stacklevel=2) return self['body'] def set(self, newbody): warnings.warn("body attribute of boxes is now just a regular field", stacklevel=2) self['body'] = newbody return get,set body = property(*body()) def copy(self): newBox = self.__class__() newBox.update(self) return newBox def serialize(self, delimiter=b'\r\n', escaped=b'\r\n '): assert LENGTH not in self delimiter = six.ensure_binary(delimiter) escaped = six.ensure_binary(escaped) L = [] for (k, v) in six.viewitems(self): if k == BODY: k = LENGTH v = str(len(self[BODY])) L.append(six.ensure_binary(k).replace(b'_', b'-').title()) L.append(b': ') L.append(six.ensure_binary(v).replace(delimiter, escaped)) L.append(delimiter) L.append(delimiter) if BODY in self: L.append(six.ensure_binary(self[BODY])) return b''.join(L) def sendTo(self, proto): """ Serialize and send this box to a Juice instance. By the time it is being sent, several keys are required. I must have exactly ONE of:: -ask -answer -error If the '-ask' header is set, then the '-command' header must also be set. 
""" proto.sendPacket(self) # juice.Box => JuiceBox Box = JuiceBox class TLSBox(JuiceBox): def __repr__(self): return 'TLS(**%s)' % (super(TLSBox, self).__repr__(),) def __init__(self, __certificate, __verify=None, __sslstarted=None, **kw): super(TLSBox, self).__init__(**kw) self.certificate = __certificate self.verify = __verify self.sslstarted = __sslstarted def sendTo(self, proto): super(TLSBox, self).sendTo(proto) if self.verify is None: proto.startTLS(self.certificate) else: proto.startTLS(self.certificate, self.verify) if self.sslstarted is not None: self.sslstarted() class QuitBox(JuiceBox): def __repr__(self): return 'Quit(**%s)' % (super(QuitBox, self).__repr__(),) def sendTo(self, proto): super(QuitBox, self).sendTo(proto) proto.transport.loseConnection() class _SwitchBox(JuiceBox): def __repr__(self): return 'Switch(**%s)' % (super(_SwitchBox, self).__repr__(),) def __init__(self, __proto, **kw): super(_SwitchBox, self).__init__(**kw) self.innerProto = __proto def sendTo(self, proto): super(_SwitchBox, self).sendTo(proto) proto._switchTo(self.innerProto) class NegotiateBox(JuiceBox): def __repr__(self): return 'Negotiate(**%s)' % (super(NegotiateBox, self).__repr__(),) def sendTo(self, proto): super(NegotiateBox, self).sendTo(proto) proto._setProtocolVersion(int(self['version'])) class JuiceError(Exception): pass class RemoteJuiceError(JuiceError): """ This error indicates that something went wrong on the remote end of the connection, and the error was serialized and transmitted to you. """ def __init__(self, errorCode, description, fatal=False): """Create a remote error with an error code and description. """ Exception.__init__(self, "Remote[%s]: %s" % (errorCode, description)) self.errorCode = errorCode self.description = description self.fatal = fatal class UnhandledRemoteJuiceError(RemoteJuiceError): def __init__(self, description): errorCode = b"UNHANDLED" RemoteJuiceError.__init__(self, errorCode, description) class JuiceBoxError(JuiceError): pass class MalformedJuiceBox(JuiceBoxError): pass class UnhandledCommand(JuiceError): pass class IncompatibleVersions(JuiceError): pass class _Transactor: def __init__(self, store, callable): self.store = store self.callable = callable def __call__(self, box): return self.store.transact(self.callable, box) def __repr__(self): return '<Transaction in: %s of: %s>' % (self.store, self.callable) class DispatchMixin: baseDispatchPrefix = 'juice_' autoDispatchPrefix = 'command_' wrapper = None def _auto(self, aCallable, proto, namespace=None): if aCallable is None: return None command = aCallable.command if namespace not in command.namespaces: # if you're in the wrong namespace, you are very likely not allowed # to invoke the command you are trying to invoke. some objects # have commands exposed in a separate namespace for security # reasons, since the security model is a role : namespace mapping. 
log.msg('WRONG NAMESPACE: %r, %r' % (namespace, command.namespaces)) return None def doit(box): kw = stringsToObjects(box, command.arguments, proto) for name, extraArg in command.extra: kw[name] = extraArg.fromTransport(proto.transport) # def checkIsDict(result): # if not isinstance(result, dict): # raise RuntimeError("%r returned %r, not dictionary" % ( # aCallable, result)) # return result def checkKnownErrors(error): key = error.trap(*command.allErrors) code = command.allErrors[key] desc = str(error.value) return Failure(RemoteJuiceError( code, desc, error in command.fatalErrors)) return maybeDeferred(aCallable, **kw).addCallback( command.makeResponse, proto).addErrback( checkKnownErrors) return doit def _wrap(self, aCallable): if aCallable is None: return None wrap = self.wrapper if wrap is not None: return wrap(aCallable) else: return aCallable def normalizeCommand(self, cmd): """Return the canonical form of a command. """ return cmd.upper().strip().replace('-', '_') def lookupFunction(self, proto, name, namespace): """Return a callable to invoke when executing the named command. """ # Try to find a method to be invoked in a transaction first # Otherwise fallback to a "regular" method fName = self.autoDispatchPrefix + name fObj = getattr(self, fName, None) if fObj is not None: # pass the namespace along return self._auto(fObj, proto, namespace) assert namespace is None, 'Old-style parsing' # Fall back to simplistic command dispatching - we probably want to get # rid of this eventually, there's no reason to do extra work and write # fewer docs all the time. fName = self.baseDispatchPrefix + name return getattr(self, fName, None) def dispatchCommand(self, proto, cmd, box, namespace=None): fObj = self.lookupFunction(proto, self.normalizeCommand(cmd), namespace) if fObj is None: return fail(UnhandledCommand(cmd)) return maybeDeferred(self._wrap(fObj), box) def normalizeKey(key): lkey = six.ensure_str(key).lower().replace('-', '_') if keyword.iskeyword(lkey): return lkey.title() return lkey def parseJuiceHeaders(lines): """ Create a JuiceBox from a list of header lines. @param lines: a list of lines. @type lines: a list of L{bytes} """ b = JuiceBox() key = None for L in lines: if L[0:1] == b' ': # continuation assert key is not None b[key] += six.ensure_str(b'\r\n' + L[1:]) continue parts = L.split(b': ', 1) if len(parts) != 2: raise MalformedJuiceBox("Wrong number of parts: %r" % (L,)) key, value = parts key = normalizeKey(key) b[key] = six.ensure_str(value) return int(b.pop(LENGTH, 0)), b class JuiceParserBase(DispatchMixin): def __init__(self): self._outstandingRequests = {} def _puke(self, failure): log.msg("Juice server or network failure " "unhandled by client application:") log.err(failure) log.msg( "Dropping connection! 
" "To avoid, add errbacks to ALL remote commands!") if self.transport is not None: self.transport.loseConnection() _counter = long(0) def _nextTag(self): self._counter += 1 return '%x' % (self._counter,) def failAllOutgoing(self, reason): OR = self._outstandingRequests.items() self._outstandingRequests = None # we can never send another request for key, value in OR: value.errback(reason) def juiceBoxReceived(self, box): if debug: log.msg("Juice receive: %s" % pprint.pformat(dict(six.viewitems(box)))) if ANSWER in box: question = self._outstandingRequests.pop(box[ANSWER]) question.addErrback(self._puke) self._wrap(question.callback)(box) elif ERROR in box: question = self._outstandingRequests.pop(box[ERROR]) question.addErrback(self._puke) self._wrap(question.errback)( Failure(RemoteJuiceError(box[ERROR_CODE], box[ERROR_DESCRIPTION]))) elif COMMAND in box: cmd = box[COMMAND] def sendAnswer(answerBox): if ASK not in box: return if self.transport is None: return answerBox[ANSWER] = box[ASK] answerBox.sendTo(self) def sendError(error): if ASK not in box: return error if error.check(RemoteJuiceError): code = error.value.errorCode desc = error.value.description if error.value.fatal: errorBox = QuitBox() else: errorBox = JuiceBox() else: errorBox = QuitBox() log.err(error) # here is where server-side logging happens # if the error isn't handled code = 'UNHANDLED' desc = "Unhandled Remote System Exception " errorBox[ERROR] = box[ASK] errorBox[ERROR_DESCRIPTION] = desc errorBox[ERROR_CODE] = code if self.transport is not None: errorBox.sendTo(self) return None # intentionally stop the error here: don't log the # traceback if it's handled, do log it (earlier) if # it isn't self.dispatchCommand(self, cmd, box).addCallbacks(sendAnswer, sendError ).addErrback(self._puke) else: raise RuntimeError( "Empty packet received over connection-oriented juice: %r" % (box,)) def sendBoxCommand(self, command, box, requiresAnswer=True): """ Send a command across the wire with the given C{juice.Box}. Returns a Deferred which fires with the response C{juice.Box} when it is received, or fails with a C{juice.RemoteJuiceError} if an error is received. If the Deferred fails and the error is not handled by the caller of this method, the failure will be logged and the connection dropped. 
""" if self._outstandingRequests is None: return fail(CONNECTION_LOST) box[COMMAND] = command tag = self._nextTag() if requiresAnswer: box[ASK] = tag result = self._outstandingRequests[tag] = Deferred() else: result = None box.sendTo(self) return result class Argument: optional = False def __init__(self, optional=False): self.optional = optional def retrieve(self, d, name): if self.optional: value = d.get(name) if value is not None: del d[name] else: value = d.pop(name) return value def fromBox(self, name, strings, objects, proto): st = self.retrieve(strings, name) if self.optional and st is None: objects[name] = None else: objects[name] = self.fromStringProto(st, proto) def toBox(self, name, strings, objects, proto): obj = self.retrieve(objects, name) if self.optional and obj is None: # strings[name] = None return else: strings[name] = self.toStringProto(obj, proto) def fromStringProto(self, inString, proto): return self.fromString(inString) def toStringProto(self, inObject, proto): return self.toString(inObject) def fromString(self, inString): raise NotImplementedError() def toString(self, inObject): raise NotImplementedError() class JuiceList(Argument): def __init__(self, subargs): self.subargs = subargs def fromStringProto(self, inString, proto): boxes = parseString(six.ensure_binary(inString)) values = [stringsToObjects(box, self.subargs, proto) for box in boxes] return values def toStringProto(self, inObject, proto): return b''.join([ objectsToStrings(objects, self.subargs, Box(), proto).serialize() for objects in inObject ]) class ListOf(Argument): def __init__(self, subarg, delimiter=', '): self.subarg = subarg self.delimiter = delimiter def fromStringProto(self, inString, proto): strings = inString.split(self.delimiter) L = [self.subarg.fromStringProto(string, proto) for string in strings] return L def toStringProto(self, inObject, proto): L = [] for inSingle in inObject: outString = self.subarg.toStringProto(inSingle, proto) assert self.delimiter not in outString L.append(outString) return self.delimiter.join(L) class Integer(Argument): fromString = int def toString(self, inObject): return str(int(inObject)) class String(Argument): def toString(self, inObject): return inObject def fromString(self, inString): return inString class EncodedString(Argument): def __init__(self, encoding): self.encoding = encoding def toString(self, inObject): return inObject.encode(self.encoding) def fromString(self, inString): return inString.decode(self.encoding) # Temporary backwards compatibility for Exponent Body = String class Unicode(String): def toString(self, inObject): # assert isinstance(inObject, unicode) return String.toString(self, inObject.encode('utf-8')) def fromString(self, inString): # assert isinstance(inString, str) return String.fromString(self, inString).decode('utf-8') class Path(Unicode): def fromString(self, inString): return filepath.FilePath(Unicode.fromString(self, inString)) def toString(self, inObject): return Unicode.toString(self, inObject.path) class Float(Argument): fromString = float toString = str class Base64Binary(Argument): def toString(self, inObject): return inObject.encode('base64').replace('\n', '') def fromString(self, inString): return inString.decode('base64') class Time(Argument): def toString(self, inObject): return inObject.asISO8601TimeAndDate() def fromString(self, inString): return extime.Time.fromISO8601TimeAndDate(inString) class ExtraArg: def fromTransport(self, inTransport): raise NotImplementedError() class Peer(ExtraArg): def 
fromTransport(self, inTransport): return inTransport.getQ2QPeer() class PeerDomain(ExtraArg): def fromTransport(self, inTransport): return inTransport.getQ2QPeer().domain class PeerUser(ExtraArg): def fromTransport(self, inTransport): return inTransport.getQ2QPeer().resource class Host(ExtraArg): def fromTransport(self, inTransport): return inTransport.getQ2QHost() class HostDomain(ExtraArg): def fromTransport(self, inTransport): return inTransport.getQ2QHost().domain class HostUser(ExtraArg): def fromTransport(self, inTransport): return inTransport.getQ2QHost().resource class Boolean(Argument): def fromString(self, inString): if inString == 'True': return True elif inString == 'False': return False else: raise RuntimeError("Bad boolean value: %r" % (inString,)) def toString(self, inObject): if inObject: return 'True' else: return 'False' class _CommandMeta(type): def __new__(cls, name, bases, attrs): re = attrs['reverseErrors'] = {} er = attrs['allErrors'] = {} for v, k in six.viewitems(attrs.get('errors',{})): re[k] = v er[v] = k for v, k in six.viewitems(attrs.get('fatalErrors',{})): re[k] = v er[v] = k return type.__new__(cls, name, bases, attrs) @six.add_metaclass(_CommandMeta) class Command: arguments = [] response = [] extra = [] namespaces = [None] # This is set to [None] on purpose: None means # "no namespace", not "empty list". "empty # list" will make your command invalid in _all_ # namespaces, effectively uncallable. errors = {} fatalErrors = {} commandType = Box responseType = Box def commandName(): def get(self): return self.__class__.__name__ raise NotImplementedError("Missing command name") return get, commandName = property(*commandName()) def __init__(self, **kw): self.structured = kw givenArgs = [normalizeKey(k) for k in kw.keys()] forgotten = [] for name, arg in self.arguments: if normalizeKey(name) not in givenArgs and not arg.optional: forgotten.append(normalizeKey(name)) # for v in kw.itervalues(): # if v is None: # from pprint import pformat # raise RuntimeError("ARGH: %s" % pformat(kw)) if forgotten: if len(forgotten) == 1: plural = 'an argument' else: plural = 'some arguments' raise RuntimeError("You forgot %s to %r: %s" % ( plural, self.commandName, ', '.join(forgotten))) forgotten = [] def makeResponse(cls, objects, proto): try: return objectsToStrings(objects, cls.response, cls.responseType(), proto) except: log.msg("Exception in %r.makeResponse" % (cls,)) raise makeResponse = classmethod(makeResponse) def do(self, proto, namespace=None, requiresAnswer=True): if namespace is not None: cmd = namespace + ":" + self.commandName else: cmd = self.commandName def _massageError(error): error.trap(RemoteJuiceError) rje = error.value return Failure(self.reverseErrors.get(rje.errorCode, UnhandledRemoteJuiceError)(rje.description)) d = proto.sendBoxCommand( cmd, objectsToStrings(self.structured, self.arguments, self.commandType(), proto), requiresAnswer) if requiresAnswer: d.addCallback(stringsToObjects, self.response, proto) d.addCallback(self.addExtra, proto.transport) d.addErrback(_massageError) return d def addExtra(self, d, transport): for name, extraArg in self.extra: d[name] = extraArg.fromTransport(transport) return d class ProtocolSwitchCommand(Command): """Use this command to switch from something Juice-derived to a different protocol mid-connection. This can be useful to use juice as the connection-startup negotiation phase. 
Since TLS is a different layer entirely, you can use Juice to negotiate the security parameters of your connection, then switch to a different protocol, and the connection will remain secured. """ def __init__(self, __protoToSwitchToFactory, **kw): self.protoToSwitchToFactory = __protoToSwitchToFactory super(ProtocolSwitchCommand, self).__init__(**kw) def makeResponse(cls, innerProto, proto): return _SwitchBox(innerProto) makeResponse = classmethod(makeResponse) def do(self, proto, namespace=None): d = super(ProtocolSwitchCommand, self).do(proto) proto._lock() def switchNow(ign): innerProto = self.protoToSwitchToFactory.buildProtocol(proto.transport.getPeer()) proto._switchTo(innerProto, self.protoToSwitchToFactory) return ign def die(ign): proto.transport.loseConnection() return ign def handle(ign): self.protoToSwitchToFactory.clientConnectionFailed(None, Failure(CONNECTION_LOST)) return ign return d.addCallbacks(switchNow, handle).addErrback(die) class Negotiate(Command): commandName = 'Negotiate' arguments = [('versions', ListOf(Integer()))] response = [('version', Integer())] responseType = NegotiateBox class Juice(LineReceiver, JuiceParserBase, object): """ JUICE (JUice Is Concurrent Events) is a simple connection-oriented request/response protocol. Packets, or "boxes", are collections of RFC2822-inspired headers, plus a body. Note that this is NOT a literal interpretation of any existing RFC, 822, 2822 or otherwise, but a simpler version that does not do line continuations, does not specify any particular format for header values, dispatches semantic meanings of most headers on the -Command header rather than giving them global meaning, and allows multiple sets of headers (messages, or JuiceBoxes) on a connection. All headers whose names begin with a dash ('-') are reserved for use by the protocol. All others are for application use - their meaning depends on the value of the "-Command" header. """ protocolName = b'juice-base' hostCertificate = None MAX_LENGTH = 1024 * 1024 isServer = property(lambda self: self._issueGreeting, doc=""" True if this is a juice server, e.g. it is going to issue or has issued a server greeting upon connection. """) isClient = property(lambda self: not self._issueGreeting, doc=""" True if this is a juice server, e.g. it is not going to issue or did not issue a server greeting upon connection. """) def __init__(self, issueGreeting): """ @param issueGreeting: whether to issue a greeting when connected. This should be set on server-side Juice protocols. """ JuiceParserBase.__init__(self) self._issueGreeting = issueGreeting def __repr__(self): return '<%s %s/%s at 0x%x>' % (self.__class__.__name__, self.isClient and 'client' or 'server', self.innerProtocol, id(self)) __locked = False def _lock(self): """ Lock this Juice instance so that no further Juice traffic may be sent. This is used when sending a request to switch underlying protocols. You probably want to subclass ProtocolSwitchCommand rather than calling this directly. """ self.__locked = True innerProtocol = None def _switchTo(self, newProto, clientFactory=None): """ Switch this Juice instance to a new protocol. You need to do this 'simultaneously' on both ends of a connection; the easiest way to do this is to use a subclass of ProtocolSwitchCommand. """ assert self.innerProtocol is None, "Protocol can only be safely switched once." 
self.setRawMode() self.innerProtocol = newProto self.innerProtocolClientFactory = clientFactory newProto.makeConnection(self.transport) innerProtocolClientFactory = None def juiceBoxReceived(self, box): if self.__locked and COMMAND in box and ASK in box: # This is a command which will trigger an answer, and we can no # longer answer anything, so don't bother delivering it. return return super(Juice, self).juiceBoxReceived(box) def sendPacket(self, completeBox): """ Send a juice.Box to my peer. Note: transport.write is never called outside of this method. """ assert not self.__locked, "You cannot send juice packets when a connection is locked" if self._startingTLSBuffer is not None: self._startingTLSBuffer.append(completeBox) else: if debug: log.msg("Juice send: %s" % pprint.pformat(dict(six.viewitems(completeBox)))) result = completeBox.serialize() self.transport.write(result) def sendCommand(self, command, __content='', __answer=True, **kw): box = JuiceBox(__content, **kw) return self.sendBoxCommand(command, box, requiresAnswer=__answer) _outstandingRequests = None _justStartedTLS = False def makeConnection(self, transport): self._transportPeer = transport.getPeer() self._transportHost = transport.getHost() log.msg("%s %s connection established (HOST:%s PEER:%s)" % (self.isClient and "client" or "server", self.__class__.__name__, self._transportHost, self._transportPeer)) self._outstandingRequests = {} self._requestBuffer = [] LineReceiver.makeConnection(self, transport) _startingTLSBuffer = None def prepareTLS(self): self._startingTLSBuffer = [] def startTLS(self, certificate, *verifyAuthorities): if self.hostCertificate is None: self.hostCertificate = certificate self._justStartedTLS = True self.transport.startTLS(certificate.options(*verifyAuthorities)) stlsb = self._startingTLSBuffer if stlsb is not None: self._startingTLSBuffer = None for box in stlsb: self.sendPacket(box) else: raise RuntimeError( "Previously authenticated connection between %s and %s " "is trying to re-establish as %s" % ( self.hostCertificate, Certificate.peerFromTransport(self.transport), (certificate, verifyAuthorities))) def dataReceived(self, data): # If we successfully receive any data after TLS has been started, that # means the connection was secured properly. Make a note of that fact. 
if self._justStartedTLS: self._justStartedTLS = False return LineReceiver.dataReceived(self, data) def connectionLost(self, reason): log.msg("%s %s connection lost (HOST:%s PEER:%s)" % ( self.isClient and 'client' or 'server', self.__class__.__name__, self._transportHost, self._transportPeer)) self.failAllOutgoing(reason) if self.innerProtocol is not None: self.innerProtocol.connectionLost(reason) if self.innerProtocolClientFactory is not None: self.innerProtocolClientFactory.clientConnectionLost(None, reason) def lineReceived(self, line): if line: self._requestBuffer.append(line) else: buf = self._requestBuffer self._requestBuffer = [] bodylen, b = parseJuiceHeaders(buf) if bodylen: self._bodyRemaining = bodylen self._bodyBuffer = [] self._pendingBox = b self.setRawMode() else: self.juiceBoxReceived(b) def rawDataReceived(self, data): if self.innerProtocol is not None: self.innerProtocol.dataReceived(data) return self._bodyRemaining -= len(data) if self._bodyRemaining <= 0: if self._bodyRemaining < 0: self._bodyBuffer.append(data[:self._bodyRemaining]) extraData = data[self._bodyRemaining:] else: self._bodyBuffer.append(data) extraData = '' self._pendingBox['body'] = six.ensure_str(b''.join(six.ensure_binary(each) for each in self._bodyBuffer)) self._bodyBuffer = None b, self._pendingBox = self._pendingBox, None self.juiceBoxReceived(b) if self.innerProtocol is not None: self.innerProtocol.makeConnection(self.transport) if extraData: self.innerProtocol.dataReceived(extraData) else: self.setLineMode(extraData) else: self._bodyBuffer.append(data) protocolVersion = 0 def _setProtocolVersion(self, version): # if we ever want to actually mangle encodings, this is the place to do # it! self.protocolVersion = version return version def renegotiateVersion(self, newVersion): assert newVersion in VERSIONS, ( "This side of the connection doesn't support version %r" % (newVersion,)) v = VERSIONS[:] v.remove(newVersion) return Negotiate(versions=[newVersion]).do(self).addCallback( lambda ver: self._setProtocolVersion(ver['version'])) def command_NEGOTIATE(self, versions): for version in versions: if version in VERSIONS: return dict(version=version) raise IncompatibleVersions() command_NEGOTIATE.command = Negotiate VERSIONS = [1] class _ParserHelper(Juice): def __init__(self): Juice.__init__(self, False) self.boxes = [] self.results = Deferred() def getPeer(self): return 'string' def getHost(self): return 'string' disconnecting = False def juiceBoxReceived(self, box): self.boxes.append(box) # Synchronous helpers def parse(cls, fileObj): p = cls() p.makeConnection(p) p.dataReceived(fileObj.read()) return p.boxes parse = classmethod(parse) def parseString(cls, data): with io.BytesIO(data) as f: return cls.parse(f) parseString = classmethod(parseString) parse = _ParserHelper.parse parseString = _ParserHelper.parseString def stringsToObjects(strings, arglist, proto): objects = {} myStrings = strings.copy() for argname, argparser in arglist: argparser.fromBox(argname, myStrings, objects, proto) return objects def objectsToStrings(objects, arglist, strings, proto): myObjects = {} for (k, v) in objects.items(): myObjects[normalizeKey(k)] = v for argname, argparser in arglist: argparser.toBox(argname, strings, myObjects, proto) return strings class JuiceServerFactory(ServerFactory): protocol = Juice def buildProtocol(self, addr): prot = self.protocol(True) prot.factory = self return prot class JuiceClientFactory(ClientFactory): protocol = Juice def buildProtocol(self, addr): prot = self.protocol(False) 
prot.factory = self return prot
33.410891
133
0.602874
31,350
0.929027
0
0
2,744
0.081316
0
0
7,178
0.212713
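The Juice class above describes boxes as RFC2822-inspired header sets: keys have '_' mapped to '-' and are title-cased, the body travels via a Length header, and a blank line separates headers from the body. A simplified sketch of that wire format (no delimiter escaping, which JuiceBox.serialize also performs):

def serialize_box(box, body=b''):
    """Render header/value pairs roughly the way JuiceBox.serialize does."""
    if body:
        box = dict(box, _length=str(len(body)))  # body length as a header
    out = []
    for key, value in box.items():
        # '_command' -> '-Command', per the '_'/'-' mapping and title-casing
        out.append(key.encode().replace(b'_', b'-').title() + b': ' +
                   value.encode() + b'\r\n')
    out.append(b'\r\n')
    return b''.join(out) + body

print(serialize_box({'_command': 'Hello', '_ask': '1'}))
# b'-Command: Hello\r\n-Ask: 1\r\n\r\n'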
6a5913eb8964167841ec2eb740f4b32d39ad706a
7,290
py
Python
bonsai3/simulator_client.py
kirillpol-ms/bonsai3-py
ede9c2c1d25d784d61b7cbf1438a257b5d592274
[ "MIT" ]
null
null
null
bonsai3/simulator_client.py
kirillpol-ms/bonsai3-py
ede9c2c1d25d784d61b7cbf1438a257b5d592274
[ "MIT" ]
3
2020-06-01T18:43:55.000Z
2020-08-14T17:44:54.000Z
bonsai3/simulator_client.py
BonsaiAI/bonsai3-py
29158cc58f39604fa96e10e41ff00fc195f6b315
[ "MIT" ]
2
2020-06-16T14:24:17.000Z
2020-08-13T00:27:31.000Z
""" Client for simulator requests """ __copyright__ = "Copyright 2020, Microsoft Corp." # pyright: strict from random import uniform import time from typing import Union import jsons import requests from .exceptions import RetryTimeoutError, ServiceError from .logger import Logger from .simulator_protocol import ( ServiceConfig, SimulatorEvent, SimulatorEventRequest, SimulatorInterface, ) log = Logger() _RETRYABLE_ERROR_CODES = {502, 503, 504} _MAXIMUM_BACKOFF_SECONDS = 60 _BACKOFF_BASE_MULTIPLIER_MILLISECONDS = 50 class SimulatorClient: def __init__(self, config: ServiceConfig): self._config = config self._retry_attempts = 0 self._retry_timeout = None self._session = requests.session() self._session.headers.update( {"Authorization": config.access_key, "Content-type": "application/json"} ) def register_simulator(self, interface: SimulatorInterface) -> SimulatorEvent: return self._http_request(interface, self._config) def get_next_event(self, event_request: SimulatorEventRequest) -> SimulatorEvent: return self._http_request(event_request, self._config) def unregister_simulator(self, session_id: str): url = "{}/v2/workspaces/{}/simulatorSessions/{}".format( self._config.server, self._config.workspace, session_id ) log.debug("Sending unregister request to {}".format(url)) return self._session.delete(url, timeout=self._config.network_timeout_seconds) def _http_request( self, payload: Union[SimulatorInterface, SimulatorEventRequest], config: ServiceConfig, ) -> SimulatorEvent: res = None if self._retry_attempts >= 1: self._handle_retry() try: # NOTE: we assert these for the user here to allow the config object to be partially initialized before use. assert len( config.access_key ), "Environment variable SIM_ACCESS_KEY is unset or access_key is empty." assert len( config.workspace ), "Environment variable SIM_WORKSPACE is unset or workspace is empty." assert len( config.server ), "Environment variable SIM_API_HOST is unset or server is empty." # Register request if isinstance(payload, SimulatorInterface): reg_url = "{}/v2/workspaces/{}/simulatorSessions".format( config.server, config.workspace ) log.debug("Sending registration to {}".format(reg_url)) log.debug("Registration payload: {}".format(jsons.dumps(payload))) res = self._session.post( reg_url, json=jsons.loads(payload.json), headers={ "Authorization": config.access_key, "Content-type": "application/json", }, timeout=self._config.network_timeout_seconds, ) log.debug("Response to registration received.") # Get next event request if isinstance(payload, SimulatorEventRequest): log.network("Sending get next event request.") res = self._session.post( "{}/v2/workspaces/{}/simulatorSessions/{}/advance".format( config.server, config.workspace, payload.sessionId ), json=jsons.loads(jsons.dumps(payload)), headers={ "Authorization": config.access_key, "Content-type": "application/json", }, timeout=self._config.network_timeout_seconds, ) log.network("Response to get next event request received.") except requests.exceptions.Timeout as err: log.error(err) self._retry_attempts += 1 return self._http_request(payload, config) except requests.exceptions.RequestException as err: if res is not None: log.error(res.text) log.error(err) raise if res is not None: if res.status_code in _RETRYABLE_ERROR_CODES: log.debug( "Service returned {}, a retryable response error code." 
" Retrying request.".format(res.status_code) ) self._retry_attempts += 1 return self._http_request(payload, config) # bail on error if res.status_code != 200 and res.status_code != 201: log.error( "Received response with {} http status code. " "Raising exception.".format(res.status_code) ) if res.text: log.error(res.text) raise ServiceError( "Unable to get next event for simulator, " "received {} http status code".format(res.status_code) ) # TODO estee: this needs validation # SimulatorEvent self._retry_attempts = 0 self._retry_timeout = None return self._event_from_json(res.text) raise RuntimeError( "Usage error: Somehow http response ended up as none. " "Check arguments to _http_request and ensure the payload " "is either of type SimulatorInterface or SimulatorEventRequest" ) def _event_from_json(self, json_text: str) -> SimulatorEvent: """Converts a json string into a SimulatorEvent.""" event_dict = jsons.loads(json_text) log.debug("Event Response: {}".format(event_dict)) return SimulatorEvent(event_dict) def _handle_retry(self): log.network("handling retry.") if ( self._retry_timeout and time.time() > self._retry_timeout ) or self._config.retry_timeout_seconds == 0: raise RetryTimeoutError("Simulator Retry time exceeded.") if self._config.retry_timeout_seconds > 0 and self._retry_timeout is None: self._retry_timeout = time.time() + self._config.retry_timeout_seconds log.info( "Simulator will timeout in {} seconds if it is not able " "to connect to the platform.".format(self._retry_timeout - time.time()) ) self._backoff() log.network("retry handled.") def _backoff(self): """ Implements Exponential backoff algorithm with full jitter Check the following url for more information https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ """ power_of_two = 2 ** self._retry_attempts max_sleep = min( power_of_two * _BACKOFF_BASE_MULTIPLIER_MILLISECONDS / 1000.0, _MAXIMUM_BACKOFF_SECONDS, ) sleep = uniform(0, max_sleep) log.debug( "Retry attempt: {}, backing off for {} seconds".format( self._retry_attempts, sleep ) ) time.sleep(sleep)
38.167539
120
0.58834
6,744
0.925103
0
0
0
0
0
0
1,854
0.254321
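_backoff in the simulator client above implements capped exponential backoff with full jitter: each retry sleeps a uniform draw from [0, min(cap, base * 2**attempt)]. A standalone sketch with the same constants (50 ms base multiplier, 60 s cap):

import random

BASE_MS = 50      # mirrors _BACKOFF_BASE_MULTIPLIER_MILLISECONDS
CAP_SECONDS = 60  # mirrors _MAXIMUM_BACKOFF_SECONDS

def backoff_seconds(attempt):
    """Full jitter: uniform draw from [0, min(cap, base * 2**attempt)]."""
    max_sleep = min((2 ** attempt) * BASE_MS / 1000.0, CAP_SECONDS)
    return random.uniform(0, max_sleep)

for attempt in range(5):
    print(attempt, round(backoff_seconds(attempt), 3))
# ceilings grow 0.05, 0.1, 0.2, 0.4, 0.8, ... until the 60 s cap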
6a5a09a1f1eb09c5b1fb6c4e179dd1021a0b354e
47,088
py
Python
perturbed_images_generation_multiProcess.py
gwxie/Synthesize-Distorted-Image-and-Its-Control-Points
ed6de3e05a7ee1f3aecf65fcbb87c11d2ede41e7
[ "Apache-2.0" ]
8
2022-03-27T18:37:57.000Z
2022-03-30T09:17:26.000Z
perturbed_images_generation_multiProcess.py
gwxie/Synthesize-Distorted-Image-and-Its-Control-Points
ed6de3e05a7ee1f3aecf65fcbb87c11d2ede41e7
[ "Apache-2.0" ]
null
null
null
perturbed_images_generation_multiProcess.py
gwxie/Synthesize-Distorted-Image-and-Its-Control-Points
ed6de3e05a7ee1f3aecf65fcbb87c11d2ede41e7
[ "Apache-2.0" ]
1
2022-03-31T02:22:58.000Z
2022-03-31T02:22:58.000Z
''' GuoWang xie set up :2020-1-9 integrate img and label into one file -- fiducial1024_v1 ''' import argparse import sys, os import pickle import random import collections import json import numpy as np import scipy.io as io import scipy.misc as m import matplotlib.pyplot as plt import glob import math import time import threading import multiprocessing as mp from multiprocessing import Pool import re import cv2 # sys.path.append('/lustre/home/gwxie/hope/project/dewarp/datasets/') # /lustre/home/gwxie/program/project/unwarp/perturbed_imgaes/GAN import utils def getDatasets(dir): return os.listdir(dir) class perturbed(utils.BasePerturbed): def __init__(self, path, bg_path, save_path, save_suffix): self.path = path self.bg_path = bg_path self.save_path = save_path self.save_suffix = save_suffix def save_img(self, m, n, fold_curve='fold', repeat_time=4, fiducial_points = 16, relativeShift_position='relativeShift_v2'): origin_img = cv2.imread(self.path, flags=cv2.IMREAD_COLOR) save_img_shape = [512*2, 480*2] # 320 # reduce_value = np.random.choice([2**4, 2**5, 2**6, 2**7, 2**8], p=[0.01, 0.1, 0.4, 0.39, 0.1]) reduce_value = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.02, 0.18, 0.2, 0.3, 0.1, 0.1, 0.08, 0.02]) # reduce_value = np.random.choice([8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.01, 0.02, 0.2, 0.4, 0.19, 0.18]) # reduce_value = np.random.choice([16, 24, 32, 40, 48, 64], p=[0.01, 0.1, 0.2, 0.4, 0.2, 0.09]) base_img_shrink = save_img_shape[0] - reduce_value # enlarge_img_shrink = [1024, 768] # enlarge_img_shrink = [896, 672] # 420 enlarge_img_shrink = [512*4, 480*4] # 420 # enlarge_img_shrink = [896*2, 768*2] # 420 # enlarge_img_shrink = [896, 768] # 420 # enlarge_img_shrink = [768, 576] # 420 # enlarge_img_shrink = [640, 480] # 420 '''''' im_lr = origin_img.shape[0] im_ud = origin_img.shape[1] reduce_value_v2 = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 28*2, 32*2, 48*2], p=[0.02, 0.18, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1]) # reduce_value_v2 = np.random.choice([16, 24, 28, 32, 48, 64], p=[0.01, 0.1, 0.2, 0.3, 0.25, 0.14]) if im_lr > im_ud: im_ud = min(int(im_ud / im_lr * base_img_shrink), save_img_shape[1] - reduce_value_v2) im_lr = save_img_shape[0] - reduce_value else: base_img_shrink = save_img_shape[1] - reduce_value im_lr = min(int(im_lr / im_ud * base_img_shrink), save_img_shape[0] - reduce_value_v2) im_ud = base_img_shrink if round(im_lr / im_ud, 2) < 0.5 or round(im_ud / im_lr, 2) < 0.5: repeat_time = min(repeat_time, 8) edge_padding = 3 im_lr -= im_lr % (fiducial_points-1) - (2*edge_padding) # im_lr % (fiducial_points-1) - 1 im_ud -= im_ud % (fiducial_points-1) - (2*edge_padding) # im_ud % (fiducial_points-1) - 1 im_hight = np.linspace(edge_padding, im_lr - edge_padding, fiducial_points, dtype=np.int64) im_wide = np.linspace(edge_padding, im_ud - edge_padding, fiducial_points, dtype=np.int64) # im_lr -= im_lr % (fiducial_points-1) - (1+2*edge_padding) # im_lr % (fiducial_points-1) - 1 # im_ud -= im_ud % (fiducial_points-1) - (1+2*edge_padding) # im_ud % (fiducial_points-1) - 1 # im_hight = np.linspace(edge_padding, im_lr - (1+edge_padding), fiducial_points, dtype=np.int64) # im_wide = np.linspace(edge_padding, im_ud - (1+edge_padding), fiducial_points, dtype=np.int64) im_x, im_y = np.meshgrid(im_hight, im_wide) segment_x = (im_lr) // (fiducial_points-1) segment_y = (im_ud) // (fiducial_points-1) # plt.plot(im_x, im_y, # color='limegreen', # marker='.', # linestyle='') # plt.grid(True) # plt.show() self.origin_img = cv2.resize(origin_img, (im_ud, im_lr), 
interpolation=cv2.INTER_CUBIC) perturbed_bg_ = getDatasets(self.bg_path) perturbed_bg_img_ = self.bg_path+random.choice(perturbed_bg_) perturbed_bg_img = cv2.imread(perturbed_bg_img_, flags=cv2.IMREAD_COLOR) mesh_shape = self.origin_img.shape[:2] self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 256, dtype=np.float32)#np.zeros_like(perturbed_bg_img) # self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 0, dtype=np.int16)#np.zeros_like(perturbed_bg_img) self.new_shape = self.synthesis_perturbed_img.shape[:2] perturbed_bg_img = cv2.resize(perturbed_bg_img, (save_img_shape[1], save_img_shape[0]), cv2.INPAINT_TELEA) origin_pixel_position = np.argwhere(np.zeros(mesh_shape, dtype=np.uint32) == 0).reshape(mesh_shape[0], mesh_shape[1], 2) pixel_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(self.new_shape[0], self.new_shape[1], 2) self.perturbed_xy_ = np.zeros((self.new_shape[0], self.new_shape[1], 2)) # self.perturbed_xy_ = pixel_position.copy().astype(np.float32) # fiducial_points_grid = origin_pixel_position[im_x, im_y] self.synthesis_perturbed_label = np.zeros((self.new_shape[0], self.new_shape[1], 2)) x_min, y_min, x_max, y_max = self.adjust_position_v2(0, 0, mesh_shape[0], mesh_shape[1], save_img_shape) origin_pixel_position += [x_min, y_min] x_min, y_min, x_max, y_max = self.adjust_position(0, 0, mesh_shape[0], mesh_shape[1]) x_shift = random.randint(-enlarge_img_shrink[0]//16, enlarge_img_shrink[0]//16) y_shift = random.randint(-enlarge_img_shrink[1]//16, enlarge_img_shrink[1]//16) x_min += x_shift x_max += x_shift y_min += y_shift y_max += y_shift '''im_x,y''' im_x += x_min im_y += y_min self.synthesis_perturbed_img[x_min:x_max, y_min:y_max] = self.origin_img self.synthesis_perturbed_label[x_min:x_max, y_min:y_max] = origin_pixel_position synthesis_perturbed_img_map = self.synthesis_perturbed_img.copy() synthesis_perturbed_label_map = self.synthesis_perturbed_label.copy() foreORbackground_label = np.full((mesh_shape), 1, dtype=np.int16) foreORbackground_label_map = np.full((self.new_shape), 0, dtype=np.int16) foreORbackground_label_map[x_min:x_max, y_min:y_max] = foreORbackground_label # synthesis_perturbed_img_map = self.pad(self.synthesis_perturbed_img.copy(), x_min, y_min, x_max, y_max) # synthesis_perturbed_label_map = self.pad(synthesis_perturbed_label_map, x_min, y_min, x_max, y_max) '''*****************************************************************''' is_normalizationFun_mixture = self.is_perform(0.2, 0.8) # if not is_normalizationFun_mixture: normalizationFun_0_1 = False # normalizationFun_0_1 = self.is_perform(0.5, 0.5) if fold_curve == 'fold': fold_curve_random = True # is_normalizationFun_mixture = False normalizationFun_0_1 = self.is_perform(0.2, 0.8) if is_normalizationFun_mixture: alpha_perturbed = random.randint(80, 120) / 100 else: if normalizationFun_0_1 and repeat_time < 8: alpha_perturbed = random.randint(50, 70) / 100 else: alpha_perturbed = random.randint(70, 130) / 100 else: fold_curve_random = self.is_perform(0.1, 0.9) # False # self.is_perform(0.01, 0.99) alpha_perturbed = random.randint(80, 160) / 100 # is_normalizationFun_mixture = False # self.is_perform(0.01, 0.99) synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 256) # synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 0, dtype=np.int16) synthesis_perturbed_label = np.zeros_like(self.synthesis_perturbed_label) alpha_perturbed_change = self.is_perform(0.5, 0.5) 
p_pp_choice = self.is_perform(0.8, 0.2) if fold_curve == 'fold' else self.is_perform(0.1, 0.9) for repeat_i in range(repeat_time): if alpha_perturbed_change: if fold_curve == 'fold': if is_normalizationFun_mixture: alpha_perturbed = random.randint(80, 120) / 100 else: if normalizationFun_0_1 and repeat_time < 8: alpha_perturbed = random.randint(50, 70) / 100 else: alpha_perturbed = random.randint(70, 130) / 100 else: alpha_perturbed = random.randint(80, 160) / 100 '''''' linspace_x = [0, (self.new_shape[0] - im_lr) // 2 - 1, self.new_shape[0] - (self.new_shape[0] - im_lr) // 2 - 1, self.new_shape[0] - 1] linspace_y = [0, (self.new_shape[1] - im_ud) // 2 - 1, self.new_shape[1] - (self.new_shape[1] - im_ud) // 2 - 1, self.new_shape[1] - 1] linspace_x_seq = [1, 2, 3] linspace_y_seq = [1, 2, 3] r_x = random.choice(linspace_x_seq) r_y = random.choice(linspace_y_seq) perturbed_p = np.array( [random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10), random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10 if ((r_x == 1 or r_x == 3) and (r_y == 1 or r_y == 3)) and p_pp_choice: linspace_x_seq.remove(r_x) linspace_y_seq.remove(r_y) r_x = random.choice(linspace_x_seq) r_y = random.choice(linspace_y_seq) perturbed_pp = np.array( [random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10), random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10 # perturbed_p, perturbed_pp = np.array( # [random.randint(0, self.new_shape[0] * 10) / 10, # random.randint(0, self.new_shape[1] * 10) / 10]) \ # , np.array([random.randint(0, self.new_shape[0] * 10) / 10, # random.randint(0, self.new_shape[1] * 10) / 10]) # perturbed_p, perturbed_pp = np.array( # [random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10, # random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) \ # , np.array([random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10, # random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) '''''' perturbed_vp = perturbed_pp - perturbed_p perturbed_vp_norm = np.linalg.norm(perturbed_vp) perturbed_distance_vertex_and_line = np.dot((perturbed_p - pixel_position), perturbed_vp) / perturbed_vp_norm '''''' # perturbed_v = np.array([random.randint(-3000, 3000) / 100, random.randint(-3000, 3000) / 100]) # perturbed_v = np.array([random.randint(-4000, 4000) / 100, random.randint(-4000, 4000) / 100]) if fold_curve == 'fold' and self.is_perform(0.6, 0.4): # self.is_perform(0.3, 0.7): # perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100]) perturbed_v = np.array([random.randint(-10000, 10000) / 100, random.randint(-10000, 10000) / 100]) # perturbed_v = np.array([random.randint(-11000, 11000) / 100, random.randint(-11000, 11000) / 100]) else: # perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100]) # perturbed_v = np.array([random.randint(-16000, 16000) / 100, random.randint(-16000, 16000) / 100]) perturbed_v = np.array([random.randint(-8000, 8000) / 100, random.randint(-8000, 8000) / 100]) # perturbed_v = np.array([random.randint(-3500, 3500) / 100, random.randint(-3500, 3500) / 100]) # perturbed_v = np.array([random.randint(-600, 600) / 10, random.randint(-600, 600) / 10]) '''''' if fold_curve == 'fold': if is_normalizationFun_mixture: if self.is_perform(0.5, 0.5): perturbed_d = 
np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2)) else: if normalizationFun_0_1: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2) else: perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: if is_normalizationFun_mixture: if self.is_perform(0.5, 0.5): perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2)) else: if normalizationFun_0_1: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2) else: perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) '''''' if fold_curve_random: # omega_perturbed = (alpha_perturbed+0.2) / (perturbed_d + alpha_perturbed) # omega_perturbed = alpha_perturbed**perturbed_d omega_perturbed = alpha_perturbed / (perturbed_d + alpha_perturbed) else: omega_perturbed = 1 - perturbed_d ** alpha_perturbed '''shadow''' if self.is_perform(0.6, 0.4): synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] = np.minimum(np.maximum(synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] - np.int16(np.round(omega_perturbed[x_min:x_max, y_min:y_max].repeat(3).reshape(x_max-x_min, y_max-y_min, 3) * abs(np.linalg.norm(perturbed_v//2))*np.array([0.4-random.random()*0.1, 0.4-random.random()*0.1, 0.4-random.random()*0.1]))), 0), 255) '''''' if relativeShift_position in ['position', 'relativeShift_v2']: self.perturbed_xy_ += np.array([omega_perturbed * perturbed_v[0], omega_perturbed * perturbed_v[1]]).transpose(1, 2, 0) else: print('relativeShift_position error') exit() ''' flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape( self.new_shape[0] * self.new_shape[1], 2) vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position) wts_sum = np.abs(wts).sum(-1) # flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts) wts = wts[wts_sum <= 1, :] vtx = vtx[wts_sum <= 1, :] synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts) synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts) foreORbackground_label = np.zeros(self.new_shape) foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts) foreORbackground_label[foreORbackground_label < 0.99] = 0 foreORbackground_label[foreORbackground_label >= 0.99] = 1 # synthesis_perturbed_img = np.around(synthesis_perturbed_img).astype(np.uint8) synthesis_perturbed_label[:, :, 0] *= foreORbackground_label synthesis_perturbed_label[:, :, 1] *= foreORbackground_label synthesis_perturbed_img[:, :, 0] *= foreORbackground_label synthesis_perturbed_img[:, :, 1] *= foreORbackground_label synthesis_perturbed_img[:, :, 2] *= foreORbackground_label self.synthesis_perturbed_img = synthesis_perturbed_img self.synthesis_perturbed_label = synthesis_perturbed_label ''' '''perspective''' perspective_shreshold = random.randint(26, 36)*10 # 280 x_min_per, y_min_per, x_max_per, y_max_per = 
self.adjust_position(perspective_shreshold, perspective_shreshold, self.new_shape[0]-perspective_shreshold, self.new_shape[1]-perspective_shreshold) pts1 = np.float32([[x_min_per, y_min_per], [x_max_per, y_min_per], [x_min_per, y_max_per], [x_max_per, y_max_per]]) e_1_ = x_max_per - x_min_per e_2_ = y_max_per - y_min_per e_3_ = e_2_ e_4_ = e_1_ perspective_shreshold_h = e_1_*0.02 perspective_shreshold_w = e_2_*0.02 a_min_, a_max_ = 70, 110 # if self.is_perform(1, 0): if fold_curve == 'curve' and self.is_perform(0.5, 0.5): if self.is_perform(0.5, 0.5): while True: pts2 = np.around( np.float32([[x_min_per - (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold], [x_max_per - (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold], [x_min_per + (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold], [x_max_per + (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold]])) # right e_1 = np.linalg.norm(pts2[0]-pts2[1]) e_2 = np.linalg.norm(pts2[0]-pts2[2]) e_3 = np.linalg.norm(pts2[1]-pts2[3]) e_4 = np.linalg.norm(pts2[2]-pts2[3]) if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \ e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \ abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w: a0_, a1_, a2_, a3_ = self.get_angle_4(pts2) if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_): break else: while True: pts2 = np.around( np.float32([[x_min_per + (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold], [x_max_per + (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold], [x_min_per - (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold], [x_max_per - (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold]])) e_1 = np.linalg.norm(pts2[0]-pts2[1]) e_2 = np.linalg.norm(pts2[0]-pts2[2]) e_3 = np.linalg.norm(pts2[1]-pts2[3]) e_4 = np.linalg.norm(pts2[2]-pts2[3]) if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \ e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \ abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w: a0_, a1_, a2_, a3_ = self.get_angle_4(pts2) if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_): break else: while True: pts2 = np.around(np.float32([[x_min_per+(random.random()-0.5)*perspective_shreshold, y_min_per+(random.random()-0.5)*perspective_shreshold], [x_max_per+(random.random()-0.5)*perspective_shreshold, y_min_per+(random.random()-0.5)*perspective_shreshold], [x_min_per+(random.random()-0.5)*perspective_shreshold, y_max_per+(random.random()-0.5)*perspective_shreshold], [x_max_per+(random.random()-0.5)*perspective_shreshold, 
y_max_per+(random.random()-0.5)*perspective_shreshold]])) e_1 = np.linalg.norm(pts2[0]-pts2[1]) e_2 = np.linalg.norm(pts2[0]-pts2[2]) e_3 = np.linalg.norm(pts2[1]-pts2[3]) e_4 = np.linalg.norm(pts2[2]-pts2[3]) if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \ e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \ abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w: a0_, a1_, a2_, a3_ = self.get_angle_4(pts2) if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_): break M = cv2.getPerspectiveTransform(pts1, pts2) one = np.ones((self.new_shape[0], self.new_shape[1], 1), dtype=np.int16) matr = np.dstack((pixel_position, one)) new = np.dot(M, matr.reshape(-1, 3).T).T.reshape(self.new_shape[0], self.new_shape[1], 3) x = new[:, :, 0]/new[:, :, 2] y = new[:, :, 1]/new[:, :, 2] perturbed_xy_ = np.dstack((x, y)) # perturbed_xy_round_int = np.around(cv2.bilateralFilter(perturbed_xy_round_int, 9, 75, 75)) # perturbed_xy_round_int = np.around(cv2.blur(perturbed_xy_, (17, 17))) # perturbed_xy_round_int = cv2.blur(perturbed_xy_round_int, (17, 17)) # perturbed_xy_round_int = cv2.GaussianBlur(perturbed_xy_round_int, (7, 7), 0) perturbed_xy_ = perturbed_xy_-np.min(perturbed_xy_.T.reshape(2, -1), 1) # perturbed_xy_round_int = np.around(perturbed_xy_round_int-np.min(perturbed_xy_round_int.T.reshape(2, -1), 1)).astype(np.int16) self.perturbed_xy_ += perturbed_xy_ '''perspective end''' '''to img''' flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape( self.new_shape[0] * self.new_shape[1], 2) # self.perturbed_xy_ = cv2.blur(self.perturbed_xy_, (7, 7)) self.perturbed_xy_ = cv2.GaussianBlur(self.perturbed_xy_, (7, 7), 0) '''get fiducial points''' fiducial_points_coordinate = self.perturbed_xy_[im_x, im_y] vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position) wts_sum = np.abs(wts).sum(-1) # flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts) wts = wts[wts_sum <= 1, :] vtx = vtx[wts_sum <= 1, :] synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts) synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts) foreORbackground_label = np.zeros(self.new_shape) foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts) foreORbackground_label[foreORbackground_label < 0.99] = 0 foreORbackground_label[foreORbackground_label >= 0.99] = 1 self.synthesis_perturbed_img = synthesis_perturbed_img self.synthesis_perturbed_label = synthesis_perturbed_label self.foreORbackground_label = foreORbackground_label '''draw fiducial points stepSize = 0 fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_img.copy() for l in fiducial_points_coordinate.astype(np.int64).reshape(-1,2): cv2.circle(fiducial_points_synthesis_perturbed_img, (l[1] + 
math.ceil(stepSize / 2), l[0] + math.ceil(stepSize / 2)), 5, (0, 0, 255), -1)
        cv2.imwrite('/lustre/home/gwxie/program/project/unwarp/unwarp_perturbed/TPS/img/cv_TPS_large.jpg', fiducial_points_synthesis_perturbed_img)
        '''
        '''clip'''
        perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = -1, -1, self.new_shape[0], self.new_shape[1]
        for x in range(self.new_shape[0] // 2, perturbed_x_max):
            if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and perturbed_x_max - 1 > x:
                perturbed_x_max = x
                break
        for x in range(self.new_shape[0] // 2, perturbed_x_min, -1):
            if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and x > 0:
                perturbed_x_min = x
                break
        for y in range(self.new_shape[1] // 2, perturbed_y_max):
            if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and perturbed_y_max - 1 > y:
                perturbed_y_max = y
                break
        for y in range(self.new_shape[1] // 2, perturbed_y_min, -1):
            if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and y > 0:
                perturbed_y_min = y
                break
        # the searches above leave the initial values (-1 and new_shape) untouched
        # when no blank border row/column is found, which is what signals failure
        if perturbed_x_min == -1 or perturbed_x_max == self.new_shape[0] or perturbed_y_min == -1 or perturbed_y_max == self.new_shape[1]:
            raise Exception('clip error')
        if perturbed_x_max - perturbed_x_min < im_lr//2 or perturbed_y_max - perturbed_y_min < im_ud//2:
            raise Exception('clip error')
        perfix_ = self.save_suffix+'_'+str(m)+'_'+str(n)
        is_shrink = False
        if perturbed_x_max - perturbed_x_min > save_img_shape[0] or perturbed_y_max - perturbed_y_min > save_img_shape[1]:
            is_shrink = True
            synthesis_perturbed_img = cv2.resize(self.synthesis_perturbed_img[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR)
            synthesis_perturbed_label = cv2.resize(self.synthesis_perturbed_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR)
            foreORbackground_label = cv2.resize(self.foreORbackground_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR)
            foreORbackground_label[foreORbackground_label < 0.99] = 0
            foreORbackground_label[foreORbackground_label >= 0.99] = 1
            '''shrink fiducial points'''
            center_x_l, center_y_l = perturbed_x_min + (perturbed_x_max - perturbed_x_min) // 2, perturbed_y_min + (perturbed_y_max - perturbed_y_min) // 2
            fiducial_points_coordinate_copy = fiducial_points_coordinate.copy()
            shrink_x = im_lr/(perturbed_x_max - perturbed_x_min)
            shrink_y = im_ud/(perturbed_y_max - perturbed_y_min)
            fiducial_points_coordinate *= [shrink_x, shrink_y]
            center_x_l *= shrink_x
            center_y_l *= shrink_y
            # fiducial_points_coordinate[1:, 1:] *= [shrink_x, shrink_y]
            # fiducial_points_coordinate[1:, :1, 0] *= shrink_x
            # fiducial_points_coordinate[:1, 1:, 1] *= shrink_y
            # perturbed_x_min_copy, perturbed_y_min_copy, perturbed_x_max_copy, perturbed_y_max_copy = perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max
            perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = self.adjust_position_v2(0, 0, im_lr, im_ud, self.new_shape)
            self.synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 256)
            self.synthesis_perturbed_label = np.zeros_like(self.synthesis_perturbed_label)
            self.foreORbackground_label = np.zeros_like(self.foreORbackground_label)
            self.synthesis_perturbed_img[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_img
self.synthesis_perturbed_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_label self.foreORbackground_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max] = foreORbackground_label center_x, center_y = perturbed_x_min + (perturbed_x_max - perturbed_x_min) // 2, perturbed_y_min + (perturbed_y_max - perturbed_y_min) // 2 if is_shrink: fiducial_points_coordinate += [center_x-center_x_l, center_y-center_y_l] '''draw fiducial points stepSize = 0 fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_img.copy() for l in fiducial_points_coordinate.astype(np.int64).reshape(-1, 2): cv2.circle(fiducial_points_synthesis_perturbed_img, (l[1] + math.ceil(stepSize / 2), l[0] + math.ceil(stepSize / 2)), 5, (0, 0, 255), -1) cv2.imwrite('/lustre/home/gwxie/program/project/unwarp/unwarp_perturbed/TPS/img/cv_TPS_small.jpg',fiducial_points_synthesis_perturbed_img) ''' self.new_shape = save_img_shape self.synthesis_perturbed_img = self.synthesis_perturbed_img[ center_x - self.new_shape[0] // 2:center_x + self.new_shape[0] // 2, center_y - self.new_shape[1] // 2:center_y + self.new_shape[1] // 2, :].copy() self.synthesis_perturbed_label = self.synthesis_perturbed_label[ center_x - self.new_shape[0] // 2:center_x + self.new_shape[0] // 2, center_y - self.new_shape[1] // 2:center_y + self.new_shape[1] // 2, :].copy() self.foreORbackground_label = self.foreORbackground_label[ center_x - self.new_shape[0] // 2:center_x + self.new_shape[0] // 2, center_y - self.new_shape[1] // 2:center_y + self.new_shape[1] // 2].copy() perturbed_x_ = max(self.new_shape[0] - (perturbed_x_max - perturbed_x_min), 0) perturbed_x_min = perturbed_x_ // 2 perturbed_x_max = self.new_shape[0] - perturbed_x_ // 2 if perturbed_x_%2 == 0 else self.new_shape[0] - (perturbed_x_ // 2 + 1) perturbed_y_ = max(self.new_shape[1] - (perturbed_y_max - perturbed_y_min), 0) perturbed_y_min = perturbed_y_ // 2 perturbed_y_max = self.new_shape[1] - perturbed_y_ // 2 if perturbed_y_%2 == 0 else self.new_shape[1] - (perturbed_y_ // 2 + 1) '''clip perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = -1, -1, self.new_shape[0], self.new_shape[1] for x in range(self.new_shape[0] // 2, perturbed_x_max): if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and perturbed_x_max - 1 > x: perturbed_x_max = x break for x in range(self.new_shape[0] // 2, perturbed_x_min, -1): if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and x > 0: perturbed_x_min = x break for y in range(self.new_shape[1] // 2, perturbed_y_max): if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and perturbed_y_max - 1 > y: perturbed_y_max = y break for y in range(self.new_shape[1] // 2, perturbed_y_min, -1): if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and y > 0: perturbed_y_min = y break center_x, center_y = perturbed_x_min+(perturbed_x_max - perturbed_x_min)//2, perturbed_y_min+(perturbed_y_max - perturbed_y_min)//2 perfix_ = self.save_suffix+'_'+str(m)+'_'+str(n) self.new_shape = save_img_shape perturbed_x_ = max(self.new_shape[0] - (perturbed_x_max - perturbed_x_min), 0) perturbed_x_min = perturbed_x_ // 2 perturbed_x_max = self.new_shape[0] - perturbed_x_ // 2 if perturbed_x_%2 == 0 else self.new_shape[0] - (perturbed_x_ // 2 + 1) perturbed_y_ = max(self.new_shape[1] - (perturbed_y_max - perturbed_y_min), 0) perturbed_y_min = perturbed_y_ // 2 perturbed_y_max = self.new_shape[1] - perturbed_y_ // 2 if 
perturbed_y_%2 == 0 else self.new_shape[1] - (perturbed_y_ // 2 + 1) self.synthesis_perturbed_img = self.synthesis_perturbed_img[center_x-self.new_shape[0]//2:center_x+self.new_shape[0]//2, center_y-self.new_shape[1]//2:center_y+self.new_shape[1]//2, :].copy() self.synthesis_perturbed_label = self.synthesis_perturbed_label[center_x-self.new_shape[0]//2:center_x+self.new_shape[0]//2, center_y-self.new_shape[1]//2:center_y+self.new_shape[1]//2, :].copy() self.foreORbackground_label = self.foreORbackground_label[center_x-self.new_shape[0]//2:center_x+self.new_shape[0]//2, center_y-self.new_shape[1]//2:center_y+self.new_shape[1]//2].copy() ''' '''save''' pixel_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(self.new_shape[0], self.new_shape[1], 2) if relativeShift_position == 'relativeShift_v2': self.synthesis_perturbed_label -= pixel_position fiducial_points_coordinate -= [center_x - self.new_shape[0] // 2, center_y - self.new_shape[1] // 2] self.synthesis_perturbed_label[:, :, 0] *= self.foreORbackground_label self.synthesis_perturbed_label[:, :, 1] *= self.foreORbackground_label self.synthesis_perturbed_img[:, :, 0] *= self.foreORbackground_label self.synthesis_perturbed_img[:, :, 1] *= self.foreORbackground_label self.synthesis_perturbed_img[:, :, 2] *= self.foreORbackground_label ''' synthesis_perturbed_img_filter = self.synthesis_perturbed_img.copy() synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (3, 3), 0) # if self.is_perform(0.9, 0.1) or repeat_time > 5: # # if self.is_perform(0.1, 0.9) and repeat_time > 9: # # synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (7, 7), 0) # # else: # synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (5, 5), 0) # else: # synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (3, 3), 0) self.synthesis_perturbed_img[self.foreORbackground_label == 1] = synthesis_perturbed_img_filter[self.foreORbackground_label == 1] ''' ''' perturbed_bg_img = perturbed_bg_img.astype(np.float32) perturbed_bg_img[:, :, 0] *= 1 - self.foreORbackground_label perturbed_bg_img[:, :, 1] *= 1 - self.foreORbackground_label perturbed_bg_img[:, :, 2] *= 1 - self.foreORbackground_label self.synthesis_perturbed_img += perturbed_bg_img HSV perturbed_bg_img = perturbed_bg_img.astype(np.float32) if self.is_perform(0.1, 0.9): if self.is_perform(0.2, 0.8): synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy() synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_RGB2HSV) H_, S_, V_ = (random.random()-0.2)*20, (random.random()-0.2)/8, (random.random()-0.2)*20 synthesis_perturbed_img_clip_HSV[:, :, 0], synthesis_perturbed_img_clip_HSV[:, :, 1], synthesis_perturbed_img_clip_HSV[:, :, 2] = synthesis_perturbed_img_clip_HSV[:, :, 0]-H_, synthesis_perturbed_img_clip_HSV[:, :, 1]-S_, synthesis_perturbed_img_clip_HSV[:, :, 2]-V_ synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_HSV2RGB) perturbed_bg_img[:, :, 0] *= 1-self.foreORbackground_label perturbed_bg_img[:, :, 1] *= 1-self.foreORbackground_label perturbed_bg_img[:, :, 2] *= 1-self.foreORbackground_label synthesis_perturbed_img_clip_HSV += perturbed_bg_img self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV else: perturbed_bg_img_HSV = perturbed_bg_img perturbed_bg_img_HSV = cv2.cvtColor(perturbed_bg_img_HSV, cv2.COLOR_RGB2HSV) H_, S_, V_ = (random.random()-0.5)*20, 
(random.random()-0.5)/8, (random.random()-0.2)*20 perturbed_bg_img_HSV[:, :, 0], perturbed_bg_img_HSV[:, :, 1], perturbed_bg_img_HSV[:, :, 2] = perturbed_bg_img_HSV[:, :, 0]-H_, perturbed_bg_img_HSV[:, :, 1]-S_, perturbed_bg_img_HSV[:, :, 2]-V_ perturbed_bg_img_HSV = cv2.cvtColor(perturbed_bg_img_HSV, cv2.COLOR_HSV2RGB) perturbed_bg_img_HSV[:, :, 0] *= 1-self.foreORbackground_label perturbed_bg_img_HSV[:, :, 1] *= 1-self.foreORbackground_label perturbed_bg_img_HSV[:, :, 2] *= 1-self.foreORbackground_label self.synthesis_perturbed_img += perturbed_bg_img_HSV # self.synthesis_perturbed_img[np.sum(self.synthesis_perturbed_img, 2) == 771] = perturbed_bg_img_HSV[np.sum(self.synthesis_perturbed_img, 2) == 771] else: synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy() perturbed_bg_img[:, :, 0] *= 1 - self.foreORbackground_label perturbed_bg_img[:, :, 1] *= 1 - self.foreORbackground_label perturbed_bg_img[:, :, 2] *= 1 - self.foreORbackground_label synthesis_perturbed_img_clip_HSV += perturbed_bg_img # synthesis_perturbed_img_clip_HSV[np.sum(self.synthesis_perturbed_img, 2) == 771] = perturbed_bg_img[np.sum(self.synthesis_perturbed_img, 2) == 771] synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_RGB2HSV) H_, S_, V_ = (random.random()-0.5)*20, (random.random()-0.5)/10, (random.random()-0.4)*20 synthesis_perturbed_img_clip_HSV[:, :, 0], synthesis_perturbed_img_clip_HSV[:, :, 1], synthesis_perturbed_img_clip_HSV[:, :, 2] = synthesis_perturbed_img_clip_HSV[:, :, 0]-H_, synthesis_perturbed_img_clip_HSV[:, :, 1]-S_, synthesis_perturbed_img_clip_HSV[:, :, 2]-V_ synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_HSV2RGB) self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV ''' '''HSV_v2''' perturbed_bg_img = perturbed_bg_img.astype(np.float32) # if self.is_perform(1, 0): # if self.is_perform(1, 0): if self.is_perform(0.1, 0.9): if self.is_perform(0.2, 0.8): synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy() synthesis_perturbed_img_clip_HSV = self.HSV_v1(synthesis_perturbed_img_clip_HSV) perturbed_bg_img[:, :, 0] *= 1-self.foreORbackground_label perturbed_bg_img[:, :, 1] *= 1-self.foreORbackground_label perturbed_bg_img[:, :, 2] *= 1-self.foreORbackground_label synthesis_perturbed_img_clip_HSV += perturbed_bg_img self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV else: perturbed_bg_img_HSV = perturbed_bg_img perturbed_bg_img_HSV = self.HSV_v1(perturbed_bg_img_HSV) perturbed_bg_img_HSV[:, :, 0] *= 1-self.foreORbackground_label perturbed_bg_img_HSV[:, :, 1] *= 1-self.foreORbackground_label perturbed_bg_img_HSV[:, :, 2] *= 1-self.foreORbackground_label self.synthesis_perturbed_img += perturbed_bg_img_HSV # self.synthesis_perturbed_img[np.sum(self.synthesis_perturbed_img, 2) == 771] = perturbed_bg_img_HSV[np.sum(self.synthesis_perturbed_img, 2) == 771] else: synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy() perturbed_bg_img[:, :, 0] *= 1 - self.foreORbackground_label perturbed_bg_img[:, :, 1] *= 1 - self.foreORbackground_label perturbed_bg_img[:, :, 2] *= 1 - self.foreORbackground_label synthesis_perturbed_img_clip_HSV += perturbed_bg_img synthesis_perturbed_img_clip_HSV = self.HSV_v1(synthesis_perturbed_img_clip_HSV) self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV '''''' # cv2.imwrite(self.save_path+'clip/'+perfix_+'_'+fold_curve+str(perturbed_time)+'-'+str(repeat_time)+'.png', synthesis_perturbed_img_clip) 
self.synthesis_perturbed_img[self.synthesis_perturbed_img < 0] = 0 self.synthesis_perturbed_img[self.synthesis_perturbed_img > 255] = 255 self.synthesis_perturbed_img = np.around(self.synthesis_perturbed_img).astype(np.uint8) label = np.zeros_like(self.synthesis_perturbed_img, dtype=np.float32) label[:, :, :2] = self.synthesis_perturbed_label label[:, :, 2] = self.foreORbackground_label # grey = np.around(self.synthesis_perturbed_img[:, :, 0] * 0.2989 + self.synthesis_perturbed_img[:, :, 1] * 0.5870 + self.synthesis_perturbed_img[:, :, 0] * 0.1140).astype(np.int16) # synthesis_perturbed_grey = np.concatenate((grey.reshape(self.new_shape[0], self.new_shape[1], 1), label), axis=2) synthesis_perturbed_color = np.concatenate((self.synthesis_perturbed_img, label), axis=2) self.synthesis_perturbed_color = np.zeros_like(synthesis_perturbed_color, dtype=np.float32) # self.synthesis_perturbed_grey = np.zeros_like(synthesis_perturbed_grey, dtype=np.float32) reduce_value_x = int(round(min((random.random() / 2) * (self.new_shape[0] - (perturbed_x_max - perturbed_x_min)), min(reduce_value, reduce_value_v2)))) reduce_value_y = int(round(min((random.random() / 2) * (self.new_shape[1] - (perturbed_y_max - perturbed_y_min)), min(reduce_value, reduce_value_v2)))) perturbed_x_min = max(perturbed_x_min - reduce_value_x, 0) perturbed_x_max = min(perturbed_x_max + reduce_value_x, self.new_shape[0]) perturbed_y_min = max(perturbed_y_min - reduce_value_y, 0) perturbed_y_max = min(perturbed_y_max + reduce_value_y, self.new_shape[1]) if im_lr >= im_ud: self.synthesis_perturbed_color[:, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_color[:, perturbed_y_min:perturbed_y_max, :] # self.synthesis_perturbed_grey[:, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_grey[:, perturbed_y_min:perturbed_y_max, :] else: self.synthesis_perturbed_color[perturbed_x_min:perturbed_x_max, :, :] = synthesis_perturbed_color[perturbed_x_min:perturbed_x_max, :, :] # self.synthesis_perturbed_grey[perturbed_x_min:perturbed_x_max, :, :] = synthesis_perturbed_grey[perturbed_x_min:perturbed_x_max, :, :] '''blur''' if self.is_perform(0.1, 0.9): synthesis_perturbed_img_filter = self.synthesis_perturbed_color[:, :, :3].copy() if self.is_perform(0.1, 0.9): synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (5, 5), 0) else: synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (3, 3), 0) if self.is_perform(0.5, 0.5): self.synthesis_perturbed_color[:, :, :3][self.synthesis_perturbed_color[:, :, 5] == 1] = synthesis_perturbed_img_filter[self.synthesis_perturbed_color[:, :, 5] == 1] else: self.synthesis_perturbed_color[:, :, :3] = synthesis_perturbed_img_filter fiducial_points_coordinate = fiducial_points_coordinate[:, :, ::-1] '''draw fiducial points''' stepSize = 0 fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_color[:, :, :3].copy() for l in fiducial_points_coordinate.astype(np.int64).reshape(-1, 2): cv2.circle(fiducial_points_synthesis_perturbed_img, (l[0] + math.ceil(stepSize / 2), l[1] + math.ceil(stepSize / 2)), 2, (0, 0, 255), -1) cv2.imwrite(self.save_path + 'fiducial_points/' + perfix_ + '_' + fold_curve + '.png', fiducial_points_synthesis_perturbed_img) cv2.imwrite(self.save_path + 'png/' + perfix_ + '_' + fold_curve + '.png', self.synthesis_perturbed_color[:, :, :3]) '''forward-begin''' self.forward_mapping = np.full((save_img_shape[0], save_img_shape[1], 2), 0, dtype=np.float32) forward_mapping = np.full((save_img_shape[0], 
save_img_shape[1], 2), 0, dtype=np.float32)
        forward_position = (self.synthesis_perturbed_color[:, :, 3:5] + pixel_position)[self.synthesis_perturbed_color[:, :, 5] != 0, :]
        flat_position = np.argwhere(np.zeros(save_img_shape, dtype=np.uint32) == 0)
        vtx, wts = self.interp_weights(forward_position, flat_position)
        wts_sum = np.abs(wts).sum(-1)
        wts = wts[wts_sum <= 1, :]
        vtx = vtx[wts_sum <= 1, :]
        flat_position_forward = flat_position.reshape(save_img_shape[0], save_img_shape[1], 2)[self.synthesis_perturbed_color[:, :, 5] != 0, :]
        forward_mapping.reshape(save_img_shape[0] * save_img_shape[1], 2)[wts_sum <= 1, :] = self.interpolate(flat_position_forward, vtx, wts)
        forward_mapping = forward_mapping.reshape(save_img_shape[0], save_img_shape[1], 2)
        mapping_x_min_, mapping_y_min_, mapping_x_max_, mapping_y_max_ = self.adjust_position_v2(0, 0, im_lr, im_ud, self.new_shape)
        shreshold_zoom_out = 2
        mapping_x_min = mapping_x_min_ + shreshold_zoom_out
        mapping_y_min = mapping_y_min_ + shreshold_zoom_out
        mapping_x_max = mapping_x_max_ - shreshold_zoom_out
        mapping_y_max = mapping_y_max_ - shreshold_zoom_out
        self.forward_mapping[mapping_x_min:mapping_x_max, mapping_y_min:mapping_y_max] = forward_mapping[mapping_x_min:mapping_x_max, mapping_y_min:mapping_y_max]
        self.scan_img = np.full((save_img_shape[0], save_img_shape[1], 3), 0, dtype=np.float32)
        self.scan_img[mapping_x_min_:mapping_x_max_, mapping_y_min_:mapping_y_max_] = self.origin_img
        self.origin_img = self.scan_img
        # flat_img = np.full((save_img_shape[0], save_img_shape[1], 3), 0, dtype=np.float32)
        # cv2.remap(self.synthesis_perturbed_color[:, :, :3], self.forward_mapping[:, :, 1], self.forward_mapping[:, :, 0], cv2.INTER_LINEAR, flat_img)
        # cv2.imwrite(self.save_path + 'outputs/1.jpg', flat_img)
        '''forward-end'''
        synthesis_perturbed_data = {
            'fiducial_points': fiducial_points_coordinate,
            'segment': np.array((segment_x, segment_y))
        }
        with open(self.save_path+'color/'+perfix_+'_'+fold_curve+'.gw', 'wb') as f:
            pickle_perturbed_data = pickle.dumps(synthesis_perturbed_data)
            f.write(pickle_perturbed_data)
        # with open(self.save_path+'grey/'+perfix_+'_'+fold_curve+'.gw', 'wb') as f:
        # 	pickle_perturbed_data = pickle.dumps(self.synthesis_perturbed_grey)
        # 	f.write(pickle_perturbed_data)
        # cv2.imwrite(self.save_path+'grey_im/'+perfix_+'_'+fold_curve+'.png', self.synthesis_perturbed_color[:, :, :1])
        # cv2.imwrite(self.save_path + 'scan/' + self.save_suffix + '_' + str(m) + '.png', self.origin_img)
        train_t = time.time() - begin_train
        mm, ss = divmod(train_t, 60)
        hh, mm = divmod(mm, 60)
        print(str(m)+'_'+str(n)+'_'+fold_curve+' '+str(repeat_time)+" Time : %02d:%02d:%02d\n" % (hh, mm, ss))

def multiThread(m, n, img_path_, bg_path_, save_path, save_suffix):
    saveFold = perturbed(img_path_, bg_path_, save_path, save_suffix)
    saveCurve = perturbed(img_path_, bg_path_, save_path, save_suffix)
    repeat_time = min(max(round(np.random.normal(10, 3)), 5), 16)
    # fiducial_points (16, the save_img default) must be passed explicitly here;
    # otherwise the relativeShift string binds to the fiducial_points parameter
    fold = threading.Thread(target=saveFold.save_img, args=(m, n, 'fold', repeat_time, 16, 'relativeShift_v2'), name='fold')
    curve = threading.Thread(target=saveCurve.save_img, args=(m, n, 'curve', repeat_time, 16, 'relativeShift_v2'), name='curve')
    fold.start()
    curve.start()
    curve.join()
    fold.join()

def xgw(args):
    path = args.path
    bg_path = args.bg_path
    if args.output_path is None:
        save_path = '/lustre/home/gwxie/data/unwarp_new/train/general1024/general1024_v1/'
    else:
        save_path = args.output_path
    # if not os.path.exists(save_path + 'grey/'):
    # 	os.makedirs(save_path + 'grey/')
    if not os.path.exists(save_path + 'color/'):
        os.makedirs(save_path + 'color/')
    if not os.path.exists(save_path + 'fiducial_points/'):
        os.makedirs(save_path + 'fiducial_points/')
    if not os.path.exists(save_path + 'png/'):
        os.makedirs(save_path + 'png/')
    if not os.path.exists(save_path + 'scan/'):
        os.makedirs(save_path + 'scan/')
    if not os.path.exists(save_path + 'outputs/'):
        os.makedirs(save_path + 'outputs/')
    save_suffix = str.split(args.path, '/')[-2]

    all_img_path = getDatasets(path)
    all_bgImg_path = getDatasets(bg_path)
    global begin_train
    begin_train = time.time()

    fiducial_points = 61  # 31
    process_pool = Pool(2)
    for m, img_path in enumerate(all_img_path):
        for n in range(args.sys_num):
            img_path_ = path+img_path
            bg_path_ = bg_path+random.choice(all_bgImg_path)+'/'
            for m_n in range(10):
                try:
                    saveFold = perturbed(img_path_, bg_path_, save_path, save_suffix)
                    saveCurve = perturbed(img_path_, bg_path_, save_path, save_suffix)
                    repeat_time = min(max(round(np.random.normal(12, 4)), 1), 18)
                    # repeat_time = min(max(round(np.random.normal(8, 4)), 1), 12)  # random.randint(1, 2)  # min(max(round(np.random.normal(8, 4)), 1), 12)
                    process_pool.apply_async(func=saveFold.save_img, args=(m, n, 'fold', repeat_time, fiducial_points, 'relativeShift_v2'))
                    repeat_time = min(max(round(np.random.normal(8, 4)), 1), 13)
                    # repeat_time = min(max(round(np.random.normal(6, 4)), 1), 10)
                    process_pool.apply_async(func=saveCurve.save_img, args=(m, n, 'curve', repeat_time, fiducial_points, 'relativeShift_v2'))
                except BaseException as err:
                    print(err)
                    continue
                break
    # print('end')
    process_pool.close()
    process_pool.join()

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Hyperparams')
    parser.add_argument('--path', default='./scan/new/', type=str, help='the path of origin img.')
    parser.add_argument('--bg_path', default='./background/', type=str, help='the path of bg img.')
    parser.add_argument('--output_path', default='./output/', type=str, help='the path of output imgs.')
    # parser.set_defaults(output_path='test')
    parser.add_argument('--count_from', '-p', default=0, type=int, metavar='N', help='index to start counting output from (default: %(default)s)')
    parser.add_argument('--repeat_T', default=0, type=int)
    parser.add_argument('--sys_num', default=6, type=int)
    args = parser.parse_args()
    xgw(args)
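# --- Illustrative sketch (not part of the original script) ---
# save_img() above displaces every pixel by a random vector perturbed_v,
# weighted by a falloff omega computed from each pixel's distance d to a
# randomly chosen fold/curve line:
#   fold:  omega = alpha / (d + alpha)    (sharp crease at d == 0)
#   curve: omega = 1 - d ** alpha         (smooth page bend)
# A minimal, self-contained demo of that weighting, with made-up numbers:
import numpy as np

d = np.linspace(0.0, 1.0, 5)            # normalized distance to the fold line
alpha = 1.0                             # strength, sampled randomly above
omega_fold = alpha / (d + alpha)        # 1.0 at the crease, decays outwards
omega_curve = 1.0 - d ** alpha          # gentler, reaches 0 at d == 1
perturbed_v = np.array([30.0, -12.0])   # random displacement vector in pixels
offsets = omega_fold[:, None] * perturbed_v   # per-pixel displacement, (5, 2)
print(omega_fold, omega_curve)
print(offsets)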
53.692132
380
0.720417
43,129
0.915923
0
0
0
0
0
0
17,168
0.364594
6a5a90584312df812f9d84d198fd00ed22ebcb67
3,042
py
Python
tweet_evaluator.py
tw-ddis/Gnip-Tweet-Evaluation
c5c847698bd6deb891870e5cf2514dfe78caa1c2
[ "MIT" ]
3
2019-11-14T11:46:27.000Z
2021-01-16T06:04:46.000Z
tweet_evaluator.py
pen-corsica/Gnip-Tweet-Evaluation
c5c847698bd6deb891870e5cf2514dfe78caa1c2
[ "MIT" ]
1
2017-09-19T22:59:03.000Z
2017-09-19T23:06:12.000Z
tweet_evaluator.py
pen-corsica/Gnip-Tweet-Evaluation
c5c847698bd6deb891870e5cf2514dfe78caa1c2
[ "MIT" ]
4
2016-06-13T16:34:32.000Z
2017-08-01T20:20:56.000Z
#!/usr/bin/env python

"""
Perform audience and/or conversation analysis on a set of Tweets.
"""

import argparse
import logging
import sys
import datetime
import os

from gnip_tweet_evaluation import analysis, output

logger = logging.getLogger('analysis')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())

if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--identifier", dest="unique_identifier", default='0', type=str,
        help="a unique name to identify the conversation/audience; default is '%(default)s'")
    parser.add_argument("-c", "--do-conversation-analysis", dest="do_conversation_analysis", action="store_true", default=False,
        help="do conversation analysis on Tweets")
    parser.add_argument("-a", "--do-audience-analysis", dest="do_audience_analysis", action="store_true", default=False,
        help="do audience analysis on users")
    parser.add_argument("-i", "--input-file-name", dest="input_file_name", default=None,
        help="file containing Tweet data; take input from stdin if not present")
    parser.add_argument('-o', '--output-dir', dest='output_directory', default=os.environ['HOME'] + '/tweet_evaluation/',
        help='directory for output files; default is %(default)s')
    parser.add_argument('-b', '--baseline-input-file', dest='baseline_input_name', default=None,
        help='Tweets against which to run a relative analysis')
    args = parser.parse_args()

    # get the time right now, to use in output naming
    time_now = datetime.datetime.now()
    output_directory = '{0}/{1:04d}/{2:02d}/{3:02d}/'.format(args.output_directory.rstrip('/')
            , time_now.year
            , time_now.month
            , time_now.day
            )

    # get the empty results object, which defines the measurements to be run
    results = analysis.setup_analysis(do_conversation=args.do_conversation_analysis, do_audience=args.do_audience_analysis)
    baseline_results = None
    if args.baseline_input_name is not None:
        baseline_results = analysis.setup_analysis(do_conversation=args.do_conversation_analysis, do_audience=args.do_audience_analysis)

    # manage input sources, file opening, and deserialization
    if args.input_file_name is not None:
        tweet_generator = analysis.deserialize_tweets(open(args.input_file_name))
    else:
        tweet_generator = analysis.deserialize_tweets(sys.stdin)

    # run analysis
    analysis.analyze_tweets(tweet_generator, results)

    # run baseline analysis, if requested
    if baseline_results is not None:
        baseline_tweet_generator = analysis.deserialize_tweets(open(args.baseline_input_name))
        analysis.analyze_tweets(baseline_tweet_generator, baseline_results)
        results = analysis.compare_results(results, baseline_results)

    # dump the output
    output.dump_results(results, output_directory, args.unique_identifier)
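# --- Usage sketch (not part of the original file; file and directory names
# are illustrative) ---
# The flags mirror the argparse setup above: -c/-a choose the analyses, -i
# reads Tweets from a file instead of stdin, -o sets the output root, and -b
# adds a baseline corpus that is compared via analysis.compare_results().
import subprocess

subprocess.run(
    ["python", "tweet_evaluator.py",
     "-c", "-a",
     "-n", "demo_run",
     "-i", "tweets.json",
     "-o", "./results/"],
    check=True,
)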
42.25
140
0.72288
0
0
0
0
0
0
0
0
1,027
0.337607
6a5b61c287644aa1eac5b1af996dc433d21c0841
2,621
py
Python
app.py
admiral-aokiji/whatsapp-bot
5a0b0d4afddc679cda3670771934cb472629587a
[ "MIT" ]
null
null
null
app.py
admiral-aokiji/whatsapp-bot
5a0b0d4afddc679cda3670771934cb472629587a
[ "MIT" ]
null
null
null
app.py
admiral-aokiji/whatsapp-bot
5a0b0d4afddc679cda3670771934cb472629587a
[ "MIT" ]
null
null
null
from flask import Flask, request
import os
from twilio.twiml.messaging_response import MessagingResponse
from selenium import webdriver

chrome_options = webdriver.ChromeOptions()
chrome_options.binary_location = os.environ.get("GOOGLE_CHROME_BIN")
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--no-sandbox")
driver = webdriver.Chrome(executable_path=os.environ.get("CHROMEDRIVER_PATH"), chrome_options=chrome_options)

app = Flask(__name__)
import utils  # imported after `driver` is created, since utils relies on the shared webdriver

@app.route("/")
def hello():
    return "Hello World!"

@app.route('/bot', methods=['POST'])
def bot():
    incoming_msg = request.values.get('Body', '')
    print(incoming_msg)
    resp = MessagingResponse()
    msg = resp.message()
    responded = False
    if incoming_msg in ['Hi', 'Hey', 'Menu']:
        text = f'Hello\n For any suggestions or requests 👇 \n 📞 : 9537701631 \n ✉ : rohit.saxena.met17@itbhu.ac.in \n\n Please enter one of the following options 👇 \n *TPC*. TPC portal willingness \n *B*. __________. '
        msg.body(text)
        responded = True
    elif 'TPC' in incoming_msg:
        if incoming_msg == 'TPC':
            text = 'Menu of options for TPC command'
            msg.body(text)
            responded = True
        utils.portalLogin(os.environ.get('TPC_EMAIL'), os.environ.get('TPC_PWD'))
        if incoming_msg == 'TPC -willingness -short' or incoming_msg == 'TPC -w -s':
            utils.getWillingness()
            utils.shortenWillingness()
        elif incoming_msg == 'TPC -willingness -details' or incoming_msg == 'TPC -w -d':
            utils.getWillingness()
        elif incoming_msg[:15] == 'TPC -experience' or (incoming_msg[:8] == 'TPC - e ' and len(incoming_msg) > 8):
            # the prefix slice must cover all 8 characters of 'TPC - e ',
            # otherwise the comparison can never be true
            companyName = incoming_msg.split(' ')[-1]  # assumes a single-word company name
            print(companyName)
            utils.getInterviewExperience(companyName)
        else:
            # TODO: send a custom error msg for unrecognized TPC commands
            pass
    else:
        # fall back for anything that matched no command
        if not responded:
            msg.body('Please enter valid commands')
    return str(resp)

if __name__ == "__main__":
    app.run(host="localhost", port=5000, debug=True)
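# --- Testing sketch (not part of the original file) ---
# /bot is a standard Twilio webhook: Twilio POSTs form data with the message
# text in "Body" and expects TwiML XML back. Assuming GOOGLE_CHROME_BIN and
# CHROMEDRIVER_PATH are set (the module-level webdriver starts on import),
# the route can be exercised locally with Flask's built-in test client:
from app import app  # this module, saved as app.py

client = app.test_client()
resp = client.post("/bot", data={"Body": "Hi"})
print(resp.status_code)    # 200
print(resp.data.decode())  # <Response><Message>...menu text...</Message></Response>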
39.119403
217
0.645174
0
0
0
0
2,011
0.764058
0
0
733
0.278495
6a5b876bee110f96f947af456cbf93cb78d5e1bc
94
py
Python
nflfastpy/errors.py
hchaozhe/nflfastpy
11e4894d7fee4ff8baac2c08b000a39308b41143
[ "MIT" ]
47
2020-10-24T10:10:51.000Z
2022-03-07T19:48:05.000Z
nflfastpy/errors.py
jbf302/nflfastpy
c1e2365966e0f0f8efeb651be804d84caba57807
[ "MIT" ]
3
2021-05-03T11:58:00.000Z
2021-11-14T16:17:30.000Z
nflfastpy/errors.py
jbf302/nflfastpy
c1e2365966e0f0f8efeb651be804d84caba57807
[ "MIT" ]
7
2020-12-14T15:03:12.000Z
2021-11-17T23:41:37.000Z
""" Custom exceptions for nflfastpy module """ class SeasonNotFoundError(Exception): pass
15.666667
38
0.755319
46
0.489362
0
0
0
0
0
0
46
0.489362
6a5ce615b33cd197b365d6e3673610f15fbcf59b
12,289
py
Python
assignment1/cs231n/classifiers/neural_net.py
zeevikal/CS231n-spring2018
50691a947b877047099e7a1fe99a3fdea4a4fcf8
[ "MIT" ]
null
null
null
assignment1/cs231n/classifiers/neural_net.py
zeevikal/CS231n-spring2018
50691a947b877047099e7a1fe99a3fdea4a4fcf8
[ "MIT" ]
3
2019-12-09T06:04:00.000Z
2019-12-09T06:05:23.000Z
assignment1/cs231n/classifiers/neural_net.py
zeevikal/CS231n-spring2018
50691a947b877047099e7a1fe99a3fdea4a4fcf8
[ "MIT" ]
null
null
null
from __future__ import print_function import numpy as np import matplotlib.pyplot as plt class TwoLayerNet(object): """ A two-layer fully-connected neural network. The net has an input dimension of N, a hidden layer dimension of H, and performs classification over C classes. We train the network with a softmax loss function and L2 regularization on the weight matrices. The network uses a ReLU nonlinearity after the first fully connected layer. In other words, the network has the following architecture: input - fully connected layer - ReLU - fully connected layer - softmax The outputs of the second fully-connected layer are the scores for each class. """ def __init__(self, input_size, hidden_size, output_size, std=1e-4): """ Initialize the model. Weights are initialized to small random values and biases are initialized to zero. Weights and biases are stored in the variable self.params, which is a dictionary with the following keys W1: First layer weights; has shape (D, H) b1: First layer biases; has shape (H,) W2: Second layer weights; has shape (H, C) b2: Second layer biases; has shape (C,) Inputs: - input_size: The dimension D of the input data. - hidden_size: The number of neurons H in the hidden layer. - output_size: The number of classes C. """ self.params = {} self.params['W1'] = std * np.random.randn(input_size, hidden_size) self.params['b1'] = np.zeros(hidden_size) self.params['W2'] = std * np.random.randn(hidden_size, output_size) self.params['b2'] = np.zeros(output_size) def loss(self, X, y=None, reg=0.0): """ Compute the loss and gradients for a two layer fully connected neural network. Inputs: - X: Input data of shape (N, D). Each X[i] is a training sample. - y: Vector of training labels. y[i] is the label for X[i], and each y[i] is an integer in the range 0 <= y[i] < C. This parameter is optional; if it is not passed then we only return scores, and if it is passed then we instead return the loss and gradients. - reg: Regularization strength. Returns: If y is None, return a matrix scores of shape (N, C) where scores[i, c] is the score for class c on input X[i]. If y is not None, instead return a tuple of: - loss: Loss (data loss and regularization loss) for this batch of training samples. - grads: Dictionary mapping parameter names to gradients of those parameters with respect to the loss function; has the same keys as self.params. """ # Unpack variables from the params dictionary W1, b1 = self.params['W1'], self.params['b1'] W2, b2 = self.params['W2'], self.params['b2'] N, D = X.shape # Compute the forward pass scores = None ####################################################################### # TODO: Perform the forward pass, computing the class scores for the # # input. Store the result in the scores variable, which should be an # # array of shape (N, C). # ####################################################################### scores1 = X.dot(W1) + b1 # FC1 X2 = np.maximum(0, scores1) # ReLU FC1 scores = X2.dot(W2) + b2 # FC2 ####################################################################### # END OF YOUR CODE # ####################################################################### # If the targets are not given then jump out, we're done if y is None: return scores scores -= np.max(scores) # Fix Number instability scores_exp = np.exp(scores) probs = scores_exp / np.sum(scores_exp, axis=1, keepdims=True) # Compute the loss loss = None ####################################################################### # TODO: Finish the forward pass, and compute the loss. 
This should                                                           #
        # include both the data loss and L2 regularization for W1 and W2.    #
        # Store the result in the variable loss, which should be a scalar.   #
        # Use the Softmax classifier loss.                                    #
        #######################################################################
        correct_probs = -np.log(probs[np.arange(N), y])
        # L_i = -log(e^correct_score/sum(e^scores)) = -log(correct_probs)
        loss = np.sum(correct_probs)
        loss /= N
        # L2 regularization WRT W1 and W2
        loss += reg * (np.sum(W1 * W1) + np.sum(W2 * W2))
        #######################################################################
        #                              END OF YOUR CODE                       #
        #######################################################################

        # Backward pass: compute gradients
        grads = {}
        #############################################################################
        # TODO: Compute the backward pass, computing the derivatives of the weights #
        # and biases. Store the results in the grads dictionary. For example,       #
        # grads['W1'] should store the gradient on W1, and be a matrix of same size #
        #############################################################################
        # gradient of loss_i WRT scores_k:
        # dL_i/ds_k = probs_k - 1(y_i == k), with 1(.) the indicator function;
        # i.e. the gradient equals the probability for "other" classes and
        # probability - 1 for the target class
        d_scores = probs.copy()
        d_scores[np.arange(N), y] -= 1
        d_scores /= N
        # W2 was multiplied with X2; by the chain rule and the product
        # derivative, the gradient WRT W2 is the downstream derivative times X2
        d_W2 = X2.T.dot(d_scores)
        # b2 was added, so its local derivative is 1; the chain rule leaves
        # just the downstream derivative, in this case d_scores
        d_b2 = np.sum(d_scores, axis=0)
        # W1 is upstream of X2, so we continue this way
        d_X2 = d_scores.dot(W2.T)
        # ReLU derivative is 1 for inputs > 0, else 0
        d_scores1 = d_X2 * (scores1 > 0)
        d_W1 = X.T.dot(d_scores1)
        # b1 gradient
        d_b1 = d_scores1.sum(axis=0)
        # regularization gradient (from the reg*W^2 terms)
        d_W2 += reg * 2 * W2
        d_W1 += reg * 2 * W1
        grads['W1'] = d_W1
        grads['b1'] = d_b1
        grads['W2'] = d_W2
        grads['b2'] = d_b2
        #######################################################################
        #                              END OF YOUR CODE                       #
        #######################################################################

        return loss, grads

    def train(self, X, y, X_val, y_val,
              learning_rate=1e-3, learning_rate_decay=0.95,
              reg=5e-6, num_iters=100,
              batch_size=200, verbose=False):
        """
        Train this neural network using stochastic gradient descent.

        Inputs:
        - X: A numpy array of shape (N, D) giving training data.
        - y: A numpy array of shape (N,) giving training labels; y[i] = c means
          that X[i] has label c, where 0 <= c < C.
        - X_val: A numpy array of shape (N_val, D) giving validation data.
        - y_val: A numpy array of shape (N_val,) giving validation labels.
        - learning_rate: Scalar giving learning rate for optimization.
        - learning_rate_decay: Scalar giving factor used to decay the learning
          rate after each epoch.
        - reg: Scalar giving regularization strength.
        - num_iters: Number of steps to take when optimizing.
        - batch_size: Number of training examples to use per step.
        - verbose: boolean; if true print progress during optimization.
        """
        num_train = X.shape[0]
        iterations_per_epoch = max(num_train / batch_size, 1)

        # Use SGD to optimize the parameters in self.model
        loss_history = []
        train_acc_history = []
        val_acc_history = []

        for it in range(num_iters):
            X_batch = None
            y_batch = None

            ###################################################################
            # TODO: Create a random minibatch of training data and labels,   #
            # storing them in X_batch and y_batch respectively.
# ################################################################### # random indexes to sample training data/labels sample_idx = np.random.choice(num_train, batch_size, replace=True) X_batch = X[sample_idx] y_batch = y[sample_idx] ################################################################### # END OF YOUR CODE # ################################################################### # Compute loss and gradients using the current minibatch loss, grads = self.loss(X_batch, y=y_batch, reg=reg) loss_history.append(loss) ################################################################### # TODO: Use the gradients in the grads dictionary to update the # # parameters of the network (stored in the dictionary self.params)# # using stochastic gradient descent. You'll need to use the # # gradients stored in the grads dictionary defined above. # ################################################################### # For each weight in network parameters, update it with the # corresponding calculated gradient for key in self.params: self.params[key] -= learning_rate * grads[key] ################################################################### # END OF YOUR CODE # ################################################################### if verbose and it % 100 == 0: print('iteration %d / %d: loss %f' % (it, num_iters, loss)) # Every epoch, check train and val accuracy and decay learning rate if it % iterations_per_epoch == 0: # Check accuracy train_acc = (self.predict(X_batch) == y_batch).mean() val_acc = (self.predict(X_val) == y_val).mean() train_acc_history.append(train_acc) val_acc_history.append(val_acc) # Decay learning rate learning_rate *= learning_rate_decay return { 'loss_history': loss_history, 'train_acc_history': train_acc_history, 'val_acc_history': val_acc_history, } def predict(self, X): """ Use the trained weights of this two-layer network to predict labels for data points. For each data point we predict scores for each of the C classes, and assign each data point to the class with the highest score Inputs: - X: A numpy array of shape (N, D) giving N D-dimensional data points to classify. Returns: - y_pred: A numpy array of shape (N,) giving predicted labels for each of the elements of X. For all i, y_pred[i] = c means that X[i] is predicted to have class c, where 0 <= c < C. """ y_pred = None ####################################################################### # TODO: Implement this function; it should be VERY simple! # ####################################################################### y_pred = np.argmax(self.loss(X), axis=1) ####################################################################### # END OF YOUR CODE # ####################################################################### return y_pred
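# --- Usage sketch (not part of the original file) ---
# Minimal end-to-end check of TwoLayerNet on random data; shapes follow the
# docstrings above (N samples, D input dims, H hidden units, C classes).
if __name__ == '__main__':
    np.random.seed(0)
    N, D, H, C = 100, 4, 10, 3
    X = np.random.randn(N, D)
    y = np.random.randint(C, size=N)

    net = TwoLayerNet(input_size=D, hidden_size=H, output_size=C, std=1e-1)
    stats = net.train(X, y, X, y, learning_rate=1e-1, reg=1e-5,
                      num_iters=200, batch_size=50, verbose=False)
    print('final loss:', stats['loss_history'][-1])
    print('train accuracy:', (net.predict(X) == y).mean())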
45.854478
85
0.487509
12,197
0.992514
0
0
0
0
0
0
8,345
0.679063
6a5cfd1895fbfd5a40ac1b9716a706c236f16372
2,309
py
Python
dynamic_setting/tests/test_models.py
koralarts/django-dynamic-settings
8a3c5f44ad71f6d8fb78af9e7a3f5a380dd3d318
[ "MIT" ]
2
2015-02-11T05:07:19.000Z
2015-11-24T17:49:03.000Z
dynamic_setting/tests/test_models.py
koralarts/django-dynamic-settings
8a3c5f44ad71f6d8fb78af9e7a3f5a380dd3d318
[ "MIT" ]
1
2018-03-02T13:26:08.000Z
2018-03-02T13:26:08.000Z
dynamic_setting/tests/test_models.py
koralarts/django-dynamic-settings
8a3c5f44ad71f6d8fb78af9e7a3f5a380dd3d318
[ "MIT" ]
null
null
null
from django.test import TestCase from dynamic_setting.models import Setting class SettingTestCase(TestCase): def _create_setting(self, name, **kwargs): return Setting.objects.create(name=name, **kwargs) def test_create_setting(self): """ Test Creating a new Setting. """ name = 'TEST_SETTING' data = 'Setting Data' setting = self._create_setting(name, data=data) self.assertEqual(setting.name, name) self.assertEqual(setting.__str__(), name) self.assertEqual(setting.data, data) def test_create_setting_no_data(self): """ Test Creating a new setting without Data. """ name = 'TEST_SETTING' data = '-' setting = self._create_setting(name) self.assertEqual(setting.name, name) self.assertEqual(setting.__str__(), name) self.assertEqual(setting.data, data) def test_delete_setting(self): """ Test Deleting a setting object. """ name = 'TEST_SETTING' setting = self._create_setting(name) setting_pk = setting.pk setting.delete() try: Setting.objects.get(pk=setting_pk) except Setting.DoesNotExist: pass else: self.fail('Setting with ID {} should not exist.'.format(setting_pk)) def test_get_setting(self): """ Test Getting a setting object. """ name = 'TEST_SETTING' data = 'Setting data' setting = self._create_setting(name, data=data) try: setting2 = Setting.objects.get(pk=setting.pk) except Setting.DoesNotExist: self.fail('Setting with ID {} should exist'.format(setting.pk)) self.assertEqual(setting.name, setting2.name) self.assertEqual(setting.__str__(), setting2.__str__()) self.assertEqual(setting.data, setting2.data) self.assertEqual(setting.pk, setting2.pk) def test_update_setting(self): """ Test Updating a setting object. """ name = 'TEST_SETTING' data = 'Setting data' data2 = 'New Setting Data' setting = self._create_setting(name, data=data) setting.data = data2 setting.save() setting2 = Setting.objects.get(pk=setting.pk) self.assertEqual(setting2.data, data2)
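# --- Editor's note: a usage hint, not part of the original file. These tests
# run under Django's standard test runner; assuming a configured project with
# the `dynamic_setting` app installed, a minimal invocation targeting just
# this case would be:
#
#   python manage.py test dynamic_setting.tests.test_models.SettingTestCase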
36.078125
80
0.628411
2,231
0.966219
0
0
0
0
0
0
405
0.175401
6a5d7ccdf81701102bd40960b2c34a8fefe0bff7
3,973
py
Python
homeassistant/components/zamg/weather.py
MrDelik/core
93a66cc357b226389967668441000498a10453bb
[ "Apache-2.0" ]
30,023
2016-04-13T10:17:53.000Z
2020-03-02T12:56:31.000Z
homeassistant/components/zamg/weather.py
MrDelik/core
93a66cc357b226389967668441000498a10453bb
[ "Apache-2.0" ]
24,710
2016-04-13T08:27:26.000Z
2020-03-02T12:59:13.000Z
homeassistant/components/zamg/weather.py
MrDelik/core
93a66cc357b226389967668441000498a10453bb
[ "Apache-2.0" ]
11,956
2016-04-13T18:42:31.000Z
2020-03-02T09:32:12.000Z
"""Sensor for data from Austrian Zentralanstalt für Meteorologie.""" from __future__ import annotations import logging import voluptuous as vol from homeassistant.components.weather import ( ATTR_WEATHER_HUMIDITY, ATTR_WEATHER_PRESSURE, ATTR_WEATHER_TEMPERATURE, ATTR_WEATHER_WIND_BEARING, ATTR_WEATHER_WIND_SPEED, PLATFORM_SCHEMA, WeatherEntity, ) from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, TEMP_CELSIUS from homeassistant.core import HomeAssistant from homeassistant.helpers import config_validation as cv from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType # Reuse data and API logic from the sensor implementation from .sensor import ( ATTRIBUTION, CONF_STATION_ID, ZamgData, closest_station, zamg_stations, ) _LOGGER = logging.getLogger(__name__) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_STATION_ID): cv.string, vol.Inclusive( CONF_LATITUDE, "coordinates", "Latitude and longitude must exist together" ): cv.latitude, vol.Inclusive( CONF_LONGITUDE, "coordinates", "Latitude and longitude must exist together" ): cv.longitude, } ) def setup_platform( hass: HomeAssistant, config: ConfigType, add_entities: AddEntitiesCallback, discovery_info: DiscoveryInfoType | None = None, ) -> None: """Set up the ZAMG weather platform.""" name = config.get(CONF_NAME) latitude = config.get(CONF_LATITUDE, hass.config.latitude) longitude = config.get(CONF_LONGITUDE, hass.config.longitude) station_id = config.get(CONF_STATION_ID) or closest_station( latitude, longitude, hass.config.config_dir ) if station_id not in zamg_stations(hass.config.config_dir): _LOGGER.error( "Configured ZAMG %s (%s) is not a known station", CONF_STATION_ID, station_id, ) return probe = ZamgData(station_id=station_id) try: probe.update() except (ValueError, TypeError) as err: _LOGGER.error("Received error from ZAMG: %s", err) return add_entities([ZamgWeather(probe, name)], True) class ZamgWeather(WeatherEntity): """Representation of a weather condition.""" def __init__(self, zamg_data, stationname=None): """Initialise the platform with a data instance and station name.""" self.zamg_data = zamg_data self.stationname = stationname @property def name(self): """Return the name of the sensor.""" return ( self.stationname or f"ZAMG {self.zamg_data.data.get('Name') or '(unknown station)'}" ) @property def condition(self): """Return the current condition.""" return None @property def attribution(self): """Return the attribution.""" return ATTRIBUTION @property def temperature(self): """Return the platform temperature.""" return self.zamg_data.get_data(ATTR_WEATHER_TEMPERATURE) @property def temperature_unit(self): """Return the unit of measurement.""" return TEMP_CELSIUS @property def pressure(self): """Return the pressure.""" return self.zamg_data.get_data(ATTR_WEATHER_PRESSURE) @property def humidity(self): """Return the humidity.""" return self.zamg_data.get_data(ATTR_WEATHER_HUMIDITY) @property def wind_speed(self): """Return the wind speed.""" return self.zamg_data.get_data(ATTR_WEATHER_WIND_SPEED) @property def wind_bearing(self): """Return the wind bearing.""" return self.zamg_data.get_data(ATTR_WEATHER_WIND_BEARING) def update(self): """Update current conditions.""" self.zamg_data.update()
28.582734
87
0.678832
1,647
0.414444
0
0
1,210
0.304479
0
0
850
0.21389
6a5e2a2e683b7b168a4a8789ce91b511ae5da26d
19,403
py
Python
rasa/model.py
martasls/rasa
6e535a847f6be0c05e7b89208f16a53d2c478629
[ "Apache-2.0" ]
null
null
null
rasa/model.py
martasls/rasa
6e535a847f6be0c05e7b89208f16a53d2c478629
[ "Apache-2.0" ]
null
null
null
rasa/model.py
martasls/rasa
6e535a847f6be0c05e7b89208f16a53d2c478629
[ "Apache-2.0" ]
null
null
null
import copy import glob import hashlib import logging import os import shutil from subprocess import CalledProcessError, DEVNULL, check_output # skipcq:BAN-B404 import tempfile import typing from pathlib import Path from typing import Any, Text, Tuple, Union, Optional, List, Dict, NamedTuple from packaging import version from rasa.constants import MINIMUM_COMPATIBLE_VERSION import rasa.shared.utils.io import rasa.utils.io from rasa.cli.utils import create_output_path from rasa.shared.utils.cli import print_success from rasa.shared.constants import ( CONFIG_KEYS_CORE, CONFIG_KEYS_NLU, CONFIG_KEYS, DEFAULT_DOMAIN_PATH, DEFAULT_MODELS_PATH, DEFAULT_CORE_SUBDIRECTORY_NAME, DEFAULT_NLU_SUBDIRECTORY_NAME, ) from rasa.exceptions import ModelNotFound from rasa.utils.common import TempDirectoryPath if typing.TYPE_CHECKING: from rasa.shared.importers.importer import TrainingDataImporter logger = logging.getLogger(__name__) # Type alias for the fingerprint Fingerprint = Dict[Text, Union[Text, List[Text], int, float]] FINGERPRINT_FILE_PATH = "fingerprint.json" FINGERPRINT_CONFIG_KEY = "config" FINGERPRINT_CONFIG_CORE_KEY = "core-config" FINGERPRINT_CONFIG_NLU_KEY = "nlu-config" FINGERPRINT_CONFIG_WITHOUT_EPOCHS_KEY = "config-without-epochs" FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY = "domain" FINGERPRINT_NLG_KEY = "nlg" FINGERPRINT_RASA_VERSION_KEY = "version" FINGERPRINT_STORIES_KEY = "stories" FINGERPRINT_NLU_DATA_KEY = "messages" FINGERPRINT_NLU_LABELS_KEY = "nlu_labels" FINGERPRINT_PROJECT = "project" FINGERPRINT_TRAINED_AT_KEY = "trained_at" class Section(NamedTuple): """Specifies which fingerprint keys decide whether this sub-model is retrained.""" name: Text relevant_keys: List[Text] SECTION_CORE = Section( name="Core model", relevant_keys=[ FINGERPRINT_CONFIG_KEY, FINGERPRINT_CONFIG_CORE_KEY, FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY, FINGERPRINT_STORIES_KEY, FINGERPRINT_RASA_VERSION_KEY, ], ) SECTION_NLU = Section( name="NLU model", relevant_keys=[ FINGERPRINT_CONFIG_KEY, FINGERPRINT_CONFIG_NLU_KEY, FINGERPRINT_NLU_DATA_KEY, FINGERPRINT_RASA_VERSION_KEY, ], ) SECTION_NLG = Section(name="NLG responses", relevant_keys=[FINGERPRINT_NLG_KEY]) class FingerprintComparisonResult: """Container for the results of a fingerprint comparison.""" def __init__( self, nlu: bool = True, core: bool = True, nlg: bool = True, force_training: bool = False, ): """Creates a `FingerprintComparisonResult` instance. Args: nlu: `True` if the NLU model should be retrained. core: `True` if the Core model should be retrained. nlg: `True` if the responses in the domain should be updated. force_training: `True` if a training of all parts is forced. """ self.nlu = nlu self.core = core self.nlg = nlg self.force_training = force_training def is_training_required(self) -> bool: """Check if anything has to be retrained.""" return any([self.nlg, self.nlu, self.core, self.force_training]) def should_retrain_core(self) -> bool: """Check if the Core model has to be updated.""" return self.force_training or self.core def should_retrain_nlg(self) -> bool: """Check if the responses have to be updated.""" return self.should_retrain_core() or self.nlg def should_retrain_nlu(self) -> bool: """Check if the NLU model has to be updated.""" return self.force_training or self.nlu def get_model(model_path: Text = DEFAULT_MODELS_PATH) -> TempDirectoryPath: """Get a model and unpack it. Raises a `ModelNotFound` exception if no model could be found at the provided path. Args: model_path: Path to the zipped model. 
If it's a directory, the latest trained model is returned. Returns: Path to the unpacked model. """ if not model_path: raise ModelNotFound("No path specified.") elif not os.path.exists(model_path): raise ModelNotFound(f"No file or directory at '{model_path}'.") if os.path.isdir(model_path): model_path = get_latest_model(model_path) if not model_path: raise ModelNotFound( f"Could not find any Rasa model files in '{model_path}'." ) elif not model_path.endswith(".tar.gz"): raise ModelNotFound(f"Path '{model_path}' does not point to a Rasa model file.") try: model_relative_path = os.path.relpath(model_path) except ValueError: model_relative_path = model_path logger.info(f"Loading model {model_relative_path}...") return unpack_model(model_path) def get_latest_model(model_path: Text = DEFAULT_MODELS_PATH) -> Optional[Text]: """Get the latest model from a path. Args: model_path: Path to a directory containing zipped models. Returns: Path to latest model in the given directory. """ if not os.path.exists(model_path) or os.path.isfile(model_path): model_path = os.path.dirname(model_path) list_of_files = glob.glob(os.path.join(model_path, "*.tar.gz")) if len(list_of_files) == 0: return None return max(list_of_files, key=os.path.getctime) def unpack_model( model_file: Text, working_directory: Optional[Union[Path, Text]] = None ) -> TempDirectoryPath: """Unpack a zipped Rasa model. Args: model_file: Path to zipped model. working_directory: Location where the model should be unpacked to. If `None` a temporary directory will be created. Returns: Path to unpacked Rasa model. """ import tarfile if working_directory is None: working_directory = tempfile.mkdtemp() # All files are in a subdirectory. try: with tarfile.open(model_file, mode="r:gz") as tar: tar.extractall(working_directory) logger.debug(f"Extracted model to '{working_directory}'.") except Exception as e: logger.error(f"Failed to extract model at {model_file}. Error: {e}") raise return TempDirectoryPath(working_directory) def get_model_subdirectories( unpacked_model_path: Text, ) -> Tuple[Optional[Text], Optional[Text]]: """Return paths for Core and NLU model directories, if they exist. If neither directories exist, a `ModelNotFound` exception is raised. Args: unpacked_model_path: Path to unpacked Rasa model. Returns: Tuple (path to Core subdirectory if it exists or `None` otherwise, path to NLU subdirectory if it exists or `None` otherwise). """ core_path = os.path.join(unpacked_model_path, DEFAULT_CORE_SUBDIRECTORY_NAME) nlu_path = os.path.join(unpacked_model_path, DEFAULT_NLU_SUBDIRECTORY_NAME) if not os.path.isdir(core_path): core_path = None if not os.path.isdir(nlu_path): nlu_path = None if not core_path and not nlu_path: raise ModelNotFound( "No NLU or Core data for unpacked model at: '{}'.".format( unpacked_model_path ) ) return core_path, nlu_path def create_package_rasa( training_directory: Text, output_filename: Text, fingerprint: Optional[Fingerprint] = None, ) -> Text: """Create a zipped Rasa model from trained model files. Args: training_directory: Path to the directory which contains the trained model files. output_filename: Name of the zipped model file to be created. fingerprint: A unique fingerprint to identify the model version. Returns: Path to zipped model. 
""" import tarfile if fingerprint: persist_fingerprint(training_directory, fingerprint) output_directory = os.path.dirname(output_filename) if not os.path.exists(output_directory): os.makedirs(output_directory) with tarfile.open(output_filename, "w:gz") as tar: for elem in os.scandir(training_directory): tar.add(elem.path, arcname=elem.name) shutil.rmtree(training_directory) return output_filename def project_fingerprint() -> Optional[Text]: """Create a hash for the project in the current working directory. Returns: project hash """ try: remote = check_output( # skipcq:BAN-B607,BAN-B603 ["git", "remote", "get-url", "origin"], stderr=DEVNULL ) return hashlib.sha256(remote).hexdigest() except (CalledProcessError, OSError): return None async def model_fingerprint(file_importer: "TrainingDataImporter") -> Fingerprint: """Create a model fingerprint from its used configuration and training data. Args: file_importer: File importer which provides the training data and model config. Returns: The fingerprint. """ import time config = await file_importer.get_config() domain = await file_importer.get_domain() stories = await file_importer.get_stories() nlu_data = await file_importer.get_nlu_data() responses = domain.responses # Do a copy of the domain to not change the actual domain (shallow is enough) domain = copy.copy(domain) # don't include the response texts in the fingerprint. # Their fingerprint is separate. domain.responses = {} return { FINGERPRINT_CONFIG_KEY: _get_fingerprint_of_config( config, exclude_keys=CONFIG_KEYS ), FINGERPRINT_CONFIG_CORE_KEY: _get_fingerprint_of_config( config, include_keys=CONFIG_KEYS_CORE ), FINGERPRINT_CONFIG_NLU_KEY: _get_fingerprint_of_config( config, include_keys=CONFIG_KEYS_NLU ), FINGERPRINT_CONFIG_WITHOUT_EPOCHS_KEY: _get_fingerprint_of_config_without_epochs( config ), FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY: domain.fingerprint(), FINGERPRINT_NLG_KEY: rasa.shared.utils.io.deep_container_fingerprint(responses), FINGERPRINT_PROJECT: project_fingerprint(), FINGERPRINT_NLU_DATA_KEY: nlu_data.fingerprint(), FINGERPRINT_NLU_LABELS_KEY: nlu_data.label_fingerprint(), FINGERPRINT_STORIES_KEY: stories.fingerprint(), FINGERPRINT_TRAINED_AT_KEY: time.time(), FINGERPRINT_RASA_VERSION_KEY: rasa.__version__, } def _get_fingerprint_of_config( config: Optional[Dict[Text, Any]], include_keys: Optional[List[Text]] = None, exclude_keys: Optional[List[Text]] = None, ) -> Text: if not config: return "" keys = include_keys or list(filter(lambda k: k not in exclude_keys, config.keys())) sub_config = {k: config[k] for k in keys if k in config} return rasa.shared.utils.io.deep_container_fingerprint(sub_config) def _get_fingerprint_of_config_without_epochs( config: Optional[Dict[Text, Any]], ) -> Text: if not config: return "" copied_config = copy.deepcopy(config) for key in ["pipeline", "policies"]: if copied_config.get(key): for p in copied_config[key]: if "epochs" in p: del p["epochs"] return rasa.shared.utils.io.deep_container_fingerprint(copied_config) def fingerprint_from_path(model_path: Text) -> Fingerprint: """Load a persisted fingerprint. Args: model_path: Path to directory containing the fingerprint. Returns: The fingerprint or an empty dict if no fingerprint was found. 
""" if not model_path or not os.path.exists(model_path): return {} fingerprint_path = os.path.join(model_path, FINGERPRINT_FILE_PATH) if os.path.isfile(fingerprint_path): return rasa.shared.utils.io.read_json_file(fingerprint_path) else: return {} def persist_fingerprint(output_path: Text, fingerprint: Fingerprint): """Persist a model fingerprint. Args: output_path: Directory in which the fingerprint should be saved. fingerprint: The fingerprint to be persisted. """ path = os.path.join(output_path, FINGERPRINT_FILE_PATH) rasa.shared.utils.io.dump_obj_as_json_to_file(path, fingerprint) def did_section_fingerprint_change( fingerprint1: Fingerprint, fingerprint2: Fingerprint, section: Section ) -> bool: """Check whether the fingerprint of a section has changed.""" for k in section.relevant_keys: if fingerprint1.get(k) != fingerprint2.get(k): logger.info(f"Data ({k}) for {section.name} section changed.") return True return False def move_model(source: Text, target: Text) -> bool: """Move two model directories. Args: source: The original folder which should be merged in another. target: The destination folder where it should be moved to. Returns: `True` if the merge was successful, else `False`. """ try: shutil.move(source, target) return True except Exception as e: logging.debug(f"Could not merge model: {e}") return False def should_retrain( new_fingerprint: Fingerprint, old_model: Text, train_path: Text, has_e2e_examples: bool = False, force_training: bool = False, ) -> FingerprintComparisonResult: """Check which components of a model should be retrained. Args: new_fingerprint: The fingerprint of the new model to be trained. old_model: Path to the old zipped model file. train_path: Path to the directory in which the new model will be trained. has_e2e_examples: Whether the new training data contains e2e examples. force_training: Indicates if the model needs to be retrained even if the data has not changed. Returns: A FingerprintComparisonResult object indicating whether Rasa Core and/or Rasa NLU needs to be retrained or not. """ fingerprint_comparison = FingerprintComparisonResult() if old_model is None or not os.path.exists(old_model): return fingerprint_comparison with unpack_model(old_model) as unpacked: last_fingerprint = fingerprint_from_path(unpacked) old_core, old_nlu = get_model_subdirectories(unpacked) fingerprint_comparison = FingerprintComparisonResult( core=did_section_fingerprint_change( last_fingerprint, new_fingerprint, SECTION_CORE ), nlu=did_section_fingerprint_change( last_fingerprint, new_fingerprint, SECTION_NLU ), nlg=did_section_fingerprint_change( last_fingerprint, new_fingerprint, SECTION_NLG ), force_training=force_training, ) # We should retrain core if nlu data changes and there are e2e stories. 
if has_e2e_examples and fingerprint_comparison.should_retrain_nlu(): fingerprint_comparison.core = True core_merge_failed = False if not fingerprint_comparison.should_retrain_core(): target_path = os.path.join(train_path, DEFAULT_CORE_SUBDIRECTORY_NAME) core_merge_failed = not move_model(old_core, target_path) fingerprint_comparison.core = core_merge_failed if not fingerprint_comparison.should_retrain_nlg() and core_merge_failed: # If moving the Core model failed, we should also retrain NLG fingerprint_comparison.nlg = True if not fingerprint_comparison.should_retrain_nlu(): target_path = os.path.join(train_path, "nlu") fingerprint_comparison.nlu = not move_model(old_nlu, target_path) return fingerprint_comparison def can_finetune( last_fingerprint: Fingerprint, new_fingerprint: Fingerprint, core: bool = False, nlu: bool = False, ) -> bool: """Checks if components of a model can be finetuned with incremental training. Args: last_fingerprint: The fingerprint of the old model to potentially be fine-tuned. new_fingerprint: The fingerprint of the new model. core: Check sections for finetuning a core model. nlu: Check sections for finetuning an nlu model. Returns: `True` if the old model can be finetuned, `False` otherwise. """ section_keys = [ FINGERPRINT_CONFIG_WITHOUT_EPOCHS_KEY, ] if core: section_keys.append(FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY) if nlu: section_keys.append(FINGERPRINT_NLU_LABELS_KEY) fingerprint_changed = did_section_fingerprint_change( last_fingerprint, new_fingerprint, Section(name="finetune", relevant_keys=section_keys), ) old_model_above_min_version = version.parse( last_fingerprint.get(FINGERPRINT_RASA_VERSION_KEY) ) >= version.parse(MINIMUM_COMPATIBLE_VERSION) return old_model_above_min_version and not fingerprint_changed def package_model( fingerprint: Fingerprint, output_directory: Text, train_path: Text, fixed_model_name: Optional[Text] = None, model_prefix: Text = "", ) -> Text: """ Compress a trained model. Args: fingerprint: fingerprint of the model output_directory: path to the directory in which the model should be stored train_path: path to uncompressed model fixed_model_name: name of the compressed model file model_prefix: prefix of the compressed model file Returns: path to 'tar.gz' model file """ output_directory = create_output_path( output_directory, prefix=model_prefix, fixed_name=fixed_model_name ) create_package_rasa(train_path, output_directory, fingerprint) print_success( "Your Rasa model is trained and saved at '{}'.".format( os.path.abspath(output_directory) ) ) return output_directory async def update_model_with_new_domain( importer: "TrainingDataImporter", unpacked_model_path: Union[Path, Text] ) -> None: """Overwrites the domain of an unpacked model with a new domain. Args: importer: Importer which provides the new domain. unpacked_model_path: Path to the unpacked model. """ model_path = Path(unpacked_model_path) / DEFAULT_CORE_SUBDIRECTORY_NAME domain = await importer.get_domain() domain.persist(model_path / DEFAULT_DOMAIN_PATH) def get_model_for_finetuning( previous_model_file: Optional[Union[Path, Text]] ) -> Optional[Text]: """Gets validated path for model to finetune. Args: previous_model_file: Path to model file which should be used for finetuning or a directory in case the latest trained model should be used. Returns: Path to model archive. `None` if there is no model. """ if Path(previous_model_file).is_dir(): logger.debug( f"Trying to load latest model from '{previous_model_file}' for " f"finetuning." 
) return get_latest_model(previous_model_file) if Path(previous_model_file).is_file(): return previous_model_file logger.debug( "No valid model for finetuning found as directory either " "contains no model or model file cannot be found." ) return None
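# --- Editor's note: a sketch (not from the original module) of how the helpers
# above combine into a retrain decision. The archive path is a placeholder;
# `model_fingerprint` is async, hence the coroutine wrapper.
async def _sketch_retrain_decision(file_importer, old_model="models/latest.tar.gz"):
    new_fp = await model_fingerprint(file_importer)
    result = should_retrain(new_fp, old_model, train_path="/tmp/train")
    if result.is_training_required():
        print("retrain core:", result.should_retrain_core(),
              "| retrain nlu:", result.should_retrain_nlu())
    return result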
31.345719
89
0.687368
1,516
0.078132
0
0
0
0
2,285
0.117765
6,649
0.342679
6a5e9ccfe0101a01a8c7498e619dd38d0b22d208
2,484
py
Python
algorithmic_trading/backester_framework_test.py
CatalaniCD/quantitative_finance
c752516a43cd80914dcc8411aadd7b15a258d6a4
[ "MIT" ]
1
2021-08-20T19:17:10.000Z
2021-08-20T19:17:10.000Z
algorithmic_trading/backester_framework_test.py
CatalaniCD/quantitative_finance
c752516a43cd80914dcc8411aadd7b15a258d6a4
[ "MIT" ]
null
null
null
algorithmic_trading/backester_framework_test.py
CatalaniCD/quantitative_finance
c752516a43cd80914dcc8411aadd7b15a258d6a4
[ "MIT" ]
1
2021-10-04T07:44:02.000Z
2021-10-04T07:44:02.000Z
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Fri Jul 16 11:20:01 2021 @author: q GOAL : develop a backtester from a .py framework / library # installation : pip install backtesting # Documentation Index : - Manuals - Tutorials - Example Strategies - FAQ - License - API Reference Documentation source : https://kernc.github.io/backtesting.py/doc/backtesting/ # Features * Simple, well-documented API * Blazing fast execution * Built-in optimizer * Library of composable base strategies and utilities * Indicator-library-agnostic * Supports any financial instrument with candlestick data * Detailed results * Interactive visualizations """ # ============================================================================= # imports and settings # ============================================================================= # data handling import pandas as pd import numpy as np # import backtesting and set options import backtesting # Set notebook False backtesting.set_bokeh_output(notebook=False) from backtesting import Backtest, Strategy from backtesting.lib import crossover, cross from backtesting.test import SMA, GOOG # ============================================================================= # strategy definition # ============================================================================= class PriceAboveSMA(Strategy): _ma_period = 21 # Moving Average def init(self): # compute momentum """ Simple Moving Average Calc""" self.sma = self.I(SMA, self.data.Close, self._ma_period) def next(self): price = self.data.Close[-1] if not self.position and price > self.sma[-1]: # market entry self.buy() elif self.position and price < self.sma[-1]: # market exit self.position.close() # ============================================================================= # Program Execution # ============================================================================= if __name__ == '__main__': """ Instantiate the Backtester """ backtester = Backtest(GOOG, PriceAboveSMA, commission=.002, exclusive_orders=True, cash = 10000) PLOT = True """ Run a Single Backtest """ stats = backtester.run() print(stats) if PLOT: backtester.plot()
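# --- Editor's note: a hedged sketch of the built-in optimizer advertised in
# the docstring above. `_ma_period` is the PriceAboveSMA attribute defined
# earlier; the range bounds and the OPTIMIZE flag are illustration-only
# additions.
OPTIMIZE = False

if __name__ == '__main__' and OPTIMIZE:
    opt_stats = backtester.optimize(
        _ma_period=range(5, 55, 5),       # grid of SMA lengths to try
        maximize='Equity Final [$]',      # metric reported by .run()
    )
    print(opt_stats._strategy)            # strategy instance with best params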
25.090909
79
0.515298
509
0.204911
0
0
0
0
0
0
1,531
0.616345
6a5edc2a9e4d4da78b37be28e1fdb8023841826f
2,215
py
Python
Sec_10_expr_lambdas_fun_integradas/f_generators.py
PauloAlexSilva/Python
690913cdcfd8bde52d9ddd15e3c838e6aef27730
[ "MIT" ]
null
null
null
Sec_10_expr_lambdas_fun_integradas/f_generators.py
PauloAlexSilva/Python
690913cdcfd8bde52d9ddd15e3c838e6aef27730
[ "MIT" ]
null
null
null
Sec_10_expr_lambdas_fun_integradas/f_generators.py
PauloAlexSilva/Python
690913cdcfd8bde52d9ddd15e3c838e6aef27730
[ "MIT" ]
null
null
null
"""" Generator Expression Em aulas anteriores foi abordado: - List Comprehension; - Dictionary Comprehension; - Set Comprehension. Não foi abordado: - Tuple Comprehension ... porque elas se chamam Generators nomes = ['Carlos', 'Camila', 'Carla', 'Cristiana', 'Cristina', 'Vanessa'] print(any8[nomes[0] == 'C' for nome in nomes]) # Poderia ter sido feito usando os Generators nomes = ['Carlos', 'Camila', 'Carla', 'Cristiana', 'Cristina', 'Vanessa'] print(any(nome[0] == 'C' for nome in nomes)) # List Comprehension res = [nome[0] == 'C' for nome in nomes] print(type(res)) print(res) # [True, True, True, True, True, False] # Generator - mais efeciente res2 = (nome[0] == 'C' for nome in nomes) print(type(res2)) print(res2) # O que faz a função de getsizeof()? -> retorna a quantidade de bytes em memória do elemento # passado como parâmetro from sys import getsizeof # Mostra quantos bytes a string 'Paulo' está ocupando em memória. # Quanto maior a string mais espaço ocupa. print(getsizeof('Paulo')) print(getsizeof('Quanto maior a string mais espaço ocupa.')) print(getsizeof(9)) print(getsizeof(91)) print(getsizeof(12345667890)) print(getsizeof(True)) from sys import getsizeof # Gerando uma lista de números com List Comprehension list_comp = getsizeof([x * 10 for x in range(1000)]) # Gerando uma lista de números com Set Comprehension set_comp = getsizeof({x * 10 for x in range(1000)}) # Gerando uma lista de números com Dictionary Comprehension dic_comp = getsizeof({x: x * 10 for x in range(1000)}) # Gerando uma lista de números com Generator gen = getsizeof(x * 10 for x in range(1000)) print('Para fazer a mesma gastamos em memória: ') print(f'List Comprehension: {list_comp} bytes!') print(f'Set Comprehension: {set_comp} bytes!') print(f'Dictionary Comprehension: {dic_comp} bytes!') print(f'Generator Expression: {gen} bytes!') Para fazer a mesma gastamos em memória: List Comprehension: 8856 bytes! Set Comprehension: 32984 bytes! Dictionary Comprehension: 36960 bytes! Generator Expression: 112 bytes! """ # Posso iterar no Generator Expression? Sim gen = (x * 10 for x in range(1000)) print(gen) print(type(gen)) for num in gen: print(num)
24.340659
92
0.719187
0
0
0
0
0
0
0
0
2,129
0.954709
6a5f51cf2ae3a67fb99172b7bd4214f43d0d42bc
269
py
Python
python/ordenacao.py
valdirsjr/learning.data
a4b72dfd27f55f2f04120644b73232bf343f71e3
[ "MIT" ]
null
null
null
python/ordenacao.py
valdirsjr/learning.data
a4b72dfd27f55f2f04120644b73232bf343f71e3
[ "MIT" ]
null
null
null
python/ordenacao.py
valdirsjr/learning.data
a4b72dfd27f55f2f04120644b73232bf343f71e3
[ "MIT" ]
null
null
null
numero1 = int(input("Enter the first number: "))
numero2 = int(input("Enter the second number: "))
numero3 = int(input("Enter the third number: "))

if numero1 < numero2 < numero3:
    print("ascending")
else:
    print("not in ascending order")
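# --- Editor's note: an equivalent, slightly more general check added for
# illustration; `numeros` is a hypothetical helper list built from the inputs
# above (strictly ascending == sorted with no duplicates).
numeros = [numero1, numero2, numero3]
if numeros == sorted(numeros) and len(set(numeros)) == 3:
    print("strictly ascending")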
38.428571
50
0.69145
0
0
0
0
0
0
0
0
128
0.467153
6a5f7c637685db9897573cf124a2ab2c3a9ea578
408
py
Python
_sources/5-extra/opg-parameters-sneeuwvlok_solution.py
kooi/ippt-od
f1ba44ccfb72e6fcdfdc392fbfbec3e37c47b354
[ "MIT" ]
1
2018-08-21T21:05:41.000Z
2018-08-21T21:05:41.000Z
_sources/5-extra/opg-parameters-sneeuwvlok_solution.py
kooi/ippt-od
f1ba44ccfb72e6fcdfdc392fbfbec3e37c47b354
[ "MIT" ]
null
null
null
_sources/5-extra/opg-parameters-sneeuwvlok_solution.py
kooi/ippt-od
f1ba44ccfb72e6fcdfdc392fbfbec3e37c47b354
[ "MIT" ]
null
null
null
import turtle

tina = turtle.Turtle()
tina.shape("turtle")
tina.speed(10)

def parallellogram(lengte):
    for i in range(2):
        tina.forward(lengte)
        tina.right(60)
        tina.forward(lengte)
        tina.right(120)

def sneeuwvlok(lengte, num):
    for i in range(num):
        parallellogram(lengte)
        tina.right(360.0 / num)  # 360.0 ensures the int is cast to float

sneeuwvlok(30, 6)
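# --- Editor's note (addition): when run as a plain script the window may
# close as soon as drawing finishes; turtle.done() enters the event loop and
# keeps it open until the user closes it.
turtle.done()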
21.473684
72
0.644608
0
0
0
0
0
0
0
0
50
0.122549
6a5ff44d20ced0eb4ad46edf90219db489f08973
5,153
py
Python
nikola/plugins/task_render_listings.py
servalproject/nikola
4d78504d93597894f3da4a434dfafdec907601a7
[ "MIT" ]
1
2015-12-14T21:38:33.000Z
2015-12-14T21:38:33.000Z
nikola/plugins/task_render_listings.py
servalproject/nikola
4d78504d93597894f3da4a434dfafdec907601a7
[ "MIT" ]
null
null
null
nikola/plugins/task_render_listings.py
servalproject/nikola
4d78504d93597894f3da4a434dfafdec907601a7
[ "MIT" ]
null
null
null
# Copyright (c) 2012 Roberto Alsina y otros. # Permission is hereby granted, free of charge, to any # person obtaining a copy of this software and associated # documentation files (the "Software"), to deal in the # Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the # Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice # shall be included in all copies or substantial portions of # the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. from __future__ import unicode_literals, print_function import os from pygments import highlight from pygments.lexers import get_lexer_for_filename, TextLexer from pygments.formatters import HtmlFormatter from nikola.plugin_categories import Task from nikola import utils class Listings(Task): """Render pretty listings.""" name = "render_listings" def gen_tasks(self): """Render pretty code listings.""" kw = { "default_lang": self.site.config["DEFAULT_LANG"], "listings_folder": self.site.config["LISTINGS_FOLDER"], "output_folder": self.site.config["OUTPUT_FOLDER"], "index_file": self.site.config["INDEX_FILE"], } # Things to ignore in listings ignored_extensions = (".pyc",) def render_listing(in_name, out_name, folders=[], files=[]): if in_name: with open(in_name, 'r') as fd: try: lexer = get_lexer_for_filename(in_name) except: lexer = TextLexer() code = highlight(fd.read(), lexer, HtmlFormatter(cssclass='code', linenos="table", nowrap=False, lineanchors=utils.slugify(f), anchorlinenos=True)) title = os.path.basename(in_name) else: code = '' title = '' crumbs = utils.get_crumbs(os.path.relpath(out_name, kw['output_folder']), is_file=True) context = { 'code': code, 'title': title, 'crumbs': crumbs, 'lang': kw['default_lang'], 'folders': folders, 'files': files, 'description': title, } self.site.render_template('listing.tmpl', out_name, context) flag = True template_deps = self.site.template_system.template_deps('listing.tmpl') for root, dirs, files in os.walk(kw['listings_folder']): flag = False # Render all files out_name = os.path.join( kw['output_folder'], root, kw['index_file'] ) yield { 'basename': self.name, 'name': out_name, 'file_dep': template_deps, 'targets': [out_name], 'actions': [(render_listing, [None, out_name, dirs, files])], # This is necessary to reflect changes in blog title, # sidebar links, etc. 'uptodate': [utils.config_changed( self.site.config['GLOBAL_CONTEXT'])], 'clean': True, } for f in files: ext = os.path.splitext(f)[-1] if ext in ignored_extensions: continue in_name = os.path.join(root, f) out_name = os.path.join( kw['output_folder'], root, f) + '.html' yield { 'basename': self.name, 'name': out_name, 'file_dep': template_deps + [in_name], 'targets': [out_name], 'actions': [(render_listing, [in_name, out_name])], # This is necessary to reflect changes in blog title, # sidebar links, etc. 
'uptodate': [utils.config_changed( self.site.config['GLOBAL_CONTEXT'])], 'clean': True, } if flag: yield { 'basename': self.name, 'actions': [], }
39.335878
81
0.523578
3,765
0.730642
3,674
0.712983
0
0
0
0
1,856
0.360179
6a60999063f76386f01b79b85ecc655ec0929c57
25,232
py
Python
csld/phonon/head.py
jsyony37/csld
b0e6d5845d807174f24ca7b591bc164c608c99c8
[ "MIT" ]
null
null
null
csld/phonon/head.py
jsyony37/csld
b0e6d5845d807174f24ca7b591bc164c608c99c8
[ "MIT" ]
null
null
null
csld/phonon/head.py
jsyony37/csld
b0e6d5845d807174f24ca7b591bc164c608c99c8
[ "MIT" ]
null
null
null
# to include all module here in order to cite from numpy import * from numpy.linalg import * import string import os import scipy import scipy.sparse #import rwposcar #import anaxdat import math #define touch file def touch(file):#input string if os.path.isfile(file): os.system(str("rm"+" "+file)) os.system(str("touch"+" "+file)) else: os.system(str("touch"+" "+file)) def mkdir(dir): if os.path.isdir(dir): os.system(str("rm"+" -r "+dir)) os.system(str("mkdir"+" "+dir)) else: os.system(str("mkdir"+" "+dir)) if False: mkdir("xixi/") #define rm file def rm(file): if os.path.isfile(file): os.system(str("rm"+" "+file)) else: print("No file found, dont need to rm") #define check file(1 exist; else0) def check(file): if os.path.isfile(file): return int(1) else: return int(0) #define check the file status (print the status) def checkfile(file): if os.path.isfile(file): print(str(file)+" exists :)") else: print(str(file)+" not found :(") #define readallline function def readinline(file): dataout=[] if check(file): fin=open(file,"r") for line in fin: dataout.append(line.split())#map(float,line.split())) fin.close() else: print(str(file)+" not found :(") return array(dataout) #define write1dmat def write1dmat(datain, file): if check(file): rm(file) touch(file) else: touch(file) fout=open(file, "w") fout.writelines("\n".join(map(str,datain))) fout.close() #define one number to file def writenumber(datain, file): if check(file): rm(file) touch(file) else: touch(file) fout=open(file,"w") fout.writelines(str(datain)) fout.close() #define write2dmat def write2dmat(datain, file): if check(file): rm(file) touch(file) else: touch(file) fout=open(file, "w") #cout line number fout.writelines(str(len(datain))+"\n") for i in datain: fout.writelines(" ".join(map(str,i))+"\n") fout.close() #define write2dMTX def write2dMTX(datain, file): if check(file): rm(file) touch(file) else: touch(file) fout=open(file, "w") fout.writelines("%%MatrixMarket matrix coordinate real general\n") fout.writelines("%Created by Wolfram Mathematica 9.0 : www.wolfram.com\n") print("Transfering to sparse matrix----") #get rid of small numbers #for i in range(len(datain)): # for j in range(len(datain[i])): # datain[i][j]=round(datain[i][j],3) BB=scipy.sparse.coo_matrix(datain) print("Spare matrix obtained!") # print BB.row # print BB.col # print BB.data fout.writelines(str(len(datain))+" "+str(len(datain[0]))+" "+str(len(BB.data))+"\n") for i in range(len(BB.data)): fout.writelines(str(BB.row[i]+1)+" "+str(BB.col[i]+1)+" "+str(BB.data[i])+"\n") #for i in range(len(datain)): #for j in range(len(datain[0])): #fout.writelines(str(i+1)+" "+str(j+1)+" "+str(datain[i][j])+"\n") fout.close() def read2dMTX(file): if check(file): counter=0 for line in open(file): counter=counter+1 if counter <=2: continue if counter ==3: inlist=list(map(int,line.split())) nrow=inlist[0] ncol=inlist[1] dataout=array([[0.0]*ncol]*nrow) continue if counter >=4: tmp=line.split() #print str(tmp)+", "+str(tmp[2]) dataout[int(tmp[0])-1][int(tmp[1])-1]=float(tmp[2]) #print "\n" return dataout.tolist() else: print(str(file)+" not found :(") #test if False: Amat=[[0,1],[2,0],[0,0],[0,16]] print(Amat) write2dMTX(Amat, "test.mtx") print(read2dMTX("test.mtx")) #define read1dmat #read float def read1dmat(file): mat=[] if check(file): for line in open(file): mat.append(float(line)) return mat else: print(str(file)+" not found :(") if False: haha=[1,2,3,4,5] write1dmat(haha, "haha") xixi=read1dmat("haha") print(xixi) #define read2dmat (this is a relatively fast way: iter or chunck 
read) def read2dmat(file,icomplex=False): mat=[] if check(file): print("Read matrix start") for line in open(file): if not icomplex: mat.append(list(map(float,line.split()))) else: mat.append(list(map(complex,line.split()))) print("Read matrix end") #delete line counter del mat[0] return mat else: print(str(file)+" not found :(") #test #mat=read2dmat("C-isoo.mat") #print len(mat) #print len(mat[0]) def clusstr(clus): dataout="" for item in clus: dataout=dataout+str(item[0])+" "+str(item[1])+" "+str(item[2])+"\n" return dataout def lptstr(lpt): dataout="" for item in lpt: dataout=dataout+str(item[0][0])+" "+str(item[0][1])+" "+str(item[0][2])+" "+str(item[1])+"\n" return dataout #define writeorb(orb) def writeorb(orbset, file): if check(file): rm(file) touch(file) else: touch(file) fout=open(file, "w") fout.write(str(len(orbset))+"\n\n") for orb in orbset: fout.write(str(len(orb))+"\n\n") for item in orb: npt=len(item[0]) fout.write(str(npt)+"\n") fout.write(clusstr(item[0])) fout.write(str(item[1])+"\n") fout.write(str(item[2])+"\n") fout.write(lptstr(item[3])) fout.write("\n") fout.close() def writeclus(clus, file): if check(file): rm(file) touch(file) else: touch(file) fout=open(file,"w") fout.write(str(len(clus))+"\n\n") for item in clus: fout.write(str(len(item))+"\n") fout.write(clusstr(item)) fout.write("\n") fout.close() def writeSCinfo(SCinfo, file): if check(file): rm(file) touch(file) else: touch(file) fout=open(file, "w") tmp=[SCinfo['SC'], SCinfo['invSC'], SCinfo['SCref'], SCinfo['SCpos'], SCinfo['SCmat'], SCinfo['invSCmat'], SCinfo['order']] lentmp=[len(i) for i in tmp] fout.write(" ".join(map(str,lentmp))+"\n") for i in tmp: if i==SCinfo['order']: fout.write("\n".join(map(str,i))+"\n") else: for j in i: fout.write(" ".join(map(str,j))+"\n") fout.close() def readSCinfo(file): SCinfo={} if check(file): fin=open(file, "r") lenlist=list(map(int,(fin.readline()).split())) # tmp=[SCinfo['SC'], SCinfo['invSC'], SCinfo['SCref'], SCinfo['SCpos'], SCinfo['SCmat'], SCinfo['invSCmat'], SCinfo['order']] tmp=[] for i in range(7): tmp1=[] for j in range(lenlist[i]): if i in [0,1,3,4,5]: tmp1.append(list(map(float,(fin.readline()).split()))) elif i in [2]: tmp1.append(list(map(int,(fin.readline()).split()))) else: tmp1.append(list(map(int,(fin.readline()).split()))[0]) tmp.append(tmp1) SCinfo['SC']=tmp[0] SCinfo['invSC']=tmp[1] SCinfo['SCref']=tmp[2] SCinfo['SCpos']=tmp[3] SCinfo['SCmat']=tmp[4] SCinfo['invSCmat']=tmp[5] SCinfo['order']=tmp[6] else: print(str(file)+" not found :(") return SCinfo #test if False: SCinfo={'invSCmat': [[-0.25, 0.25, 0.25], [0.25, -0.25, 0.25], [0.25, 0.25, -0.25]], 'SCmat': [[0.0, 2.0, 2.0], [2.0, 0.0, 2.0], [2.0, 2.0, 0.0]], 'SCref': [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0], [1, 1, 1], [1, 1, 2], [1, 2, 1], [1, 2, 2], [2, 1, 1], [2, 1, 2], [2, 2, 1], [2, 2, 2], [2, 2, 3], [2, 3, 2], [3, 2, 2], [3, 3, 3]], 'SCpos': [[0.75, 0.25, 0.5], [0.25, 0.75, 0.5], [0.5, 0.25, 0.75], [0.5, 0.75, 0.25], [0.25, 0.5, 0.75], [0.75, 0.5, 0.25], [0.785, 0.785, 0.0], [0.215, 0.215, 0.0], [0.0, 0.215, 0.215], [0.0, 0.785, 0.785], [0.785, 0.0, 0.785], [0.215, 0.0, 0.215], [0.5239, 0.0, 0.7543], [0.7543, 0.0, 0.5239], [0.4761, 0.2304, 0.4761], [0.2457, 0.7696, 0.2457], [0.5239, 0.7543, 0.0], [0.7543, 0.5239, 0.0], [0.2457, 0.2457, 0.7696], [0.4761, 0.4761, 0.2304], [0.7696, 0.2457, 0.2457], [0.2304, 0.4761, 0.4761], [0.0, 0.5239, 0.7543], [0.0, 0.7543, 0.5239], [0.0, 0.0, 0.0], [0.4636, 0.0, 0.0], [0.0, 0.0, 0.4636], [0.5364, 0.5364, 0.5364], [0.0, 0.4636, 0.0], [0.75, 1.25, 1.5], 
[0.25, 1.75, 1.5], [0.5, 1.25, 1.75], [0.5, 1.75, 1.25], [0.25, 1.5, 1.75], [0.75, 1.5, 1.25], [0.785, 1.785, 1.0], [0.215, 1.215, 1.0], [0.0, 1.215, 1.215], [0.0, 1.785, 1.785], [0.785, 1.0, 1.785], [0.215, 1.0, 1.215], [0.5239, 1.0, 1.7543], [0.7543, 1.0, 1.5239], [0.4761, 1.2304, 1.4761], [0.2457, 1.7696, 1.2457], [0.5239, 1.7543, 1.0], [0.7543, 1.5239, 1.0], [0.2457, 1.2457, 1.7696], [0.4761, 1.4761, 1.2304], [0.7696, 1.2457, 1.2457], [0.2304, 1.4761, 1.4761], [0.0, 1.5239, 1.7543], [0.0, 1.7543, 1.5239], [0.0, 1.0, 1.0], [0.4636, 1.0, 1.0], [0.0, 1.0, 1.4636], [0.5364, 1.5364, 1.5364], [0.0, 1.4636, 1.0], [1.75, 0.25, 1.5], [1.25, 0.75, 1.5], [1.5, 0.25, 1.75], [1.5, 0.75, 1.25], [1.25, 0.5, 1.75], [1.75, 0.5, 1.25], [1.785, 0.785, 1.0], [1.215, 0.215, 1.0], [1.0, 0.215, 1.215], [1.0, 0.785, 1.785], [1.785, 0.0, 1.785], [1.215, 0.0, 1.215], [1.5239, 0.0, 1.7543], [1.7543, 0.0, 1.5239], [1.4761, 0.2304, 1.4761], [1.2457, 0.7696, 1.2457], [1.5239, 0.7543, 1.0], [1.7543, 0.5239, 1.0], [1.2457, 0.2457, 1.7696], [1.4761, 0.4761, 1.2304], [1.7696, 0.2457, 1.2457], [1.2304, 0.4761, 1.4761], [1.0, 0.5239, 1.7543], [1.0, 0.7543, 1.5239], [1.0, 0.0, 1.0], [1.4636, 0.0, 1.0], [1.0, 0.0, 1.4636], [1.5364, 0.5364, 1.5364], [1.0, 0.4636, 1.0], [1.75, 1.25, 0.5], [1.25, 1.75, 0.5], [1.5, 1.25, 0.75], [1.5, 1.75, 0.25], [1.25, 1.5, 0.75], [1.75, 1.5, 0.25], [1.785, 1.785, 0.0], [1.215, 1.215, 0.0], [1.0, 1.215, 0.215], [1.0, 1.785, 0.785], [1.785, 1.0, 0.785], [1.215, 1.0, 0.215], [1.5239, 1.0, 0.7543], [1.7543, 1.0, 0.5239], [1.4761, 1.2304, 0.4761], [1.2457, 1.7696, 0.2457], [1.5239, 1.7543, 0.0], [1.7543, 1.5239, 0.0], [1.2457, 1.2457, 0.7696], [1.4761, 1.4761, 0.2304], [1.7696, 1.2457, 0.2457], [1.2304, 1.4761, 0.4761], [1.0, 1.5239, 0.7543], [1.0, 1.7543, 0.5239], [1.0, 1.0, 0.0], [1.4636, 1.0, 0.0], [1.0, 1.0, 0.4636], [1.5364, 1.5364, 0.5364], [1.0, 1.4636, 0.0], [1.75, 1.25, 1.5], [1.25, 1.75, 1.5], [1.5, 1.25, 1.75], [1.5, 1.75, 1.25], [1.25, 1.5, 1.75], [1.75, 1.5, 1.25], [1.785, 1.785, 1.0], [1.215, 1.215, 1.0], [1.0, 1.215, 1.215], [1.0, 1.785, 1.785], [1.785, 1.0, 1.785], [1.215, 1.0, 1.215], [1.5239, 1.0, 1.7543], [1.7543, 1.0, 1.5239], [1.4761, 1.2304, 1.4761], [1.2457, 1.7696, 1.2457], [1.5239, 1.7543, 1.0], [1.7543, 1.5239, 1.0], [1.2457, 1.2457, 1.7696], [1.4761, 1.4761, 1.2304], [1.7696, 1.2457, 1.2457], [1.2304, 1.4761, 1.4761], [1.0, 1.5239, 1.7543], [1.0, 1.7543, 1.5239], [1.0, 1.0, 1.0], [1.4636, 1.0, 1.0], [1.0, 1.0, 1.4636], [1.5364, 1.5364, 1.5364], [1.0, 1.4636, 1.0], [1.75, 1.25, 2.5], [1.25, 1.75, 2.5], [1.5, 1.25, 2.75], [1.5, 1.75, 2.25], [1.25, 1.5, 2.75], [1.75, 1.5, 2.25], [1.785, 1.785, 2.0], [1.215, 1.215, 2.0], [1.0, 1.215, 2.215], [1.0, 1.785, 2.785], [1.785, 1.0, 2.785], [1.215, 1.0, 2.215], [1.5239, 1.0, 2.7543], [1.7543, 1.0, 2.5239], [1.4761, 1.2304, 2.4761], [1.2457, 1.7696, 2.2457], [1.5239, 1.7543, 2.0], [1.7543, 1.5239, 2.0], [1.2457, 1.2457, 2.7696], [1.4761, 1.4761, 2.2304], [1.7696, 1.2457, 2.2457], [1.2304, 1.4761, 2.4761], [1.0, 1.5239, 2.7543], [1.0, 1.7543, 2.5239], [1.0, 1.0, 2.0], [1.4636, 1.0, 2.0], [1.0, 1.0, 2.4636], [1.5364, 1.5364, 2.5364], [1.0, 1.4636, 2.0], [1.75, 2.25, 1.5], [1.25, 2.75, 1.5], [1.5, 2.25, 1.75], [1.5, 2.75, 1.25], [1.25, 2.5, 1.75], [1.75, 2.5, 1.25], [1.785, 2.785, 1.0], [1.215, 2.215, 1.0], [1.0, 2.215, 1.215], [1.0, 2.785, 1.785], [1.785, 2.0, 1.785], [1.215, 2.0, 1.215], [1.5239, 2.0, 1.7543], [1.7543, 2.0, 1.5239], [1.4761, 2.2304, 1.4761], [1.2457, 2.7696, 1.2457], [1.5239, 2.7543, 1.0], [1.7543, 2.5239, 1.0], 
[1.2457, 2.2457, 1.7696], [1.4761, 2.4761, 1.2304], [1.7696, 2.2457, 1.2457], [1.2304, 2.4761, 1.4761], [1.0, 2.5239, 1.7543], [1.0, 2.7543, 1.5239], [1.0, 2.0, 1.0], [1.4636, 2.0, 1.0], [1.0, 2.0, 1.4636], [1.5364, 2.5364, 1.5364], [1.0, 2.4636, 1.0], [1.75, 2.25, 2.5], [1.25, 2.75, 2.5], [1.5, 2.25, 2.75], [1.5, 2.75, 2.25], [1.25, 2.5, 2.75], [1.75, 2.5, 2.25], [1.785, 2.785, 2.0], [1.215, 2.215, 2.0], [1.0, 2.215, 2.215], [1.0, 2.785, 2.785], [1.785, 2.0, 2.785], [1.215, 2.0, 2.215], [1.5239, 2.0, 2.7543], [1.7543, 2.0, 2.5239], [1.4761, 2.2304, 2.4761], [1.2457, 2.7696, 2.2457], [1.5239, 2.7543, 2.0], [1.7543, 2.5239, 2.0], [1.2457, 2.2457, 2.7696], [1.4761, 2.4761, 2.2304], [1.7696, 2.2457, 2.2457], [1.2304, 2.4761, 2.4761], [1.0, 2.5239, 2.7543], [1.0, 2.7543, 2.5239], [1.0, 2.0, 2.0], [1.4636, 2.0, 2.0], [1.0, 2.0, 2.4636], [1.5364, 2.5364, 2.5364], [1.0, 2.4636, 2.0], [2.75, 1.25, 1.5], [2.25, 1.75, 1.5], [2.5, 1.25, 1.75], [2.5, 1.75, 1.25], [2.25, 1.5, 1.75], [2.75, 1.5, 1.25], [2.785, 1.785, 1.0], [2.215, 1.215, 1.0], [2.0, 1.215, 1.215], [2.0, 1.785, 1.785], [2.785, 1.0, 1.785], [2.215, 1.0, 1.215], [2.5239, 1.0, 1.7543], [2.7543, 1.0, 1.5239], [2.4761, 1.2304, 1.4761], [2.2457, 1.7696, 1.2457], [2.5239, 1.7543, 1.0], [2.7543, 1.5239, 1.0], [2.2457, 1.2457, 1.7696], [2.4761, 1.4761, 1.2304], [2.7696, 1.2457, 1.2457], [2.2304, 1.4761, 1.4761], [2.0, 1.5239, 1.7543], [2.0, 1.7543, 1.5239], [2.0, 1.0, 1.0], [2.4636, 1.0, 1.0], [2.0, 1.0, 1.4636], [2.5364, 1.5364, 1.5364], [2.0, 1.4636, 1.0], [2.75, 1.25, 2.5], [2.25, 1.75, 2.5], [2.5, 1.25, 2.75], [2.5, 1.75, 2.25], [2.25, 1.5, 2.75], [2.75, 1.5, 2.25], [2.785, 1.785, 2.0], [2.215, 1.215, 2.0], [2.0, 1.215, 2.215], [2.0, 1.785, 2.785], [2.785, 1.0, 2.785], [2.215, 1.0, 2.215], [2.5239, 1.0, 2.7543], [2.7543, 1.0, 2.5239], [2.4761, 1.2304, 2.4761], [2.2457, 1.7696, 2.2457], [2.5239, 1.7543, 2.0], [2.7543, 1.5239, 2.0], [2.2457, 1.2457, 2.7696], [2.4761, 1.4761, 2.2304], [2.7696, 1.2457, 2.2457], [2.2304, 1.4761, 2.4761], [2.0, 1.5239, 2.7543], [2.0, 1.7543, 2.5239], [2.0, 1.0, 2.0], [2.4636, 1.0, 2.0], [2.0, 1.0, 2.4636], [2.5364, 1.5364, 2.5364], [2.0, 1.4636, 2.0], [2.75, 2.25, 1.5], [2.25, 2.75, 1.5], [2.5, 2.25, 1.75], [2.5, 2.75, 1.25], [2.25, 2.5, 1.75], [2.75, 2.5, 1.25], [2.785, 2.785, 1.0], [2.215, 2.215, 1.0], [2.0, 2.215, 1.215], [2.0, 2.785, 1.785], [2.785, 2.0, 1.785], [2.215, 2.0, 1.215], [2.5239, 2.0, 1.7543], [2.7543, 2.0, 1.5239], [2.4761, 2.2304, 1.4761], [2.2457, 2.7696, 1.2457], [2.5239, 2.7543, 1.0], [2.7543, 2.5239, 1.0], [2.2457, 2.2457, 1.7696], [2.4761, 2.4761, 1.2304], [2.7696, 2.2457, 1.2457], [2.2304, 2.4761, 1.4761], [2.0, 2.5239, 1.7543], [2.0, 2.7543, 1.5239], [2.0, 2.0, 1.0], [2.4636, 2.0, 1.0], [2.0, 2.0, 1.4636], [2.5364, 2.5364, 1.5364], [2.0, 2.4636, 1.0], [2.75, 2.25, 2.5], [2.25, 2.75, 2.5], [2.5, 2.25, 2.75], [2.5, 2.75, 2.25], [2.25, 2.5, 2.75], [2.75, 2.5, 2.25], [2.785, 2.785, 2.0], [2.215, 2.215, 2.0], [2.0, 2.215, 2.215], [2.0, 2.785, 2.785], [2.785, 2.0, 2.785], [2.215, 2.0, 2.215], [2.5239, 2.0, 2.7543], [2.7543, 2.0, 2.5239], [2.4761, 2.2304, 2.4761], [2.2457, 2.7696, 2.2457], [2.5239, 2.7543, 2.0], [2.7543, 2.5239, 2.0], [2.2457, 2.2457, 2.7696], [2.4761, 2.4761, 2.2304], [2.7696, 2.2457, 2.2457], [2.2304, 2.4761, 2.4761], [2.0, 2.5239, 2.7543], [2.0, 2.7543, 2.5239], [2.0, 2.0, 2.0], [2.4636, 2.0, 2.0], [2.0, 2.0, 2.4636], [2.5364, 2.5364, 2.5364], [2.0, 2.4636, 2.0], [2.75, 2.25, 3.5], [2.25, 2.75, 3.5], [2.5, 2.25, 3.75], [2.5, 2.75, 3.25], [2.25, 2.5, 3.75], [2.75, 2.5, 3.25], 
[2.785, 2.785, 3.0], [2.215, 2.215, 3.0], [2.0, 2.215, 3.215], [2.0, 2.785, 3.785], [2.785, 2.0, 3.785], [2.215, 2.0, 3.215], [2.5239, 2.0, 3.7543], [2.7543, 2.0, 3.5239], [2.4761, 2.2304, 3.4761], [2.2457, 2.7696, 3.2457], [2.5239, 2.7543, 3.0], [2.7543, 2.5239, 3.0], [2.2457, 2.2457, 3.7696], [2.4761, 2.4761, 3.2304], [2.7696, 2.2457, 3.2457], [2.2304, 2.4761, 3.4761], [2.0, 2.5239, 3.7543], [2.0, 2.7543, 3.5239], [2.0, 2.0, 3.0], [2.4636, 2.0, 3.0], [2.0, 2.0, 3.4636], [2.5364, 2.5364, 3.5364], [2.0, 2.4636, 3.0], [2.75, 3.25, 2.5], [2.25, 3.75, 2.5], [2.5, 3.25, 2.75], [2.5, 3.75, 2.25], [2.25, 3.5, 2.75], [2.75, 3.5, 2.25], [2.785, 3.785, 2.0], [2.215, 3.215, 2.0], [2.0, 3.215, 2.215], [2.0, 3.785, 2.785], [2.785, 3.0, 2.785], [2.215, 3.0, 2.215], [2.5239, 3.0, 2.7543], [2.7543, 3.0, 2.5239], [2.4761, 3.2304, 2.4761], [2.2457, 3.7696, 2.2457], [2.5239, 3.7543, 2.0], [2.7543, 3.5239, 2.0], [2.2457, 3.2457, 2.7696], [2.4761, 3.4761, 2.2304], [2.7696, 3.2457, 2.2457], [2.2304, 3.4761, 2.4761], [2.0, 3.5239, 2.7543], [2.0, 3.7543, 2.5239], [2.0, 3.0, 2.0], [2.4636, 3.0, 2.0], [2.0, 3.0, 2.4636], [2.5364, 3.5364, 2.5364], [2.0, 3.4636, 2.0], [3.75, 2.25, 2.5], [3.25, 2.75, 2.5], [3.5, 2.25, 2.75], [3.5, 2.75, 2.25], [3.25, 2.5, 2.75], [3.75, 2.5, 2.25], [3.785, 2.785, 2.0], [3.215, 2.215, 2.0], [3.0, 2.215, 2.215], [3.0, 2.785, 2.785], [3.785, 2.0, 2.785], [3.215, 2.0, 2.215], [3.5239, 2.0, 2.7543], [3.7543, 2.0, 2.5239], [3.4761, 2.2304, 2.4761], [3.2457, 2.7696, 2.2457], [3.5239, 2.7543, 2.0], [3.7543, 2.5239, 2.0], [3.2457, 2.2457, 2.7696], [3.4761, 2.4761, 2.2304], [3.7696, 2.2457, 2.2457], [3.2304, 2.4761, 2.4761], [3.0, 2.5239, 2.7543], [3.0, 2.7543, 2.5239], [3.0, 2.0, 2.0], [3.4636, 2.0, 2.0], [3.0, 2.0, 2.4636], [3.5364, 2.5364, 2.5364], [3.0, 2.4636, 2.0], [3.75, 3.25, 3.5], [3.25, 3.75, 3.5], [3.5, 3.25, 3.75], [3.5, 3.75, 3.25], [3.25, 3.5, 3.75], [3.75, 3.5, 3.25], [3.785, 3.785, 3.0], [3.215, 3.215, 3.0], [3.0, 3.215, 3.215], [3.0, 3.785, 3.785], [3.785, 3.0, 3.785], [3.215, 3.0, 3.215], [3.5239, 3.0, 3.7543], [3.7543, 3.0, 3.5239], [3.4761, 3.2304, 3.4761], [3.2457, 3.7696, 3.2457], [3.5239, 3.7543, 3.0], [3.7543, 3.5239, 3.0], [3.2457, 3.2457, 3.7696], [3.4761, 3.4761, 3.2304], [3.7696, 3.2457, 3.2457], [3.2304, 3.4761, 3.4761], [3.0, 3.5239, 3.7543], [3.0, 3.7543, 3.5239], [3.0, 3.0, 3.0], [3.4636, 3.0, 3.0], [3.0, 3.0, 3.4636], [3.5364, 3.5364, 3.5364], [3.0, 3.4636, 3.0]], 'SC': [[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]], 'order': [81, 33, 1, 65, 49, 17, 137, 129, 97, 105, 121, 113, 274, 285, 257, 363, 219, 213, 298, 193, 333, 225, 250, 243, 385, 461, 442, 401, 451, 85, 37, 5, 69, 53, 21, 141, 133, 101, 109, 125, 117, 278, 281, 261, 367, 223, 209, 302, 197, 329, 229, 254, 247, 389, 457, 446, 405, 455, 83, 35, 3, 67, 51, 19, 139, 131, 99, 107, 123, 115, 276, 287, 259, 361, 217, 215, 300, 195, 335, 227, 252, 241, 387, 463, 444, 403, 449, 82, 34, 2, 66, 50, 18, 138, 130, 98, 106, 122, 114, 273, 286, 258, 364, 220, 214, 297, 194, 334, 226, 249, 244, 386, 462, 441, 402, 452, 43, 93, 75, 10, 29, 58, 186, 177, 145, 157, 171, 161, 371, 379, 353, 265, 314, 306, 201, 289, 233, 321, 349, 341, 393, 425, 409, 433, 417, 87, 39, 7, 71, 55, 23, 143, 135, 103, 111, 127, 119, 280, 283, 263, 365, 221, 211, 304, 199, 331, 231, 256, 245, 391, 459, 448, 407, 453, 86, 38, 6, 70, 54, 22, 142, 134, 102, 110, 126, 118, 277, 282, 262, 368, 224, 210, 301, 198, 330, 230, 253, 248, 390, 458, 445, 406, 456, 47, 89, 79, 14, 25, 62, 190, 181, 149, 153, 175, 165, 375, 383, 357, 269, 318, 310, 
205, 293, 237, 325, 345, 337, 397, 429, 413, 437, 421, 84, 36, 4, 68, 52, 20, 140, 132, 100, 108, 124, 116, 275, 288, 260, 362, 218, 216, 299, 196, 336, 228, 251, 242, 388, 464, 443, 404, 450, 41, 95, 73, 12, 31, 60, 188, 179, 147, 159, 169, 163, 369, 377, 355, 267, 316, 308, 203, 291, 235, 323, 351, 343, 395, 427, 411, 435, 419, 44, 94, 76, 9, 30, 57, 185, 178, 146, 158, 172, 162, 372, 380, 354, 266, 313, 305, 202, 290, 234, 322, 350, 342, 394, 426, 410, 434, 418, 88, 40, 8, 72, 56, 24, 144, 136, 104, 112, 128, 120, 279, 284, 264, 366, 222, 212, 303, 200, 332, 232, 255, 246, 392, 460, 447, 408, 454, 45, 91, 77, 16, 27, 64, 192, 183, 151, 155, 173, 167, 373, 381, 359, 271, 320, 312, 207, 295, 239, 327, 347, 339, 399, 431, 415, 439, 423, 48, 90, 80, 13, 26, 61, 189, 182, 150, 154, 176, 166, 376, 384, 358, 270, 317, 309, 206, 294, 238, 326, 346, 338, 398, 430, 414, 438, 422, 42, 96, 74, 11, 32, 59, 187, 180, 148, 160, 170, 164, 370, 378, 356, 268, 315, 307, 204, 292, 236, 324, 352, 344, 396, 428, 412, 436, 420, 46, 92, 78, 15, 28, 63, 191, 184, 152, 156, 174, 168, 374, 382, 360, 272, 319, 311, 208, 296, 240, 328, 348, 340, 400, 432, 416, 440, 424], 'invSC': [[0.5, 0.0, 0.0], [0.0, 0.5, 0.0], [0.0, 0.0, 0.5]]} writeSCinfo(SCinfo, "SCinfo") haha=readSCinfo("SCinfo") print(haha['SC']) print(haha['invSC']) print(haha['SCref']) print(haha['SCpos']) print(haha['SCmat']) print(haha['invSCmat']) print(haha['order']) def readclus(file): if check(file): fin=open(file, "r") nclus=int(fin.readline()) clus=[] for i in range(nclus): item=[] fin.readline() npt=int(fin.readline()) for j in range(npt): item.append(list(map(float, fin.readline().split()))) clus.append(item) return clus else: print(str(file)+" not found :(") #writeclus(clus,"uniqueC") #print "\n".join(map(str, readclus("uniqueC"))) def readorb(file): if check(file): orbset=[] fin=open(file, "r") Norb=int(fin.readline()) for i in range(Norb): orb=[] fin.readline() nitem=int(fin.readline()) fin.readline() for j in range(nitem): item=[] npt=int(fin.readline()) clus=[] lpt=[] for k in range(npt): line=fin.readline() clus.append(list(map(float,line.split()))) item.append(clus) item.append(int(fin.readline())) item.append(int(fin.readline())) for k in range(npt): line=fin.readline() tmp=list(map(float,line.split())) tmp=list(map(int, tmp)) lpt.append([[tmp[0],tmp[1],tmp[2]],tmp[3]]) item.append(lpt) orb.append(item) orbset.append(orb) fin.close() return orbset else: print(str(file)+" not found :(") #test if False: orbset=[[[[[0.75, 0.25, 0.5]], 1, 1, [[[0.0, 0.0, 0.0], 1]]], [[[0.75, 0.5, 0.25]], 1, 2, [[[0.0, 0.0, 0.0], 6]]], [[[0.5, 0.25, -0.25]], 1, 3, [[[0.0, 0.0, -1.0], 3]]], [[[0.25, -0.25, -0.5]], 1, 4, [[[0.0, -1.0, -1.0], 2]]], [[[0.5, -0.25, 0.25]], 1, 5, [[[0.0, -1.0, 0.0], 4]]], [[[0.25, -0.5, -0.25]], 1, 6, [[[0.0, -1.0, -1.0], 5]]]],[[[[0.7696, 0.2457, 0.2457], [0.0, -0.215, -0.215]], 42, 1, [[[0.0, 0.0, 0.0], 21], [[0.0, -1.0, -1.0], 10]]], [[[0.5238999999999999, 0.0, -0.2457], [0.215, 0.0, 0.215]], 42, 3, [[[-0.0, 0.0, -1.0], 13], [[0.0, 0.0, 0.0], 12]]], [[[0.5238999999999999, -0.2457, 0.0], [0.215, 0.215, 0.0]], 42, 5, [[[-0.0, -1.0, 0.0], 17], [[0.0, 0.0, 0.0], 8]]], [[[-0.2457, 0.0, 0.5238999999999999], [0.215, 0.0, 0.215]], 42, 7, [[[-1.0, 0.0, -0.0], 14], [[0.0, 0.0, 0.0], 12]]], [[[0.2457, 0.2457, 0.7696], [-0.215, -0.215, 0.0]], 42, 9, [[[0.0, 0.0, 0.0], 19], [[-1.0, -1.0, 0.0], 7]]], [[[0.0, -0.2457, 0.5238999999999999], [0.0, 0.215, 0.215]], 42, 11, [[[0.0, -1.0, -0.0], 24], [[0.0, 0.0, 0.0], 9]]], [[[-0.7696, 
-0.5238999999999999, -0.5238999999999999], [0.0, -0.215, -0.215]], 42, 13, [[[-1.0, -1.0, -1.0], 22], [[0.0, -1.0, -1.0], 10]]], [[[-0.5238999999999999, -0.5238999999999999, -0.7696], [-0.215, -0.215, 0.0]], 42, 15, [[[-1.0, -1.0, -1.0], 20], [[-1.0, -1.0, 0.0], 7]]], [[[-0.5238999999999999, -0.7696, -0.5238999999999999], [-0.215, 0.0, -0.215]], 42, 17, [[[-1.0, -1.0, -1.0], 15], [[-1.0, 0.0, -1.0], 11]]], [[[-0.2457, 0.5238999999999999, 0.0], [0.215, 0.215, 0.0]], 42, 19, [[[-1.0, -0.0, 0.0], 18], [[0.0, 0.0, 0.0], 8]]], [[[0.2457, 0.7696, 0.2457], [-0.215, 0.0, -0.215]], 42, 21, [[[0.0, 0.0, 0.0], 16], [[-1.0, 0.0, -1.0], 11]]], [[[0.0, 0.5238999999999999, -0.2457], [0.0, 0.215, 0.215]], 42, 23, [[[0.0, -0.0, -1.0], 23], [[0.0, 0.0, 0.0], 9]]]]] print("\n".join(map(str,orbset))) writeorb(orbset,"test-orb") print("\n") print("\n".join(map(str,readorb("test-orb")))) #def read fit.ou def readfit(file): if check(file): counter=0 readflag=False for line in open(file): counter=counter+1 if counter==1: nstruc=list(map(int, line.split()))[1] fitlist=[0.0]*nstruc if len(line.split())>=1 and (line.split())[0]=="found": readflag=True continue if readflag: index=int((line.split())[0]) resl=float((line.split())[1]) fitlist[index-1]=resl print("Fit.our read successfully and length: "+str(len(fitlist))) return fitlist else: print(str(file)+" not found :(") #test: if False: print(readfit("fit.out-mu1"))
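# --- Editor's addendum: a minimal round-trip check for the matrix helpers
# defined above (write2dmat prepends a row count that read2dmat strips again).
# The file name is a throwaway placeholder; the `if False:` guard mirrors the
# inline-test convention used throughout this module.
if False:
    A = [[1.0, 2.0], [3.0, 4.0]]
    write2dmat(A, "A.mat")
    print(read2dmat("A.mat"))  # [[1.0, 2.0], [3.0, 4.0]]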
65.537662
12,884
0.507411
0
0
0
0
0
0
0
0
2,108
0.083545
6a60c251c96da7b05351011b63ba88125eca7fb7
9,790
py
Python
sdk/python/pulumi_azure_native/storage/storage_account_static_website.py
sebtelko/pulumi-azure-native
711ec021b5c73da05611c56c8a35adb0ce3244e4
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/storage/storage_account_static_website.py
sebtelko/pulumi-azure-native
711ec021b5c73da05611c56c8a35adb0ce3244e4
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/storage/storage_account_static_website.py
sebtelko/pulumi-azure-native
711ec021b5c73da05611c56c8a35adb0ce3244e4
[ "Apache-2.0" ]
null
null
null
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities

__all__ = ['StorageAccountStaticWebsiteArgs', 'StorageAccountStaticWebsite']

@pulumi.input_type
class StorageAccountStaticWebsiteArgs:
    def __init__(__self__, *,
                 account_name: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 error404_document: Optional[pulumi.Input[str]] = None,
                 index_document: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a StorageAccountStaticWebsite resource.
        :param pulumi.Input[str] account_name: The name of the storage account within the specified resource group.
        :param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
        :param pulumi.Input[str] error404_document: The absolute path to a custom webpage that should be used when a request is made which does not correspond to an existing file.
        :param pulumi.Input[str] index_document: The webpage that Azure Storage serves for requests to the root of a website or any sub-folder. For example, 'index.html'. The value is case-sensitive.
        """
        pulumi.set(__self__, "account_name", account_name)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        if error404_document is not None:
            pulumi.set(__self__, "error404_document", error404_document)
        if index_document is not None:
            pulumi.set(__self__, "index_document", index_document)

    @property
    @pulumi.getter(name="accountName")
    def account_name(self) -> pulumi.Input[str]:
        """
        The name of the storage account within the specified resource group.
        """
        return pulumi.get(self, "account_name")

    @account_name.setter
    def account_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "account_name", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group within the user's subscription. The name is case insensitive.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="error404Document")
    def error404_document(self) -> Optional[pulumi.Input[str]]:
        """
        The absolute path to a custom webpage that should be used when a request is made which does not correspond to an existing file.
        """
        return pulumi.get(self, "error404_document")

    @error404_document.setter
    def error404_document(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "error404_document", value)

    @property
    @pulumi.getter(name="indexDocument")
    def index_document(self) -> Optional[pulumi.Input[str]]:
        """
        The webpage that Azure Storage serves for requests to the root of a website or any sub-folder. For example, 'index.html'. The value is case-sensitive.
        """
        return pulumi.get(self, "index_document")

    @index_document.setter
    def index_document(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "index_document", value)


class StorageAccountStaticWebsite(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 error404_document: Optional[pulumi.Input[str]] = None,
                 index_document: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Enables the static website feature of a storage account.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_name: The name of the storage account within the specified resource group.
        :param pulumi.Input[str] error404_document: The absolute path to a custom webpage that should be used when a request is made which does not correspond to an existing file.
        :param pulumi.Input[str] index_document: The webpage that Azure Storage serves for requests to the root of a website or any sub-folder. For example, 'index.html'. The value is case-sensitive.
        :param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: StorageAccountStaticWebsiteArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Enables the static website feature of a storage account.

        :param str resource_name: The name of the resource.
        :param StorageAccountStaticWebsiteArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        resource_args, opts = _utilities.get_resource_args_opts(StorageAccountStaticWebsiteArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 error404_document: Optional[pulumi.Input[str]] = None,
                 index_document: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = StorageAccountStaticWebsiteArgs.__new__(StorageAccountStaticWebsiteArgs)

            if account_name is None and not opts.urn:
                raise TypeError("Missing required property 'account_name'")
            __props__.__dict__["account_name"] = account_name
            __props__.__dict__["error404_document"] = error404_document
            __props__.__dict__["index_document"] = index_document
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["container_name"] = None
        super(StorageAccountStaticWebsite, __self__).__init__(
            'azure-native:storage:StorageAccountStaticWebsite',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'StorageAccountStaticWebsite':
        """
        Get an existing StorageAccountStaticWebsite resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = StorageAccountStaticWebsiteArgs.__new__(StorageAccountStaticWebsiteArgs)

        __props__.__dict__["container_name"] = None
        __props__.__dict__["error404_document"] = None
        __props__.__dict__["index_document"] = None
        return StorageAccountStaticWebsite(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> pulumi.Output[str]:
        """
        The name of the container to upload blobs to.
        """
        return pulumi.get(self, "container_name")

    @property
    @pulumi.getter(name="error404Document")
    def error404_document(self) -> pulumi.Output[Optional[str]]:
        """
        The absolute path to a custom webpage that should be used when a request is made which does not correspond to an existing file.
        """
        return pulumi.get(self, "error404_document")

    @property
    @pulumi.getter(name="indexDocument")
    def index_document(self) -> pulumi.Output[Optional[str]]:
        """
        The webpage that Azure Storage serves for requests to the root of a website or any sub-folder. For example, 'index.html'. The value is case-sensitive.
        """
        return pulumi.get(self, "index_document")
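A usage sketch for the resource above, assuming the package is importable as pulumi_azure_native (matching the repo path); the stack, account, resource-group, and document names are hypothetical placeholders.

import pulumi
import pulumi_azure_native as azure_native

# Hypothetical names; substitute real ones from your stack.
website = azure_native.storage.StorageAccountStaticWebsite(
    "staticWebsite",
    account_name="mystorageacct",
    resource_group_name="my-rg",
    index_document="index.html",
    error404_document="404.html",
)

# The provider reports the blob container that hosts the site content.
pulumi.export("web_container", website.container_name)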
48.226601
199
0.674157
9,371
0.957201
0
0
7,041
0.719203
0
0
4,329
0.442186
6a61c6ef3ad58f9b8003931de1870b0f5ad404c7
1,247
py
Python
python/example_code/s3/s3-python-example-get-bucket-policy.py
onehitcombo/aws-doc-sdk-examples
03e2e0c5dee75c5decbbb99e849c51417521fd82
[ "Apache-2.0" ]
3
2021-01-19T20:23:17.000Z
2021-01-19T21:38:59.000Z
python/example_code/s3/s3-python-example-get-bucket-policy.py
onehitcombo/aws-doc-sdk-examples
03e2e0c5dee75c5decbbb99e849c51417521fd82
[ "Apache-2.0" ]
null
null
null
python/example_code/s3/s3-python-example-get-bucket-policy.py
onehitcombo/aws-doc-sdk-examples
03e2e0c5dee75c5decbbb99e849c51417521fd82
[ "Apache-2.0" ]
2
2019-12-27T13:58:00.000Z
2020-05-21T18:35:40.000Z
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

import boto3

# Create an S3 client
s3 = boto3.client('s3')

# Call to S3 to retrieve the policy for the given bucket
result = s3.get_bucket_policy(Bucket='my-bucket')
print(result)

# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[s3-python-example-get-bucket-policy.py demonstrates how to list the Amazon S3 Buckets in your account.]
# snippet-keyword:[Python]
# snippet-keyword:[AWS SDK for Python (Boto3)]
# snippet-keyword:[Code Sample]
# snippet-keyword:[Amazon S3]
# snippet-service:[s3]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2018-06-25]
# snippet-sourceauthor:[jschwarzwalder (AWS)]
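A hedged variant of the sample above: get_bucket_policy raises botocore's ClientError when the bucket has no policy attached, so real callers usually wrap the call; 'my-bucket' remains a placeholder bucket name.

import boto3
from botocore.exceptions import ClientError

s3 = boto3.client('s3')
try:
    result = s3.get_bucket_policy(Bucket='my-bucket')
    print(result['Policy'])  # the policy document arrives as a JSON string
except ClientError as error:
    # e.g. 'NoSuchBucketPolicy' when no policy is attached
    print(error.response['Error']['Code'])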
35.628571
133
0.735365
0
0
0
0
0
0
0
0
1,119
0.897354
6a61f1e1f810996e1c76609bf6e7fcc907c4da57
2,020
py
Python
lang/py/aingle/test/gen_interop_data.py
AIngleLab/aae
6e95f89fad60e62bb5305afe97c72f3278d8e04b
[ "Apache-2.0" ]
null
null
null
lang/py/aingle/test/gen_interop_data.py
AIngleLab/aae
6e95f89fad60e62bb5305afe97c72f3278d8e04b
[ "Apache-2.0" ]
null
null
null
lang/py/aingle/test/gen_interop_data.py
AIngleLab/aae
6e95f89fad60e62bb5305afe97c72f3278d8e04b
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3

##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys

import aingle.codecs
import aingle.datafile
import aingle.io
import aingle.schema

NULL_CODEC = "null"
CODECS_TO_VALIDATE = aingle.codecs.KNOWN_CODECS.keys()

DATUM = {
    "intField": 12,
    "longField": 15234324,
    "stringField": "hey",
    "boolField": True,
    "floatField": 1234.0,
    "doubleField": -1234.0,
    "bytesField": b"12312adf",
    "nullField": None,
    "arrayField": [5.0, 0.0, 12.0],
    "mapField": {"a": {"label": "a"}, "bee": {"label": "cee"}},
    "unionField": 12.0,
    "enumField": "C",
    "fixedField": b"1019181716151413",
    "recordField": {"label": "blah", "children": [{"label": "inner", "children": []}]},
}


def generate(schema_path, output_path):
    with open(schema_path) as schema_file:
        interop_schema = aingle.schema.parse(schema_file.read())
    for codec in CODECS_TO_VALIDATE:
        filename = output_path
        if codec != NULL_CODEC:
            base, ext = os.path.splitext(output_path)
            filename = base + "_" + codec + ext
        with aingle.datafile.DataFileWriter(open(filename, "wb"), aingle.io.DatumWriter(), interop_schema, codec=codec) as dfw:
            dfw.append(DATUM)


if __name__ == "__main__":
    generate(sys.argv[1], sys.argv[2])
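A minimal invocation sketch for the generator above; both paths are hypothetical. Per generate(), one file is written per known codec, with the codec name suffixed before the extension for every codec other than "null".

# Hypothetical paths; the real interop schema ships with the project.
import subprocess

subprocess.run(
    ["python3", "gen_interop_data.py", "interop.ain", "/tmp/interop.aingle"],
    check=True,  # fail loudly if generation errors out
)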
31.5625
127
0.681188
0
0
0
0
0
0
0
0
1,053
0.521287
6a630004921c5a5ff2ec4e4b2d0a96b0bf000baa
897
py
Python
data_io/util/value_blob_erosion.py
Rekrau/PyGreentea
457d7dc5be12b15c3c7663ceaf6d74301de56e43
[ "BSD-2-Clause" ]
null
null
null
data_io/util/value_blob_erosion.py
Rekrau/PyGreentea
457d7dc5be12b15c3c7663ceaf6d74301de56e43
[ "BSD-2-Clause" ]
4
2016-04-22T15:39:21.000Z
2016-11-15T21:23:58.000Z
data_io/util/value_blob_erosion.py
Rekrau/PyGreentea
457d7dc5be12b15c3c7663ceaf6d74301de56e43
[ "BSD-2-Clause" ]
4
2017-05-12T00:17:55.000Z
2019-07-01T19:23:32.000Z
import numpy as np
from scipy import ndimage


def erode_value_blobs(array, steps=1, values_to_ignore=tuple(), new_value=0):
    unique_values = list(np.unique(array))
    # np.bool was removed in NumPy 1.24; the builtin bool is the portable spelling.
    all_entries_to_keep = np.zeros(shape=array.shape, dtype=bool)
    for unique_value in unique_values:
        entries_of_this_value = array == unique_value
        if unique_value in values_to_ignore:
            all_entries_to_keep = np.logical_or(entries_of_this_value, all_entries_to_keep)
        else:
            eroded_unique_indicator = ndimage.binary_erosion(entries_of_this_value, iterations=steps)
            all_entries_to_keep = np.logical_or(eroded_unique_indicator, all_entries_to_keep)
    result = array * all_entries_to_keep
    if new_value != 0:
        eroded_entries = np.logical_not(all_entries_to_keep)
        new_values = new_value * eroded_entries
        result += new_values
    return result
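A small usage sketch under scipy's default 4-connected structuring element; the toy array and the counts in the comments are illustrative only.

import numpy as np

labels = np.zeros((7, 7), dtype=np.int32)
labels[1:6, 1:6] = 3  # a 5x5 blob of label 3
eroded = erode_value_blobs(labels, steps=1, new_value=0)
print(np.count_nonzero(eroded == 3))  # 9: one erosion step shrinks the 5x5 blob to 3x3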
42.714286
101
0.733556
0
0
0
0
0
0
0
0
0
0
6a631c95edefbd6ccab71b999ffa359886535e5b
32,032
py
Python
astropy/units/tests/test_logarithmic.py
EnjoyLifeFund/macHighSierra-py36-pkgs
5668b5785296b314ea1321057420bcd077dba9ea
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
3
2018-03-20T15:09:16.000Z
2021-05-27T11:17:33.000Z
astropy/units/tests/test_logarithmic.py
EnjoyLifeFund/macHighSierra-py36-pkgs
5668b5785296b314ea1321057420bcd077dba9ea
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
null
null
null
astropy/units/tests/test_logarithmic.py
EnjoyLifeFund/macHighSierra-py36-pkgs
5668b5785296b314ea1321057420bcd077dba9ea
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
null
null
null
# coding: utf-8 # Licensed under a 3-clause BSD style license - see LICENSE.rst """ Test the Logarithmic Units and Quantities """ from __future__ import (absolute_import, unicode_literals, division, print_function) from ...extern import six from ...extern.six.moves import zip import pickle import itertools import pytest import numpy as np from numpy.testing.utils import assert_allclose from ...tests.helper import assert_quantity_allclose from ... import units as u, constants as c lu_units = [u.dex, u.mag, u.decibel] lu_subclasses = [u.DexUnit, u.MagUnit, u.DecibelUnit] lq_subclasses = [u.Dex, u.Magnitude, u.Decibel] pu_sample = (u.dimensionless_unscaled, u.m, u.g/u.s**2, u.Jy) class TestLogUnitCreation(object): def test_logarithmic_units(self): """Check logarithmic units are set up correctly.""" assert u.dB.to(u.dex) == 0.1 assert u.dex.to(u.mag) == -2.5 assert u.mag.to(u.dB) == -4 @pytest.mark.parametrize('lu_unit, lu_cls', zip(lu_units, lu_subclasses)) def test_callable_units(self, lu_unit, lu_cls): assert isinstance(lu_unit, u.UnitBase) assert callable(lu_unit) assert lu_unit._function_unit_class is lu_cls @pytest.mark.parametrize('lu_unit', lu_units) def test_equality_to_normal_unit_for_dimensionless(self, lu_unit): lu = lu_unit() assert lu == lu._default_function_unit # eg, MagUnit() == u.mag assert lu._default_function_unit == lu # and u.mag == MagUnit() @pytest.mark.parametrize('lu_unit, physical_unit', itertools.product(lu_units, pu_sample)) def test_call_units(self, lu_unit, physical_unit): """Create a LogUnit subclass using the callable unit and physical unit, and do basic check that output is right.""" lu1 = lu_unit(physical_unit) assert lu1.physical_unit == physical_unit assert lu1.function_unit == lu1._default_function_unit def test_call_invalid_unit(self): with pytest.raises(TypeError): u.mag([]) with pytest.raises(ValueError): u.mag(u.mag()) @pytest.mark.parametrize('lu_cls, physical_unit', itertools.product( lu_subclasses + [u.LogUnit], pu_sample)) def test_subclass_creation(self, lu_cls, physical_unit): """Create a LogUnit subclass object for given physical unit, and do basic check that output is right.""" lu1 = lu_cls(physical_unit) assert lu1.physical_unit == physical_unit assert lu1.function_unit == lu1._default_function_unit lu2 = lu_cls(physical_unit, function_unit=2*lu1._default_function_unit) assert lu2.physical_unit == physical_unit assert lu2.function_unit == u.Unit(2*lu2._default_function_unit) with pytest.raises(ValueError): lu_cls(physical_unit, u.m) def test_predefined_magnitudes(): assert_quantity_allclose((-21.1*u.STmag).physical, 1.*u.erg/u.cm**2/u.s/u.AA) assert_quantity_allclose((-48.6*u.ABmag).physical, 1.*u.erg/u.cm**2/u.s/u.Hz) assert_quantity_allclose((0*u.M_bol).physical, c.L_bol0) assert_quantity_allclose((0*u.m_bol).physical, c.L_bol0/(4.*np.pi*(10.*c.pc)**2)) def test_predefined_reinitialisation(): assert u.mag('ST') == u.STmag assert u.mag('AB') == u.ABmag assert u.mag('Bol') == u.M_bol assert u.mag('bol') == u.m_bol def test_predefined_string_roundtrip(): """Ensure roundtripping; see #5015""" with u.magnitude_zero_points.enable(): assert u.Unit(u.STmag.to_string()) == u.STmag assert u.Unit(u.ABmag.to_string()) == u.ABmag assert u.Unit(u.M_bol.to_string()) == u.M_bol assert u.Unit(u.m_bol.to_string()) == u.m_bol def test_inequality(): """Check __ne__ works (regresssion for #5342).""" lu1 = u.mag(u.Jy) lu2 = u.dex(u.Jy) lu3 = u.mag(u.Jy**2) lu4 = lu3 - lu1 assert lu1 != lu2 assert lu1 != lu3 assert lu1 == lu4 class TestLogUnitStrings(object): def 
test_str(self): """Do some spot checks that str, repr, etc. work as expected.""" lu1 = u.mag(u.Jy) assert str(lu1) == 'mag(Jy)' assert repr(lu1) == 'Unit("mag(Jy)")' assert lu1.to_string('generic') == 'mag(Jy)' with pytest.raises(ValueError): lu1.to_string('fits') lu2 = u.dex() assert str(lu2) == 'dex' assert repr(lu2) == 'Unit("dex(1)")' assert lu2.to_string() == 'dex(1)' lu3 = u.MagUnit(u.Jy, function_unit=2*u.mag) assert str(lu3) == '2 mag(Jy)' assert repr(lu3) == 'MagUnit("Jy", unit="2 mag")' assert lu3.to_string() == '2 mag(Jy)' lu4 = u.mag(u.ct) assert lu4.to_string('generic') == 'mag(ct)' assert lu4.to_string('latex') == ('$\\mathrm{mag}$$\\mathrm{\\left( ' '\\mathrm{ct} \\right)}$') assert lu4._repr_latex_() == lu4.to_string('latex') class TestLogUnitConversion(object): @pytest.mark.parametrize('lu_unit, physical_unit', itertools.product(lu_units, pu_sample)) def test_physical_unit_conversion(self, lu_unit, physical_unit): """Check various LogUnit subclasses are equivalent and convertible to their non-log counterparts.""" lu1 = lu_unit(physical_unit) assert lu1.is_equivalent(physical_unit) assert lu1.to(physical_unit, 0.) == 1. assert physical_unit.is_equivalent(lu1) assert physical_unit.to(lu1, 1.) == 0. pu = u.Unit(8.*physical_unit) assert lu1.is_equivalent(physical_unit) assert lu1.to(pu, 0.) == 0.125 assert pu.is_equivalent(lu1) assert_allclose(pu.to(lu1, 0.125), 0., atol=1.e-15) # Check we round-trip. value = np.linspace(0., 10., 6) assert_allclose(pu.to(lu1, lu1.to(pu, value)), value, atol=1.e-15) # And that we're not just returning True all the time. pu2 = u.g assert not lu1.is_equivalent(pu2) with pytest.raises(u.UnitsError): lu1.to(pu2) assert not pu2.is_equivalent(lu1) with pytest.raises(u.UnitsError): pu2.to(lu1) @pytest.mark.parametrize('lu_unit', lu_units) def test_container_unit_conversion(self, lu_unit): """Check that conversion to logarithmic units (u.mag, u.dB, u.dex) is only possible when the physical unit is dimensionless.""" values = np.linspace(0., 10., 6) lu1 = lu_unit(u.dimensionless_unscaled) assert lu1.is_equivalent(lu1.function_unit) assert_allclose(lu1.to(lu1.function_unit, values), values) lu2 = lu_unit(u.Jy) assert not lu2.is_equivalent(lu2.function_unit) with pytest.raises(u.UnitsError): lu2.to(lu2.function_unit, values) @pytest.mark.parametrize( 'flu_unit, tlu_unit, physical_unit', itertools.product(lu_units, lu_units, pu_sample)) def test_subclass_conversion(self, flu_unit, tlu_unit, physical_unit): """Check various LogUnit subclasses are equivalent and convertible to each other if they correspond to equivalent physical units.""" values = np.linspace(0., 10., 6) flu = flu_unit(physical_unit) tlu = tlu_unit(physical_unit) assert flu.is_equivalent(tlu) assert_allclose(flu.to(tlu), flu.function_unit.to(tlu.function_unit)) assert_allclose(flu.to(tlu, values), values * flu.function_unit.to(tlu.function_unit)) tlu2 = tlu_unit(u.Unit(100.*physical_unit)) assert flu.is_equivalent(tlu2) # Check that we round-trip. 
assert_allclose(flu.to(tlu2, tlu2.to(flu, values)), values, atol=1.e-15) tlu3 = tlu_unit(physical_unit.to_system(u.si)[0]) assert flu.is_equivalent(tlu3) assert_allclose(flu.to(tlu3, tlu3.to(flu, values)), values, atol=1.e-15) tlu4 = tlu_unit(u.g) assert not flu.is_equivalent(tlu4) with pytest.raises(u.UnitsError): flu.to(tlu4, values) def test_unit_decomposition(self): lu = u.mag(u.Jy) assert lu.decompose() == u.mag(u.Jy.decompose()) assert lu.decompose().physical_unit.bases == [u.kg, u.s] assert lu.si == u.mag(u.Jy.si) assert lu.si.physical_unit.bases == [u.kg, u.s] assert lu.cgs == u.mag(u.Jy.cgs) assert lu.cgs.physical_unit.bases == [u.g, u.s] def test_unit_multiple_possible_equivalencies(self): lu = u.mag(u.Jy) assert lu.is_equivalent(pu_sample) class TestLogUnitArithmetic(object): def test_multiplication_division(self): """Check that multiplication/division with other units is only possible when the physical unit is dimensionless, and that this turns the unit into a normal one.""" lu1 = u.mag(u.Jy) with pytest.raises(u.UnitsError): lu1 * u.m with pytest.raises(u.UnitsError): u.m * lu1 with pytest.raises(u.UnitsError): lu1 / lu1 for unit in (u.dimensionless_unscaled, u.m, u.mag, u.dex): with pytest.raises(u.UnitsError): lu1 / unit lu2 = u.mag(u.dimensionless_unscaled) with pytest.raises(u.UnitsError): lu2 * lu1 with pytest.raises(u.UnitsError): lu2 / lu1 # But dimensionless_unscaled can be cancelled. assert lu2 / lu2 == u.dimensionless_unscaled # With dimensionless, normal units are OK, but we return a plain unit. tf = lu2 * u.m tr = u.m * lu2 for t in (tf, tr): assert not isinstance(t, type(lu2)) assert t == lu2.function_unit * u.m with u.set_enabled_equivalencies(u.logarithmic()): with pytest.raises(u.UnitsError): t.to(lu2.physical_unit) # Now we essentially have a LogUnit with a prefactor of 100, # so should be equivalent again. t = tf / u.cm with u.set_enabled_equivalencies(u.logarithmic()): assert t.is_equivalent(lu2.function_unit) assert_allclose(t.to(u.dimensionless_unscaled, np.arange(3.)/100.), lu2.to(lu2.physical_unit, np.arange(3.))) # If we effectively remove lu1, a normal unit should be returned. t2 = tf / lu2 assert not isinstance(t2, type(lu2)) assert t2 == u.m t3 = tf / lu2.function_unit assert not isinstance(t3, type(lu2)) assert t3 == u.m # For completeness, also ensure non-sensical operations fail with pytest.raises(TypeError): lu1 * object() with pytest.raises(TypeError): slice(None) * lu1 with pytest.raises(TypeError): lu1 / [] with pytest.raises(TypeError): 1 / lu1 @pytest.mark.parametrize('power', (2, 0.5, 1, 0)) def test_raise_to_power(self, power): """Check that raising LogUnits to some power is only possible when the physical unit is dimensionless, and that conversion is turned off when the resulting logarithmic unit (such as mag**2) is incompatible.""" lu1 = u.mag(u.Jy) if power == 0: assert lu1 ** power == u.dimensionless_unscaled elif power == 1: assert lu1 ** power == lu1 else: with pytest.raises(u.UnitsError): lu1 ** power # With dimensionless, though, it works, but returns a normal unit. 
lu2 = u.mag(u.dimensionless_unscaled) t = lu2**power if power == 0: assert t == u.dimensionless_unscaled elif power == 1: assert t == lu2 else: assert not isinstance(t, type(lu2)) assert t == lu2.function_unit**power # also check we roundtrip t2 = t**(1./power) assert t2 == lu2.function_unit with u.set_enabled_equivalencies(u.logarithmic()): assert_allclose(t2.to(u.dimensionless_unscaled, np.arange(3.)), lu2.to(lu2.physical_unit, np.arange(3.))) @pytest.mark.parametrize('other', pu_sample) def test_addition_subtraction_to_normal_units_fails(self, other): lu1 = u.mag(u.Jy) with pytest.raises(u.UnitsError): lu1 + other with pytest.raises(u.UnitsError): lu1 - other with pytest.raises(u.UnitsError): other - lu1 def test_addition_subtraction_to_non_units_fails(self): lu1 = u.mag(u.Jy) with pytest.raises(TypeError): lu1 + 1. with pytest.raises(TypeError): lu1 - [1., 2., 3.] @pytest.mark.parametrize( 'other', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m), u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag))) def test_addition_subtraction(self, other): """Check physical units are changed appropriately""" lu1 = u.mag(u.Jy) other_pu = getattr(other, 'physical_unit', u.dimensionless_unscaled) lu_sf = lu1 + other assert lu_sf.is_equivalent(lu1.physical_unit * other_pu) lu_sr = other + lu1 assert lu_sr.is_equivalent(lu1.physical_unit * other_pu) lu_df = lu1 - other assert lu_df.is_equivalent(lu1.physical_unit / other_pu) lu_dr = other - lu1 assert lu_dr.is_equivalent(other_pu / lu1.physical_unit) def test_complicated_addition_subtraction(self): """for fun, a more complicated example of addition and subtraction""" dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2)) lu_dm = u.mag(dm0) lu_absST = u.STmag - lu_dm assert lu_absST.is_equivalent(u.erg/u.s/u.AA) def test_neg_pos(self): lu1 = u.mag(u.Jy) neg_lu = -lu1 assert neg_lu != lu1 assert neg_lu.physical_unit == u.Jy**-1 assert -neg_lu == lu1 pos_lu = +lu1 assert pos_lu is not lu1 assert pos_lu == lu1 def test_pickle(): lu1 = u.dex(u.cm/u.s**2) s = pickle.dumps(lu1) lu2 = pickle.loads(s) assert lu1 == lu2 def test_hashable(): lu1 = u.dB(u.mW) lu2 = u.dB(u.m) lu3 = u.dB(u.mW) assert hash(lu1) != hash(lu2) assert hash(lu1) == hash(lu3) luset = {lu1, lu2, lu3} assert len(luset) == 2 class TestLogQuantityCreation(object): @pytest.mark.parametrize('lq, lu', zip(lq_subclasses + [u.LogQuantity], lu_subclasses + [u.LogUnit])) def test_logarithmic_quantities(self, lq, lu): """Check logarithmic quantities are all set up correctly""" assert lq._unit_class == lu assert type(lu()._quantity_class(1.)) is lq @pytest.mark.parametrize('lq_cls, physical_unit', itertools.product(lq_subclasses, pu_sample)) def test_subclass_creation(self, lq_cls, physical_unit): """Create LogQuantity subclass objects for some physical units, and basic check on transformations""" value = np.arange(1., 10.) 
log_q = lq_cls(value * physical_unit) assert log_q.unit.physical_unit == physical_unit assert log_q.unit.function_unit == log_q.unit._default_function_unit assert_allclose(log_q.physical.value, value) with pytest.raises(ValueError): lq_cls(value, physical_unit) @pytest.mark.parametrize( 'unit', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m), u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag), u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag))) def test_different_units(self, unit): q = u.Magnitude(1.23, unit) assert q.unit.function_unit == getattr(unit, 'function_unit', unit) assert q.unit.physical_unit is getattr(unit, 'physical_unit', u.dimensionless_unscaled) @pytest.mark.parametrize('value, unit', ( (1.*u.mag(u.Jy), None), (1.*u.dex(u.Jy), None), (1.*u.mag(u.W/u.m**2/u.Hz), u.mag(u.Jy)), (1.*u.dex(u.W/u.m**2/u.Hz), u.mag(u.Jy)))) def test_function_values(self, value, unit): lq = u.Magnitude(value, unit) assert lq == value assert lq.unit.function_unit == u.mag assert lq.unit.physical_unit == getattr(unit, 'physical_unit', value.unit.physical_unit) @pytest.mark.parametrize( 'unit', (u.mag(), u.mag(u.Jy), u.mag(u.m), u.MagUnit('', 2.*u.mag), u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag))) def test_indirect_creation(self, unit): q1 = 2.5 * unit assert isinstance(q1, u.Magnitude) assert q1.value == 2.5 assert q1.unit == unit pv = 100. * unit.physical_unit q2 = unit * pv assert q2.unit == unit assert q2.unit.physical_unit == pv.unit assert q2.to_value(unit.physical_unit) == 100. assert (q2._function_view / u.mag).to_value(1) == -5. q3 = unit / 0.4 assert q3 == q1 def test_from_view(self): # Cannot view a physical quantity as a function quantity, since the # values would change. q = [100., 1000.] * u.cm/u.s**2 with pytest.raises(TypeError): q.view(u.Dex) # But fine if we have the right magnitude. q = [2., 3.] * u.dex lq = q.view(u.Dex) assert isinstance(lq, u.Dex) assert lq.unit.physical_unit == u.dimensionless_unscaled assert np.all(q == lq) def test_using_quantity_class(self): """Check that we can use Quantity if we have subok=True""" # following issue #5851 lu = u.dex(u.AA) with pytest.raises(u.UnitTypeError): u.Quantity(1., lu) q = u.Quantity(1., lu, subok=True) assert type(q) is lu._quantity_class def test_conversion_to_and_from_physical_quantities(): """Ensures we can convert from regular quantities.""" mst = [10., 12., 14.] * u.STmag flux_lambda = mst.physical mst_roundtrip = flux_lambda.to(u.STmag) # check we return a logquantity; see #5178. assert isinstance(mst_roundtrip, u.Magnitude) assert mst_roundtrip.unit == mst.unit assert_allclose(mst_roundtrip.value, mst.value) wave = [4956.8, 4959.55, 4962.3] * u.AA flux_nu = mst.to(u.Jy, equivalencies=u.spectral_density(wave)) mst_roundtrip2 = flux_nu.to(u.STmag, u.spectral_density(wave)) assert isinstance(mst_roundtrip2, u.Magnitude) assert mst_roundtrip2.unit == mst.unit assert_allclose(mst_roundtrip2.value, mst.value) def test_quantity_decomposition(): lq = 10.*u.mag(u.Jy) assert lq.decompose() == lq assert lq.decompose().unit.physical_unit.bases == [u.kg, u.s] assert lq.si == lq assert lq.si.unit.physical_unit.bases == [u.kg, u.s] assert lq.cgs == lq assert lq.cgs.unit.physical_unit.bases == [u.g, u.s] class TestLogQuantityViews(object): def setup(self): self.lq = u.Magnitude(np.arange(10.) * u.Jy) self.lq2 = u.Magnitude(np.arange(5.)) def test_value_view(self): lq_value = self.lq.value assert type(lq_value) is np.ndarray lq_value[2] = -1. 
assert np.all(self.lq.value == lq_value) def test_function_view(self): lq_fv = self.lq._function_view assert type(lq_fv) is u.Quantity assert lq_fv.unit is self.lq.unit.function_unit lq_fv[3] = -2. * lq_fv.unit assert np.all(self.lq.value == lq_fv.value) def test_quantity_view(self): # Cannot view as Quantity, since the unit cannot be represented. with pytest.raises(TypeError): self.lq.view(u.Quantity) # But a dimensionless one is fine. q2 = self.lq2.view(u.Quantity) assert q2.unit is u.mag assert np.all(q2.value == self.lq2.value) lq3 = q2.view(u.Magnitude) assert type(lq3.unit) is u.MagUnit assert lq3.unit.physical_unit == u.dimensionless_unscaled assert np.all(lq3 == self.lq2) class TestLogQuantitySlicing(object): def test_item_get_and_set(self): lq1 = u.Magnitude(np.arange(1., 11.)*u.Jy) assert lq1[9] == u.Magnitude(10.*u.Jy) lq1[2] = 100.*u.Jy assert lq1[2] == u.Magnitude(100.*u.Jy) with pytest.raises(u.UnitsError): lq1[2] = 100.*u.m with pytest.raises(u.UnitsError): lq1[2] = 100.*u.mag with pytest.raises(u.UnitsError): lq1[2] = u.Magnitude(100.*u.m) assert lq1[2] == u.Magnitude(100.*u.Jy) def test_slice_get_and_set(self): lq1 = u.Magnitude(np.arange(1., 10.)*u.Jy) lq1[2:4] = 100.*u.Jy assert np.all(lq1[2:4] == u.Magnitude(100.*u.Jy)) with pytest.raises(u.UnitsError): lq1[2:4] = 100.*u.m with pytest.raises(u.UnitsError): lq1[2:4] = 100.*u.mag with pytest.raises(u.UnitsError): lq1[2:4] = u.Magnitude(100.*u.m) assert np.all(lq1[2] == u.Magnitude(100.*u.Jy)) class TestLogQuantityArithmetic(object): def test_multiplication_division(self): """Check that multiplication/division with other quantities is only possible when the physical unit is dimensionless, and that this turns the result into a normal quantity.""" lq = u.Magnitude(np.arange(1., 11.)*u.Jy) with pytest.raises(u.UnitsError): lq * (1.*u.m) with pytest.raises(u.UnitsError): (1.*u.m) * lq with pytest.raises(u.UnitsError): lq / lq for unit in (u.m, u.mag, u.dex): with pytest.raises(u.UnitsError): lq / unit lq2 = u.Magnitude(np.arange(1, 11.)) with pytest.raises(u.UnitsError): lq2 * lq with pytest.raises(u.UnitsError): lq2 / lq with pytest.raises(u.UnitsError): lq / lq2 # but dimensionless_unscaled can be cancelled r = lq2 / u.Magnitude(2.) assert r.unit == u.dimensionless_unscaled assert np.all(r.value == lq2.value/2.) # with dimensionless, normal units OK, but return normal quantities tf = lq2 * u.m tr = u.m * lq2 for t in (tf, tr): assert not isinstance(t, type(lq2)) assert t.unit == lq2.unit.function_unit * u.m with u.set_enabled_equivalencies(u.logarithmic()): with pytest.raises(u.UnitsError): t.to(lq2.unit.physical_unit) t = tf / (50.*u.cm) # now we essentially have the same quantity but with a prefactor of 2 assert t.unit.is_equivalent(lq2.unit.function_unit) assert_allclose(t.to(lq2.unit.function_unit), lq2._function_view*2) @pytest.mark.parametrize('power', (2, 0.5, 1, 0)) def test_raise_to_power(self, power): """Check that raising LogQuantities to some power is only possible when the physical unit is dimensionless, and that conversion is turned off when the resulting logarithmic unit (say, mag**2) is incompatible.""" lq = u.Magnitude(np.arange(1., 4.)*u.Jy) if power == 0: assert np.all(lq ** power == 1.) 
elif power == 1: assert np.all(lq ** power == lq) else: with pytest.raises(u.UnitsError): lq ** power # with dimensionless, it works, but falls back to normal quantity # (except for power=1) lq2 = u.Magnitude(np.arange(10.)) t = lq2**power if power == 0: assert t.unit is u.dimensionless_unscaled assert np.all(t.value == 1.) elif power == 1: assert np.all(t == lq2) else: assert not isinstance(t, type(lq2)) assert t.unit == lq2.unit.function_unit ** power with u.set_enabled_equivalencies(u.logarithmic()): with pytest.raises(u.UnitsError): t.to(u.dimensionless_unscaled) def test_error_on_lq_as_power(self): lq = u.Magnitude(np.arange(1., 4.)*u.Jy) with pytest.raises(TypeError): lq ** lq @pytest.mark.parametrize('other', pu_sample) def test_addition_subtraction_to_normal_units_fails(self, other): lq = u.Magnitude(np.arange(1., 10.)*u.Jy) q = 1.23 * other with pytest.raises(u.UnitsError): lq + q with pytest.raises(u.UnitsError): lq - q with pytest.raises(u.UnitsError): q - lq @pytest.mark.parametrize( 'other', (1.23 * u.mag, 2.34 * u.mag(), u.Magnitude(3.45 * u.Jy), u.Magnitude(4.56 * u.m), 5.67 * u.Unit(2*u.mag), u.Magnitude(6.78, 2.*u.mag))) def test_addition_subtraction(self, other): """Check that addition/subtraction with quantities with magnitude or MagUnit units works, and that it changes the physical units appropriately.""" lq = u.Magnitude(np.arange(1., 10.)*u.Jy) other_physical = other.to(getattr(other.unit, 'physical_unit', u.dimensionless_unscaled), equivalencies=u.logarithmic()) lq_sf = lq + other assert_allclose(lq_sf.physical, lq.physical * other_physical) lq_sr = other + lq assert_allclose(lq_sr.physical, lq.physical * other_physical) lq_df = lq - other assert_allclose(lq_df.physical, lq.physical / other_physical) lq_dr = other - lq assert_allclose(lq_dr.physical, other_physical / lq.physical) @pytest.mark.parametrize('other', pu_sample) def test_inplace_addition_subtraction_unit_checks(self, other): lu1 = u.mag(u.Jy) lq1 = u.Magnitude(np.arange(1., 10.), lu1) with pytest.raises(u.UnitsError): lq1 += other assert np.all(lq1.value == np.arange(1., 10.)) assert lq1.unit == lu1 with pytest.raises(u.UnitsError): lq1 -= other assert np.all(lq1.value == np.arange(1., 10.)) assert lq1.unit == lu1 @pytest.mark.parametrize( 'other', (1.23 * u.mag, 2.34 * u.mag(), u.Magnitude(3.45 * u.Jy), u.Magnitude(4.56 * u.m), 5.67 * u.Unit(2*u.mag), u.Magnitude(6.78, 2.*u.mag))) def test_inplace_addition_subtraction(self, other): """Check that inplace addition/subtraction with quantities with magnitude or MagUnit units works, and that it changes the physical units appropriately.""" lq = u.Magnitude(np.arange(1., 10.)*u.Jy) other_physical = other.to(getattr(other.unit, 'physical_unit', u.dimensionless_unscaled), equivalencies=u.logarithmic()) lq_sf = lq.copy() lq_sf += other assert_allclose(lq_sf.physical, lq.physical * other_physical) lq_df = lq.copy() lq_df -= other assert_allclose(lq_df.physical, lq.physical / other_physical) def test_complicated_addition_subtraction(self): """For fun, a more complicated example of addition and subtraction.""" dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2)) DMmag = u.mag(dm0) m_st = 10. * u.STmag dm = 5. * DMmag M_st = m_st - dm assert M_st.unit.is_equivalent(u.erg/u.s/u.AA) assert np.abs(M_st.physical / (m_st.physical*4.*np.pi*(100.*u.pc)**2) - 1.) 
< 1.e-15 class TestLogQuantityComparisons(object): def test_comparison_to_non_quantities_fails(self): lq = u.Magnitude(np.arange(1., 10.)*u.Jy) # On python2, ordering operations always succeed, given essentially # meaningless results. if not six.PY2: with pytest.raises(TypeError): lq > 'a' assert not (lq == 'a') assert lq != 'a' def test_comparison(self): lq1 = u.Magnitude(np.arange(1., 4.)*u.Jy) lq2 = u.Magnitude(2.*u.Jy) assert np.all((lq1 > lq2) == np.array([True, False, False])) assert np.all((lq1 == lq2) == np.array([False, True, False])) lq3 = u.Dex(2.*u.Jy) assert np.all((lq1 > lq3) == np.array([True, False, False])) assert np.all((lq1 == lq3) == np.array([False, True, False])) lq4 = u.Magnitude(2.*u.m) assert not (lq1 == lq4) assert lq1 != lq4 with pytest.raises(u.UnitsError): lq1 < lq4 q5 = 1.5 * u.Jy assert np.all((lq1 > q5) == np.array([True, False, False])) assert np.all((q5 < lq1) == np.array([True, False, False])) with pytest.raises(u.UnitsError): lq1 >= 2.*u.m with pytest.raises(u.UnitsError): lq1 <= lq1.value * u.mag # For physically dimensionless, we can compare with the function unit. lq6 = u.Magnitude(np.arange(1., 4.)) fv6 = lq6.value * u.mag assert np.all(lq6 == fv6) # but not some arbitrary unit, of course. with pytest.raises(u.UnitsError): lq6 < 2.*u.m class TestLogQuantityMethods(object): def setup(self): self.mJy = np.arange(1., 5.).reshape(2, 2) * u.mag(u.Jy) self.m1 = np.arange(1., 5.5, 0.5).reshape(3, 3) * u.mag() self.mags = (self.mJy, self.m1) @pytest.mark.parametrize('method', ('mean', 'min', 'max', 'round', 'trace', 'std', 'var', 'ptp', 'diff', 'ediff1d')) def test_always_ok(self, method): for mag in self.mags: res = getattr(mag, method)() assert np.all(res.value == getattr(mag._function_view, method)().value) if method in ('std', 'ptp', 'diff', 'ediff1d'): assert res.unit == u.mag() elif method == 'var': assert res.unit == u.mag**2 else: assert res.unit == mag.unit def test_clip(self): for mag in self.mags: assert np.all(mag.clip(2. * mag.unit, 4. * mag.unit).value == mag.value.clip(2., 4.)) @pytest.mark.parametrize('method', ('sum', 'cumsum', 'nansum')) def test_only_ok_if_dimensionless(self, method): res = getattr(self.m1, method)() assert np.all(res.value == getattr(self.m1._function_view, method)().value) assert res.unit == self.m1.unit with pytest.raises(TypeError): getattr(self.mJy, method)() def test_dot(self): assert np.all(self.m1.dot(self.m1).value == self.m1.value.dot(self.m1.value)) @pytest.mark.parametrize('method', ('prod', 'cumprod')) def test_never_ok(self, method): with pytest.raises(ValueError): getattr(self.mJy, method)() with pytest.raises(ValueError): getattr(self.m1, method)() class TestLogQuantityUfuncs(object): """Spot checks on ufuncs.""" def setup(self): self.mJy = np.arange(1., 5.).reshape(2, 2) * u.mag(u.Jy) self.m1 = np.arange(1., 5.5, 0.5).reshape(3, 3) * u.mag() self.mags = (self.mJy, self.m1) def test_power(self): assert np.all(np.power(self.mJy, 0.) == 1.) assert np.all(np.power(self.m1, 1.) == self.m1) assert np.all(np.power(self.mJy, 1.) == self.mJy) assert np.all(np.power(self.m1, 2.) == self.m1 ** 2) with pytest.raises(u.UnitsError): np.power(self.mJy, 2.) def test_not_implemented_with_physical_unit(self): with pytest.raises(u.UnitsError): np.square(self.mJy) assert np.all(np.square(self.m1) == self.m1 ** 2)
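For orientation, a tiny example of the Magnitude behavior these tests exercise; the values are illustrative only.

import astropy.units as u

m = 5.0 * u.mag(u.Jy)        # a Magnitude with physical unit Jy
print(m.physical)            # ~0.01 Jy, since 5 mag is a factor-100 dimming
print(m.unit.physical_unit)  # Jy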
37.031214
80
0.59094
28,690
0.895667
0
0
15,257
0.476305
0
0
4,696
0.146603
6a64620ee9819bca0e28e6f332c50299811770b5
13,981
py
Python
djconnectwise/tests/mocks.py
kti-sam/django-connectwise
28484faad9435892a46b8ce4a3c957f64c299971
[ "MIT" ]
null
null
null
djconnectwise/tests/mocks.py
kti-sam/django-connectwise
28484faad9435892a46b8ce4a3c957f64c299971
[ "MIT" ]
null
null
null
djconnectwise/tests/mocks.py
kti-sam/django-connectwise
28484faad9435892a46b8ce4a3c957f64c299971
[ "MIT" ]
null
null
null
import os
from mock import patch
from datetime import datetime, date, time
import json
import responses
from . import fixtures
from django.utils import timezone

CW_MEMBER_IMAGE_FILENAME = 'AnonymousMember.png'


def create_mock_call(method_name, return_value, side_effect=None):
    """Utility function for mocking the specified function or method"""
    _patch = patch(method_name, side_effect=side_effect)
    mock_get_call = _patch.start()

    if not side_effect:
        mock_get_call.return_value = return_value

    return mock_get_call, _patch


def company_info_get_company_info_call(return_value):
    method_name = 'djconnectwise.api.CompanyInfoManager.get_company_info'
    return create_mock_call(method_name, return_value)


def company_api_get_call(return_value):
    method_name = 'djconnectwise.api.CompanyAPIClient.get_companies'
    return create_mock_call(method_name, return_value)


def company_api_by_id_call(return_value, raised=None):
    method_name = 'djconnectwise.api.CompanyAPIClient.by_id'
    return create_mock_call(method_name, return_value, side_effect=raised)


def company_api_get_company_statuses_call(return_value, raised=None):
    method_name = 'djconnectwise.api.CompanyAPIClient.get_company_statuses'
    return create_mock_call(method_name, return_value, side_effect=raised)


def company_api_get_company_types_call(return_value, raised=None):
    method_name = 'djconnectwise.api.CompanyAPIClient.get_company_types'
    return create_mock_call(method_name, return_value, side_effect=raised)


def projects_api_get_project_statuses_call(return_value, raised=None):
    method_name = 'djconnectwise.api.ProjectAPIClient.get_project_statuses'
    return create_mock_call(method_name, return_value, side_effect=raised)


def projects_api_get_project_types_call(return_value, raised=None):
    method_name = 'djconnectwise.api.ProjectAPIClient.get_project_types'
    return create_mock_call(method_name, return_value, side_effect=raised)


def projects_api_get_project_phases_call(return_value, raised=None):
    method_name = 'djconnectwise.api.ProjectAPIClient.get_project_phases'
    return create_mock_call(method_name, return_value, side_effect=raised)


def project_api_get_projects_call(return_value):
    method_name = 'djconnectwise.api.ProjectAPIClient.get_projects'
    return create_mock_call(method_name, return_value)


def project_api_get_project_call(return_value, raised=None):
    method_name = 'djconnectwise.api.ProjectAPIClient.get_project'
    return create_mock_call(method_name, return_value, side_effect=raised)


def _project_api_tickets_call(page=1, page_size=25, conditions=[]):
    return_value = []
    test_date = date(1948, 5, 14)
    test_time = time(12, 0, 0, tzinfo=timezone.get_current_timezone())
    test_datetime = datetime.combine(test_date, test_time)
    conditions.append('lastUpdated>' + timezone.localtime(
        value=test_datetime).isoformat()
    )
    if page == 1:
        return_value = [fixtures.API_PROJECT_TICKET]

    return return_value


def project_api_tickets_call():
    method_name = 'djconnectwise.api.TicketAPIMixin.get_tickets'
    mock_call, _patch = create_mock_call(
        method_name,
        None,
        side_effect=_project_api_tickets_call)

    return mock_call, _patch


def project_api_tickets_test_command(return_value):
    method_name = 'djconnectwise.api.TicketAPIMixin.get_tickets'
    mock_call, _patch = create_mock_call(method_name, return_value)

    return mock_call, _patch


def sales_api_by_id_call(return_value, raised=None):
    method_name = 'djconnectwise.api.SalesAPIClient.by_id'
    return create_mock_call(method_name, return_value, side_effect=raised)


def sales_api_get_opportunities_call(return_value, raised=None):
    method_name = 'djconnectwise.api.SalesAPIClient.get_opportunities'
    return create_mock_call(method_name, return_value, side_effect=raised)


def sales_api_get_opportunity_statuses_call(return_value, raised=None):
    method_name = 'djconnectwise.api.SalesAPIClient.get_opportunity_statuses'
    return create_mock_call(method_name, return_value, side_effect=raised)


def sales_api_get_opportunity_types_call(return_value, raised=None):
    method_name = 'djconnectwise.api.SalesAPIClient.get_opportunity_types'
    return create_mock_call(method_name, return_value, side_effect=raised)


def sales_api_get_opportunity_stages_call(return_value, raised=None):
    method_name = 'djconnectwise.api.SalesAPIClient.get_opportunity_stages'
    return create_mock_call(method_name, return_value, side_effect=raised)


def sales_api_get_sales_probabilities_call(return_value, raised=None):
    method_name = 'djconnectwise.api.SalesAPIClient.get_probabilities'
    return create_mock_call(method_name, return_value, side_effect=raised)


def schedule_api_get_schedule_types_call(return_value, raised=None):
    method_name = 'djconnectwise.api.ScheduleAPIClient.get_schedule_types'
    return create_mock_call(method_name, return_value, side_effect=raised)


def schedule_api_get_schedule_statuses_call(return_value, raised=None):
    method_name = 'djconnectwise.api.ScheduleAPIClient.get_schedule_statuses'
    return create_mock_call(method_name, return_value, side_effect=raised)


def schedule_api_get_schedule_entries_call(return_value, raised=None):
    method_name = 'djconnectwise.api.ScheduleAPIClient.get_schedule_entries'
    return create_mock_call(method_name, return_value, side_effect=raised)


def schedule_api_get_schedule_entry_call(return_value, raised=None):
    method_name = 'djconnectwise.api.ScheduleAPIClient.get_schedule_entry'
    return create_mock_call(method_name, return_value, side_effect=raised)


def schedule_api_get_calendars_call(return_value, raised=None):
    method_name = 'djconnectwise.api.ScheduleAPIClient.get_calendars'
    return create_mock_call(method_name, return_value, side_effect=raised)


def schedule_api_get_holidays_call(return_value, raised=None):
    method_name = 'djconnectwise.api.ScheduleAPIClient.get_holidays'
    return create_mock_call(method_name, return_value, side_effect=raised)


def schedule_api_get_holiday_lists_call(return_value, raised=None):
    method_name = 'djconnectwise.api.ScheduleAPIClient.get_holiday_lists'
    return create_mock_call(method_name, return_value, side_effect=raised)


def time_api_get_time_entries_call(return_value, raised=None):
    method_name = 'djconnectwise.api.TimeAPIClient.get_time_entries'
    return create_mock_call(method_name, return_value, side_effect=raised)


def sales_api_get_activities_call(return_value, raised=None):
    method_name = 'djconnectwise.api.SalesAPIClient.get_activities'
    return create_mock_call(method_name, return_value, side_effect=raised)


def sales_api_get_activities_statuses_call(return_value, raised=None):
    method_name = 'djconnectwise.api.SalesAPIClient.get_activity_statuses'
    return create_mock_call(method_name, return_value, side_effect=raised)


def sales_api_get_activities_types_call(return_value, raised=None):
    method_name = 'djconnectwise.api.SalesAPIClient.get_activity_types'
    return create_mock_call(method_name, return_value, side_effect=raised)


def sales_api_get_single_activity_call(return_value, raised=None):
    method_name = 'djconnectwise.api.SalesAPIClient.get_single_activity'
    return create_mock_call(method_name, return_value, side_effect=raised)


def _service_api_tickets_call(page=1, page_size=25, conditions=[]):
    return_value = []
    test_date = date(1948, 5, 14)
    test_time = time(12, 0, 0, tzinfo=timezone.get_current_timezone())
    test_datetime = datetime.combine(test_date, test_time)
    conditions.append('lastUpdated>' + timezone.localtime(
        value=test_datetime).isoformat()
    )
    if page == 1:
        return_value = [fixtures.API_SERVICE_TICKET]

    return return_value


def service_api_tickets_call():
    method_name = 'djconnectwise.api.TicketAPIMixin.get_tickets'
    mock_call, _patch = create_mock_call(
        method_name,
        None,
        side_effect=_service_api_tickets_call)

    return mock_call, _patch


def _service_api_get_ticket_call(ticket_id):
    return fixtures.API_SERVICE_TICKET_MAP.get(ticket_id)


def service_api_get_ticket_call(raised=None):
    method_name = 'djconnectwise.api.TicketAPIMixin.get_ticket'
    mock_call, _patch = create_mock_call(
        method_name,
        None,
        side_effect=raised if raised else _service_api_get_ticket_call)

    return mock_call, _patch


def service_api_get_boards_call(return_value):
    method_name = 'djconnectwise.api.ServiceAPIClient.get_boards'
    return create_mock_call(method_name, return_value)


def service_api_update_ticket_call(return_value):
    method_name = 'djconnectwise.api.ServiceAPIClient.update_ticket'
    return create_mock_call(method_name, return_value)


def service_api_get_statuses_call(return_value):
    method_name = 'djconnectwise.api.ServiceAPIClient.get_statuses'
    return create_mock_call(method_name, return_value)


def service_api_get_priorities_call(return_value):
    method_name = 'djconnectwise.api.ServiceAPIClient.get_priorities'
    return create_mock_call(method_name, return_value)


def service_api_get_teams_call(return_value):
    method_name = 'djconnectwise.api.ServiceAPIClient.get_teams'
    return create_mock_call(method_name, return_value)


def service_api_get_notes_call(return_value):
    method_name = 'djconnectwise.api.ServiceAPIClient.get_notes'
    return create_mock_call(method_name, return_value)


def service_api_get_slas_call(return_value):
    method_name = 'djconnectwise.api.ServiceAPIClient.get_slas'
    return create_mock_call(method_name, return_value)


def service_api_get_sla_priorities_call(return_value):
    method_name = 'djconnectwise.api.ServiceAPIClient.get_slapriorities'
    return create_mock_call(method_name, return_value)


def service_api_get_types_call(return_value):
    method_name = 'djconnectwise.api.ServiceAPIClient.get_types'
    return create_mock_call(method_name, return_value)


def service_api_get_subtypes_call(return_value):
    method_name = 'djconnectwise.api.ServiceAPIClient.get_subtypes'
    return create_mock_call(method_name, return_value)


def service_api_get_items_call(return_value):
    method_name = 'djconnectwise.api.ServiceAPIClient.get_items'
    return create_mock_call(method_name, return_value)


def sales_api_get_opportunity_notes_call(return_value):
    method_name = 'djconnectwise.api.SalesAPIClient.get_notes'
    return create_mock_call(method_name, return_value)


def service_api_get_locations_call(return_value):
    method_name = 'djconnectwise.api.ServiceAPIClient.get_locations'
    return create_mock_call(method_name, return_value)


def system_api_get_connectwise_version_call(return_value):
    method_name = 'djconnectwise.api.SystemAPIClient.get_connectwise_version'
    return create_mock_call(method_name, return_value)


def system_api_get_members_call(return_value):
    method_name = 'djconnectwise.api.SystemAPIClient.get_members'
    return create_mock_call(method_name, return_value)


def system_api_get_member_image_by_photo_id_call(return_value):
    method_name = 'djconnectwise.api.SystemAPIClient.' \
        + 'get_member_image_by_photo_id'
    return create_mock_call(method_name, return_value)


def system_api_get_member_count_call(return_value):
    method_name = 'djconnectwise.api.SystemAPIClient.get_members'
    return create_mock_call(method_name, return_value)


def system_api_create_callback_call(return_value):
    method_name = 'djconnectwise.api.SystemAPIClient.create_callback'
    return create_mock_call(method_name, return_value)


def system_api_delete_callback_call(return_value):
    method_name = 'djconnectwise.api.SystemAPIClient.delete_callback'
    return create_mock_call(method_name, return_value)


def system_api_get_callbacks_call(return_value):
    method_name = 'djconnectwise.api.SystemAPIClient.get_callbacks'
    return create_mock_call(method_name, return_value)


def system_api_get_territories_call(return_value):
    method_name = 'djconnectwise.api.SystemAPIClient.get_territories'
    return create_mock_call(method_name, return_value)


def system_api_get_other_call(return_value):
    method_name = 'djconnectwise.api.SystemAPIClient.get_mycompanyother'
    return create_mock_call(method_name, return_value)


def cw_api_fetch_resource_call(return_value):
    method_name = 'djconnectwise.api.ConnectWiseAPIClient.fetch_resource'
    return create_mock_call(method_name, return_value)


def get(url, data, headers=None, status=200):
    """Set up requests mock for given URL and JSON-serializable data."""
    get_raw(url, json.dumps(data), "application/json", headers, status=status)


def time_api_get_work_types_call(return_value):
    method_name = 'djconnectwise.api.TimeAPIClient.get_work_types'
    return create_mock_call(method_name, return_value)


def time_api_get_work_roles_call(return_value):
    method_name = 'djconnectwise.api.TimeAPIClient.get_work_roles'
    return create_mock_call(method_name, return_value)


def finance_api_get_agreements_call(return_value):
    method_name = 'djconnectwise.api.FinanceAPIClient.get_agreements'
    return create_mock_call(method_name, return_value)


def get_raw(url, data, content_type="application/octet-stream", headers=None,
            status=200):
    """Set up requests mock for given URL."""
    responses.add(
        responses.GET,
        url,
        body=data,
        status=status,
        content_type=content_type,
        adding_headers=headers,
    )


def get_member_avatar():
    """Return the avatar image data in the tests directory."""
    cw_member_image_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        CW_MEMBER_IMAGE_FILENAME
    )

    with open(cw_member_image_path, 'rb') as anonymous_image_file:
        return anonymous_image_file.read()
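A usage sketch for the helpers above inside a test; the fixture payload is hypothetical, and the patch started by create_mock_call must be stopped afterwards.

# Hypothetical fixture payload; real tests use the fixtures module.
mock_call, _patch = company_api_get_call([{'id': 1, 'identifier': 'acme'}])
try:
    pass  # exercise code that calls CompanyAPIClient.get_companies
finally:
    _patch.stop()  # undo the patch started by create_mock_call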
36.126615
78
0.800801
0
0
0
0
0
0
0
0
3,298
0.235892
6a648d570a29d5a4d4e0f9f5bffd72aadfab36cb
2,632
py
Python
visual_odometry/visual_odometry.py
vineeths96/Visual-Odometry
88d96a23a0bde9c05de1f4dddcca8b6c4bd817e7
[ "MIT" ]
2
2021-07-20T03:49:54.000Z
2022-01-19T13:43:51.000Z
visual_odometry/visual_odometry.py
vineeths96/Visual-Odometry
88d96a23a0bde9c05de1f4dddcca8b6c4bd817e7
[ "MIT" ]
null
null
null
visual_odometry/visual_odometry.py
vineeths96/Visual-Odometry
88d96a23a0bde9c05de1f4dddcca8b6c4bd817e7
[ "MIT" ]
3
2021-11-28T06:23:23.000Z
2021-12-05T17:09:00.000Z
from .monovideoodometry import MonoVideoOdometry
from .parameters import *


def visual_odometry(
    image_path="./input/sequences/10/image_0/",
    pose_path="./input/poses/10.txt",
    fivepoint=False,
):
    """
    Plots the estimated odometry path using either five point estimation or eight point estimation

    :param image_path: Path to the directory of camera images
    :param pose_path: Path to the directory of pose file
    :param fivepoint: Whether to use five point or eight point method
    :return: None
    """

    vo = MonoVideoOdometry(image_path, pose_path, FOCAL, PP, K, LUCAS_KANADE_PARAMS, fivepoint)
    trajectory = np.zeros(shape=(800, 1200, 3))

    frame_count = 0
    while vo.hasNextFrame():
        frame_count += 1

        frame = vo.current_frame
        cv2.imshow("Frame", frame)

        k = cv2.waitKey(1)
        if k == 27:
            break

        vo.process_frame()
        estimated_coordinates = vo.get_mono_coordinates()
        true_coordinates = vo.get_true_coordinates()

        print("MSE Error: ", np.linalg.norm(estimated_coordinates - true_coordinates))
        print("x: {}, y: {}, z: {}".format(*[str(pt) for pt in estimated_coordinates]))
        print("True_x: {}, True_y: {}, True_z: {}".format(*[str(pt) for pt in true_coordinates]))

        draw_x, draw_y, draw_z = [int(round(x)) for x in estimated_coordinates]
        true_x, true_y, true_z = [int(round(x)) for x in true_coordinates]

        trajectory = cv2.circle(trajectory, (true_x + 400, true_z + 100), 1, list((0, 0, 255)), 4)
        trajectory = cv2.circle(trajectory, (draw_x + 400, draw_z + 100), 1, list((0, 255, 0)), 4)

        cv2.putText(
            trajectory,
            "Actual Position:",
            (140, 90),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.5,
            (255, 255, 255),
            1,
        )
        cv2.putText(trajectory, "Red", (270, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
        cv2.putText(
            trajectory,
            "Estimated Odometry Position:",
            (30, 120),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.5,
            (255, 255, 255),
            1,
        )
        cv2.putText(
            trajectory,
            "Green",
            (270, 120),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.5,
            (0, 255, 0),
            1,
        )

        cv2.imshow("trajectory", trajectory)

        if frame_count % 5 == 0:
            cv2.imwrite(f"./results/trajectory/trajectory_{frame_count}.png", trajectory)

    cv2.imwrite(f"./results/trajectory.png", trajectory)
    cv2.destroyAllWindows()
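An invocation sketch under two assumptions: the package is importable under the repo's visual_odometry directory (the exact import path is assumed, not confirmed by the source), and the KITTI-style input layout matches the defaults above.

# Assumed import path; adjust to how the package is actually installed.
from visual_odometry.visual_odometry import visual_odometry

visual_odometry(
    image_path="./input/sequences/10/image_0/",
    pose_path="./input/poses/10.txt",
    fivepoint=True,  # five-point essential-matrix estimation instead of eight-point
)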
32.097561
98
0.578647
0
0
0
0
0
0
0
0
598
0.227204
6a659a66fbda946ae307b1633f49b480eec28005
886
py
Python
tf-2-data-parallelism/src/utils.py
Amirosimani/amazon-sagemaker-script-mode
ea8d7d6b1b0613dffa793c9ae247cfd8868034ec
[ "Apache-2.0" ]
144
2019-02-05T21:03:30.000Z
2022-03-24T15:24:32.000Z
tf-2-data-parallelism/src/utils.py
kirit93/amazon-sagemaker-script-mode
095af07488889bb2655b741749d8740d3e11a49e
[ "Apache-2.0" ]
22
2019-03-04T04:18:02.000Z
2022-03-09T00:21:36.000Z
tf-2-data-parallelism/src/utils.py
kirit93/amazon-sagemaker-script-mode
095af07488889bb2655b741749d8740d3e11a49e
[ "Apache-2.0" ]
94
2019-02-05T21:03:33.000Z
2022-01-16T07:29:15.000Z
import os import numpy as np import tensorflow as tf def get_train_data(train_dir, batch_size): train_images = np.load(os.path.join(train_dir, 'train_images.npy')) train_labels = np.load(os.path.join(train_dir, 'train_labels.npy')) print('train_images', train_images.shape, 'train_labels', train_labels.shape) dataset_train = tf.data.Dataset.from_tensor_slices((train_images, train_labels)) dataset_train = dataset_train.repeat().shuffle(10000).batch(batch_size) return dataset_train def get_val_data(val_dir): test_images = np.load(os.path.join(val_dir, 'validation_images.npy')) test_labels = np.load(os.path.join(val_dir, 'validation_labels.npy')) print('validation_images', test_images.shape, 'validation_labels', test_labels.shape) dataset_test = tf.data.Dataset.from_tensor_slices((test_images, test_labels)) return dataset_test
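# A minimal wiring sketch, assuming SageMaker-style channel paths and an
# already-compiled Keras `model`; both are placeholders, not part of this
# module.
def example_fit(model, train_dir='/opt/ml/input/data/train',
                val_dir='/opt/ml/input/data/validation', batch_size=64):
    train_ds = get_train_data(train_dir, batch_size)
    val_ds = get_val_data(val_dir).batch(batch_size)
    # the train dataset repeats forever, so steps_per_epoch must be given
    model.fit(train_ds, steps_per_epoch=100, epochs=1, validation_data=val_ds)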
36.916667
89
0.76298
0
0
0
0
0
0
0
0
148
0.167043
6a65a78ac7de33dc7adca445fb1aae94ba18f829
10,269
py
Python
scripts/run_rbf_comparison_car_air_top5.py
CaptainCandy/influence-release
a152486a1c130fb5f907259c6692b9fe0d2ef6d0
[ "MIT" ]
null
null
null
scripts/run_rbf_comparison_car_air_top5.py
CaptainCandy/influence-release
a152486a1c130fb5f907259c6692b9fe0d2ef6d0
[ "MIT" ]
null
null
null
scripts/run_rbf_comparison_car_air_top5.py
CaptainCandy/influence-release
a152486a1c130fb5f907259c6692b9fe0d2ef6d0
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 19 16:26:35 2019

@author: Administrator
"""

# Forked from run_rbf_comparison.py

from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals

import math
import copy
import numpy as np
import pandas as pd
import sklearn.linear_model as linear_model
import sklearn.preprocessing as preprocessing

import scipy
import scipy.linalg as slin
import scipy.sparse.linalg as sparselin
import scipy.sparse as sparse
import random

import sys
sys.path.append("C:/Tang/influence-release-master")  # add the custom package directory to the module search path
from load_vehicles import load_vehicles

import tensorflow as tf

from tensorflow.contrib.learn.python.learn.datasets import base
from sklearn.metrics.pairwise import rbf_kernel

from influence.inceptionModel import BinaryInceptionModel
from influence.smooth_hinge import SmoothHinge
from influence.binaryLogisticRegressionWithLBFGS import BinaryLogisticRegressionWithLBFGS
import influence.dataset as dataset
from influence.dataset import DataSet
from influence.dataset_poisoning import generate_inception_features

#%%
def get_Y_pred_correct_inception(model):
    Y_test = model.data_sets.test.labels
    if np.min(Y_test) < -0.5:
        Y_test = (np.copy(Y_test) + 1) / 2

    Y_pred = model.sess.run(model.preds, feed_dict=model.all_test_feed_dict)

    Y_pred_correct = np.zeros([len(Y_test)])
    for idx, label in enumerate(Y_test):
        Y_pred_correct[idx] = Y_pred[idx, int(label)]

    return Y_pred_correct

num_classes = 2
num_train_ex_per_class = 40
num_test_ex_per_class = 300
dataset_name = 'carair_%s_%s' % (num_train_ex_per_class, num_test_ex_per_class)
image_data_sets = load_vehicles(
    num_train_ex_per_class=num_train_ex_per_class,
    num_test_ex_per_class=num_test_ex_per_class)

weight_decay = 0.001
initial_learning_rate = 0.001
keep_probs = None
decay_epochs = [1000, 10000]

#%%
### Generate kernelized feature vectors
X_train = image_data_sets.train.x
X_test = image_data_sets.test.x

Y_train = np.copy(image_data_sets.train.labels) * 2 - 1
Y_test = np.copy(image_data_sets.test.labels) * 2 - 1

num_train = X_train.shape[0]
num_test = X_test.shape[0]

X_stacked = np.vstack((X_train, X_test))

gamma = 0.05
weight_decay = 0.0001

K = rbf_kernel(X_stacked, gamma = gamma / num_train)
# =============================================================================
# L = slin.cholesky(K, lower=True)
# L_train = L[:num_train, :num_train]
# L_test = L[num_train:, :num_train]
# =============================================================================
K_train = K[:num_train, :num_train]
K_test = K[num_train:, :num_train]

### Compare top 5 influential examples from each network
test_idx = 0

## RBF

input_channels = 1
weight_decay = 0.001
batch_size = num_train
initial_learning_rate = 0.001
keep_probs = None
max_lbfgs_iter = 1000
use_bias = False
decay_epochs = [1000, 10000]

tf.reset_default_graph()

X_train = image_data_sets.train.x
Y_train = image_data_sets.train.labels * 2 - 1
train = DataSet(K_train, Y_train)
test = DataSet(K_test, Y_test)

data_sets = base.Datasets(train=train, validation=None, test=test)
input_dim = data_sets.train.x.shape[1]

# Train with hinge
print('Train rbf with hinge...')
rbf_model = SmoothHinge(
    temp=0,
    use_bias=use_bias,
    input_dim=input_dim,
    weight_decay=weight_decay,
    num_classes=num_classes,
    batch_size=batch_size,
    data_sets=data_sets,
    initial_learning_rate=initial_learning_rate,
    keep_probs=keep_probs,
    decay_epochs=decay_epochs,
    mini_batch=False,
    train_dir='output7',
    log_dir='log',
    model_name='carair_rbf_hinge_t-0')

rbf_model.train()
hinge_W = rbf_model.sess.run(rbf_model.params)[0]

# Then load weights into smoothed version
print('Load weights into smoothed version...')
tf.reset_default_graph()
rbf_model = SmoothHinge(
    temp=0.001,
    use_bias=use_bias,
    input_dim=input_dim,
    weight_decay=weight_decay,
    num_classes=num_classes,
    batch_size=batch_size,
    data_sets=data_sets,
    initial_learning_rate=initial_learning_rate,
    keep_probs=keep_probs,
    decay_epochs=decay_epochs,
    mini_batch=False,
    train_dir='output7',
    log_dir='log',
    model_name='car_air_rbf_hinge_t-0.001')

params_feed_dict = {}
params_feed_dict[rbf_model.W_placeholder] = hinge_W
rbf_model.sess.run(rbf_model.set_params_op, feed_dict=params_feed_dict)

rbf_predicted_loss_diffs = rbf_model.get_influence_on_test_loss(
    [test_idx],
    np.arange(len(rbf_model.data_sets.train.labels)),
    force_refresh=True)

#%%
## Inception

dataset_name = 'carair_40_300'
test_idx = 0

# Generate inception features
print('Generate inception features...')
img_side = 299
num_channels = 3

num_train_ex_per_class = 40
num_test_ex_per_class = 300
batch_size = 20  # TODO: needs to be adjusted according to the configuration

# reset_default_graph resets the current thread so that everything defined in
# the tf session is discarded and we start over, i.e. it opens a fresh
# neural-network session
tf.reset_default_graph()
full_model_name = '%s_inception' % dataset_name
# The statement below defines a binary Inception classifier
full_model = BinaryInceptionModel(
    img_side=img_side,
    num_channels=num_channels,
    weight_decay=weight_decay,
    num_classes=num_classes,
    batch_size=batch_size,
    data_sets=image_data_sets,
    initial_learning_rate=initial_learning_rate,
    keep_probs=keep_probs,
    decay_epochs=decay_epochs,
    mini_batch=True,
    train_dir='output9',
    log_dir='log',
    model_name=full_model_name)

# The code below uses the Inception convolutional layers to generate features
train_inception_features_val = generate_inception_features(
    full_model,
    image_data_sets.train.x,
    image_data_sets.train.labels,
    batch_size=batch_size)
test_inception_features_val = generate_inception_features(
    full_model,
    image_data_sets.test.x,
    image_data_sets.test.labels,
    batch_size=batch_size)

train = DataSet(
    train_inception_features_val,
    image_data_sets.train.labels)
test = DataSet(
    test_inception_features_val,
    image_data_sets.test.labels)

# train_f = np.load('G:/output/%s_inception_features_new_train.npz' % dataset_name)
# train = DataSet(train_f['inception_features_val'], train_f['labels'])
# test_f = np.load('G:/output/%s_inception_features_new_test.npz' % dataset_name)
# test = DataSet(test_f['inception_features_val'], test_f['labels'])
validation = None

# The code above trains the part of the Inception model in front of the fully
# connected layer, so the output features have 2048 dimensions
data_sets = base.Datasets(train=train, validation=validation, test=test)

# train_f = np.load('G:/output/%s_inception_features_new_train.npz' % dataset_name)
# train = DataSet(train_f['inception_features_val'], train_f['labels'])
# test_f = np.load('G:/output/%s_inception_features_new_test.npz' % dataset_name)
# test = DataSet(test_f['inception_features_val'], test_f['labels'])
# validation = None
# data_sets = base.Datasets(train=train, validation=validation, test=test)

# The code below fits a binary logistic regression on the features produced by
# the trained Inception convolutional layers, dropping the FC layer that
# normally follows them
print('Train logistic regression after inception...')
input_dim = 2048
weight_decay = 0.001
batch_size = 20
initial_learning_rate = 0.001
keep_probs = None
decay_epochs = [1000, 10000]
max_lbfgs_iter = 1000
num_classes = 2

tf.reset_default_graph()

inception_model = BinaryLogisticRegressionWithLBFGS(
    input_dim=input_dim,
    weight_decay=weight_decay,
    max_lbfgs_iter=max_lbfgs_iter,
    num_classes=num_classes,
    batch_size=batch_size,
    data_sets=data_sets,
    initial_learning_rate=initial_learning_rate,
keep_probs=keep_probs, decay_epochs=decay_epochs, mini_batch=False, train_dir='output9', log_dir='log', model_name='%s_inception_onlytop' % dataset_name) inception_model.train() # ============================================================================= # inception_predicted_loss_diffs = inception_model.get_influence_on_test_loss( # [test_idx], # np.arange(len(inception_model.data_sets.train.labels)), # force_refresh=True) # # x_test = X_test[test_idx, :] # y_test = Y_test[test_idx] # # # distances = dataset.find_distances(x_test, X_train) # flipped_idx = Y_train != y_test # rbf_margins_test = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_test_feed_dict) # rbf_margins_train = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_train_feed_dict) # inception_Y_pred_correct = get_Y_pred_correct_inception(inception_model) # # # np.savez( # 'output7/rbf_carair_results_%s' % test_idx, # test_idx=test_idx, # distances=distances, # flipped_idx=flipped_idx, # rbf_margins_test=rbf_margins_test, # rbf_margins_train=rbf_margins_train, # inception_Y_pred_correct=inception_Y_pred_correct, # rbf_predicted_loss_diffs=rbf_predicted_loss_diffs, # inception_predicted_loss_diffs=inception_predicted_loss_diffs # ) # ============================================================================= #%% print('Save results...') #rand_test = random.sample(range(1, 600),50) #np.savez('output7/rand_test_point', rand_test=rand_test) for test_idx in range(1, 600): rbf_predicted_loss_diffs = rbf_model.get_influence_on_test_loss( [test_idx], np.arange(len(rbf_model.data_sets.train.labels)), force_refresh=True) inception_predicted_loss_diffs = inception_model.get_influence_on_test_loss( [test_idx], np.arange(len(inception_model.data_sets.train.labels)), force_refresh=True) x_test = X_test[test_idx, :] y_test = Y_test[test_idx] distances = dataset.find_distances(x_test, X_train) flipped_idx = Y_train != y_test rbf_margins_test = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_test_feed_dict) rbf_margins_train = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_train_feed_dict) inception_Y_pred_correct = get_Y_pred_correct_inception(inception_model) np.savez( 'output9/rbf_carair_results_%s' % test_idx, test_idx=test_idx, distances=distances, flipped_idx=flipped_idx, rbf_margins_test=rbf_margins_test, rbf_margins_train=rbf_margins_train, inception_Y_pred_correct=inception_Y_pred_correct, rbf_predicted_loss_diffs=rbf_predicted_loss_diffs, inception_predicted_loss_diffs=inception_predicted_loss_diffs )
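#%%
# A follow-up sketch, assuming a results file written by the loop above: load
# it and rank the five training points with the largest-magnitude influence
# under each model. The file name is a placeholder for one run of the loop.
def top5_influences(path='output9/rbf_carair_results_1.npz'):
    res = np.load(path)
    rbf_top5 = np.argsort(np.abs(res['rbf_predicted_loss_diffs']))[-5:]
    inception_top5 = np.argsort(np.abs(res['inception_predicted_loss_diffs']))[-5:]
    return rbf_top5, inception_top5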
30.930723
101
0.7435
0
0
0
0
0
0
0
0
3,545
0.334971
6a6623a4cf3e4c5b80fbcffbafebb173294bba30
1,478
py
Python
data/external/repositories_2to3/145085/kaggle_Microsoft_Malware-master/kaggle_Microsoft_malware_small/find_4g.py
Keesiu/meta-kaggle
87de739aba2399fd31072ee81b391f9b7a63f540
[ "MIT" ]
null
null
null
data/external/repositories_2to3/145085/kaggle_Microsoft_Malware-master/kaggle_Microsoft_malware_small/find_4g.py
Keesiu/meta-kaggle
87de739aba2399fd31072ee81b391f9b7a63f540
[ "MIT" ]
null
null
null
data/external/repositories_2to3/145085/kaggle_Microsoft_Malware-master/kaggle_Microsoft_malware_small/find_4g.py
Keesiu/meta-kaggle
87de739aba2399fd31072ee81b391f9b7a63f540
[ "MIT" ]
1
2019-12-04T08:23:33.000Z
2019-12-04T08:23:33.000Z
import sys
import pickle

##########################################################
# usage
# pypy find_4g.py xid_train.p ../../data/train
# xid_train.p is a list like ['loIP1tiwELF9YNZQjSUO',''....] to specify
# the order of samples in training data
# ../../data/train is the path of original train data
##########################################################
xid_name=sys.argv[1]
data_path=sys.argv[2]

# pickle files must be opened in binary mode under Python 3
xid=pickle.load(open(xid_name, 'rb')) #xid_train.p or xid_test.p
newc=pickle.load(open('newc.p', 'rb'))
newc2=pickle.load(open('cutcmd3g_for_4g.p', 'rb'))

cmd4g={}
for i in newc2:
    for j in newc:
        cmd4g[(i[0],i[1],i[2],j)]=0
print(newc)

for c,f in enumerate(xid):
    count={}
    fo=open(data_path+'/'+f+'.asm')
    tot=0
    a=-1
    b=-1
    d=-1
    e=-1
    for line in fo:
        xx=line.split()
        for x in xx:
            if x in newc:
                # slide the 4-gram window of opcode tokens
                a=b
                b=d
                d=e
                e=x
                if (a,b,d,e) in cmd4g:
                    if (a,b,d,e) not in count:
                        count[(a,b,d,e)]=0
                    count[(a,b,d,e)]+=1
                    tot+=1
    fo.close()
    if True:#c%10000==0:
        print(c*1.0/len(xid),tot)
    for i in count:
        cmd4g[i]=count[i]+cmd4g[i]
    del count

cmd4gx={}
for i in cmd4g:
    if cmd4g[i]>0:
        cmd4gx[i]=cmd4g[i]
print(len(cmd4gx))
pickle.dump(cmd4gx,open('cmd4g.p','wb'))
25.050847
72
0.451962
0
0
0
0
0
0
0
0
426
0.288227
6a6651ad80b45cc4756ccfc411bd482091aff56e
50
py
Python
src/domain/enums/__init__.py
Antonio-Gabriel/easepay_backend
9aaf4de27c9cc906911ae46ee61c75c6d92dc826
[ "MIT" ]
1
2021-11-24T09:18:19.000Z
2021-11-24T09:18:19.000Z
src/domain/enums/__init__.py
Antonio-Gabriel/easepay_backend
9aaf4de27c9cc906911ae46ee61c75c6d92dc826
[ "MIT" ]
null
null
null
src/domain/enums/__init__.py
Antonio-Gabriel/easepay_backend
9aaf4de27c9cc906911ae46ee61c75c6d92dc826
[ "MIT" ]
null
null
null
from .months import Months from .sizes import Size
25
26
0.82
0
0
0
0
0
0
0
0
0
0
6a6655e14286bbfcb799353c5812e25b7720b10d
1,512
py
Python
pygments/lexers/trafficscript.py
blu-base/pygments
da799d14818ed538bf937684a19ce779ddde9446
[ "BSD-2-Clause" ]
1
2015-06-08T14:52:49.000Z
2015-06-08T14:52:49.000Z
pygments/lexers/trafficscript.py
blu-base/pygments
da799d14818ed538bf937684a19ce779ddde9446
[ "BSD-2-Clause" ]
1
2022-03-13T09:17:24.000Z
2022-03-13T09:18:02.000Z
pygments/lexers/trafficscript.py
blu-base/pygments
da799d14818ed538bf937684a19ce779ddde9446
[ "BSD-2-Clause" ]
null
null
null
""" pygments.lexers.trafficscript ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Lexer for RiverBed's TrafficScript (RTS) language. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer from pygments.token import String, Number, Name, Keyword, Operator, Text, Comment __all__ = ['RtsLexer'] class RtsLexer(RegexLexer): """ For `Riverbed Stingray Traffic Manager <http://www.riverbed.com/stingray>`_ .. versionadded:: 2.1 """ name = 'TrafficScript' aliases = ['trafficscript', 'rts'] filenames = ['*.rts'] tokens = { 'root' : [ (r"'(\\\\|\\[^\\]|[^'\\])*'", String), (r'"', String, 'escapable-string'), (r'(0x[0-9a-fA-F]+|\d+)', Number), (r'\d+\.\d+', Number.Float), (r'\$[a-zA-Z](\w|_)*', Name.Variable), (r'(if|else|for(each)?|in|while|do|break|sub|return|import)', Keyword), (r'[a-zA-Z][\w.]*', Name.Function), (r'[-+*/%=,;(){}<>^.!~|&\[\]\?\:]', Operator), (r'(>=|<=|==|!=|' r'&&|\|\||' r'\+=|.=|-=|\*=|/=|%=|<<=|>>=|&=|\|=|\^=|' r'>>|<<|' r'\+\+|--|=>)', Operator), (r'[ \t\r]+', Text), (r'#[^\n]*', Comment), ], 'escapable-string' : [ (r'\\[tsn]', String.Escape), (r'[^"]', String), (r'"', String, '#pop'), ], }
29.076923
83
0.433201
1,116
0.738095
0
0
0
0
0
0
793
0.524471
6a681ede8ff42ae46d972ef7a200eff04f8f87d4
20,333
py
Python
pandas/tests/indexes/test_common.py
dimithras/pandas
d321be6e2a43270625abf671d9e59f16529c4b48
[ "BSD-3-Clause" ]
1
2020-10-29T17:32:26.000Z
2020-10-29T17:32:26.000Z
pandas/tests/indexes/test_common.py
BhavarthShah/pandas
efb068f25b911ff3009d5692eb831df35bb042e5
[ "BSD-3-Clause" ]
null
null
null
pandas/tests/indexes/test_common.py
BhavarthShah/pandas
efb068f25b911ff3009d5692eb831df35bb042e5
[ "BSD-3-Clause" ]
null
null
null
""" Collection of tests asserting things that should be true for any index subclass. Makes use of the `indices` fixture defined in pandas/tests/indexes/conftest.py. """ import re import numpy as np import pytest from pandas._libs.tslibs import iNaT from pandas.core.dtypes.common import is_period_dtype, needs_i8_conversion import pandas as pd from pandas import ( CategoricalIndex, DatetimeIndex, MultiIndex, PeriodIndex, RangeIndex, TimedeltaIndex, ) import pandas._testing as tm class TestCommon: def test_droplevel(self, index): # GH 21115 if isinstance(index, MultiIndex): # Tested separately in test_multi.py return assert index.droplevel([]).equals(index) for level in index.name, [index.name]: if isinstance(index.name, tuple) and level is index.name: # GH 21121 : droplevel with tuple name continue with pytest.raises(ValueError): index.droplevel(level) for level in "wrong", ["wrong"]: with pytest.raises( KeyError, match=r"'Requested level \(wrong\) does not match index name \(None\)'", ): index.droplevel(level) def test_constructor_non_hashable_name(self, index): # GH 20527 if isinstance(index, MultiIndex): pytest.skip("multiindex handled in test_multi.py") message = "Index.name must be a hashable type" renamed = [["1"]] # With .rename() with pytest.raises(TypeError, match=message): index.rename(name=renamed) # With .set_names() with pytest.raises(TypeError, match=message): index.set_names(names=renamed) def test_constructor_unwraps_index(self, index): if isinstance(index, pd.MultiIndex): raise pytest.skip("MultiIndex has no ._data") a = index b = type(a)(a) tm.assert_equal(a._data, b._data) @pytest.mark.parametrize("itm", [101, "no_int"]) # FutureWarning from non-tuple sequence of nd indexing @pytest.mark.filterwarnings("ignore::FutureWarning") def test_getitem_error(self, index, itm): with pytest.raises(IndexError): index[itm] @pytest.mark.parametrize( "fname, sname, expected_name", [ ("A", "A", "A"), ("A", "B", None), ("A", None, None), (None, "B", None), (None, None, None), ], ) def test_corner_union(self, index, fname, sname, expected_name): # GH 9943 9862 # Test unions with various name combinations # Do not test MultiIndex or repeats if isinstance(index, MultiIndex) or not index.is_unique: pytest.skip("Not for MultiIndex or repeated indices") # Test copy.union(copy) first = index.copy().set_names(fname) second = index.copy().set_names(sname) union = first.union(second) expected = index.copy().set_names(expected_name) tm.assert_index_equal(union, expected) # Test copy.union(empty) first = index.copy().set_names(fname) second = index.drop(index).set_names(sname) union = first.union(second) expected = index.copy().set_names(expected_name) tm.assert_index_equal(union, expected) # Test empty.union(copy) first = index.drop(index).set_names(fname) second = index.copy().set_names(sname) union = first.union(second) expected = index.copy().set_names(expected_name) tm.assert_index_equal(union, expected) # Test empty.union(empty) first = index.drop(index).set_names(fname) second = index.drop(index).set_names(sname) union = first.union(second) expected = index.drop(index).set_names(expected_name) tm.assert_index_equal(union, expected) @pytest.mark.parametrize( "fname, sname, expected_name", [ ("A", "A", "A"), ("A", "B", None), ("A", None, None), (None, "B", None), (None, None, None), ], ) def test_union_unequal(self, index, fname, sname, expected_name): if isinstance(index, MultiIndex) or not index.is_unique: pytest.skip("Not for MultiIndex or repeated indices") # test copy.union(subset) - need 
sort for unicode and string first = index.copy().set_names(fname) second = index[1:].set_names(sname) union = first.union(second).sort_values() expected = index.set_names(expected_name).sort_values() tm.assert_index_equal(union, expected) @pytest.mark.parametrize( "fname, sname, expected_name", [ ("A", "A", "A"), ("A", "B", None), ("A", None, None), (None, "B", None), (None, None, None), ], ) def test_corner_intersect(self, index, fname, sname, expected_name): # GH35847 # Test intersections with various name combinations if isinstance(index, MultiIndex) or not index.is_unique: pytest.skip("Not for MultiIndex or repeated indices") # Test copy.intersection(copy) first = index.copy().set_names(fname) second = index.copy().set_names(sname) intersect = first.intersection(second) expected = index.copy().set_names(expected_name) tm.assert_index_equal(intersect, expected) # Test copy.intersection(empty) first = index.copy().set_names(fname) second = index.drop(index).set_names(sname) intersect = first.intersection(second) expected = index.drop(index).set_names(expected_name) tm.assert_index_equal(intersect, expected) # Test empty.intersection(copy) first = index.drop(index).set_names(fname) second = index.copy().set_names(sname) intersect = first.intersection(second) expected = index.drop(index).set_names(expected_name) tm.assert_index_equal(intersect, expected) # Test empty.intersection(empty) first = index.drop(index).set_names(fname) second = index.drop(index).set_names(sname) intersect = first.intersection(second) expected = index.drop(index).set_names(expected_name) tm.assert_index_equal(intersect, expected) @pytest.mark.parametrize( "fname, sname, expected_name", [ ("A", "A", "A"), ("A", "B", None), ("A", None, None), (None, "B", None), (None, None, None), ], ) def test_intersect_unequal(self, index, fname, sname, expected_name): if isinstance(index, MultiIndex) or not index.is_unique: pytest.skip("Not for MultiIndex or repeated indices") # test copy.intersection(subset) - need sort for unicode and string first = index.copy().set_names(fname) second = index[1:].set_names(sname) intersect = first.intersection(second).sort_values() expected = index[1:].set_names(expected_name).sort_values() tm.assert_index_equal(intersect, expected) def test_to_flat_index(self, index): # 22866 if isinstance(index, MultiIndex): pytest.skip("Separate expectation for MultiIndex") result = index.to_flat_index() tm.assert_index_equal(result, index) def test_set_name_methods(self, index): new_name = "This is the new name for this index" # don't tests a MultiIndex here (as its tested separated) if isinstance(index, MultiIndex): pytest.skip("Skip check for MultiIndex") original_name = index.name new_ind = index.set_names([new_name]) assert new_ind.name == new_name assert index.name == original_name res = index.rename(new_name, inplace=True) # should return None assert res is None assert index.name == new_name assert index.names == [new_name] # FIXME: dont leave commented-out # with pytest.raises(TypeError, match="list-like"): # # should still fail even if it would be the right length # ind.set_names("a") with pytest.raises(ValueError, match="Level must be None"): index.set_names("a", level=0) # rename in place just leaves tuples and other containers alone name = ("A", "B") index.rename(name, inplace=True) assert index.name == name assert index.names == [name] def test_copy_and_deepcopy(self, index): from copy import copy, deepcopy if isinstance(index, MultiIndex): pytest.skip("Skip check for MultiIndex") for func in 
(copy, deepcopy): idx_copy = func(index) assert idx_copy is not index assert idx_copy.equals(index) new_copy = index.copy(deep=True, name="banana") assert new_copy.name == "banana" def test_unique(self, index): # don't test a MultiIndex here (as its tested separated) # don't test a CategoricalIndex because categories change (GH 18291) if isinstance(index, (MultiIndex, CategoricalIndex)): pytest.skip("Skip check for MultiIndex/CategoricalIndex") # GH 17896 expected = index.drop_duplicates() for level in 0, index.name, None: result = index.unique(level=level) tm.assert_index_equal(result, expected) msg = "Too many levels: Index has only 1 level, not 4" with pytest.raises(IndexError, match=msg): index.unique(level=3) msg = ( fr"Requested level \(wrong\) does not match index name " fr"\({re.escape(index.name.__repr__())}\)" ) with pytest.raises(KeyError, match=msg): index.unique(level="wrong") def test_get_unique_index(self, index): # MultiIndex tested separately if not len(index) or isinstance(index, MultiIndex): pytest.skip("Skip check for empty Index and MultiIndex") idx = index[[0] * 5] idx_unique = index[[0]] # We test against `idx_unique`, so first we make sure it's unique # and doesn't contain nans. assert idx_unique.is_unique is True try: assert idx_unique.hasnans is False except NotImplementedError: pass for dropna in [False, True]: result = idx._get_unique_index(dropna=dropna) tm.assert_index_equal(result, idx_unique) # nans: if not index._can_hold_na: pytest.skip("Skip na-check if index cannot hold na") if is_period_dtype(index.dtype): vals = index[[0] * 5]._data vals[0] = pd.NaT elif needs_i8_conversion(index.dtype): vals = index.asi8[[0] * 5] vals[0] = iNaT else: vals = index.values[[0] * 5] vals[0] = np.nan vals_unique = vals[:2] if index.dtype.kind in ["m", "M"]: # i.e. 
needs_i8_conversion but not period_dtype, as above vals = type(index._data)._simple_new(vals, dtype=index.dtype) vals_unique = type(index._data)._simple_new(vals_unique, dtype=index.dtype) idx_nan = index._shallow_copy(vals) idx_unique_nan = index._shallow_copy(vals_unique) assert idx_unique_nan.is_unique is True assert idx_nan.dtype == index.dtype assert idx_unique_nan.dtype == index.dtype for dropna, expected in zip([False, True], [idx_unique_nan, idx_unique]): for i in [idx_nan, idx_unique_nan]: result = i._get_unique_index(dropna=dropna) tm.assert_index_equal(result, expected) def test_mutability(self, index): if not len(index): pytest.skip("Skip check for empty Index") msg = "Index does not support mutable operations" with pytest.raises(TypeError, match=msg): index[0] = index[0] def test_view(self, index): assert index.view().name == index.name def test_searchsorted_monotonic(self, index): # GH17271 # not implemented for tuple searches in MultiIndex # or Intervals searches in IntervalIndex if isinstance(index, (MultiIndex, pd.IntervalIndex)): pytest.skip("Skip check for MultiIndex/IntervalIndex") # nothing to test if the index is empty if index.empty: pytest.skip("Skip check for empty Index") value = index[0] # determine the expected results (handle dupes for 'right') expected_left, expected_right = 0, (index == value).argmin() if expected_right == 0: # all values are the same, expected_right should be length expected_right = len(index) # test _searchsorted_monotonic in all cases # test searchsorted only for increasing if index.is_monotonic_increasing: ssm_left = index._searchsorted_monotonic(value, side="left") assert expected_left == ssm_left ssm_right = index._searchsorted_monotonic(value, side="right") assert expected_right == ssm_right ss_left = index.searchsorted(value, side="left") assert expected_left == ss_left ss_right = index.searchsorted(value, side="right") assert expected_right == ss_right elif index.is_monotonic_decreasing: ssm_left = index._searchsorted_monotonic(value, side="left") assert expected_left == ssm_left ssm_right = index._searchsorted_monotonic(value, side="right") assert expected_right == ssm_right else: # non-monotonic should raise. 
with pytest.raises(ValueError): index._searchsorted_monotonic(value, side="left") def test_pickle(self, index): original_name, index.name = index.name, "foo" unpickled = tm.round_trip_pickle(index) assert index.equals(unpickled) index.name = original_name def test_drop_duplicates(self, index, keep): if isinstance(index, MultiIndex): pytest.skip("MultiIndex is tested separately") if isinstance(index, RangeIndex): pytest.skip( "RangeIndex is tested in test_drop_duplicates_no_duplicates " "as it cannot hold duplicates" ) if len(index) == 0: pytest.skip( "empty index is tested in test_drop_duplicates_no_duplicates " "as it cannot hold duplicates" ) # make unique index holder = type(index) unique_values = list(set(index)) unique_idx = holder(unique_values) # make duplicated index n = len(unique_idx) duplicated_selection = np.random.choice(n, int(n * 1.5)) idx = holder(unique_idx.values[duplicated_selection]) # Series.duplicated is tested separately expected_duplicated = ( pd.Series(duplicated_selection).duplicated(keep=keep).values ) tm.assert_numpy_array_equal(idx.duplicated(keep=keep), expected_duplicated) # Series.drop_duplicates is tested separately expected_dropped = holder(pd.Series(idx).drop_duplicates(keep=keep)) tm.assert_index_equal(idx.drop_duplicates(keep=keep), expected_dropped) def test_drop_duplicates_no_duplicates(self, index): if isinstance(index, MultiIndex): pytest.skip("MultiIndex is tested separately") # make unique index if isinstance(index, RangeIndex): # RangeIndex cannot have duplicates unique_idx = index else: holder = type(index) unique_values = list(set(index)) unique_idx = holder(unique_values) # check on unique index expected_duplicated = np.array([False] * len(unique_idx), dtype="bool") tm.assert_numpy_array_equal(unique_idx.duplicated(), expected_duplicated) result_dropped = unique_idx.drop_duplicates() tm.assert_index_equal(result_dropped, unique_idx) # validate shallow copy assert result_dropped is not unique_idx def test_drop_duplicates_inplace(self, index): msg = r"drop_duplicates\(\) got an unexpected keyword argument" with pytest.raises(TypeError, match=msg): index.drop_duplicates(inplace=True) def test_has_duplicates(self, index): holder = type(index) if not len(index) or isinstance(index, (MultiIndex, RangeIndex)): # MultiIndex tested separately in: # tests/indexes/multi/test_unique_and_duplicates. # RangeIndex is unique by definition. 
pytest.skip("Skip check for empty Index, MultiIndex, and RangeIndex") idx = holder([index[0]] * 5) assert idx.is_unique is False assert idx.has_duplicates is True @pytest.mark.parametrize( "dtype", ["int64", "uint64", "float64", "category", "datetime64[ns]", "timedelta64[ns]"], ) def test_astype_preserves_name(self, index, dtype): # https://github.com/pandas-dev/pandas/issues/32013 if isinstance(index, MultiIndex): index.names = ["idx" + str(i) for i in range(index.nlevels)] else: index.name = "idx" try: # Some of these conversions cannot succeed so we use a try / except result = index.astype(dtype) except (ValueError, TypeError, NotImplementedError, SystemError): return if isinstance(index, MultiIndex): assert result.names == index.names else: assert result.name == index.name def test_ravel_deprecation(self, index): # GH#19956 ravel returning ndarray is deprecated with tm.assert_produces_warning(FutureWarning): index.ravel() @pytest.mark.parametrize("na_position", [None, "middle"]) def test_sort_values_invalid_na_position(index_with_missing, na_position): if isinstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)): # datetime-like indices will get na_position kwarg as part of # synchronizing duplicate-sorting behavior, because we currently expect # them, other indices, and Series to sort differently (xref 35922) pytest.xfail("sort_values does not support na_position kwarg") elif isinstance(index_with_missing, (CategoricalIndex, MultiIndex)): pytest.xfail("missing value sorting order not defined for index type") if na_position not in ["first", "last"]: with pytest.raises(ValueError, match=f"invalid na_position: {na_position}"): index_with_missing.sort_values(na_position=na_position) @pytest.mark.parametrize("na_position", ["first", "last"]) def test_sort_values_with_missing(index_with_missing, na_position): # GH 35584. Test that sort_values works with missing values, # sort non-missing and place missing according to na_position if isinstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)): # datetime-like indices will get na_position kwarg as part of # synchronizing duplicate-sorting behavior, because we currently expect # them, other indices, and Series to sort differently (xref 35922) pytest.xfail("sort_values does not support na_position kwarg") elif isinstance(index_with_missing, (CategoricalIndex, MultiIndex)): pytest.xfail("missing value sorting order not defined for index type") missing_count = np.sum(index_with_missing.isna()) not_na_vals = index_with_missing[index_with_missing.notna()].values sorted_values = np.sort(not_na_vals) if na_position == "first": sorted_values = np.concatenate([[None] * missing_count, sorted_values]) else: sorted_values = np.concatenate([sorted_values, [None] * missing_count]) expected = type(index_with_missing)(sorted_values) result = index_with_missing.sort_values(na_position=na_position) tm.assert_index_equal(result, expected)
37.79368
88
0.629764
17,621
0.866621
0
0
8,194
0.40299
0
0
4,873
0.23966
6a6837a4b97157cac91cdd54ef662d5a158d6207
22,699
py
Python
tests/test_dynamics.py
leasanchez/BiorbdOptim
28fac818af031668ecd82bc1929f78303c5d58d2
[ "MIT" ]
34
2020-12-14T17:09:41.000Z
2022-03-31T17:03:37.000Z
tests/test_dynamics.py
pariterre/bioptim
4064138e7d3fce34e21d488df19941937ce30557
[ "MIT" ]
229
2020-09-30T16:53:40.000Z
2022-03-29T21:11:46.000Z
tests/test_dynamics.py
fbailly/bioptim
3a5473ee7c39d645d960611596a45b044e8ccf58
[ "MIT" ]
15
2020-11-20T12:32:59.000Z
2022-01-22T22:59:08.000Z
import pytest import numpy as np from casadi import MX, SX import biorbd_casadi as biorbd from bioptim.dynamics.configure_problem import ConfigureProblem from bioptim.dynamics.dynamics_functions import DynamicsFunctions from bioptim.interfaces.biorbd_interface import BiorbdInterface from bioptim.misc.enums import ControlType from bioptim.optimization.non_linear_program import NonLinearProgram from bioptim.optimization.optimization_vector import OptimizationVector from bioptim.dynamics.configure_problem import DynamicsFcn, Dynamics from .utils import TestUtils class OptimalControlProgram: def __init__(self, nlp): self.n_phases = 1 self.nlp = [nlp] self.v = OptimizationVector(self) @pytest.mark.parametrize("cx", [MX, SX]) @pytest.mark.parametrize("with_external_force", [False, True]) @pytest.mark.parametrize("with_contact", [False, True]) def test_torque_driven(with_contact, with_external_force, cx): # Prepare the program nlp = NonLinearProgram() nlp.model = biorbd.Model( TestUtils.bioptim_folder() + "/examples/getting_started/models/2segments_4dof_2contacts.bioMod" ) nlp.ns = 5 nlp.cx = cx nlp.x_bounds = np.zeros((nlp.model.nbQ() * 3, 1)) nlp.u_bounds = np.zeros((nlp.model.nbQ(), 1)) ocp = OptimalControlProgram(nlp) nlp.control_type = ControlType.CONSTANT NonLinearProgram.add(ocp, "dynamics_type", Dynamics(DynamicsFcn.TORQUE_DRIVEN, with_contact=with_contact), False) np.random.seed(42) if with_external_force: external_forces = [np.random.rand(6, nlp.model.nbSegment(), nlp.ns)] nlp.external_forces = BiorbdInterface.convert_array_to_external_forces(external_forces)[0] # Prepare the dynamics ConfigureProblem.initialize(ocp, nlp) # Test the results states = np.random.rand(nlp.states.shape, nlp.ns) controls = np.random.rand(nlp.controls.shape, nlp.ns) params = np.random.rand(nlp.parameters.shape, nlp.ns) x_out = np.array(nlp.dynamics_func(states, controls, params)) if with_contact: contact_out = np.array(nlp.contact_forces_func(states, controls, params)) if with_external_force: np.testing.assert_almost_equal( x_out[:, 0], [0.8631034, 0.3251833, 0.1195942, 0.4937956, -7.7700092, -7.5782306, 21.7073786, -16.3059315], ) np.testing.assert_almost_equal(contact_out[:, 0], [-47.8131136, 111.1726516, -24.4449121]) else: np.testing.assert_almost_equal( x_out[:, 0], [0.6118529, 0.785176, 0.6075449, 0.8083973, -0.3214905, -0.1912131, 0.6507164, -0.2359716] ) np.testing.assert_almost_equal(contact_out[:, 0], [-2.444071, 128.8816865, 2.7245124]) else: if with_external_force: np.testing.assert_almost_equal( x_out[:, 0], [0.86310343, 0.32518332, 0.11959425, 0.4937956, 0.30731739, -9.97912778, 1.15263778, 36.02430956], ) else: np.testing.assert_almost_equal( x_out[:, 0], [0.61185289, 0.78517596, 0.60754485, 0.80839735, -0.30241366, -10.38503791, 1.60445173, 35.80238642], ) @pytest.mark.parametrize("cx", [MX, SX]) @pytest.mark.parametrize("with_external_force", [False, True]) @pytest.mark.parametrize("with_contact", [False, True]) def test_torque_derivative_driven(with_contact, with_external_force, cx): # Prepare the program nlp = NonLinearProgram() nlp.model = biorbd.Model( TestUtils.bioptim_folder() + "/examples/getting_started/models/2segments_4dof_2contacts.bioMod" ) nlp.ns = 5 nlp.cx = cx nlp.x_bounds = np.zeros((nlp.model.nbQ() * 3, 1)) nlp.u_bounds = np.zeros((nlp.model.nbQ(), 1)) ocp = OptimalControlProgram(nlp) nlp.control_type = ControlType.CONSTANT NonLinearProgram.add( ocp, "dynamics_type", Dynamics(DynamicsFcn.TORQUE_DERIVATIVE_DRIVEN, with_contact=with_contact), False ) np.random.seed(42) if 
with_external_force: external_forces = [np.random.rand(6, nlp.model.nbSegment(), nlp.ns)] nlp.external_forces = BiorbdInterface.convert_array_to_external_forces(external_forces)[0] # Prepare the dynamics ConfigureProblem.initialize(ocp, nlp) # Test the results states = np.random.rand(nlp.states.shape, nlp.ns) controls = np.random.rand(nlp.controls.shape, nlp.ns) params = np.random.rand(nlp.parameters.shape, nlp.ns) x_out = np.array(nlp.dynamics_func(states, controls, params)) if with_contact: contact_out = np.array(nlp.contact_forces_func(states, controls, params)) if with_external_force: np.testing.assert_almost_equal( x_out[:, 0], [ 0.8631034, 0.3251833, 0.1195942, 0.4937956, -7.7700092, -7.5782306, 21.7073786, -16.3059315, 0.8074402, 0.4271078, 0.417411, 0.3232029, ], ) np.testing.assert_almost_equal(contact_out[:, 0], [-47.8131136, 111.1726516, -24.4449121]) else: np.testing.assert_almost_equal( x_out[:, 0], [ 0.61185289, 0.78517596, 0.60754485, 0.80839735, -0.32149054, -0.19121314, 0.65071636, -0.23597164, 0.38867729, 0.54269608, 0.77224477, 0.72900717, ], ) np.testing.assert_almost_equal(contact_out[:, 0], [-2.444071, 128.8816865, 2.7245124]) else: if with_external_force: np.testing.assert_almost_equal( x_out[:, 0], [ 0.86310343, 0.32518332, 0.11959425, 0.4937956, 0.30731739, -9.97912778, 1.15263778, 36.02430956, 0.80744016, 0.42710779, 0.417411, 0.32320293, ], ) else: np.testing.assert_almost_equal( x_out[:, 0], [ 0.61185289, 0.78517596, 0.60754485, 0.80839735, -0.30241366, -10.38503791, 1.60445173, 35.80238642, 0.38867729, 0.54269608, 0.77224477, 0.72900717, ], ) @pytest.mark.parametrize("cx", [MX, SX]) @pytest.mark.parametrize("with_external_force", [False, True]) @pytest.mark.parametrize("with_contact", [False, True]) def test_torque_activation_driven(with_contact, with_external_force, cx): # Prepare the program nlp = NonLinearProgram() nlp.model = biorbd.Model( TestUtils.bioptim_folder() + "/examples/getting_started/models/2segments_4dof_2contacts.bioMod" ) nlp.ns = 5 nlp.cx = cx nlp.x_bounds = np.zeros((nlp.model.nbQ() * 2, 1)) nlp.u_bounds = np.zeros((nlp.model.nbQ(), 1)) ocp = OptimalControlProgram(nlp) nlp.control_type = ControlType.CONSTANT NonLinearProgram.add( ocp, "dynamics_type", Dynamics(DynamicsFcn.TORQUE_ACTIVATIONS_DRIVEN, with_contact=with_contact), False ) np.random.seed(42) if with_external_force: external_forces = [np.random.rand(6, nlp.model.nbSegment(), nlp.ns)] nlp.external_forces = BiorbdInterface.convert_array_to_external_forces(external_forces)[0] # Prepare the dynamics ConfigureProblem.initialize(ocp, nlp) # Test the results states = np.random.rand(nlp.states.shape, nlp.ns) controls = np.random.rand(nlp.controls.shape, nlp.ns) params = np.random.rand(nlp.parameters.shape, nlp.ns) x_out = np.array(nlp.dynamics_func(states, controls, params)) if with_contact: contact_out = np.array(nlp.contact_forces_func(states, controls, params)) if with_external_force: np.testing.assert_almost_equal( x_out[:, 0], [0.8631, 0.32518, 0.11959, 0.4938, 19.01887, 18.51503, -53.08574, 58.48719], decimal=5, ) np.testing.assert_almost_equal(contact_out[:, 0], [109.8086936, 3790.3932439, -3571.7858574]) else: np.testing.assert_almost_equal( x_out[:, 0], [0.61185289, 0.78517596, 0.60754485, 0.80839735, 0.78455384, -0.16844256, -1.56184114, 1.97658587], decimal=5, ) np.testing.assert_almost_equal(contact_out[:, 0], [-7.88958997, 329.70828173, -263.55516549]) else: if with_external_force: np.testing.assert_almost_equal( x_out[:, 0], [ 8.63103426e-01, 3.25183322e-01, 1.19594246e-01, 
4.93795596e-01, 1.73558072e01, -4.69891264e01, 1.81396922e02, 3.61170139e03, ], decimal=5, ) else: np.testing.assert_almost_equal( x_out[:, 0], [ 6.11852895e-01, 7.85175961e-01, 6.07544852e-01, 8.08397348e-01, -2.38262975e01, -5.82033454e01, 1.27439020e02, 3.66531163e03, ], decimal=5, ) @pytest.mark.parametrize("cx", [MX, SX]) @pytest.mark.parametrize("with_external_force", [False, True]) @pytest.mark.parametrize("with_contact", [False, True]) @pytest.mark.parametrize("with_torque", [False, True]) @pytest.mark.parametrize("with_excitations", [False, True]) def test_muscle_driven(with_excitations, with_contact, with_torque, with_external_force, cx): # Prepare the program nlp = NonLinearProgram() nlp.model = biorbd.Model( TestUtils.bioptim_folder() + "/examples/muscle_driven_ocp/models/arm26_with_contact.bioMod" ) nlp.ns = 5 nlp.cx = cx nlp.x_bounds = np.zeros((nlp.model.nbQ() * 2 + nlp.model.nbMuscles(), 1)) nlp.u_bounds = np.zeros((nlp.model.nbMuscles(), 1)) ocp = OptimalControlProgram(nlp) nlp.control_type = ControlType.CONSTANT NonLinearProgram.add( ocp, "dynamics_type", Dynamics( DynamicsFcn.MUSCLE_DRIVEN, with_torque=with_torque, with_excitations=with_excitations, with_contact=with_contact, ), False, ) np.random.seed(42) if with_external_force: external_forces = [np.random.rand(6, nlp.model.nbSegment(), nlp.ns)] nlp.external_forces = BiorbdInterface.convert_array_to_external_forces(external_forces)[0] # Prepare the dynamics ConfigureProblem.initialize(ocp, nlp) # Test the results states = np.random.rand(nlp.states.shape, nlp.ns) controls = np.random.rand(nlp.controls.shape, nlp.ns) params = np.random.rand(nlp.parameters.shape, nlp.ns) x_out = np.array(nlp.dynamics_func(states, controls, params)) if with_contact: # Warning this test is a bit bogus, there since the model does not have contacts if with_torque: if with_excitations: if with_external_force: np.testing.assert_almost_equal( x_out[:, 0], [ 0.6158501, 0.50313626, 0.64241928, 1.07179622, -33.76217857, 36.21815923, 46.87928022, -1.80189035, 53.3914525, 48.30056919, 63.69373374, -28.15700995, ], ) else: np.testing.assert_almost_equal( x_out[:, 0], [ 1.83404510e-01, 6.11852895e-01, 7.85175961e-01, -9.29662878e00, 3.00872062e02, -9.50354903e02, 8.60630831e00, 3.19433638e00, 2.97405608e01, -2.02754226e01, -2.32467778e01, -4.19135012e01, ], decimal=6, ) else: if with_external_force: np.testing.assert_almost_equal( x_out[:, 0], [6.15850098e-01, 5.03136259e-01, 6.42419278e-01, -8.06478367e00, 2.42279101e02, -7.72114103e02], decimal=6, ) else: np.testing.assert_almost_equal( x_out[:, 0], [1.83404510e-01, 6.11852895e-01, 7.85175961e-01, -3.80892207e00, 1.20476051e02, -4.33291346e02], decimal=6, ) else: if with_excitations: if with_external_force: np.testing.assert_almost_equal( x_out[:, 0], [ 0.6158501, 0.50313626, 0.64241928, 0.91952705, -39.04876174, 45.31837288, 55.65557816, 50.47052688, 0.36025589, 58.92377491, 29.70094194, -15.13534937, ], ) else: np.testing.assert_almost_equal( x_out[:, 0], [ 1.83404510e-01, 6.11852895e-01, 7.85175961e-01, -9.72712350e00, 3.10866170e02, -9.82725656e02, -7.72228930e00, -1.13759732e01, 9.51906209e01, 4.45077128e00, -5.20261014e00, -2.80864106e01, ], decimal=6, ) else: if with_external_force: np.testing.assert_almost_equal( x_out[:, 0], [0.6158501, 0.50313626, 0.64241928, 0.91952705, -39.04876174, 45.31837288], ) else: np.testing.assert_almost_equal( x_out[:, 0], [1.83404510e-01, 6.11852895e-01, 7.85175961e-01, -9.72712350e00, 3.10866170e02, -9.82725656e02], decimal=6, ) else: if with_torque: if 
with_excitations: if with_external_force: np.testing.assert_almost_equal( x_out[:, 0], [ 0.6158501, 0.50313626, 0.64241928, 1.07179622, -33.76217857, 36.21815923, 46.87928022, -1.80189035, 53.3914525, 48.30056919, 63.69373374, -28.15700995, ], ) else: np.testing.assert_almost_equal( x_out[:, 0], [ 1.83404510e-01, 6.11852895e-01, 7.85175961e-01, -9.29662878e00, 3.00872062e02, -9.50354903e02, 8.60630831e00, 3.19433638e00, 2.97405608e01, -2.02754226e01, -2.32467778e01, -4.19135012e01, ], decimal=6, ) else: if with_external_force: np.testing.assert_almost_equal( x_out[:, 0], [6.15850098e-01, 5.03136259e-01, 6.42419278e-01, -8.06478367e00, 2.42279101e02, -7.72114103e02], decimal=6, ) else: np.testing.assert_almost_equal( x_out[:, 0], [1.83404510e-01, 6.11852895e-01, 7.85175961e-01, -3.80892207e00, 1.20476051e02, -4.33291346e02], decimal=6, ) else: if with_excitations: if with_external_force: np.testing.assert_almost_equal( x_out[:, 0], [ 0.6158501, 0.50313626, 0.64241928, 0.91952705, -39.04876174, 45.31837288, 55.65557816, 50.47052688, 0.36025589, 58.92377491, 29.70094194, -15.13534937, ], ) else: np.testing.assert_almost_equal( x_out[:, 0], [ 1.83404510e-01, 6.11852895e-01, 7.85175961e-01, -9.72712350e00, 3.10866170e02, -9.82725656e02, -7.72228930e00, -1.13759732e01, 9.51906209e01, 4.45077128e00, -5.20261014e00, -2.80864106e01, ], decimal=6, ) else: if with_external_force: np.testing.assert_almost_equal( x_out[:, 0], [0.6158501, 0.50313626, 0.64241928, 0.91952705, -39.04876174, 45.31837288], ) else: np.testing.assert_almost_equal( x_out[:, 0], [1.83404510e-01, 6.11852895e-01, 7.85175961e-01, -9.72712350e00, 3.10866170e02, -9.82725656e02], decimal=6, ) @pytest.mark.parametrize("with_contact", [False, True]) def test_custom_dynamics(with_contact): def custom_dynamic(states, controls, parameters, nlp, with_contact=False) -> tuple: DynamicsFunctions.apply_parameters(parameters, nlp) q = DynamicsFunctions.get(nlp.states["q"], states) qdot = DynamicsFunctions.get(nlp.states["qdot"], states) tau = DynamicsFunctions.get(nlp.controls["tau"], controls) dq = DynamicsFunctions.compute_qdot(nlp, q, qdot) ddq = DynamicsFunctions.forward_dynamics(nlp, q, qdot, tau, with_contact) return dq, ddq def configure(ocp, nlp, with_contact=None): ConfigureProblem.configure_q(nlp, True, False) ConfigureProblem.configure_qdot(nlp, True, False) ConfigureProblem.configure_tau(nlp, False, True) ConfigureProblem.configure_dynamics_function(ocp, nlp, custom_dynamic, with_contact=with_contact) if with_contact: ConfigureProblem.configure_contact_function(ocp, nlp, DynamicsFunctions.forces_from_torque_driven) # Prepare the program nlp = NonLinearProgram() nlp.model = biorbd.Model( TestUtils.bioptim_folder() + "/examples/getting_started/models/2segments_4dof_2contacts.bioMod" ) nlp.ns = 5 nlp.cx = MX nlp.x_bounds = np.zeros((nlp.model.nbQ() * 3, 1)) nlp.u_bounds = np.zeros((nlp.model.nbQ(), 1)) ocp = OptimalControlProgram(nlp) nlp.control_type = ControlType.CONSTANT NonLinearProgram.add( ocp, "dynamics_type", Dynamics(configure, dynamic_function=custom_dynamic, with_contact=with_contact), False ) np.random.seed(42) # Prepare the dynamics ConfigureProblem.initialize(ocp, nlp) # Test the results states = np.random.rand(nlp.states.shape, nlp.ns) controls = np.random.rand(nlp.controls.shape, nlp.ns) params = np.random.rand(nlp.parameters.shape, nlp.ns) x_out = np.array(nlp.dynamics_func(states, controls, params)) if with_contact: contact_out = np.array(nlp.contact_forces_func(states, controls, params)) 
np.testing.assert_almost_equal( x_out[:, 0], [0.6118529, 0.785176, 0.6075449, 0.8083973, -0.3214905, -0.1912131, 0.6507164, -0.2359716] ) np.testing.assert_almost_equal(contact_out[:, 0], [-2.444071, 128.8816865, 2.7245124]) else: np.testing.assert_almost_equal( x_out[:, 0], [0.61185289, 0.78517596, 0.60754485, 0.80839735, -0.30241366, -10.38503791, 1.60445173, 35.80238642], )
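# A minimal collection sketch (assumed): these parametrized cases are normally
# run through pytest, e.g. selecting only the torque-driven variants:
#
#     pytest tests/test_dynamics.py -k "torque_driven"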
37.958194
120
0.491784
150
0.006608
0
0
21,963
0.967576
0
0
1,001
0.044099
6a68e42c5242acff02618aac8ab6c6c44bb61d29
1,312
py
Python
polyaxon/event_manager/event_manager.py
elyase/polyaxon
1c19f059a010a6889e2b7ea340715b2bcfa382a0
[ "MIT" ]
null
null
null
polyaxon/event_manager/event_manager.py
elyase/polyaxon
1c19f059a010a6889e2b7ea340715b2bcfa382a0
[ "MIT" ]
null
null
null
polyaxon/event_manager/event_manager.py
elyase/polyaxon
1c19f059a010a6889e2b7ea340715b2bcfa382a0
[ "MIT" ]
null
null
null
from hestia.manager_interface import ManagerInterface from event_manager import event_actions class EventManager(ManagerInterface): def _get_state_data(self, event): # pylint:disable=arguments-differ return event.event_type, event def subscribe(self, event): # pylint:disable=arguments-differ """ >>> subscribe(SomeEvent) """ super().subscribe(obj=event) def knows(self, event_type): # pylint:disable=arguments-differ return super().knows(key=event_type) def get(self, event_type): # pylint:disable=arguments-differ return super().get(key=event_type) def user_write_events(self): """Return event types where use acted on an object. The write events are events with actions: * CREATED * UPDATED * DELETED * RESUMED * COPIED * CLONED * STOPPED """ return [event_type for event_type, event in self.items if event.get_event_action() in event_actions.WRITE_ACTIONS] def user_view_events(self): """Return event types where use viewed a main object.""" return [event_type for event_type, event in self.items if event.get_event_action() == event_actions.VIEWED]
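# A minimal usage sketch, assuming hestia's ManagerInterface stores entries
# under the key returned by _get_state_data; `RunCreated` and its attributes
# are placeholders, not real polyaxon events.
def _example_registry():
    class RunCreated:
        event_type = 'run.created'

        @staticmethod
        def get_event_action():
            return 'created'

    manager = EventManager()
    manager.subscribe(RunCreated)
    return manager.knows('run.created')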
31.238095
90
0.634909
1,214
0.925305
0
0
0
0
0
0
502
0.382622
6a6b124cb7b2cd1d6d09ae5b84d5b49e63612508
679
py
Python
test_f_login_andy.py
KotoLLC/peacenik-tests
760f7799ab2b9312fe0cce373890195151c48fce
[ "Apache-2.0" ]
null
null
null
test_f_login_andy.py
KotoLLC/peacenik-tests
760f7799ab2b9312fe0cce373890195151c48fce
[ "Apache-2.0" ]
null
null
null
test_f_login_andy.py
KotoLLC/peacenik-tests
760f7799ab2b9312fe0cce373890195151c48fce
[ "Apache-2.0" ]
null
null
null
import json

import requests

# imported explicitly above so the test does not depend on the star import
# re-exporting json/requests
from helpers import *


def test_f_login_andy():
    url = "http://central.orbits.local/rpc.AuthService/Login"
    raw_payload = {"name": "andy", "password": "12345"}
    payload = json.dumps(raw_payload)  # convert dict to json for the body data
    headers = {'Content-Type': 'application/json'}

    response = requests.request("POST", url, headers=headers, data=payload)
    save_cookies(response.cookies, "cookies.txt")

    # Validate response headers and body contents, e.g. status code.
    assert response.status_code == 200

    # print full request and response
    pretty_print_request(response.request)
    pretty_print_response(response)
35.736842
75
0.696613
0
0
0
0
0
0
0
0
282
0.415317
6a6b9fd92e89d1958b00048f55376ec87fde6db2
7,696
py
Python
docker/src/clawpack-5.3.1/riemann/src/shallow_1D_py.py
ian-r-rose/visualization
ed6d9fab95eb125e7340ab3fad3ed114ed3214af
[ "CC-BY-4.0" ]
11
2017-01-04T18:19:48.000Z
2021-02-21T01:46:33.000Z
docker/src/clawpack-5.3.1/riemann/src/shallow_1D_py.py
ian-r-rose/visualization
ed6d9fab95eb125e7340ab3fad3ed114ed3214af
[ "CC-BY-4.0" ]
8
2016-09-22T20:49:51.000Z
2019-09-06T23:28:13.000Z
docker/src/clawpack-5.3.1/riemann/src/shallow_1D_py.py
ian-r-rose/visualization
ed6d9fab95eb125e7340ab3fad3ed114ed3214af
[ "CC-BY-4.0" ]
13
2016-09-22T20:20:06.000Z
2020-07-13T14:48:32.000Z
#!/usr/bin/env python
# encoding: utf-8

r"""
Riemann solvers for the shallow water equations.

The available solvers are:
 * Roe - Use Roe averages to calculate the solution to the
   Riemann problem
 * HLL - Use a HLL solver
 * Exact - Use a newton iteration to calculate the exact solution to the
   Riemann problem

.. math::
    q_t + f(q)_x = 0

where

.. math::
    q(x,t) = \left [ \begin{array}{c} h \\ h u \end{array} \right ],

the flux function is

.. math::
    f(q) = \left [ \begin{array}{c} h u \\ hu^2 + 1/2 g h^2 \end{array}\right ].

and :math:`h` is the water column height, :math:`u` the velocity and :math:`g`
is the gravitational acceleration.

:Authors:
    Kyle T. Mandli (2009-02-05): Initial version
"""
# ============================================================================
#      Copyright (C) 2009 Kyle T. Mandli <mandli@amath.washington.edu>
#
#  Distributed under the terms of the Berkeley Software Distribution (BSD)
#  license
#                     http://www.opensource.org/licenses/
# ============================================================================

import numpy as np

num_eqn = 2
num_waves = 2

def shallow_roe_1D(q_l,q_r,aux_l,aux_r,problem_data):
    r"""
    Roe shallow water solver in 1d::

        ubar = (sqrt(u_l) + sqrt(u_r)) / (sqrt(h_l) + sqrt(h_r))
        cbar = sqrt( 0.5 * g * (h_l + h_r))

        W_1 = |      1      |  s_1 = ubar - cbar
              | ubar - cbar |

        W_2 = |      1      |  s_2 = ubar + cbar
              | ubar + cbar |

        a1 = 0.5 * ( - delta_hu + (ubar + cbar) * delta_h ) / cbar
        a2 = 0.5 * (   delta_hu - (ubar - cbar) * delta_h ) / cbar

    *problem_data* should contain:
     - *g* - (float) Gravitational constant
     - *efix* - (bool) Boolean as to whether an entropy fix should be used, if
       not present, false is assumed

    :Version: 1.0 (2009-02-05)
    """
    # Array shapes
    num_rp = q_l.shape[1]

    # Output arrays
    wave = np.empty( (num_eqn, num_waves, num_rp) )
    s = np.zeros( (num_waves, num_rp) )
    amdq = np.zeros( (num_eqn, num_rp) )
    apdq = np.zeros( (num_eqn, num_rp) )

    # Compute roe-averaged quantities
    ubar = ( (q_l[1,:]/np.sqrt(q_l[0,:]) + q_r[1,:]/np.sqrt(q_r[0,:])) /
             (np.sqrt(q_l[0,:]) + np.sqrt(q_r[0,:])) )
    cbar = np.sqrt(0.5 * problem_data['grav'] * (q_l[0,:] + q_r[0,:]))

    # Compute Flux structure
    delta = q_r - q_l
    a1 = 0.5 * (-delta[1,:] + (ubar + cbar) * delta[0,:]) / cbar
    a2 = 0.5 * ( delta[1,:] - (ubar - cbar) * delta[0,:]) / cbar

    # Compute each family of waves
    wave[0,0,:] = a1
    wave[1,0,:] = a1 * (ubar - cbar)
    s[0,:] = ubar - cbar

    wave[0,1,:] = a2
    wave[1,1,:] = a2 * (ubar + cbar)
    s[1,:] = ubar + cbar

    if problem_data['efix']:
        raise NotImplementedError("Entropy fix has not been implemented.")
    else:
        s_index = np.zeros((2,num_rp))
        for m in range(num_eqn):
            for mw in range(num_waves):
                s_index[0,:] = s[mw,:]
                amdq[m,:] += np.min(s_index,axis=0) * wave[m,mw,:]
                apdq[m,:] += np.max(s_index,axis=0) * wave[m,mw,:]

    return wave, s, amdq, apdq

def shallow_hll_1D(q_l,q_r,aux_l,aux_r,problem_data):
    r"""
    HLL shallow water solver ::

        W_1 = Q_hat - Q_l    s_1 = min(u_l-c_l,u_l+c_l,lambda_roe_1,lambda_roe_2)
        W_2 = Q_r - Q_hat    s_2 = max(u_r-c_r,u_r+c_r,lambda_roe_1,lambda_roe_2)

        Q_hat = ( f(q_r) - f(q_l) - s_2 * q_r + s_1 * q_l ) / (s_1 - s_2)

    *problem_data* should contain:
     - *g* - (float) Gravitational constant

    :Version: 1.0 (2009-02-05)
    """
    # Array shapes
    num_rp = q_l.shape[1]
    num_eqn = 2
    num_waves = 2

    # Output arrays
    wave = np.empty( (num_eqn, num_waves, num_rp) )
    s = np.empty( (num_waves, num_rp) )
    amdq = np.zeros( (num_eqn, num_rp) )
    apdq = np.zeros( (num_eqn, num_rp) )

    # Compute Roe and right and left speeds
    ubar = ( (q_l[1,:]/np.sqrt(q_l[0,:]) + q_r[1,:]/np.sqrt(q_r[0,:])) /
             (np.sqrt(q_l[0,:]) +
             np.sqrt(q_r[0,:])) )
    cbar = np.sqrt(0.5 * problem_data['grav'] * (q_l[0,:] + q_r[0,:]))
    u_r = q_r[1,:] / q_r[0,:]
    c_r = np.sqrt(problem_data['grav'] * q_r[0,:])
    u_l = q_l[1,:] / q_l[0,:]
    c_l = np.sqrt(problem_data['grav'] * q_l[0,:])

    # Compute Einfeldt speeds
    s_index = np.empty((4,num_rp))
    s_index[0,:] = ubar+cbar
    s_index[1,:] = ubar-cbar
    s_index[2,:] = u_l + c_l
    s_index[3,:] = u_l - c_l
    s[0,:] = np.min(s_index,axis=0)
    s_index[2,:] = u_r + c_r
    s_index[3,:] = u_r - c_r
    s[1,:] = np.max(s_index,axis=0)

    # Compute middle state
    q_hat = np.empty((2,num_rp))
    q_hat[0,:] = ((q_r[1,:] - q_l[1,:]
                   - s[1,:] * q_r[0,:] + s[0,:] * q_l[0,:]) / (s[0,:] - s[1,:]))
    q_hat[1,:] = ((q_r[1,:]**2/q_r[0,:] + 0.5 * problem_data['grav'] * q_r[0,:]**2
                   - (q_l[1,:]**2/q_l[0,:] + 0.5 * problem_data['grav'] * q_l[0,:]**2)
                   - s[1,:] * q_r[1,:] + s[0,:] * q_l[1,:]) / (s[0,:] - s[1,:]))

    # Compute each family of waves
    wave[:,0,:] = q_hat - q_l
    wave[:,1,:] = q_r - q_hat

    # Compute variations
    s_index = np.zeros((2,num_rp))
    for m in range(num_eqn):
        for mw in range(num_waves):
            s_index[0,:] = s[mw,:]
            amdq[m,:] += np.min(s_index,axis=0) * wave[m,mw,:]
            apdq[m,:] += np.max(s_index,axis=0) * wave[m,mw,:]

    return wave, s, amdq, apdq

def shallow_fwave_1d(q_l, q_r, aux_l, aux_r, problem_data):
    r"""Shallow water Riemann solver using fwaves

    Also includes support for bathymetry but be wary if you think you might
    have dry states as this has not been tested.

    *problem_data* should contain:
     - *grav* - (float) Gravitational constant
     - *sea_level* - (float) Datum from which the dry-state is calculated.

    :Version: 1.0 (2014-09-05)
    """
    g = problem_data['grav']

    num_rp = q_l.shape[1]
    num_eqn = 2
    num_waves = 2

    # Output arrays
    fwave = np.empty( (num_eqn, num_waves, num_rp) )
    s = np.empty( (num_waves, num_rp) )
    amdq = np.zeros( (num_eqn, num_rp) )
    apdq = np.zeros( (num_eqn, num_rp) )

    # Extract state
    u_l = np.where(q_l[0,:] - problem_data['sea_level'] > 1e-3,
                   q_l[1,:] / q_l[0,:], 0.0)
    u_r = np.where(q_r[0,:] - problem_data['sea_level'] > 1e-3,
                   q_r[1,:] / q_r[0,:], 0.0)
    phi_l = q_l[0,:] * u_l**2 + 0.5 * g * q_l[0,:]**2
    phi_r = q_r[0,:] * u_r**2 + 0.5 * g * q_r[0,:]**2

    # Speeds
    s[0,:] = u_l - np.sqrt(g * q_l[0,:])
    s[1,:] = u_r + np.sqrt(g * q_r[0,:])

    delta1 = q_r[1,:] - q_l[1,:]
    delta2 = phi_r - phi_l + g * 0.5 * (q_r[0,:] + q_l[0,:]) * (aux_r[0,:] - aux_l[0,:])

    beta1 = (s[1,:] * delta1 - delta2) / (s[1,:] - s[0,:])
    beta2 = (delta2 - s[0,:] * delta1) / (s[1,:] - s[0,:])

    fwave[0,0,:] = beta1
    fwave[1,0,:] = beta1 * s[0,:]
    fwave[0,1,:] = beta2
    fwave[1,1,:] = beta2 * s[1,:]

    for m in range(num_eqn):
        for mw in range(num_waves):
            amdq[m,:] += (s[mw,:] < 0.0) * fwave[m,mw,:]
            apdq[m,:] += (s[mw,:] >= 0.0) * fwave[m,mw,:]

    return fwave, s, amdq, apdq

def shallow_exact_1D(q_l,q_r,aux_l,aux_r,problem_data):
    r"""
    Exact shallow water Riemann solver

    .. warning::
        This solver has not been implemented.

    """
    raise NotImplementedError("The exact swe solver has not been implemented.")
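# A minimal dam-break sketch, assuming the (num_eqn, num_rp) array layout used
# throughout this module; the states and gravity value are placeholders.
def _example_dam_break():
    q_l = np.array([[3.0], [0.0]])   # left state: depth 3, zero momentum
    q_r = np.array([[1.0], [0.0]])   # right state: depth 1, zero momentum
    problem_data = {'grav': 1.0, 'efix': False}
    return shallow_roe_1D(q_l, q_r, None, None, problem_data)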
31.801653
88
0.511954
0
0
0
0
0
0
0
0
3,263
0.423986
6a6cf8239e9dd6960a26d7ae881835b1d30a1dd5
10,408
py
Python
nuitka/Constants.py
juanfra684/Nuitka
0e276895fadabefb598232f2ccf8cc7736c9a85b
[ "Apache-2.0" ]
1
2020-04-13T18:56:02.000Z
2020-04-13T18:56:02.000Z
nuitka/Constants.py
juanfra684/Nuitka
0e276895fadabefb598232f2ccf8cc7736c9a85b
[ "Apache-2.0" ]
1
2020-07-11T17:53:56.000Z
2020-07-11T17:53:56.000Z
nuitka/Constants.py
juanfra684/Nuitka
0e276895fadabefb598232f2ccf8cc7736c9a85b
[ "Apache-2.0" ]
null
null
null
# Copyright 2020, Kay Hayen, mailto:kay.hayen@gmail.com # # Part of "Nuitka", an optimizing Python compiler that is compatible and # integrates with CPython, but also works on its own. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Module for constants in Nuitka. This contains tools to compare, classify and test constants. """ import math from types import BuiltinFunctionType from nuitka.Builtins import builtin_type_names from nuitka.PythonVersions import python_version from .__past__ import ( # pylint: disable=I0021,redefined-builtin iterItems, long, unicode, xrange, ) from .Builtins import ( builtin_anon_names, builtin_anon_value_list, builtin_exception_values_list, builtin_named_values_list, ) NoneType = type(None) def compareConstants(a, b): # Many many cases to deal with, pylint: disable=too-many-branches,too-many-return-statements # Supposed fast path for comparison. if type(a) is not type(b): return False # Now it's either not the same, or it is a container that contains NaN or it # is a complex or float that is NaN, the other cases can use == at the end. if type(a) is complex: return compareConstants(a.imag, b.imag) and compareConstants(a.real, b.real) if type(a) is float: # Check sign first, -0.0 is not 0.0, or -nan is not nan, it has a # different sign for a start. if math.copysign(1.0, a) != math.copysign(1.0, b): return False if math.isnan(a) and math.isnan(b): return True return a == b if type(a) in (tuple, list): if len(a) != len(b): return False for ea, eb in zip(a, b): if not compareConstants(ea, eb): return False return True if type(a) is dict: if len(a) != len(b): return False for ea1, ea2 in iterItems(a): for eb1, eb2 in iterItems(b): if compareConstants(ea1, eb1) and compareConstants(ea2, eb2): break else: return False return True if type(a) in (frozenset, set): if len(a) != len(b): return False for ea in a: if ea not in b: # Due to NaN values, we need to compare each set element with # all the other set to be really sure. for eb in b: if compareConstants(ea, eb): break else: return False return True if type(a) is xrange: return str(a) == str(b) # The NaN values of float and complex may let this fail, even if the # constants are built in the same way, therefore above checks. return a == b # These built-in type references are kind of constant too. The list should be # complete. constant_builtin_types = ( int, str, float, list, tuple, set, dict, slice, complex, xrange, NoneType, ) if python_version >= 300: constant_builtin_types += (bytes,) else: constant_builtin_types += ( unicode, long, # This has no name in Python, but the natural one in C-API. 
        builtin_anon_names["instance"],
    )


def isConstant(constant):
    # Too many cases and all return, that is how we do it here,
    # pylint: disable=too-many-branches,too-many-return-statements

    constant_type = type(constant)

    if constant_type is dict:
        for key, value in iterItems(constant):
            if not isConstant(key):
                return False

            if not isConstant(value):
                return False

        return True
    elif constant_type in (tuple, list):
        for element_value in constant:
            if not isConstant(element_value):
                return False

        return True
    elif constant_type is slice:
        if (
            not isConstant(constant.start)
            or not isConstant(constant.stop)
            or not isConstant(constant.step)
        ):
            return False

        return True
    elif constant_type in (
        str,
        unicode,
        complex,
        int,
        long,
        bool,
        float,
        NoneType,
        range,
        bytes,
        set,
        frozenset,
        xrange,
        bytearray,
    ):
        return True
    elif constant in (Ellipsis, NoneType, NotImplemented):
        return True
    elif constant in builtin_anon_value_list:
        return True
    elif constant_type is type:
        # Maybe pre-build this as a set for quicker testing.
        return (
            constant.__name__ in builtin_type_names
            or constant in builtin_exception_values_list
        )
    elif constant_type is BuiltinFunctionType and constant in builtin_named_values_list:
        # TODO: Some others could also be usable and even interesting, but
        # then probably should go into other node types, e.g. str.join is
        # a candidate.
        return True
    else:
        return False


def isMutable(constant):
    """ Is a constant mutable

        That means a user of a reference to it can modify it. Strings are
        a prime example of immutable, dictionaries are mutable.
    """
    # Many cases and all return, that is how we do it here,
    # pylint: disable=too-many-return-statements

    constant_type = type(constant)

    if constant_type in (
        str,
        unicode,
        complex,
        int,
        long,
        bool,
        float,
        NoneType,
        range,
        bytes,
        slice,
        xrange,
        type,
        BuiltinFunctionType,
    ):
        return False
    elif constant_type in (dict, list, set, bytearray):
        return True
    elif constant_type is tuple:
        for value in constant:
            if isMutable(value):
                return True

        return False
    elif constant_type is frozenset:
        for value in constant:
            if isMutable(value):
                return True

        return False
    elif constant is Ellipsis:
        return False
    elif constant is NotImplemented:
        return False
    else:
        assert False, repr(constant)


def isHashable(constant):
    """ Is a constant hashable

        That means a user of a reference to it can use it for dicts and set
        keys.  This is distinct from mutable: there is one type that is not
        mutable, and still not hashable: slices.
""" # Many cases and all return, that is how we do it here, # pylint: disable=too-many-return-statements constant_type = type(constant) if constant_type in ( str, unicode, complex, int, long, bool, float, NoneType, xrange, bytes, type, BuiltinFunctionType, ): return True elif constant_type in (dict, list, set, slice, bytearray): return False elif constant_type is tuple: for value in constant: if not isHashable(value): return False return True elif constant_type is frozenset: for value in constant: if not isHashable(value): return False return True elif constant is Ellipsis: return True else: assert False, constant_type def getUnhashableConstant(constant): # Too many cases and all return, that is how we do it here, # pylint: disable=too-many-return-statements constant_type = type(constant) if constant_type in ( str, unicode, complex, int, long, bool, float, NoneType, xrange, bytes, type, BuiltinFunctionType, ): return None elif constant_type in (dict, list, set): return constant elif constant_type is tuple: for value in constant: res = getUnhashableConstant(value) if res is not None: return res return None elif constant is Ellipsis: return None elif constant in constant_builtin_types: return None elif constant_type is slice: return None else: assert False, constant_type def isIterableConstant(constant): return type(constant) in ( str, unicode, list, tuple, set, frozenset, dict, xrange, bytes, bytearray, ) def getConstantIterationLength(constant): assert isIterableConstant(constant) return len(constant) def isNumberConstant(constant): return type(constant) in (int, long, float, bool) def isIndexConstant(constant): return type(constant) in (int, long, bool) def createConstantDict(keys, values): # Create it proper size immediately. constant_value = dict.fromkeys(keys, None) for key, value in zip(keys, values): constant_value[key] = value return constant_value def getConstantWeight(constant): constant_type = type(constant) if constant_type is dict: result = 0 for key, value in iterItems(constant): result += getConstantWeight(key) result += getConstantWeight(value) return result elif constant_type in (tuple, list, set, frozenset): result = 0 for element_value in constant: result += getConstantWeight(element_value) return result else: return 1 def isCompileTimeConstantValue(value): """ Determine if a value will be usable at compile time. """ # This needs to match code in makeCompileTimeConstantReplacementNode if isConstant(value): return True elif type(value) is type: return True else: return False
25.635468
96
0.600596
0
0
0
0
0
0
0
0
2,862
0.274981
6a6d56d36f5446ad1de42a20d6e31bc1aa3492a2
13,724
py
Python
functions/predictionLambda/botocore/endpoint.py
chriscoombs/aws-comparing-algorithms-performance-mlops-cdk
6d3888f3ecd667ee76dc473edba37a608786ed2e
[ "Apache-2.0" ]
40
2020-07-11T10:07:51.000Z
2021-12-11T17:09:20.000Z
functions/predictionLambda/botocore/endpoint.py
chriscoombs/aws-comparing-algorithms-performance-mlops-cdk
6d3888f3ecd667ee76dc473edba37a608786ed2e
[ "Apache-2.0" ]
21
2019-11-10T05:38:06.000Z
2022-03-10T15:07:48.000Z
functions/predictionLambda/botocore/endpoint.py
chriscoombs/aws-comparing-algorithms-performance-mlops-cdk
6d3888f3ecd667ee76dc473edba37a608786ed2e
[ "Apache-2.0" ]
37
2020-07-09T23:12:30.000Z
2022-03-16T11:15:58.000Z
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

import os
import logging
import time
import threading

from botocore.vendored import six

from botocore.awsrequest import create_request_object
from botocore.exceptions import HTTPClientError
from botocore.httpsession import URLLib3Session
from botocore.utils import is_valid_endpoint_url, get_environ_proxies
from botocore.hooks import first_non_none_response
from botocore.history import get_global_history_recorder
from botocore.response import StreamingBody
from botocore import parsers


logger = logging.getLogger(__name__)
history_recorder = get_global_history_recorder()
DEFAULT_TIMEOUT = 60
MAX_POOL_CONNECTIONS = 10


def convert_to_response_dict(http_response, operation_model):
    """Convert an HTTP response object to a response dict.

    This converts the requests library's HTTP response object to
    a dictionary.

    :type http_response: botocore.vendored.requests.model.Response
    :param http_response: The HTTP response from an AWS service request.

    :rtype: dict
    :return: A response dictionary which will contain the following keys:
        * headers (dict)
        * status_code (int)
        * body (string or file-like object)

    """
    response_dict = {
        'headers': http_response.headers,
        'status_code': http_response.status_code,
        'context': {
            'operation_name': operation_model.name,
        }
    }
    if response_dict['status_code'] >= 300:
        response_dict['body'] = http_response.content
    elif operation_model.has_event_stream_output:
        response_dict['body'] = http_response.raw
    elif operation_model.has_streaming_output:
        length = response_dict['headers'].get('content-length')
        response_dict['body'] = StreamingBody(http_response.raw, length)
    else:
        response_dict['body'] = http_response.content
    return response_dict


class Endpoint(object):
    """
    Represents an endpoint for a particular service in a specific
    region.  Only an endpoint can make requests.

    :ivar service: The Service object that describes this endpoint's
        service.
    :ivar host: The fully qualified endpoint hostname.
    :ivar session: The session object.
""" def __init__(self, host, endpoint_prefix, event_emitter, response_parser_factory=None, http_session=None): self._endpoint_prefix = endpoint_prefix self._event_emitter = event_emitter self.host = host self._lock = threading.Lock() if response_parser_factory is None: response_parser_factory = parsers.ResponseParserFactory() self._response_parser_factory = response_parser_factory self.http_session = http_session if self.http_session is None: self.http_session = URLLib3Session() def __repr__(self): return '%s(%s)' % (self._endpoint_prefix, self.host) def make_request(self, operation_model, request_dict): logger.debug("Making request for %s with params: %s", operation_model, request_dict) return self._send_request(request_dict, operation_model) def create_request(self, params, operation_model=None): request = create_request_object(params) if operation_model: request.stream_output = any([ operation_model.has_streaming_output, operation_model.has_event_stream_output ]) service_id = operation_model.service_model.service_id.hyphenize() event_name = 'request-created.{service_id}.{op_name}'.format( service_id=service_id, op_name=operation_model.name) self._event_emitter.emit(event_name, request=request, operation_name=operation_model.name) prepared_request = self.prepare_request(request) return prepared_request def _encode_headers(self, headers): # In place encoding of headers to utf-8 if they are unicode. for key, value in headers.items(): if isinstance(value, six.text_type): headers[key] = value.encode('utf-8') def prepare_request(self, request): self._encode_headers(request.headers) return request.prepare() def _send_request(self, request_dict, operation_model): attempts = 1 request = self.create_request(request_dict, operation_model) context = request_dict['context'] success_response, exception = self._get_response( request, operation_model, context) while self._needs_retry(attempts, operation_model, request_dict, success_response, exception): attempts += 1 # If there is a stream associated with the request, we need # to reset it before attempting to send the request again. # This will ensure that we resend the entire contents of the # body. request.reset_stream() # Create a new request when retried (including a new signature). request = self.create_request( request_dict, operation_model) success_response, exception = self._get_response( request, operation_model, context) if success_response is not None and \ 'ResponseMetadata' in success_response[1]: # We want to share num retries, not num attempts. total_retries = attempts - 1 success_response[1]['ResponseMetadata']['RetryAttempts'] = \ total_retries if exception is not None: raise exception else: return success_response def _get_response(self, request, operation_model, context): # This will return a tuple of (success_response, exception) # and success_response is itself a tuple of # (http_response, parsed_dict). # If an exception occurs then the success_response is None. # If no exception occurs then exception is None. 
success_response, exception = self._do_get_response( request, operation_model) kwargs_to_emit = { 'response_dict': None, 'parsed_response': None, 'context': context, 'exception': exception, } if success_response is not None: http_response, parsed_response = success_response kwargs_to_emit['parsed_response'] = parsed_response kwargs_to_emit['response_dict'] = convert_to_response_dict( http_response, operation_model) service_id = operation_model.service_model.service_id.hyphenize() self._event_emitter.emit( 'response-received.%s.%s' % ( service_id, operation_model.name), **kwargs_to_emit) return success_response, exception def _do_get_response(self, request, operation_model): try: logger.debug("Sending http request: %s", request) history_recorder.record('HTTP_REQUEST', { 'method': request.method, 'headers': request.headers, 'streaming': operation_model.has_streaming_input, 'url': request.url, 'body': request.body }) service_id = operation_model.service_model.service_id.hyphenize() event_name = 'before-send.%s.%s' % (service_id, operation_model.name) responses = self._event_emitter.emit(event_name, request=request) http_response = first_non_none_response(responses) if http_response is None: http_response = self._send(request) except HTTPClientError as e: return (None, e) except Exception as e: logger.debug("Exception received when sending HTTP request.", exc_info=True) return (None, e) # This returns the http_response and the parsed_data. response_dict = convert_to_response_dict(http_response, operation_model) http_response_record_dict = response_dict.copy() http_response_record_dict['streaming'] = \ operation_model.has_streaming_output history_recorder.record('HTTP_RESPONSE', http_response_record_dict) protocol = operation_model.metadata['protocol'] parser = self._response_parser_factory.create_parser(protocol) parsed_response = parser.parse( response_dict, operation_model.output_shape) # Do a second parsing pass to pick up on any modeled error fields # NOTE: Ideally, we would push this down into the parser classes but # they currently have no reference to the operation or service model # The parsers should probably take the operation model instead of # output shape but we can't change that now if http_response.status_code >= 300: self._add_modeled_error_fields( response_dict, parsed_response, operation_model, parser, ) history_recorder.record('PARSED_RESPONSE', parsed_response) return (http_response, parsed_response), None def _add_modeled_error_fields( self, response_dict, parsed_response, operation_model, parser, ): error_code = parsed_response.get("Error", {}).get("Code") if error_code is None: return service_model = operation_model.service_model error_shape = service_model.shape_for_error_code(error_code) if error_shape is None: return modeled_parse = parser.parse(response_dict, error_shape) # TODO: avoid naming conflicts with ResponseMetadata and Error parsed_response.update(modeled_parse) def _needs_retry(self, attempts, operation_model, request_dict, response=None, caught_exception=None): service_id = operation_model.service_model.service_id.hyphenize() event_name = 'needs-retry.%s.%s' % ( service_id, operation_model.name) responses = self._event_emitter.emit( event_name, response=response, endpoint=self, operation=operation_model, attempts=attempts, caught_exception=caught_exception, request_dict=request_dict) handler_response = first_non_none_response(responses) if handler_response is None: return False else: # Request needs to be retried, and we need to sleep # for the specified 
number of seconds.
            logger.debug("Response received to retry, sleeping for "
                         "%s seconds", handler_response)
            time.sleep(handler_response)
            return True

    def _send(self, request):
        return self.http_session.send(request)


class EndpointCreator(object):
    def __init__(self, event_emitter):
        self._event_emitter = event_emitter

    def create_endpoint(self, service_model, region_name, endpoint_url,
                        verify=None, response_parser_factory=None,
                        timeout=DEFAULT_TIMEOUT,
                        max_pool_connections=MAX_POOL_CONNECTIONS,
                        http_session_cls=URLLib3Session,
                        proxies=None,
                        socket_options=None,
                        client_cert=None):
        if not is_valid_endpoint_url(endpoint_url):
            raise ValueError("Invalid endpoint: %s" % endpoint_url)
        if proxies is None:
            proxies = self._get_proxies(endpoint_url)
        endpoint_prefix = service_model.endpoint_prefix

        logger.debug('Setting %s timeout as %s', endpoint_prefix, timeout)
        http_session = http_session_cls(
            timeout=timeout,
            proxies=proxies,
            verify=self._get_verify_value(verify),
            max_pool_connections=max_pool_connections,
            socket_options=socket_options,
            client_cert=client_cert,
        )
        return Endpoint(
            endpoint_url,
            endpoint_prefix=endpoint_prefix,
            event_emitter=self._event_emitter,
            response_parser_factory=response_parser_factory,
            http_session=http_session
        )

    def _get_proxies(self, url):
        # We could also support getting proxies from a config file,
        # but for now proxy support is taken from the environment.
        return get_environ_proxies(url)

    def _get_verify_value(self, verify):
        # This is to account for:
        # https://github.com/kennethreitz/requests/issues/1436
        # where we need to honor REQUESTS_CA_BUNDLE because we're creating our
        # own request objects.
        # First, if verify is not None, then the user explicitly specified
        # a value so this automatically wins.
        if verify is not None:
            return verify
        # Otherwise use the value from REQUESTS_CA_BUNDLE, or default to
        # True if the env var does not exist.
        return os.environ.get('REQUESTS_CA_BUNDLE', True)
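# --- Added sketch (not part of botocore): wiring an Endpoint by hand, which
# botocore's client machinery normally does for you. HierarchicalEmitter is
# the real emitter class from botocore.hooks; `service_model` is an assumed
# placeholder for an already-loaded botocore ServiceModel.
#
#   from botocore.hooks import HierarchicalEmitter
#
#   creator = EndpointCreator(HierarchicalEmitter())
#   endpoint = creator.create_endpoint(
#       service_model,
#       region_name='us-east-1',
#       endpoint_url='https://s3.amazonaws.com',  # must pass is_valid_endpoint_url()
#       timeout=DEFAULT_TIMEOUT,
#   )
#   print(endpoint)   # -> "s3(https://s3.amazonaws.com)" via Endpoint.__repr__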
42.09816
81
0.6587
11,246
0.81944
0
0
0
0
0
0
3,720
0.271058
6a6dcc4d9c3e1b2437b6c8b26173ce12b1dfa929
7,761
py
Python
week2/Assignment2Answer.py
RayshineRen/Introduction_to_Data_Science_in_Python
b19aa781a8f8d0e25853c4e86dadd4c9bebbcd71
[ "MIT" ]
1
2020-09-22T15:06:02.000Z
2020-09-22T15:06:02.000Z
week2/Assignment2Answer.py
RayshineRen/Introduction_to_Data_Science_in_Python
b19aa781a8f8d0e25853c4e86dadd4c9bebbcd71
[ "MIT" ]
1
2020-11-03T14:11:02.000Z
2020-11-03T14:24:50.000Z
week2/Assignment2Answer.py
RayshineRen/Introduction_to_Data_Science_in_Python
b19aa781a8f8d0e25853c4e86dadd4c9bebbcd71
[ "MIT" ]
2
2020-09-22T05:27:09.000Z
2020-11-05T10:39:49.000Z
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 18 21:56:15 2020

@author: Ray
@email: 1324789704@qq.com
@wechat: RayTing0305
"""

'''
Question 1
Write a function called proportion_of_education which returns the proportion of children in the dataset who had a mother with the education levels equal to less than high school (<12), high school (12), more than high school but not a college graduate (>12) and college degree.

This function should return a dictionary in the form of (use the correct numbers, do not round numbers):
    {"less than high school":0.2,
    "high school":0.4,
    "more than high school but not college":0.2,
    "college":0.2}
'''

import scipy.stats as stats
import numpy as np
import pandas as pd

df = pd.read_csv("./assets/NISPUF17.csv")

def proportion_of_education():
    # YOUR CODE HERE
    df_edu = df.EDUC1
    edu_list = [1, 2, 3, 4]
    zero_df = pd.DataFrame(np.zeros((df_edu.shape[0], len(edu_list))),
                           columns=edu_list)
    for edu in edu_list:
        zero_df.loc[df_edu == edu, edu] = 1
    sum_ret = zero_df.sum(axis=0)
    name_l = ["less than high school", "high school",
              "more than high school but not college", "college"]
    rat = sum_ret.values / sum(sum_ret.values)
    dic = dict()
    for i in range(4):
        dic[name_l[i]] = rat[i]
    return dic

assert type(proportion_of_education())==type({}), "You must return a dictionary."
assert len(proportion_of_education()) == 4, "You have not returned a dictionary with four items in it."
assert "less than high school" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys."
assert "high school" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys."
assert "more than high school but not college" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys."
assert "college" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys."

'''
Question 2
Let's explore the relationship between being fed breastmilk as a child and getting a seasonal influenza vaccine from a healthcare provider. Return a tuple of the average number of influenza vaccines for those children we know received breastmilk as a child and those we know did not.

This function should return a tuple in the form (use the correct numbers):
    (2.5, 0.1)
'''

def average_influenza_doses():
    # Whether the child was fed breastmilk (CBF_01: 1 = yes, 2 = no)
    fed_breastmilk = list(df.groupby(by='CBF_01'))
    be_fed_breastmilk = fed_breastmilk[0][1]
    not_fed_breastmilk = fed_breastmilk[1][1]
    # Influenza doses for children fed breastmilk
    be_fed_breastmilk_influenza = be_fed_breastmilk.P_NUMFLU
    num_be_fed_breastmilk_influenza = be_fed_breastmilk_influenza.dropna().mean()
    # Influenza doses for children not fed breastmilk
    not_be_fed_breastmilk_influenza = not_fed_breastmilk.P_NUMFLU
    num_not_be_fed_breastmilk_influenza = not_be_fed_breastmilk_influenza.dropna().mean()
    return num_be_fed_breastmilk_influenza, num_not_be_fed_breastmilk_influenza

assert len(average_influenza_doses())==2, "Return two values in a tuple, the first for yes and the second for no."

'''
Question 3
It would be interesting to see if there is any evidence of a link between vaccine effectiveness and sex of the child. Calculate the ratio of the number of children who contracted chickenpox but were vaccinated against it (at least one varicella dose) versus those who were vaccinated but did not contract chicken pox. Return results by sex.

This function should return a dictionary in the form of (use the correct numbers):
    {"male":0.2,
    "female":0.4}

Note: To aid in verification, the chickenpox_by_sex()['female'] value the autograder is looking for starts with the digits 0.0077.
'''

def chickenpox_by_sex():
    # Whether the child contracted varicella (chickenpox)
    cpox = df.HAD_CPOX
    # cpox.value_counts()
    cpox_group = list(df.groupby(by='HAD_CPOX'))
    have_cpox = cpox_group[0][1]
    not_have_cpox = cpox_group[1][1]
    # Split each group by sex
    have_cpox_group = list(have_cpox.groupby(by='SEX'))
    not_have_cpox_group = list(not_have_cpox.groupby(by='SEX'))
    have_cpox_boy = have_cpox_group[0][1]
    have_cpox_girl = have_cpox_group[1][1]
    not_have_cpox_boy = not_have_cpox_group[0][1]
    not_have_cpox_girl = not_have_cpox_group[1][1]
    # Vaccinated and contracted chickenpox
    # have_cpox_boy_injected = have_cpox_boy[(have_cpox_boy['P_NUMMMR']>0) | (have_cpox_boy['P_NUMVRC']>0)]
    have_cpox_boy_injected = have_cpox_boy[(have_cpox_boy['P_NUMVRC']>0)]
    num_have_cpox_boy_injected = have_cpox_boy_injected.count()['SEQNUMC']
    have_cpox_girl_injected = have_cpox_girl[(have_cpox_girl['P_NUMVRC']>0)]
    num_have_cpox_girl_injected = have_cpox_girl_injected.count()['SEQNUMC']
    # Vaccinated and did not contract chickenpox
    not_have_cpox_boy_injected = not_have_cpox_boy[(not_have_cpox_boy['P_NUMVRC']>0)]
    num_not_have_cpox_boy_injected = not_have_cpox_boy_injected.count()['SEQNUMC']
    not_have_cpox_girl_injected = not_have_cpox_girl[(not_have_cpox_girl['P_NUMVRC']>0)]
    num_not_have_cpox_girl_injected = not_have_cpox_girl_injected.count()['SEQNUMC']
    # Compute the ratios
    ratio_boy = num_have_cpox_boy_injected / num_not_have_cpox_boy_injected
    ratio_girl = num_have_cpox_girl_injected / num_not_have_cpox_girl_injected
    dic = {}
    dic['male'] = ratio_boy
    dic['female'] = ratio_girl
    return dic

assert len(chickenpox_by_sex())==2, "Return a dictionary with two items, the first for males and the second for females."

'''
Question 4
A correlation is a statistical relationship between two variables. If we wanted to know if vaccines work, we might look at the correlation between the use of the vaccine and whether it results in prevention of the infection or disease [1]. In this question, you are to see if there is a correlation between having had the chicken pox and the number of chickenpox vaccine doses given (varicella).

Some notes on interpreting the answer. The had_chickenpox_column is either 1 (for yes) or 2 (for no), and the num_chickenpox_vaccine_column is the number of doses a child has been given of the varicella vaccine. A positive correlation (e.g., corr > 0) means that an increase in had_chickenpox_column (which means more no's) would also increase the values of num_chickenpox_vaccine_column (which means more doses of vaccine). If there is a negative correlation (e.g., corr < 0), it indicates that having had chickenpox is related to an increase in the number of vaccine doses.

Also, pval is the probability that we observe a correlation between had_chickenpox_column and num_chickenpox_vaccine_column which is greater than or equal to a particular value occurred by chance. A small pval means that the observed correlation is highly unlikely to occur by chance. In this case, pval should be very small (will end in e-18 indicating a very small number).

[1] This isn't really the full picture, since we are not looking at when the dose was given. It's possible that children had chickenpox and then their parents went to get them the vaccine. Does this dataset have the data we would need to investigate the timing of the dose?
'''

def corr_chickenpox():
    cpox = df[(df.P_NUMVRC).notnull()]
    have_cpox = cpox[(cpox.HAD_CPOX==1) | (cpox.HAD_CPOX==2)]
    df1 = pd.DataFrame({"had_chickenpox_column": have_cpox.HAD_CPOX,
                        "num_chickenpox_vaccine_column": have_cpox.P_NUMVRC})
    corr, pval = stats.pearsonr(df1["had_chickenpox_column"], df1["num_chickenpox_vaccine_column"])
    return corr
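# --- Added toy illustration (not part of the assignment): how the sign of
# stats.pearsonr is read. Two perfectly anti-correlated series give corr == -1.0,
# in the same (corr, pval) tuple shape used in corr_chickenpox() above.
toy_corr, toy_pval = stats.pearsonr([1, 2, 3, 4], [8, 6, 4, 2])
print(toy_corr)   # -1.0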
53.895833
576
0.74024
0
0
0
0
0
0
0
0
4,615
0.587823
6a6dde8d68a99fd68fff6d0aa6d0f4f64dc22408
4,018
py
Python
backup/26.py
accordinglyto/dferte
d4b8449c1633973dc538c9e72aca5d37802a4ee4
[ "MIT" ]
null
null
null
backup/26.py
accordinglyto/dferte
d4b8449c1633973dc538c9e72aca5d37802a4ee4
[ "MIT" ]
8
2020-11-13T18:55:17.000Z
2022-03-12T00:34:40.000Z
backup/26.py
accordinglyto/dferte
d4b8449c1633973dc538c9e72aca5d37802a4ee4
[ "MIT" ]
null
null
null
from numpy import genfromtxt import matplotlib.pyplot as plt import mpl_finance import numpy as np import uuid import matplotlib # Input your csv file here with historical data ad = genfromtxt(f"../financial_data/SM.csv", delimiter=",", dtype=str) def convolve_sma(array, period): return np.convolve(array, np.ones((period,)) / period, mode="valid") def graphwerk(start, finish): open = [] high = [] low = [] close = [] volume = [] # decision = [] date = [] c_open = [] c_high = [] c_low = [] c_close = [] c_volume = [] c_date = [] c_start = start + 12 for x in range(finish - start): c_open.append(float(pd[c_start][1])) c_high.append(float(pd[c_start][2])) c_low.append(float(pd[c_start][3])) c_close.append(float(pd[c_start][4])) c_volume.append(float(pd[c_start][5])) c_date.append(pd[c_start][0]) c_start = c_start + 1 for x in range(finish - start): # Below filtering is valid for eurusd.csv file. Other financial data files have different orders so you need to find out # what means open, high and close in their respective order. open.append(float(pd[start][1])) high.append(float(pd[start][2])) low.append(float(pd[start][3])) close.append(float(pd[start][4])) volume.append(float(pd[start][5])) # decision.append(str(pd[start][6])) date.append(pd[start][0]) start = start + 1 decision = "sell" min_forecast = min(c_low) max_forecast = max(c_high) if close[-1] * 1.03 < max_forecast: decision = "buy" # for z in all_prices: # if close[-1] * 1.03 < z: # decision = "buy" sma = convolve_sma(close, 5) smb = list(sma) diff = sma[-1] - sma[-2] for x in range(len(close) - len(smb)): smb.append(smb[-1] + diff) fig = plt.figure(num=1, figsize=(3, 3), dpi=50, facecolor="w", edgecolor="k") dx = fig.add_subplot(111) # mpl_finance.volume_overlay(ax, open, close, volume, width=0.4, colorup='b', colordown='b', alpha=1) mpl_finance.candlestick2_ochl( dx, open, close, high, low, width=1.5, colorup="g", colordown="r", alpha=0.5 ) plt.autoscale() # plt.plot(smb, color="blue", linewidth=10, alpha=0.5) plt.axis("off") if decision == "sell": print("last value: " + str(close[-1])) print( "range of values in next 13 bars: " + str(min_forecast) + "-" + str(max_forecast) ) print("sell") plt.savefig(sell_dir + str(uuid.uuid4()) + ".jpg", bbox_inches="tight") else: print("last value: " + str(close[-1])) print( "range of values in next 13 bars: " + str(min_forecast) + "-" + str(max_forecast) ) print("buy") plt.savefig(buy_dir + str(uuid.uuid4()) + ".jpg", bbox_inches="tight") # if close[-1] >= close_next: # print('previous value is bigger') # print('last value: ' + str(close[-1])) # print('next value: ' + str(close_next)) # print('sell') # plt.savefig(sell_dir + str(uuid.uuid4()) +'.jpg', bbox_inches='tight') # else: # print('previous value is smaller') # print('last value: '+ str(close[-1])) # print('next value: ' + str(close_next)) # print('buy') # plt.savefig(buy_dir + str(uuid.uuid4())+'.jpg', bbox_inches='tight') # plt.show() open.clear() close.clear() volume.clear() high.clear() low.clear() plt.cla() plt.clf() # output = [] # with open("STOCKbluechip.csv") as f: # output = [str(s) for line in f.readlines() for s in line[:-1].split(",")] # for stock in output: pd = ad buy_dir = "../data/train/buy/" sell_dir = "../data/train/sell/" iter = 0 for x in range(len(pd)): graphwerk(iter, iter + 12) iter = iter + 2
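# --- Added quick check (not part of the original script) for the SMA helper:
# a 3-period moving average of 1..5 collapses to the window means.
#
#   print(convolve_sma([1, 2, 3, 4, 5], 3))   # -> [2. 3. 4.]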
28.097902
128
0.558238
0
0
0
0
0
0
0
0
1,439
0.358138
6a6e5f7f79247bb69a8f187a793986d06aaf806b
3,091
py
Python
streams/readers/arff_reader.py
JanSurft/tornado
2c07686c5358d2bcb15d6edac3126ad9346c3c76
[ "MIT" ]
103
2017-10-01T20:24:58.000Z
2022-03-16T09:09:10.000Z
streams/readers/arff_reader.py
JanSurft/tornado
2c07686c5358d2bcb15d6edac3126ad9346c3c76
[ "MIT" ]
2
2019-09-17T11:06:26.000Z
2021-11-08T23:57:46.000Z
streams/readers/arff_reader.py
JanSurft/tornado
2c07686c5358d2bcb15d6edac3126ad9346c3c76
[ "MIT" ]
28
2018-12-18T00:43:10.000Z
2022-03-04T08:39:47.000Z
""" The Tornado Framework By Ali Pesaranghader University of Ottawa, Ontario, Canada E-mail: apesaran -at- uottawa -dot- ca / alipsgh -at- gmail -dot- com """ import re from data_structures.attribute import Attribute from dictionary.tornado_dictionary import TornadoDic class ARFFReader: """This class is used to read a .arff file.""" @staticmethod def read(file_path): labels = [] attributes = [] attributes_min_max = [] records = [] data_flag = False reader = open(file_path, "r") for line in reader: if line.strip() == '': continue if line.startswith("@attribute") or line.startswith("@ATTRIBUTE"): line = line.strip('\n\r\t') line = line.split(' ') attribute_name = line[1] attribute_value_range = line[2] attribute = Attribute() attribute.set_name(attribute_name) if attribute_value_range.lower() in ['numeric', 'real', 'integer']: attribute_type = TornadoDic.NUMERIC_ATTRIBUTE attribute_value_range = [] attributes_min_max.append([0, 0]) else: attribute_type = TornadoDic.NOMINAL_ATTRIBUTE attribute_value_range = attribute_value_range.strip('{}').replace("'", "") attribute_value_range = attribute_value_range.split(',') attributes_min_max.append([None, None]) attribute.set_type(attribute_type) attribute.set_possible_values(attribute_value_range) attributes.append(attribute) elif line.startswith("@data") or line.startswith("@DATA"): data_flag = True labels = attributes[len(attributes) - 1].POSSIBLE_VALUES attributes.pop(len(attributes) - 1) continue elif data_flag is True: line = re.sub('\s+', '', line) elements = line.split(',') for i in range(0, len(elements) - 1): if attributes[i].TYPE == TornadoDic.NUMERIC_ATTRIBUTE: elements[i] = float(elements[i]) min_value = attributes_min_max[i][0] max_value = attributes_min_max[i][1] if elements[i] < min_value: min_value = elements[i] elif elements[i] > max_value: max_value = elements[i] attributes_min_max[i] = [min_value, max_value] records.append(elements) for i in range(0, len(attributes)): if attributes[i].TYPE == TornadoDic.NUMERIC_ATTRIBUTE: attributes[i].set_bounds_values(attributes_min_max[i][0], attributes_min_max[i][1]) return labels, attributes, records
38.6375
100
0.525396
2,802
0.906503
0
0
2,725
0.881592
0
0
309
0.099968
6a6f28bb63a4999e5f2dcb27c1de7d562bafcd05
1,664
py
Python
Experimente/Experiment ID 8/run-cifar10-v7.py
MichaelSchwabe/conv-ebnas-abgabe
f463d7bbd9b514597e19d25007913f7994cbbf7c
[ "MIT" ]
6
2021-11-03T07:20:48.000Z
2021-11-10T08:20:44.000Z
Experimente/Experiment ID 8/run-cifar10-v7.py
MichaelSchwabe/conv-ebnas-abgabe
f463d7bbd9b514597e19d25007913f7994cbbf7c
[ "MIT" ]
1
2021-11-02T21:10:51.000Z
2021-11-02T21:11:05.000Z
Experimente/Experiment ID 8/run-cifar10-v7.py
MichaelSchwabe/conv-ebnas-abgabe
f463d7bbd9b514597e19d25007913f7994cbbf7c
[ "MIT" ]
null
null
null
from __future__ import print_function from keras.datasets import mnist from keras.datasets import cifar10 from keras.utils.np_utils import to_categorical import numpy as np from keras import backend as K from evolution import Evolution from genome_handler import GenomeHandler import tensorflow as tf #import mlflow.keras #import mlflow #import mlflow.tensorflow #mlflow.tensorflow.autolog() #mlflow.keras.autolog() print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU'))) K.set_image_data_format("channels_last") #(x_train, y_train), (x_test, y_test) = mnist.load_data() (x_train, y_train), (x_test, y_test) = cifar10.load_data() x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2],x_train.shape[3]).astype('float32') / 255 x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], x_test.shape[3]).astype('float32') / 255 # nCLasses y_train = to_categorical(y_train) y_test = to_categorical(y_test) #y_train.shape dataset = ((x_train, y_train), (x_test, y_test)) genome_handler = GenomeHandler(max_conv_layers=4, max_dense_layers=2, # includes final dense layer max_filters=512, max_dense_nodes=1024, input_shape=x_train.shape[1:], n_classes=10) evo = Evolution(genome_handler, data_path="log/evo_cifar10_gen40_pop10_e20.csv") model = evo.run(dataset=dataset, num_generations=40, pop_size=10, epochs=20,metric='acc') #epochs=10,metric='loss') print(model.summary())
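# --- Added hedged follow-up (not in the original script): score the evolved
# model on the held-out split with the standard Keras API. Assumes the
# returned model was compiled with an accuracy metric, as metric='acc' above
# suggests.
#
#   loss, acc = model.evaluate(x_test, y_test, verbose=0)
#   print("test accuracy: %.4f" % acc)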
37.818182
120
0.676683
0
0
0
0
0
0
0
0
347
0.208534
6a703f7100900fb7196e6525d9f4720fdc63dbae
11,514
py
Python
CarModel.py
JaredFG/Multiagentes-Unity
37f7ec5c0588865ef08b50df83566a43d817bebf
[ "MIT" ]
null
null
null
CarModel.py
JaredFG/Multiagentes-Unity
37f7ec5c0588865ef08b50df83566a43d817bebf
[ "MIT" ]
null
null
null
CarModel.py
JaredFG/Multiagentes-Unity
37f7ec5c0588865ef08b50df83566a43d817bebf
[ "MIT" ]
1
2022-02-10T20:33:44.000Z
2022-02-10T20:33:44.000Z
'''
Authors: Eduardo Rodríguez López A01749381
         Rebeca Rojas Pérez A01751192
         Jared Abraham Flores Guarneros A01379868
         Eduardo Aguilar Chías A01749375
'''
from random import random
from mesa.visualization.modules import CanvasGrid
from mesa.visualization.ModularVisualization import ModularServer
from mesa.batchrunner import BatchRunner
from mesa.datacollection import DataCollector
from mesa.space import MultiGrid
from mesa import Agent, Model
from mesa.time import RandomActivation

# Class for the car agents.
class CarAgent(Agent):
    def __init__(self, unique_id, model):
        super().__init__(unique_id, model)
        self.next_cell = None
        self.direction = None
        self.agent_type = 0

    # Validates a position: returns True if it lies inside the grid and is
    # occupied (i.e. holds a street/light agent), False otherwise.
    def is_valid(self, position):
        if position[0] < self.model.width and position[1] < self.model.height and position[0] >= 0 and position[1] >= 0:
            if not self.model.grid.is_cell_empty(position):
                return True
        return False

    # Scans the neighborhood for a street the car could move to without
    # doubling back; returns that street's position.
    def get_poss_cell(self):
        neighborhood = self.model.grid.get_neighborhood(self.pos, moore=False, include_center=False)
        for cell in neighborhood:
            for agent in self.model.grid.get_cell_list_contents(cell):
                if agent.agent_type == 2:
                    next_dir = (self.pos[0] - agent.pos[0], self.pos[1] - agent.pos[1])
                    if next_dir[0] * -1 != self.direction[0] and next_dir[1] * -1 != self.direction[1]:
                        return agent.pos

    # Returns the coordinates of the cell straight ahead.
    def get_nextcell(self):
        move = (self.pos[0] + self.direction[0], self.pos[1] + self.direction[1])
        return move

    # Returns the direction of the street found at the given position.
    def get_nextdirect(self, position):
        for agent in self.model.grid.get_cell_list_contents(position):
            if agent.agent_type == 2:
                return agent.direction

    # Handles turning: returns the direction of a neighboring street whose
    # direction differs from the car's, or None if there is none.
    def turn(self):
        for cell in self.model.grid.get_neighborhood(self.pos, moore=False, include_center=False):
            for agent in self.model.grid.get_cell_list_contents(cell):
                if agent.agent_type == 2:
                    if agent.direction != self.direction:
                        return agent.direction
        return None

    # Checks the traffic light on the next cell: returns its light state if
    # one is there, True otherwise (no light means free passage).
    def check_light(self):
        for agent in self.model.grid.get_cell_list_contents(self.next_cell):
            if agent.agent_type == 1:
                return agent.light
        return True

    # Checks whether the next cell is free of other cars; returns a boolean.
    def check_car(self):
        for agent in self.model.grid.get_cell_list_contents(self.next_cell):
            if agent.agent_type == 0:
                return False
        return True

    def step(self):
        # Cell straight ahead, from get_nextcell().
        next_cell = self.get_nextcell()
        # If the next cell is valid, store it and adopt the street's direction.
        if self.is_valid(next_cell):
            self.next_cell = next_cell
            self.direction = self.get_nextdirect(self.next_cell)
        # Otherwise try to turn.
        else:
            direction = self.turn()
            # If a turn is possible, adopt its direction.
            if direction:
                self.direction = direction
            # Otherwise fall back to any reachable street cell from
            # get_poss_cell().
            else:
                poss = self.get_poss_cell()
                self.next_cell = poss
        if self.check_car():
            if self.check_light():
                self.model.grid.move_agent(self, self.next_cell)

# Class for the traffic-light agents.
class TrafficLightAgent(Agent):
    def __init__(self, unique_id, model):
        super().__init__(unique_id, model)
        self.agent_type = 1
        self.light = False

    # Toggles the light.
    def change(self):
        self.light = not self.light

    # Counts the cars on and next to this traffic light; returns the count.
    def count_cars(self):
        counter = 0
        neighborhood = self.model.grid.get_neighborhood(self.pos, moore=False, include_center=True)
        for cell in neighborhood:
            for agent in self.model.grid.get_cell_list_contents(cell):
                if agent.agent_type == 0:
                    counter += 1
        return counter

# Class for the street agents.
class StreetAgent(Agent):
    def __init__(self, unique_id, model):
        super().__init__(unique_id, model)
        self.direction = None
        self.agent_type = 2

# Class for the model.
class CarModel(Model):
    def __init__(self, N: int, width: int, height: int):
        self.num_agents = N
        self.running = True
        self.grid = MultiGrid(width, height, False)
        self.schedule = RandomActivation(self)
        self.uids = 0
        self.lights_ids = 0
        self.width = width
        self.height = height
        street_pos = []
        self.lights = 4
        # Build the inner streets, where the intersection is.
        for row in range(height):
            for col in range(width):
                agent = StreetAgent(self.uids, self)
                self.uids += 1
                flag = True
                if col > width // 2 - 2 and col < width // 2 + 1 and col > 1 and col < height - 1:
                    if row >= height // 2:
                        agent.direction = (0, 1)
                    else:
                        agent.direction = (0, -1)
                elif row > height // 2 - 2 and row < height // 2 + 1 and row > 1 and row < width - 1:
                    if col > width // 2:
                        agent.direction = (-1, 0)
                    else:
                        agent.direction = (1, 0)
                else:
                    flag = False
                if flag:
                    self.grid.place_agent(agent, (col, row))
                    street_pos.append((col, row))
        # Build the outer ring of streets, away from the intersection.
        for row in range(height):
            for col in range(width):
                agent = StreetAgent(self.uids, self)
                self.uids += 1
                flag = True
                if row < 2:
                    if col < width - 2:
                        agent.direction = (1, 0)
                    else:
                        agent.direction = (0, 1)
                elif row >= 2 and row < height - 2:
                    if col < 2:
                        agent.direction = (0, -1)
                    elif col >= width - 2 and col < width:
                        agent.direction = (0, 1)
                    else:
                        flag = False
                elif row >= height - 2 and row < height:
                    if col < width - 2:
                        agent.direction = (-1, 0)
                    else:
                        agent.direction = (0, 1)
                else:
                    flag = False
                if flag:
                    self.grid.place_agent(agent, (col, row))
                    street_pos.append((col, row))
        # Place the cars at random street positions.
        for i in range(self.num_agents):
            a = CarAgent(self.uids, self)
            self.uids += 1
            pos_index = self.random.randint(0, len(street_pos) - 1)
            pos = street_pos.pop(pos_index)
            a.direction = self.grid.get_cell_list_contents(pos)[0].direction
            self.grid.place_agent(a, pos)
            self.schedule.add(a)
        # Create the traffic lights.
        for i in range(self.lights):
            alight = TrafficLightAgent(self.lights_ids, self)
            self.lights_ids += 1
            self.schedule.add(alight)
            x = 8
            y = 9
            if i == 0:
                alight.light = True
                self.grid.place_agent(alight, (x, y))
            elif i == 1:
                x = 8
                y = 10
                alight.light = True
                self.grid.place_agent(alight, (x, y))
            elif i == 2:
                x = 11
                y = 9
                alight.light = False
                self.grid.place_agent(alight, (x, y))
            else:
                x = 11
                y = 10
                alight.light = False
                self.grid.place_agent(alight, (x, y))

    def step(self):
        # Counters to find which pair of traffic lights has more cars waiting.
        count_left = 0
        count_right = 0
        # Accumulate the number of cars seen by each side's lights.
        for agent in self.schedule.agents:
            if agent.agent_type == 1:
                if agent.unique_id == 0:
                    count_left += agent.count_cars()
                elif agent.unique_id == 1:
                    count_left += agent.count_cars()
                elif agent.unique_id == 2:
                    count_right += agent.count_cars()
                elif agent.unique_id == 3:
                    count_right += agent.count_cars()
        # If the left side has at least as many cars, its lights turn green
        # and the right side's turn red.
        if count_left >= count_right:
            for agent in self.schedule.agents:
                if agent.agent_type == 1:
                    if agent.unique_id == 0:
                        agent.light = True
                    elif agent.unique_id == 1:
                        agent.light = True
                    elif agent.unique_id == 2:
                        agent.light = False
                    else:
                        agent.light = False
        # Otherwise the right side gets green and the left side red.
        else:
            for agent in self.schedule.agents:
                if agent.agent_type == 1:
                    if agent.unique_id == 0:
                        agent.light = False
                    elif agent.unique_id == 1:
                        agent.light = False
                    elif agent.unique_id == 2:
                        agent.light = True
                    else:
                        agent.light = True
        self.schedule.step()
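# --- Added headless driver sketch (not part of the original file); the 20x20
# grid size is an assumption that fits the hard-coded traffic-light positions
# around x = 8..11, y = 9..10.
#
#   model = CarModel(N=10, width=20, height=20)
#   for _ in range(100):
#       model.step()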
41.566787
120
0.538822
10,743
0.928121
0
0
0
0
0
0
2,518
0.217538
6a705dbb2cd1b609cc2090d60bc5b82810db8095
1,684
py
Python
qcodes/widgets/display.py
nulinspiratie/Qcodes
d050d38ac83f532523a39549c3247dfa6096a36e
[ "MIT" ]
2
2017-02-27T06:02:39.000Z
2019-06-03T04:56:59.000Z
qcodes/widgets/display.py
nulinspiratie/Qcodes
d050d38ac83f532523a39549c3247dfa6096a36e
[ "MIT" ]
50
2017-04-12T04:03:15.000Z
2022-03-09T00:41:43.000Z
qcodes/widgets/display.py
nulinspiratie/Qcodes
d050d38ac83f532523a39549c3247dfa6096a36e
[ "MIT" ]
null
null
null
"""Helper for adding content stored in a file to a jupyter notebook.""" import os from pkg_resources import resource_string from IPython.display import display, Javascript, HTML # Originally I implemented this using regular open() and read(), so it # could use relative paths from the importing file. # # But for distributable packages, pkg_resources.resource_string is the # best way to load data files, because it works even if the package is # in an egg or zip file. See: # http://pythonhosted.org/setuptools/setuptools.html#accessing-data-files-at-runtime def display_auto(qcodes_path, file_type=None): """ Display some javascript, css, or html content in a jupyter notebook. Content comes from a package-relative file path. Will use the file extension to determine file type unless overridden by file_type Args: qcodes_path (str): the path to the target file within the qcodes package, like 'widgets/widgets.js' file_type (Optional[str]): Override the file extension to determine what type of file this is. Case insensitive, supported values are 'js', 'css', and 'html' """ contents = resource_string('qcodes', qcodes_path).decode('utf-8') if file_type is None: ext = os.path.splitext(qcodes_path)[1].lower() elif 'js' in file_type.lower(): ext = '.js' elif 'css' in file_type.lower(): ext = '.css' else: ext = '.html' if ext == '.js': display(Javascript(contents)) elif ext == '.css': display(HTML('<style>' + contents + '</style>')) else: # default to html. Anything else? display(HTML(contents))
35.083333
84
0.672803
0
0
0
0
0
0
0
0
1,097
0.651425
6a71f08eeecbd606e19448cf8f9c90856e40cbac
6,697
py
Python
hubcontrol.py
smr99/lego-hub-tk
d3b86847873fa80deebf993ccd44b4d3d8f9bf40
[ "MIT" ]
16
2021-02-17T01:59:39.000Z
2022-03-29T05:10:12.000Z
hubcontrol.py
smr99/lego-hub-tk
d3b86847873fa80deebf993ccd44b4d3d8f9bf40
[ "MIT" ]
15
2021-04-20T04:01:36.000Z
2022-02-01T02:46:30.000Z
hubcontrol.py
smr99/lego-hub-tk
d3b86847873fa80deebf993ccd44b4d3d8f9bf40
[ "MIT" ]
9
2021-04-18T20:29:21.000Z
2022-03-31T11:50:04.000Z
#! /usr/bin/python3 import base64 from data.ProgramHubLogger import ProgramHubLogger from datetime import datetime import logging import os import sys from ui.MotionSensor import MotionSensorWidget from ui.PositionStatus import PositionStatusWidget from ui.DevicePortWidget import DevicePortWidget from ui.ConnectionWidget import ConnectionWidget from PyQt5 import QtCore, QtGui, QtWidgets from PyQt5.QtCore import * from PyQt5.QtGui import * from PyQt5.QtWidgets import * from PyQt5.QtWidgets import QApplication, QPushButton, QWidget from comm.HubClient import ConnectionState, HubClient from data.HubMonitor import HubMonitor from data.HubStatus import HubStatus from ui.DeviceStatusWidget import DeviceStatusWidget from utils.setup import setup_logging logger = logging.getLogger("App") log_filename = os.path.dirname(__file__) + "/logs/hubcontrol.log" setup_logging(log_filename) def list_programs(info): storage = info['storage'] slots = info['slots'] print("%4s %-40s %6s %-20s %-12s %-10s" % ("Slot", "Decoded Name", "Size", "Last Modified", "Project_id", "Type")) for i in range(20): if str(i) in slots: sl = slots[str(i)] modified = datetime.utcfromtimestamp(sl['modified']/1000).strftime('%Y-%m-%d %H:%M:%S') try: decoded_name = base64.b64decode(sl['name']).decode('utf-8') except: decoded_name = sl['name'] try: project = sl['project_id'] except: project = " " try: type = sl['type'] except: type = " " print("%4s %-40s %5db %-20s %-12s %-10s" % (i, decoded_name, sl['size'], modified, project, type)) print(("Storage free %s%s of total %s%s" % (storage['free'], storage['unit'], storage['total'], storage['unit']))) class ConsoleWidget(QTextEdit): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.setReadOnly(True) self.setLineWrapMode(QTextEdit.NoWrap) def append(self, text): self.moveCursor(QTextCursor.End) self.insertPlainText(text) sb = self.verticalScrollBar() sb.setValue(sb.maximum()) def append_line(self, text): self.append(text + '\n') class ProgramWidget(QWidget): def __init__(self, hub_client : HubClient, hub_monitor : HubMonitor, *args, **kwargs): super().__init__(*args, **kwargs) self._client = hub_client self._monitor = hub_monitor self._executing_program_label = QLabel() self._slot_spinbox = QSpinBox() self._run_button = QPushButton('Run') self._run_button.clicked.connect(self.run_program) self._stop_button = QPushButton('Stop') self._stop_button.clicked.connect(self.stop_program) runstop_widget = QWidget() layout = QHBoxLayout(runstop_widget) layout.addWidget(QLabel('Slot:')) layout.addWidget(self._slot_spinbox) layout.addWidget(self._run_button) layout.addWidget(self._stop_button) box = QGroupBox('Program Execution') layout = QFormLayout(box) layout.addRow('Executing Program ID:', self._executing_program_label) layout.addRow(runstop_widget) layout = QVBoxLayout() layout.addWidget(box) self.setLayout(layout) def refresh(self): is_connected = self._client.state == ConnectionState.TELEMETRY self._executing_program_label.setText(self._monitor.execution_status[0]) self._run_button.setEnabled(is_connected) self._stop_button.setEnabled(is_connected) def run_program(self): slot = self._slot_spinbox.value() r = self._client.program_execute(slot) logger.debug('Program execute returns: %s', r) def stop_program(self): r = self._client.program_terminate() logger.debug('Program terminate returns: %s', r) class MainWindow(QMainWindow): def __init__(self, hub_client, hub_monitor, *args, **kwargs): super().__init__(*args, **kwargs) status = hub_monitor.status self._client = 
hub_client self._hub_monitor = hub_monitor self.position_widget = PositionStatusWidget(status) self.motion_widget = MotionSensorWidget(status) self.program_widget = ProgramWidget(hub_client, hub_monitor) self.port_widget = DevicePortWidget(status) self.console = ConsoleWidget() self.list_button = QPushButton('List') self.list_button.clicked.connect(self.list_programs) # Top row (status) top_box = QWidget() layout = QHBoxLayout(top_box) layout.addWidget(ConnectionWidget(hub_client)) layout.addWidget(self.position_widget) layout.addWidget(self.motion_widget) # Button bar buttons = QWidget() layout = QHBoxLayout(buttons) layout.addWidget(self.list_button) mw = QWidget() layout = QVBoxLayout(mw) layout.addWidget(top_box) layout.addWidget(buttons) layout.addWidget(self.program_widget) layout.addWidget(self.port_widget) layout.addWidget(self.console) self.setCentralWidget(mw) hub_monitor.events.console_print += self.console.append # Timer refresh trick from https://github.com/Taar2/pyqt5-modelview-tutorial/blob/master/modelview_3.py # this trick is used to work around the issue of updating UI from background threads -- i.e. events # raised by HubClient. timer = QtCore.QTimer(self) timer.setInterval(200) timer.timeout.connect(self.refresh) timer.start() def refresh(self): is_connected = self._client.state == ConnectionState.TELEMETRY is_connected_usb = is_connected and self._hub_monitor.status.is_usb_connected self.list_button.setEnabled(is_connected_usb) self.position_widget.refresh() self.motion_widget.refresh() self.port_widget.refresh() self.program_widget.refresh() def list_programs(self): storage_status = self._client.get_storage_status() if storage_status is not None: list_programs(storage_status) def run_program(self): slot = 4 r = self._client.program_execute(slot) print('Program execute returns: ', r) logger.info("LEGO status app starting up") hc = HubClient() monitor = HubMonitor(hc) monitor.logger = ProgramHubLogger('logs/program') app = QApplication(sys.argv) window = MainWindow(hc, monitor) window.setWindowTitle('LEGO Hub Status') window.show() hc.start() sys.exit(app.exec_())
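# --- Added headless sketch (not part of the original file), using only calls
# the GUI above already makes; assumes the hub connects and reaches telemetry.
#
#   hc = HubClient()
#   hc.start()
#   # ... once hc.state == ConnectionState.TELEMETRY ...
#   print(hc.get_storage_status())   # same data that list_programs() formats
#   hc.program_execute(0)            # run the program in slot 0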
33.823232
119
0.667164
4,528
0.676124
0
0
0
0
0
0
795
0.11871
6a725ee4987cc406e04ed4e04ead31dbd1e9b6ea
1,088
py
Python
To-D0-App-main/base/views.py
shagun-agrawal/To-Do-App
083081690fe9d291f13c0452a695a092b7544ab2
[ "MIT" ]
1
2021-04-08T14:12:38.000Z
2021-04-08T14:12:38.000Z
To-D0-App-main/base/views.py
shagun-agrawal/To-Do-App
083081690fe9d291f13c0452a695a092b7544ab2
[ "MIT" ]
null
null
null
To-D0-App-main/base/views.py
shagun-agrawal/To-Do-App
083081690fe9d291f13c0452a695a092b7544ab2
[ "MIT" ]
null
null
null
from django.shortcuts import render
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy

from django.contrib.auth.views import LoginView

from .models import Task

# Create your views here.

class CustomLoginView(LoginView):
    template_name = 'base/login.html'
    fields = '__all__'
    redirect_authenticated_user = True

    def get_success_url(self):
        return reverse_lazy('tasks')

class TaskList(ListView):
    model = Task
    context_object_name = 'tasks'

class TaskDetail(DetailView):
    model = Task
    context_object_name = 'task'

class TaskCreate(CreateView):
    model = Task
    fields = '__all__'
    success_url = reverse_lazy('tasks')

class TaskUpdate(UpdateView):
    model = Task
    fields = '__all__'
    success_url = reverse_lazy('tasks')

class TaskDelete(DeleteView):
    model = Task
    context_object_name = 'task'
    success_url = reverse_lazy('tasks')
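# --- Added companion sketch (not from the repo): the views above resolve
# reverse_lazy('tasks'), so a urls.py must define that name. Route paths and
# names here are illustrative only.
#
#   from django.urls import path
#   from .views import (CustomLoginView, TaskList, TaskDetail,
#                       TaskCreate, TaskUpdate, TaskDelete)
#
#   urlpatterns = [
#       path('login/', CustomLoginView.as_view(), name='login'),
#       path('', TaskList.as_view(), name='tasks'),
#       path('task/<int:pk>/', TaskDetail.as_view(), name='task'),
#       path('task-create/', TaskCreate.as_view(), name='task-create'),
#       path('task-update/<int:pk>/', TaskUpdate.as_view(), name='task-update'),
#       path('task-delete/<int:pk>/', TaskDelete.as_view(), name='task-delete'),
#   ]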
24.727273
73
0.714154
705
0.647978
0
0
0
0
0
0
117
0.107537
6a72d886218147f91e76b4f7f571b23929432026
966
py
Python
tests/unit_tests/test_nn/test_converters/test_tensorflow/test_Dropout.py
samysweb/dnnv
58fb95b7300914d9da28eed86c39eca473b1aaef
[ "MIT" ]
5
2022-01-28T20:30:34.000Z
2022-03-17T09:26:52.000Z
tests/unit_tests/test_nn/test_converters/test_tensorflow/test_Dropout.py
samysweb/dnnv
58fb95b7300914d9da28eed86c39eca473b1aaef
[ "MIT" ]
9
2022-01-27T03:50:28.000Z
2022-02-08T18:42:17.000Z
tests/unit_tests/test_nn/test_converters/test_tensorflow/test_Dropout.py
samysweb/dnnv
58fb95b7300914d9da28eed86c39eca473b1aaef
[ "MIT" ]
2
2022-02-03T17:32:43.000Z
2022-03-24T16:38:49.000Z
import numpy as np from dnnv.nn.converters.tensorflow import * from dnnv.nn.operations import * TOL = 1e-6 def test_Dropout_consts(): x = np.array([3, 4]).astype(np.float32) op = Dropout(x) tf_op = TensorflowConverter().visit(op) result_ = tf_op() assert isinstance(result_, tuple) assert len(result_) == 2 result, none = result_ assert none is None y = np.array([3, 4]).astype(np.float32) assert np.all(result >= (y - TOL)) assert np.all(result <= (y + TOL)) def test_Dropout_x_is_op(): x = np.array([3, 4]).astype(np.float32) input_op = Input((2,), np.dtype(np.float32)) op = Dropout(input_op) tf_op = TensorflowConverter().visit(op) result_ = tf_op(x) assert isinstance(result_, tuple) assert len(result_) == 2 result, none = result_ assert none is None y = np.array([3, 4]).astype(np.float32) assert np.all(result >= (y - TOL)) assert np.all(result <= (y + TOL))
26.108108
48
0.63354
0
0
0
0
0
0
0
0
0
0
6a7328e83cbca070a32d28d91e1af148c593184e
4,202
py
Python
smarts/zoo/worker.py
idsc-frazzoli/SMARTS
bae0a6ea160330921edc94a7161a4e8cf72a1974
[ "MIT" ]
554
2020-10-16T02:30:35.000Z
2022-03-29T14:13:00.000Z
smarts/zoo/worker.py
idsc-frazzoli/SMARTS
bae0a6ea160330921edc94a7161a4e8cf72a1974
[ "MIT" ]
917
2020-10-17T00:10:31.000Z
2022-03-31T23:00:47.000Z
smarts/zoo/worker.py
idsc-frazzoli/SMARTS
bae0a6ea160330921edc94a7161a4e8cf72a1974
[ "MIT" ]
135
2020-10-20T01:44:49.000Z
2022-03-27T04:51:31.000Z
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Run an agent in its own (independent) process.

What Agent code does is out of our direct control, so we want to avoid any
interactions with global state that might be present in the SMARTS process.

To protect and isolate Agents from any pollution of global state in the main
SMARTS process, we spawn Agents in their own fresh and independent python
process.

This script is called from within SMARTS to instantiate a remote agent.
The protocol is as follows:

1. SMARTS calls: worker.py --port 5467 # sets a unique port per agent
2. worker.py will begin listening on port 5467.
3. SMARTS connects to (ip, 5467) as a client.
4. SMARTS calls `build()` rpc with `AgentSpec` as input.
5. worker.py receives the `AgentSpec` instances and builds the Agent.
6. SMARTS calls `act()` rpc with observation as input and receives the actions
   as response from worker.py.
"""

import argparse
import importlib
import logging
import os
import signal
import sys
from concurrent import futures

import grpc

from smarts.zoo import worker_pb2_grpc, worker_servicer

# Front-load some expensive imports so as to not block the simulation
modules = [
    "smarts.core.utils.pybullet",
    "smarts.core.utils.sumo",
    "smarts.core.sumo_road_network",
    "numpy",
    "sklearn",
    "shapely",
    "scipy",
    "trimesh",
    "panda3d",
    "gym",
    "ray",
]

for mod in modules:
    try:
        importlib.import_module(mod)
    except ImportError:
        if mod == "ray":
            print(
                "You need to install the ray dependency using pip install -e .[train] first"
            )
        if mod == "panda3d":
            print(
                "You need to install the panda3d dependency using pip install -e .[camera-obs] first"
            )
        pass

# End front-loaded imports

logging.basicConfig(level=logging.INFO)
log = logging.getLogger(f"worker.py - pid({os.getpid()})")


def serve(port):
    ip = "[::]"
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
    worker_pb2_grpc.add_WorkerServicer_to_server(
        worker_servicer.WorkerServicer(), server
    )
    server.add_insecure_port(f"{ip}:{port}")
    server.start()
    log.debug(f"Worker - ip({ip}), port({port}), pid({os.getpid()}): Started serving.")

    def stop_server(unused_signum, unused_frame):
        server.stop(0)
        log.debug(
            f"Worker - ip({ip}), port({port}), pid({os.getpid()}): Received interrupt signal."
) # Catch keyboard interrupt and terminate signal signal.signal(signal.SIGINT, stop_server) signal.signal(signal.SIGTERM, stop_server) # Wait to receive server termination signal server.wait_for_termination() log.debug(f"Worker - ip({ip}), port({port}), pid({os.getpid()}): Server exited") if __name__ == "__main__": parser = argparse.ArgumentParser("Run an agent in an independent process.") parser.add_argument( "--port", type=int, required=True, help="Port to listen for remote client connections.", ) args = parser.parse_args() serve(args.port)
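# --- Editor's note: a minimal sketch of the client side of the protocol
# described in the module docstring, added for illustration. It assumes the
# generated stub is named `WorkerStub` in worker_pb2_grpc; the request and
# response message types live in the generated worker_pb2 module and are not
# shown in this file, so the calls below are illustrative, not the exact
# SMARTS API.
#
# import grpc
# from smarts.zoo import worker_pb2_grpc
#
# channel = grpc.insecure_channel("localhost:5467")
# stub = worker_pb2_grpc.WorkerStub(channel)
# # stub.build(...)  # send the serialized AgentSpec
# # stub.act(...)    # send an observation, receive the agent's actions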
34.442623
155
0.702285
0
0
0
0
0
0
0
0
2,854
0.6792
6a744ccf0662773e4f40dc632d9bd05f720ada5c
2,846
py
Python
week2/problems/problem2.py
Nburkhal/mit-cs250
a3d32a217deb2cfa1b94d8188bef73c0742b1245
[ "MIT" ]
null
null
null
week2/problems/problem2.py
Nburkhal/mit-cs250
a3d32a217deb2cfa1b94d8188bef73c0742b1245
[ "MIT" ]
null
null
null
week2/problems/problem2.py
Nburkhal/mit-cs250
a3d32a217deb2cfa1b94d8188bef73c0742b1245
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Now write a program that calculates the minimum fixed monthly payment needed
in order to pay off a credit card balance within 12 months. By a fixed
monthly payment, we mean a single number which does not change each month,
but instead is a constant amount that will be paid each month.

In this problem, we will not be dealing with a minimum monthly payment rate.

The following variables contain values as described below:

balance - the outstanding balance on the credit card

annualInterestRate - annual interest rate as a decimal

The program should print out one line: the lowest monthly payment that will
pay off all debt in under 1 year, for example:

Lowest Payment: 180

Assume that the interest is compounded monthly according to the balance at
the end of the month (after the payment for that month is made). The monthly
payment must be a multiple of $10 and is the same for all months. Notice
that it is possible for the balance to become negative using this payment
scheme, which is okay. A summary of the required math is found below:

Monthly interest rate = (Annual interest rate) / 12.0
Monthly unpaid balance = (Previous balance) - (Minimum fixed monthly payment)
Updated balance each month = (Monthly unpaid balance) + (Monthly interest
rate x Monthly unpaid balance)


Test Case 1:

balance = 3329
annualInterestRate = 0.2

Result Your Code Should Generate:
-------------------
Lowest Payment: 310


Test Case 2:

balance = 4773
annualInterestRate = 0.2

Result Your Code Should Generate:
-------------------
Lowest Payment: 440


Test Case 3:

balance = 3926
annualInterestRate = 0.2

Result Your Code Should Generate:
-------------------
Lowest Payment: 360
"""

# Establish variables that we know / are needed for the evaluation.
# Counter optional
balance = 3329
annualInterestRate = 0.2
monthlyInterestRate = annualInterestRate / 12
monthlyPayment = 0
updatedBalance = balance
counter = 0

# Will loop through everything until we find a rate that will reduce
# updatedBalance to 0.
while updatedBalance > 0:
    # It was stated that payments needed to happen in increments of $10
    monthlyPayment += 10
    # Reset back to the actual balance when the previous attempt fails.
    updatedBalance = balance
    month = 1

    # For 12 months and while balance is not 0...
    while month <= 12 and updatedBalance > 0:
        # Subtract the ($10*n) amount
        updatedBalance -= monthlyPayment
        # Compound the interest AFTER making monthly payment
        interest = monthlyInterestRate * updatedBalance
        updatedBalance += interest
        # Increase month counter
        month += 1
    counter += 1

print("Lowest Payment: ", monthlyPayment)
print("Number of iterations: ", counter)
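# --- Editor's note: the $10-step linear search above needs one full 12-month
# simulation per candidate payment. A sketch of the same computation using
# bisection search, which converges far faster; this is an editorial addition,
# not part of the original solution, and it does not snap the answer to a $10
# multiple. Variable names mirror the ones above.
def lowest_payment_bisection(balance, annualInterestRate, tolerance=0.01):
    monthlyInterestRate = annualInterestRate / 12
    # Lower bound: pay the principal evenly. Upper bound: principal with a
    # full year of compounded interest, split over 12 months.
    lo = balance / 12
    hi = balance * (1 + monthlyInterestRate) ** 12 / 12
    while hi - lo > tolerance:
        payment = (lo + hi) / 2
        remaining = balance
        for _ in range(12):
            remaining = (remaining - payment) * (1 + monthlyInterestRate)
        if remaining > 0:
            lo = payment  # payment too small
        else:
            hi = payment  # payment large enough
    return round((lo + hi) / 2, 2)

# Example: lowest_payment_bisection(3329, 0.2) returns roughly 310, matching
# Test Case 1 above up to the $10 rounding.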
37.447368
104
0.713282
0
0
0
0
0
0
0
0
2,287
0.803584
6a74aa2ca33901c3b2b2f9d3fd978d06054719fb
2,410
py
Python
leetcode_python/Sort/sort-characters-by-frequency.py
yennanliu/CS_basics
3c50c819897a572ff38179bfb0083a19b2325fde
[ "Unlicense" ]
18
2019-08-01T07:45:02.000Z
2022-03-31T18:05:44.000Z
leetcode_python/Sort/sort-characters-by-frequency.py
yennanliu/CS_basics
3c50c819897a572ff38179bfb0083a19b2325fde
[ "Unlicense" ]
null
null
null
leetcode_python/Sort/sort-characters-by-frequency.py
yennanliu/CS_basics
3c50c819897a572ff38179bfb0083a19b2325fde
[ "Unlicense" ]
15
2019-12-29T08:46:20.000Z
2022-03-08T14:14:05.000Z
# V0 import collections class Solution(object): def frequencySort(self, s): count = collections.Counter(s) count_dict = dict(count) count_tuple_sorted = sorted(count_dict.items(), key=lambda kv : -kv[1]) res = '' for item in count_tuple_sorted: res += item[0] * item[1] return res # V0' # IDEA : collections.Counter(s).most_common class Solution(object): def frequencySort(self, s): return ''.join(c * t for c, t in collections.Counter(s).most_common()) # V1 # IDEA : SORT # https://blog.csdn.net/fuxuemingzhu/article/details/79437548 import collections class Solution(object): def frequencySort(self, s): """ :type s: str :rtype: str """ count = collections.Counter(s).most_common() res = '' for c, v in count: res += c * v return res ### Test case: s=Solution() assert s.frequencySort(['a','b','c','c']) == 'ccab' assert s.frequencySort(['a']) == 'a' assert s.frequencySort(['a','A','c','c']) == 'ccaA' assert s.frequencySort(['c','c','c']) == 'ccc' assert s.frequencySort([]) == '' assert s.frequencySort(['','','']) == '' # V1' # http://bookshadow.com/weblog/2016/11/02/leetcode-sort-characters-by-frequency/ class Solution(object): def frequencySort(self, s): """ :type s: str :rtype: str """ return ''.join(c * t for c, t in collections.Counter(s).most_common()) # V2 import collections class Solution(object): def frequencySort(self, s): # sort Counter by value # https://stackoverflow.com/questions/20950650/how-to-sort-counter-by-value-python s_freq_dict = collections.Counter(s).most_common() output = '' for i in range(len(s_freq_dict)): output = output + (s_freq_dict[i][0]*s_freq_dict[i][1]) return output # V2' # Time: O(n) # Space: O(n) import collections class Solution(object): def frequencySort(self, s): """ :type s: str :rtype: str """ freq = collections.defaultdict(int) for c in s: freq[c] += 1 counts = [""] * (len(s)+1) for c in freq: counts[freq[c]] += c result = "" for count in reversed(range(len(counts)-1)): for c in counts[count]: result += c * count return result
27.078652
87
0.568465
1,767
0.733195
0
0
0
0
0
0
621
0.257676
6a750b59d5869e7c84ae1fcca0d133a7dbf28cce
9,165
py
Python
eval/scripts/human/html_gen.py
chateval/chatevalv2
7ba96d81842db00427a6d6351d5cea76a8766450
[ "Apache-2.0" ]
5
2018-06-11T19:47:23.000Z
2020-03-04T01:29:15.000Z
eval/scripts/human/html_gen.py
chateval/app
7ba96d81842db00427a6d6351d5cea76a8766450
[ "Apache-2.0" ]
12
2018-07-11T18:50:13.000Z
2022-02-10T10:45:58.000Z
eval/scripts/human/html_gen.py
chateval/app
7ba96d81842db00427a6d6351d5cea76a8766450
[ "Apache-2.0" ]
1
2018-06-29T14:52:16.000Z
2018-06-29T14:52:16.000Z
"""Stores all the helper functions that generate html""" import random def generate_2choice_html(example): '''Makes html for ranking form for the specified row index. Returns the HTML for a table of radio buttons used for ranking, as well as a count of the total number of radio buttons. ''' # Check for duplicates. if example.target_lines[0] == example.target_lines[1]: return "", 0 # Find all the non-duplicate target indices. target_indices = [0, 1] # Randomize the order targets are shown in. random.shuffle(target_indices) num_targets = len(target_indices) source_html = '' speaker = 'A' for utterance in example.source_line_utterances(): source_html += '<h4>Speaker %s: %s</h4>' % (speaker, utterance) speaker = 'A' if speaker == 'B' else 'B' html = """ <br/> <div class="panel panel-default btn-group"> %s <br/> <table> """ % (source_html) html += """ <tr> <td>Speaker %s: %s</td>""" % (speaker, example.target_lines[target_indices[0]]) html += """ <td> <label class="btn"> <input type="radio" class="%s" name="%s-target-%s" data-col="1" value="1"/> </label> </td> </tr>""" % (example.key, example.key, target_indices[0]) html += """ <tr> <td>Speaker %s: %s</td>""" % (speaker, example.target_lines[target_indices[1]]) html += """ <td> <label class="btn"> <input type="radio" class="%s" name="%s-target-%s" data-col="1" value="1"/> </label> </td> </tr>""" % (example.key, example.key, target_indices[1]) html += """ <tr> <td>It's a tie.</td> <td> <label class="btn"> <input type="radio" class="%s" name="%s-target-tie" data-col="1" value="1"/> </label> </td> </tr>""" % (example.key, example.key) html += """ </table> </div> """ return html, 1 def generate_ranking_tables_html(example): '''Makes html for ranking form for the specified row index. Returns the HTML for a table of radio buttons used for ranking, as well as a count of the total number of radio buttons. ''' # Find all the non-duplicate target indices. target_indices = [] for idx in range(len(example.target_lines)): current = example.target_lines[idx] if current not in example.target_lines[0:idx] or idx == 0: target_indices.append(idx) # Randomize the order targets are shown in. random.shuffle(target_indices) num_targets = len(target_indices) html = """ <br/> <div class="panel panel-default btn-group"> <h4>Speaker A: %s</h4> <table> <tr> <th></th> """ % example.source_line for idx in range(num_targets): if idx == 0: tag = 'best' elif idx == num_targets - 1: tag = 'worst' else: tag = '' html += '<th align="center">%s<br>%s</th>' % (tag, idx+1) html += "</tr>" for idx in target_indices: html += """ <tr> <td>Speaker B: %s</td>""" % (example.target_lines[idx]) # Add a row of radio buttons whose length is the number of options. 
for jdx in range(num_targets): html += """ <td> <label class="btn"> <input type="radio" class="%s" name="%s-target-%s" data-col="%s" value="%s"/> </label> </td>""" % (example.key, example.key, idx, jdx, jdx) html += "</tr>" html += """ </table> </div> """ return html, num_targets def generate_2choice_instructions(): return """ <p>Consider the following exchange between two speakers.</p> <p>Your task is to decide which response sounds better given the previous things said.</p> <p>If both responses are equally good, click "It's a tie."<p> <p><b>Example:</b><br/>Speaker A: can i get you something from the cafe?</p> <table> <tr><td>Speaker B: coffee would be great</td></tr> <tr><td>Speaker B: I don't know what to say.</td></tr> </table> <br/> <p>In this case, the first response is better as it directly answers Speaker A's question, so you should click the bubble next to it.</p> <h3>You must click the Submit button when you are finished. You must complete every question before you can click Submit.</h3> """ def generate_multuchoice_instructions(): return """ <p>Consider the following Twitter exchanges between Speakers A and B.</p> <p>Your task is to rank the possible responses by Speaker B from best to worst, where the best response should get the lowest ranking.</p> <br/> <p><b>Example:</b><br/>Speaker A: can i get you something from the cafe?</p> <table> <tr><td>Speaker B: coffee would be great</td></tr> <tr><td>Speaker B: can you believe he missed the shot?</td></tr> <tr><td>Speaker B: I don't know what to say.</td></tr> </table> <br/> <p>In this case, the first response should be given rank 1, the second rank 2, and the third rank 3.</p> <h3>You must click the Submit button when you are finished. You must complete every question before you can click Submit.</h3> """ def generate_HIT_html(num_required, tables_html, instructions): question_html_value = """ <HTMLQuestion xmlns="http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2011-11-11/HTMLQuestion.xsd"> <HTMLContent><![CDATA[ <!DOCTYPE html> <html> <head> <link crossorigin="anonymous" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.0.3/css/bootstrap.min.css" integrity="sha384-IS73LIqjtYesmURkDE9MXKbXqYA8rvKEp/ghicjem7Vc3mGRdQRptJSz60tvrB6+" rel="stylesheet" /><!-- The following snippet enables the 'responsive' behavior on smaller screens --> <style> table { border-collapse: collapse; display: block; } td, th { border: 1px solid #ccc; } th:empty { border: 0; } #collapseTrigger{ color:#fff; display: block; text-decoration: none; } * { margin: 0; padding: 0; } tr td:nth-child(1) { padding-left: 10px; padding-right: 10px; } .panel { padding: 10px } </style> <meta http-equiv='Content-Type' content='text/html; charset=UTF-8'/> <script src='https://s3.amazonaws.com/mturk-public/externalHIT_v1.js' type='text/javascript'></script> <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script> <script> $(function() { var col, el; $("input[type=radio]").click(function() { // Make sure only one radio button is enabled per column. el = $(this); col = el.data("col"); cl = el.attr("class"); //if cl.includes("ex-") { $("input." + cl + "[data-col=" + col + "]").prop("checked", false); //} el.prop("checked", true); console.log("Here!") // Only enable submit if enough radio buttons are checked. 
if ($('input:radio:checked').length >= """ + str(num_required) + """ ) { $("input[type=submit]").removeAttr("disabled"); } else { $("input[type=submit]").attr("disabled", "disabled"); } }); }); $(document).ready(function() { // Instructions expand/collapse var content = $('#instructionBody'); var trigger = $('#collapseTrigger'); content.show(); $('.collapse-text').text('(Click to collapse)'); trigger.click(function(){ content.toggle(); var isVisible = content.is(':visible'); if(isVisible){ $('.collapse-text').text('(Click to collapse)'); }else{ $('.collapse-text').text('(Click to expand)'); } }); // end expand/collapse }); </script> <title>Chatbot Evaluation Task</title> </head> <body> <div class="col-xs-12 col-md-12"><!-- Instructions --> <div class="panel panel-primary"> <!-- WARNING: the ids "collapseTrigger" and "instructionBody" are being used to enable expand/collapse feature --> <a class="panel-heading" href="javascript:void(0);" id="collapseTrigger"><strong>Rate the Chatbot's Responses</strong> <span class="collapse-text">(Click to expand)</span> </a> <div class="panel-body" id="instructionBody"> """ + instructions + """ </div> </div> </div> <!-- HTML to handle creating the HIT form --> <form name='mturk_form' method='post' id='mturk_form' action='https://workersandbox.mturk.com/mturk/externalSubmit'> <input type='hidden' value='' name='assignmentId' id='assignmentId'/> <!-- This is where you define your question(s) --> """ + tables_html + """ <!-- HTML to handle submitting the HIT --> <p><input type='submit' id='submitButton' value='Submit' /></p></form> <h4>You must fill out rankings for every question before you can submit.</h4> <script language='Javascript'>turkSetAssignmentID();</script> </body> </html> ]]> </HTMLContent> <FrameHeight>600</FrameHeight> </HTMLQuestion> """ return question_html_value
33.448905
295
0.59509
0
0
0
0
0
0
0
0
7,378
0.805019
6a75a5070f34939725c30b7941b46fda26295424
1,582
py
Python
python3_module_template/subproject/myexample.py
sdpython/python_project_template
e365b29ba9a7dfd2688f68eb7ff2b84a6a82cb57
[ "MIT" ]
null
null
null
python3_module_template/subproject/myexample.py
sdpython/python_project_template
e365b29ba9a7dfd2688f68eb7ff2b84a6a82cb57
[ "MIT" ]
null
null
null
python3_module_template/subproject/myexample.py
sdpython/python_project_template
e365b29ba9a7dfd2688f68eb7ff2b84a6a82cb57
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""
@file
@brief      This is the documentation of this module (myexample).
"""


class myclass:
    """
    This is the documentation for this class.

    **example with a sphinx directive**

    It works everywhere in the documentation.

    .. exref::
        :title: an example of use

        Just for documentation purpose.

        ::

            m = myclass(0)

    The old way:

    @example(an old example of use)

    This only works from the code, not inserted in a RST file.
    The source documentation is parsed and every such example is collected and
    placed in a page ``all_examples.rst`` (look at the source).

    @code
    m = myclass(0)
    @endcode

    @endexample

    **FAQ**

    .. faqref::
        :title: How to add a question ?

        Just look at this section. Look also :ref:`l-FAQ2`.

    .. faqref::
        :title: Add a label
        :lid: label1

        Look also :ref:`l-FAQ2`.

    **BLOC**

    .. blocref::
        :title: How to add a bloc
        :tag: aaaa

        Just look at this bloc. Look also :ref:`l-FAQ2`.

    An accent, é, to check it is working.
    A link to github source: :githublink:`source|py`.
    """

    def __init__(self, pa):
        """
        documentation for the constructor

        @param      pa      first parameter
        """
        self.pa = pa

    def get_value(self, mul):
        """
        returns the parameter multiplied by a value

        @param      mul     a float
        @return             a float
        """
        return self.pa * mul
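# --- Editor's note: a minimal usage example for the class documented above,
# added for illustration.
if __name__ == "__main__":
    m = myclass(2)
    print(m.get_value(3))  # 2 * 3 -> 6
    print(m.pa)            # the stored constructor parameter -> 2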
18.611765
58
0.54488
1,482
0.936197
0
0
0
0
0
0
1,429
0.902716
6a75c6bcf2a235fe76f46e51c4cc31283811626a
2,534
py
Python
simulation/dataset_G_1q_X_Z_N1.py
eperrier/QDataSet
383b38b9b4166848f72fac0153800525e66b477b
[ "MIT" ]
42
2021-08-17T02:27:59.000Z
2022-03-26T16:00:57.000Z
simulation/dataset_G_1q_X_Z_N1.py
eperrier/QDataSet
383b38b9b4166848f72fac0153800525e66b477b
[ "MIT" ]
1
2021-09-25T11:15:20.000Z
2021-09-27T04:18:25.000Z
simulation/dataset_G_1q_X_Z_N1.py
eperrier/QDataSet
383b38b9b4166848f72fac0153800525e66b477b
[ "MIT" ]
6
2021-08-17T02:28:04.000Z
2022-03-22T07:11:48.000Z
##############################################
"""
This module generates a dataset
"""
##############################################
# preamble
import numpy as np
from utilites import Pauli_operators, simulate, CheckNoise
################################################
# meta parameters
name = "G_1q_X_Z_N1"
################################################
# quantum parameters
dim = 2                                            # dimension of the system
Omega = 12                                         # qubit energy gap
static_operators = [0.5*Pauli_operators[3]*Omega]  # drift Hamiltonian
dynamic_operators = [0.5*Pauli_operators[1]]       # control Hamiltonian
noise_operators = [0.5*Pauli_operators[3]]         # noise Hamiltonian
initial_states = [
    np.array([[0.5, 0.5], [0.5, 0.5]]), np.array([[0.5, -0.5], [-0.5, 0.5]]),
    np.array([[0.5, -0.5j], [0.5j, 0.5]]), np.array([[0.5, 0.5j], [-0.5j, 0.5]]),
    np.array([[1, 0], [0, 0]]), np.array([[0, 0], [0, 1]])
    ]                                              # initial states of the qubit
measurement_operators = Pauli_operators[1:]        # measurement operators
##################################################
# simulation parameters
T = 1                                              # Evolution time
M = 1024                                           # Number of time steps
num_ex = 10000                                     # Number of examples
batch_size = 50                                    # batch size for TF
##################################################
# noise parameters
K = 2000                                           # Number of realizations
noise_profile = [1]                                # Noise type
###################################################
# control parameters
pulse_shape = "Gaussian"                           # Control pulse shape
num_pulses = 5                                     # Number of pulses per sequence
####################################################
# Generate the dataset
sim_parameters = dict(
    [(k, eval(k)) for k in
     ["name", "dim", "Omega", "static_operators", "dynamic_operators",
      "noise_operators", "measurement_operators", "initial_states", "T", "M",
      "num_ex", "batch_size", "K", "noise_profile", "pulse_shape",
      "num_pulses"]])
CheckNoise(sim_parameters)
simulate(sim_parameters)
####################################################
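# --- Editor's note (illustration): the dict([(k, eval(k)) ...]) idiom above
# gathers the listed module-level variables into one dict. An eval-free
# equivalent looks up the same names in globals() instead:
sim_parameters_alt = {k: globals()[k] for k in
                      ["name", "dim", "Omega", "static_operators",
                       "dynamic_operators", "noise_operators",
                       "measurement_operators", "initial_states", "T", "M",
                       "num_ex", "batch_size", "K", "noise_profile",
                       "pulse_shape", "num_pulses"]}
# sim_parameters_alt holds the same entries as sim_parameters above.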
56.311111
261
0.404893
0
0
0
0
0
0
0
0
1,192
0.470403
6a75e8b3a7f6a8bf44de0912c3cdfced6251b233
55
py
Python
configs/mmdet/detection/detection_tensorrt_static-300x300.py
zhiqwang/mmdeploy
997d111a6f4ca9624ab3b36717748e6ce002037d
[ "Apache-2.0" ]
746
2021-12-27T10:50:28.000Z
2022-03-31T13:34:14.000Z
configs/mmdet/detection/detection_tensorrt_static-300x300.py
zhiqwang/mmdeploy
997d111a6f4ca9624ab3b36717748e6ce002037d
[ "Apache-2.0" ]
253
2021-12-28T05:59:13.000Z
2022-03-31T18:22:25.000Z
configs/mmdet/detection/detection_tensorrt_static-300x300.py
zhiqwang/mmdeploy
997d111a6f4ca9624ab3b36717748e6ce002037d
[ "Apache-2.0" ]
147
2021-12-27T10:50:33.000Z
2022-03-30T10:44:20.000Z
_base_ = ['../_base_/base_tensorrt_static-300x300.py']
27.5
54
0.745455
0
0
0
0
0
0
0
0
43
0.781818
6a7641f27315b4a34aa454452b185ab3ffeddc05
505
py
Python
user_service/user_service/api.py
Ziang-Lu/Flask-Blog
8daf901a0ea0e079ad24a61fd7f16f1298514d4c
[ "MIT" ]
null
null
null
user_service/user_service/api.py
Ziang-Lu/Flask-Blog
8daf901a0ea0e079ad24a61fd7f16f1298514d4c
[ "MIT" ]
2
2020-06-09T08:40:42.000Z
2021-04-30T21:20:35.000Z
user_service/user_service/api.py
Ziang-Lu/Flask-Blog
8daf901a0ea0e079ad24a61fd7f16f1298514d4c
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ API definition module. """ from flask import Blueprint from flask_restful import Api from .resources.user import UserAuth, UserItem, UserList, UserFollow # Create an API-related blueprint api_bp = Blueprint(name='api', import_name=__name__) api = Api(api_bp) api.add_resource(UserList, '/users') api.add_resource(UserItem, '/users/<int:id>') api.add_resource(UserAuth, '/user-auth') api.add_resource( UserFollow, '/user-follow/<int:follower_id>/<followed_username>' )
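# --- Editor's note: a minimal sketch of calling the routes registered above
# from a client, added for illustration. The host, port, blueprint mount
# point, and request payloads are assumptions; adjust to how the app actually
# registers api_bp.
#
# import requests
# BASE = 'http://localhost:5000'
# requests.get(f'{BASE}/users')                       # UserList
# requests.get(f'{BASE}/users/1')                     # UserItem
# requests.post(f'{BASE}/user-auth', json={...})      # UserAuth
# requests.post(f'{BASE}/user-follow/1/some_username')  # UserFollow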
22.954545
68
0.740594
0
0
0
0
0
0
0
0
180
0.356436
6a77df2fb34c60a66cb0710a264af376f888be93
2,112
py
Python
advanced/itertools_funcs.py
ariannasg/python3-essential-training
9b52645f5ccb57d2bda5d5f4a3053681a026450a
[ "MIT" ]
1
2020-06-02T08:37:41.000Z
2020-06-02T08:37:41.000Z
advanced/itertools_funcs.py
ariannasg/python3-training
9b52645f5ccb57d2bda5d5f4a3053681a026450a
[ "MIT" ]
null
null
null
advanced/itertools_funcs.py
ariannasg/python3-training
9b52645f5ccb57d2bda5d5f4a3053681a026450a
[ "MIT" ]
null
null
null
#!/usr/bin/env python3

import itertools

# itertools is a module that's not technically a set of built-in functions but
# it is part of the standard library that comes with python.
# it's useful for creating and using iterators.


def main():
    print('some infinite iterators')
    # cycle iterator can be used to cycle over a collection over and over
    seq1 = ["Joe", "John", "Mike"]
    cycle1 = itertools.cycle(seq1)
    print(next(cycle1))
    print(next(cycle1))
    print(next(cycle1))
    print(next(cycle1))
    print(next(cycle1))

    # use count to create a simple counter
    count1 = itertools.count(100, 3)
    print(next(count1))
    print(next(count1))
    print(next(count1))

    print('some non-infinite iterators')
    values = [10, 5, 20, 30, 40, 50, 40, 30]

    # accumulate creates an iterator that accumulates/aggregates values
    print(list(itertools.accumulate(values)))  # this defaults to addition
    print(list(itertools.accumulate(values, max)))
    print(list(itertools.accumulate(values, min)))

    # use chain to connect sequences together
    x = itertools.chain('ABCD', '1234')
    print(list(x))

    # dropwhile and takewhile will return values until
    # a certain condition is met that stops them. they are similar to the
    # filter built-in function.
    # dropwhile will drop the values from the sequence as long as the
    # condition of the function is true and then returns the rest of values
    print(list(itertools.dropwhile(is_less_than_forty, values)))
    # takewhile will keep the values from the sequence as long as the
    # condition of the function is true and then stops giving data
    print(list(itertools.takewhile(is_less_than_forty, values)))


def is_less_than_forty(x):
    return x < 40


if __name__ == "__main__":
    main()


# CONSOLE OUTPUT:
# some infinite iterators
# Joe
# John
# Mike
# Joe
# John
# 100
# 103
# 106
# some non-infinite iterators
# [10, 15, 35, 65, 105, 155, 195, 225]
# [10, 10, 20, 30, 40, 50, 50, 50]
# [10, 5, 5, 5, 5, 5, 5, 5]
# ['A', 'B', 'C', 'D', '1', '2', '3', '4']
# [40, 50, 40, 30]
# [10, 5, 20, 30]
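# --- Editor's note: two more itertools helpers in the same spirit, added for
# illustration (not part of the original lesson).
# islice slices an iterator lazily, like seq[start:stop] for generators:
#   list(itertools.islice(itertools.count(0), 5))  ->  [0, 1, 2, 3, 4]
# groupby groups consecutive equal keys (sort first for global groups):
#   [(k, list(g)) for k, g in itertools.groupby('aabbc')]
#   ->  [('a', ['a', 'a']), ('b', ['b', 'b']), ('c', ['c'])]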
29.333333
78
0.673295
0
0
0
0
0
0
0
0
1,244
0.589015
6a782fcc9b346f1edc133e8b8d12314c1cc0a5ff
421
py
Python
aula 05/model/Pessoa.py
Azenha/AlgProg2
062b5caac24435717074a18a7499f80130489a46
[ "MIT" ]
null
null
null
aula 05/model/Pessoa.py
Azenha/AlgProg2
062b5caac24435717074a18a7499f80130489a46
[ "MIT" ]
null
null
null
aula 05/model/Pessoa.py
Azenha/AlgProg2
062b5caac24435717074a18a7499f80130489a46
[ "MIT" ]
null
null
null
class Pessoa:
    def __init__(self, codigo, nome, endereco, telefone):
        self.__codigo = int(codigo)
        self.nome = str(nome)
        self._endereco = str(endereco)
        self.__telefone = str(telefone)

    def imprimeNome(self):
        print(f"You can call this person {self.nome}.")

    def __imprimeTelefone(self):
        print(f"You can call this person at the number {self.__telefone}.")
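# --- Editor's note: a short demonstration of how Python name mangling applies
# to the double-underscore members above, added for illustration (the sample
# values are hypothetical).
if __name__ == "__main__":
    p = Pessoa(1, "Ana", "123 Main St", "555-0100")
    p.imprimeNome()
    # __telefone and __imprimeTelefone are name-mangled; outside the class
    # they are only reachable via the _Pessoa prefix:
    p._Pessoa__imprimeTelefone()
    print(p._Pessoa__codigo)  # -> 1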
35.083333
79
0.650831
424
1
0
0
0
0
0
0
114
0.268868
6a78c291da971309bc64ed073bb014f9b709c144
119
py
Python
examples/plain_text_response.py
lukefx/stardust
4d9e399ffba9d4a47a2f428b59b5abf4c5bd41ad
[ "MIT" ]
2
2020-11-27T10:30:38.000Z
2020-12-22T16:48:49.000Z
examples/plain_text_response.py
lukefx/stardust
4d9e399ffba9d4a47a2f428b59b5abf4c5bd41ad
[ "MIT" ]
null
null
null
examples/plain_text_response.py
lukefx/stardust
4d9e399ffba9d4a47a2f428b59b5abf4c5bd41ad
[ "MIT" ]
null
null
null
from starlette.responses import PlainTextResponse async def serve(req): return PlainTextResponse("Hello World!")
19.833333
49
0.789916
0
0
0
0
0
0
66
0.554622
14
0.117647
6a78c857a857449cf31704c6af0759d610215a2d
25,852
py
Python
pypyrus_logbook/logger.py
t3eHawk/pypyrus_logbook
bd647a1c355b07e8df28c0d7298fcfe68cd9572e
[ "MIT" ]
null
null
null
pypyrus_logbook/logger.py
t3eHawk/pypyrus_logbook
bd647a1c355b07e8df28c0d7298fcfe68cd9572e
[ "MIT" ]
null
null
null
pypyrus_logbook/logger.py
t3eHawk/pypyrus_logbook
bd647a1c355b07e8df28c0d7298fcfe68cd9572e
[ "MIT" ]
2
2019-02-06T08:05:43.000Z
2019-02-06T08:06:35.000Z
import atexit
import datetime as dt
import os
import platform
import pypyrus_logbook as logbook
import sys
import time
import traceback

from .conf import all_loggers
from .formatter import Formatter
from .header import Header
from .output import Root
from .record import Record
from .sysinfo import Sysinfo


class Logger():
    """This class represents a single logger.

    Logger by itself is a complex set of methods, items and commands that
    together give functionality for advanced logging in different outputs:
    console, file, email, database table, HTML document - and using
    information from different inputs: user messages, traceback, frames,
    user parameters, execution arguments and systems descriptors.

    Each logger must have a unique name which will help to identify it.
    Main application logger will have the same name as a python script file.
    It can be accessed by native logbook methods or by calling `getlogger()`
    method with no name.

    Parameters
    ----------
    name : str, optional
        The argument is used to define the `name` attribute.
    app : str, optional
        The argument is used to set the `app` attribute.
    desc : str, optional
        The argument is used to set the `desc` attribute.
    version : str, optional
        The argument is used to set the `version` attribute.
    status : bool, optional
        The argument is used to open or close output `root`.
    console : bool, optional
        The argument is used to open or close output `console`.
    file : bool, optional
        The argument is used to open or close output `file`.
    email : bool, optional
        The argument is used to open or close output `email`.
    html : bool, optional
        The argument is used to open or close output `html`.
    table : bool, optional
        The argument is used to open or close output `table`.
    directory : str, optional
        The argument is used to set logging file folder.
    filename : str, optional
        The argument is used to set logging file name.
    extension : str, optional
        The argument is used to set logging file extension.
    smtp : dict, optional
        The argument is used to configure SMTP connection.
    db : dict, optional
        The argument is used to configure DB connection.
    format : str, optional
        The argument is used to set record template.
    info : bool, optional
        The argument is used to filter info records. The default is True.
    debug : bool, optional
        The argument is used to filter debug records. The default is False.
    warning : bool, optional
        The argument is used to filter warning records. The default is True.
    error : bool, optional
        The argument is used to filter error records. The default is True.
    critical : bool, optional
        The argument is used to filter critical records. The default is True.
    alarming : bool, optional
        The argument is used to enable or disable the alarming mechanism.
        The default is True.
    control : bool, optional
        The argument is used to enable or disable execution break in case
        of error. The default is True.
    maxsize : int or bool, optional
        The argument is used to define maximum size of output file. Must be
        presented as number of bytes. The default is 10 Mb.
    maxdays : int or bool, optional
        The argument is used to define maximum number of days that will be
        logged to same file. The default is 1 which means that a new output
        file will be opened at each 00:00:00.
    maxlevel : int or bool, optional
        The argument is used to define the break error level (WARNING = 0,
        ERROR = 1, CRITICAL = 2). Everything higher than the break level
        will interrupt application execution. The default is 2.
    maxerrors : int or bool, optional
        The argument is used to define maximum number of errors. The
        default is False which means it is disabled.
    Attributes
    ----------
    name : str
        Name of the logger.
    app : str
        Name of the application that we are logging.
    desc : str
        Description of the application that we are logging.
    version : str
        Version of the application that we are logging.
    start_date : datetime.datetime
        Date when logging was started.
    rectypes : dict
        All available record types. Keys are used in `Logger` write methods
        as `rectype` argument. Values are used in formatting. So if you wish
        to modify the `rectype` form then edit the appropriate value here.
        If you wish to use your own record types then just add them to this
        dictionary. By default we provide the next few record types:

        +---------+---------+
        |   Key   |  Value  |
        +=========+=========+
        |none     |NONE     |
        +---------+---------+
        |info     |INFO     |
        +---------+---------+
        |debug    |DEBUG    |
        +---------+---------+
        |warning  |WARNING  |
        +---------+---------+
        |error    |ERROR    |
        +---------+---------+
        |critical |CRITICAL |
        +---------+---------+
    messages : dict
        Messages that are printed with some `Logger` methods like `ok()`,
        `success()`, `fail()`. If you wish to modify the text of these
        messages just edit the value of the appropriate item.
    with_error : bool
        The flag shows that the logger caught errors in the application
        during its execution.
    count_errors : int
        Number of errors that the logger caught in the application during
        its execution.
    filters : dict
        Record types filters. To filter a record type just set the
        corresponding item value to False.
    root : pypyrus_logbook.output.Root
        The output `Root` object.
    console : pypyrus_logbook.output.Console
        The output `Console` object. Shortcut for `Logger.root.console`.
    file : pypyrus_logbook.output.File
        The output file. Shortcut for `Logger.root.file`.
    email : pypyrus_logbook.output.Email
        The output email. Shortcut for `Logger.root.email`.
    html: pypyrus_logbook.output.HTML
        The output HTML document. Shortcut for `Logger.root.html`.
    table: pypyrus_logbook.output.Table
        The output table. Shortcut for `Logger.root.table`.
    formatter : pypyrus_logbook.formatter.Formatter
        Logger formatter which sets all formatting configuration like
        record template, error message template, line length etc.
    sysinfo : pypyrus_logbook.sysinfo.Sysinfo
        Special input object which parses different inputs including system
        specifications, flag arguments, execution parameters, user
        parameters and environment variables, and transforms all of that
        into a `Dataset` object. Through the `Dataset` object data can be
        easily accessed by item access or by attribute, like
        `sysinfo.desc['hostname']` or `sysinfo.desc.hostname`.
    header : pypyrus_logbook.header.Header
        The header that can be printed to the writable output.
    """

    def __init__(self, name=None, app=None, desc=None, version=None,
                 status=True, console=True, file=True, email=False,
                 html=False, table=False, directory=None, filename=None,
                 extension=None, smtp=None, db=None, format=None, info=True,
                 debug=False, warning=True, error=True, critical=True,
                 alarming=True, control=True, maxsize=(1024*1024*10),
                 maxdays=1, maxlevel=2, maxerrors=False):
        # Unique name of the logger.
        self._name = name
        # Attributes describing the application.
        self.app = None
        self.desc = None
        self.version = None
        # Some logger important attributes
        self._start_date = dt.datetime.now()
        self.rectypes = {'none': 'NONE', 'info': 'INFO', 'debug': 'DEBUG',
                         'warning': 'WARNING', 'error': 'ERROR',
                         'critical': 'CRITICAL'}
        self.messages = {'ok': 'OK', 'success': 'SUCCESS', 'fail': 'FAIL'}
        self._with_error = False
        self._count_errors = 0

        # Complete the initial configuration.
        self.configure(app=app, desc=desc, version=version, status=status,
                       console=console, file=file, email=email, html=html,
                       table=table, directory=directory, filename=filename,
                       extension=extension, smtp=smtp, db=db, format=format,
                       info=info, debug=debug, warning=warning, error=error,
                       critical=critical, alarming=alarming, control=control,
                       maxsize=maxsize, maxdays=maxdays, maxlevel=maxlevel,
                       maxerrors=maxerrors)

        # Output shortcuts.
        self.console = self.root.console
        self.file = self.root.file
        self.email = self.root.email
        self.html = self.root.html
        self.table = self.root.table

        # Set exit function.
        atexit.register(self._exit)

        # Add the created logger to the special all_loggers dictionary.
        all_loggers[self._name] = self
        pass

    def __str__(self):
        return f'<Logger object "{self._name}">'

    __repr__ = __str__

    @property
    def name(self):
        """Unique logger name."""
        return self._name

    @property
    def start_date(self):
        """Logging start date."""
        return self._start_date

    @property
    def with_error(self):
        """Flag that shows whether there was an error or not."""
        return self._with_error

    @property
    def count_errors(self):
        """The number of occurred errors."""
        return self._count_errors

    def configure(self, app=None, desc=None, version=None, status=None,
                  console=None, file=None, email=None, html=None, table=None,
                  directory=None, filename=None, extension=None, smtp=None,
                  db=None, format=None, info=None, debug=None, warning=None,
                  error=None, critical=None, alarming=None, control=None,
                  maxsize=None, maxdays=None, maxlevel=None, maxerrors=None):
        """Main method to configure the logger and all its attributes.
        This is the only right way to customize the logger. Parameters are
        the same as for creation.

        Parameters
        ----------
        app : str, optional
            The argument is used to set the `app` attribute.
        desc : str, optional
            The argument is used to set the `desc` attribute.
        version : str, optional
            The argument is used to set the `version` attribute.
        status : bool, optional
            The argument is used to open or close output `root`.
        console : bool, optional
            The argument is used to open or close output `console`.
        file : bool, optional
            The argument is used to open or close output `file`.
        email : bool, optional
            The argument is used to open or close output `email`.
        html : bool, optional
            The argument is used to open or close output `html`.
        table : bool, optional
            The argument is used to open or close output `table`.
        directory : str, optional
            The argument is used to set logging file folder.
        filename : str, optional
            The argument is used to set logging file name.
        extension : str, optional
            The argument is used to set logging file extension.
        smtp : dict, optional
            The argument is used to configure SMTP connection.
        db : dict, optional
            The argument is used to configure DB connection.
        format : str, optional
            The argument is used to set record template.
        info : bool, optional
            The argument is used to filter info records.
        debug : bool, optional
            The argument is used to filter debug records.
        warning : bool, optional
            The argument is used to filter warning records.
        error : bool, optional
            The argument is used to filter error records.
        critical : bool, optional
            The argument is used to filter critical records.
        alarming : bool, optional
            The argument is used to enable or disable the alarming
            mechanism.
        control : bool, optional
            The argument is used to enable or disable execution break in
            case of error.
        maxsize : int or bool, optional
            The argument is used to define maximum size of output file.
        maxdays : int or bool, optional
            The argument is used to define maximum number of days that will
            be logged to same file.
        maxlevel : int or bool, optional
            The argument is used to define the break error level.
        maxerrors : int or bool, optional
            The argument is used to define maximum number of errors.
        """
        if isinstance(app, str) is True:
            self.app = app
        if isinstance(desc, str) is True:
            self.desc = desc
        if isinstance(version, (str, int, float)) is True:
            self.version = version

        # Build the output root if it does not exist yet. Otherwise modify
        # the existing output if requested.
        if hasattr(self, 'root') is False:
            self.root = Root(self, console=console, file=file, email=email,
                             html=html, table=table, status=status,
                             directory=directory, filename=filename,
                             extension=extension, smtp=smtp, db=db)
        else:
            for key, value in {'console': console, 'file': file,
                               'email': email, 'html': html,
                               'table': table}.items():
                if value is True:
                    getattr(self.root, key).open()
                    if key == 'file':
                        getattr(self.root, key).new()
                elif value is False:
                    getattr(self.root, key).close()

        # Customize output file path.
        path = {}
        if directory is not None:
            path['dir'] = directory
        if filename is not None:
            path['name'] = filename
        if extension is not None:
            path['ext'] = extension
        if len(path) > 0:
            self.root.file.configure(**path)

        # Customize SMTP server.
        if isinstance(smtp, dict) is True:
            self.root.email.configure(**smtp)

        # Customize database connection.
        if isinstance(db, dict) is True:
            self.root.table.configure(**db)

        # Create the formatter in case it does not exist yet or just
        # customize it. Parameter format can be either string or dictionary.
        # When it is string then it must describe records format.
        # When it is dictionary it can contain any parameter of formatter
        # that must be customized.
        if isinstance(format, str) is True:
            format = {'record': format}
        if hasattr(self, 'formatter') is False:
            format = {} if isinstance(format, dict) is False else format
            self.formatter = Formatter(**format)
        elif isinstance(format, dict) is True:
            self.formatter.configure(**format)

        # Create or customize record type filters.
        if hasattr(self, 'filters') is False:
            self.filters = {}
        for key, value in {'info': info, 'debug': debug, 'error': error,
                           'warning': warning, 'critical': critical}.items():
            if isinstance(value, bool) is True:
                self.filters[key] = value

        # Customize limits and parameters of execution behaviour.
        if isinstance(maxsize, (int, float, bool)) is True:
            self._maxsize = maxsize
        if isinstance(maxdays, (int, float, bool)) is True:
            self._maxdays = maxdays
            self.__calculate_restart_date()
        if isinstance(maxlevel, (int, float, bool)) is True:
            self._maxlevel = maxlevel
        if isinstance(maxerrors, (int, float, bool)) is True:
            self._maxerrors = maxerrors
        if isinstance(alarming, bool) is True:
            self._alarming = alarming
        if isinstance(control, bool) is True:
            self._control = control

        # Initialize sysinfo instance when it does not exist yet.
        if hasattr(self, 'sysinfo') is False:
            self.sysinfo = Sysinfo(self)

        # Initialize header instance when it does not exist yet.
        if hasattr(self, 'header') is False:
            self.header = Header(self)
        pass

    def write(self, record):
        """Direct write to the output.

        Parameters
        ----------
        record : Record
            The argument is used to send it to the output `root`.
        """
        self.__check_file_stats()
        self.root.write(record)
        pass

    def record(self, rectype, message, error=False, **kwargs):
        """Basic method to write records.

        Parameters
        ----------
        rectype : str
            By default the method creates the record with the type NONE.
            That can be changed but depends on available record types.
            All registered record types are stored in the instance
            attribute rectypes.
            If you wish to use your own record type or change the
            presentation of an existing one then edit this dictionary.
        message : str
            The message that must be written.
        error : bool, optional
            If the record is an error then set this parameter to `True`.
        **kwargs
            The keyword arguments used for additional forms (variables)
            for record and message formatting.
        """
        if self.filters.get(rectype, True) is True:
            record = Record(self, rectype, message, error=error, **kwargs)
            self.write(record)
        pass

    def info(self, message, **kwargs):
        """Send INFO record to output."""
        rectype = 'info'
        self.record(rectype, message, **kwargs)
        pass

    def debug(self, message, **kwargs):
        """Send DEBUG record to the output."""
        rectype = 'debug'
        self.record(rectype, message, **kwargs)
        pass

    def error(self, message=None, rectype='error', format=None,
              alarming=False, level=1, **kwargs):
        """Send ERROR record to the output.

        If an exception in the current traceback exists then the method
        will format the exception according to the `formatter.error` string
        presentation. If `formatter.error` is set to `False` the exception
        will be just printed in original Python style.

        Also the method will send an alarm if the alarming attribute is
        `True`, email output is enabled and the SMTP server is configured.

        If one of the limit triggers fires then the application will be
        aborted.

        Parameters
        ----------
        message : str, optional
            The message that must be written instead of exception.
        rectype : str, optional
            The type of error according to `rectypes` dictionary.
        format : str, optional
            The format of the error message.
        alarming : bool
            The argument is used to enable or disable the alarming
            mechanism for this certain call.
        level : int
            The argument is used to describe the error level.
        **kwargs
            The keyword arguments used for additional forms (variables)
            for record and message formatting.
        """
        self._with_error = True
        self._count_errors += 1

        format = self.formatter.error if format is None else format

        # Parse the error.
        err_type, err_value, err_tb = sys.exc_info()
        if message is None and err_type is not None:
            if isinstance(format, str) is True:
                err_name = err_type.__name__
                err_value = err_value

                for tb in traceback.walk_tb(err_tb):
                    f_code = tb[0].f_code

                    err_file = os.path.abspath(f_code.co_filename)
                    err_line = tb[1]
                    err_obj = f_code.co_name

                    self.record(rectype, message, error=True,
                                err_name=err_name, err_value=err_value,
                                err_file=err_file, err_line=err_line,
                                err_obj=err_obj, **kwargs)
            elif format is False:
                exception = traceback.format_exception(err_type, err_value,
                                                       err_tb)
                message = '\n'
                message += ''.join(exception)
                self.record(rectype, message, **kwargs)
        else:
            message = message or ''
            self.record(rectype, message, **kwargs)

        # Break execution in case of critical error if permitted.
        # The alarm will be generated at exit if it is configured.
        if self._control is True:
            if level >= self._maxlevel:
                sys.exit()
            if self._maxerrors is not False:
                if self._count_errors > self._maxerrors:
                    sys.exit()

        # Send alarm if execution was not aborted but alarm is needed.
        if alarming is True:
            self.root.email.alarm()
        pass

    def warning(self, message=None, **kwargs):
        """Send WARNING error record to the output."""
        self.error(message, rectype='warning', level=0, **kwargs)
        pass

    def critical(self, message=None, **kwargs):
        """Send CRITICAL error record to the output."""
        self.error(message, rectype='critical', level=2, **kwargs)
        pass

    def head(self):
        """Send header to the output."""
        string = self.header.create()
        self.write(string)
        pass

    def subhead(self, string):
        """Send subheader as upper-case text between two border lines to
        the output.
        Parameters
        ----------
        string : str
            The text that will be presented as subheader.
        """
        bound = f'{self.formatter.div*self.formatter.length}\n'
        string = f'{bound}\t{string}\n{bound}'.upper()
        self.write(string)
        pass

    def line(self, message):
        """Send raw text with the new line to the output.

        Parameters
        ----------
        message : str
            The message that must be written.
        """
        self.write(f'{message}\n')
        pass

    def bound(self, div=None, length=None):
        """Write horizontal border in the output. Useful when you need to
        separate different blocks of information.

        Parameters
        ----------
        div : str, optional
            Symbol that is used to build the bound.
        length : int, optional
            Length of the bound.
        """
        # Fall back to the formatter's defaults when arguments are omitted
        # (the original body ignored both arguments, which looked like a bug).
        div = div if div is not None else self.formatter.div
        length = length if length is not None else self.formatter.length
        border = div * length
        self.write(border + '\n')
        pass

    def blank(self, number=1):
        """Write blank lines in the output.

        Parameters
        ----------
        number : int, optional
            The number of the blank lines that must be written.
        """
        string = '\n'*number
        self.write(string)
        pass

    def ok(self, **kwargs):
        """Print INFO message with OK."""
        rectype = 'info'
        message = self.messages['ok']
        self.record(rectype, message, **kwargs)
        pass

    def success(self, **kwargs):
        """Print INFO message with SUCCESS."""
        rectype = 'info'
        message = self.messages['success']
        self.record(rectype, message, **kwargs)
        pass

    def fail(self, **kwargs):
        """Print INFO message with FAIL."""
        rectype = 'info'
        message = self.messages['fail']
        self.record(rectype, message, **kwargs)
        pass

    def restart(self):
        """Restart logging. Will open a new file."""
        self._start_date = dt.datetime.now()
        self.__calculate_restart_date()

        if self.root.file.status is True:
            self.root.file.new()
        if self.header.used is True:
            self.head()
        pass

    def send(self, *args, **kwargs):
        """Send email message. Note that SMTP server connection must be
        configured.
        """
        self.root.email.send(*args, **kwargs)
        pass

    def set(self, **kwargs):
        """Update values in table. Note that DB connection must be
        configured.
        """
        self.root.table.write(**kwargs)
        pass

    def _exit(self):
        # Inform about the error.
        if self._alarming is True and self._with_error is True:
            self.root.email.alarm()
        pass

    def __calculate_restart_date(self):
        """Calculate the date when logger must be restarted according to
        maxdays parameter.
        """
        self.__restart_date = (self._start_date
                               + dt.timedelta(days=self._maxdays))
        pass

    def __check_file_stats(self):
        """Check the output file statistics to catch when the current file
        must be closed and a new one must be opened.
        """
        if self.root.file.status is True:
            if self._maxsize is not False:
                if self.root.file.size is not None:
                    if self.root.file.size > self._maxsize:
                        self.restart()
                        return

            if self._maxdays is not False:
                if self.__restart_date.day == dt.datetime.now().day:
                    self.restart()
                    return
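# --- Editor's note: a minimal usage sketch of the API documented above,
# added for illustration. It assumes Logger is importable at the package
# level (the class docstring also mentions a native getlogger() helper as an
# alternative); names and messages are illustrative.
#
# from pypyrus_logbook.logger import Logger
#
# logger = Logger(name='demo', file=False)   # console-only logger
# logger.head()
# logger.info('processing started')
# try:
#     1 / 0
# except ZeroDivisionError:
#     logger.error()        # formats the active exception via the formatter
# logger.ok()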
39.348554
79
0.592952
25,541
0.98797
0
0
423
0.016362
0
0
15,068
0.582856
6a79e21ee2f5d7ad67e69bd27f9206807683db56
488
py
Python
darling_ansible/python_venv/lib/python3.7/site-packages/oci/object_storage/transfer/constants.py
revnav/sandbox
f9c8422233d093b76821686b6c249417502cf61d
[ "Apache-2.0" ]
null
null
null
darling_ansible/python_venv/lib/python3.7/site-packages/oci/object_storage/transfer/constants.py
revnav/sandbox
f9c8422233d093b76821686b6c249417502cf61d
[ "Apache-2.0" ]
null
null
null
darling_ansible/python_venv/lib/python3.7/site-packages/oci/object_storage/transfer/constants.py
revnav/sandbox
f9c8422233d093b76821686b6c249417502cf61d
[ "Apache-2.0" ]
1
2020-06-25T03:12:58.000Z
2020-06-25T03:12:58.000Z
# coding: utf-8 # Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. MEBIBYTE = 1024 * 1024 STREAMING_DEFAULT_PART_SIZE = 10 * MEBIBYTE DEFAULT_PART_SIZE = 128 * MEBIBYTE OBJECT_USE_MULTIPART_SIZE = 128 * MEBIBYTE
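# --- Editor's note: a small illustration of how constants like these are
# typically used to size multipart uploads. The helper below is a
# hypothetical editorial addition, not part of the OCI SDK.
def parts_needed(object_size_bytes, part_size=DEFAULT_PART_SIZE):
    # Ceiling division: e.g. a 300 MiB object at 128 MiB parts -> 3 parts.
    return -(-object_size_bytes // part_size)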
54.222222
245
0.772541
0
0
0
0
0
0
0
0
339
0.694672
6a7a250cd753510b2923ce0ec46a2aae0ee1d50c
1,028
py
Python
scraper/news/spiders/millardayo.py
ZendaInnocent/news-api
71465aea50e0b1cea08a421d72156cbe7ed8a952
[ "Apache-2.0" ]
3
2021-11-15T08:43:53.000Z
2021-11-15T19:44:56.000Z
scraper/news/spiders/millardayo.py
ZendaInnocent/news-api
71465aea50e0b1cea08a421d72156cbe7ed8a952
[ "Apache-2.0" ]
null
null
null
scraper/news/spiders/millardayo.py
ZendaInnocent/news-api
71465aea50e0b1cea08a421d72156cbe7ed8a952
[ "Apache-2.0" ]
1
2021-11-15T08:43:58.000Z
2021-11-15T08:43:58.000Z
# Spider for MillardAyo.com

import scrapy
from bs4 import BeautifulSoup


class MillardAyoSpider(scrapy.Spider):
    name = 'millardayo'
    # Scrapy's off-site filter reads `allowed_domains`; the original declared
    # `allowed_urls`, which Scrapy ignores.
    allowed_domains = ['www.millardayo.com']
    start_urls = [
        'https://millardayo.com',
    ]

    def parse(self, response, **kwargs):
        # extracting data - link, image, title, excerpt
        soup = BeautifulSoup(response.body, 'lxml')
        posts = soup.find_all('li', {'class': 'infinite-post'})
        for post in posts:
            try:
                yield {
                    'image_url': post.find('img').get('src'),
                    'link': post.find('a').get('href'),
                    'title': post.find('a').get('title'),
                    'excerpt': post.find('p').get_text(),
                    'source': 'millardayo',
                }
            except AttributeError:
                pass

        # Guard against the last page, where the "Next" link is absent and
        # soup.find() returns None (the original called .get() before the
        # truthiness check, which would raise AttributeError).
        next_link = soup.find('a', text='Next ›')
        if next_link:
            yield response.follow(next_link.get('href'), callback=self.parse)
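# --- Editor's note: a typical way to run this spider from inside a Scrapy
# project (assuming the standard layout implied by the scraper/news/spiders
# path), exporting scraped items to JSON; `-O` overwrites, `-o` appends:
#
#   scrapy crawl millardayo -O millardayo.json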
27.783784
65
0.521401
954
0.926214
784
0.761165
0
0
0
0
266
0.258252
6a7ab579c54d59a8d5d95bf8ca299d1a0ccc36a3
10,384
py
Python
sdks/python/apache_beam/runners/portability/expansion_service_test.py
stephenoken/beam
4797f310b6671de6fd703502520f4b012b655c82
[ "Apache-2.0" ]
3
2020-08-28T17:47:26.000Z
2021-08-17T06:38:58.000Z
sdks/python/apache_beam/runners/portability/expansion_service_test.py
stephenoken/beam
4797f310b6671de6fd703502520f4b012b655c82
[ "Apache-2.0" ]
5
2020-11-13T19:06:10.000Z
2021-11-10T19:56:12.000Z
sdks/python/apache_beam/runners/portability/expansion_service_test.py
stephenoken/beam
4797f310b6671de6fd703502520f4b012b655c82
[ "Apache-2.0" ]
1
2018-09-30T05:34:06.000Z
2018-09-30T05:34:06.000Z
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# pytype: skip-file

from __future__ import absolute_import

import argparse
import logging
import signal
import sys
import typing

import grpc
from past.builtins import unicode

import apache_beam as beam
import apache_beam.transforms.combiners as combine
from apache_beam.coders import StrUtf8Coder
from apache_beam.pipeline import PipelineOptions
from apache_beam.portability.api import beam_expansion_api_pb2_grpc
from apache_beam.portability.api.external_transforms_pb2 import ExternalConfigurationPayload
from apache_beam.runners.portability import expansion_service
from apache_beam.transforms import ptransform
from apache_beam.transforms.external import ImplicitSchemaPayloadBuilder
from apache_beam.utils.thread_pool_executor import UnboundedThreadPoolExecutor

# This script provides an expansion service and example ptransforms for running
# external transform test cases. See external_test.py for details.

_LOGGER = logging.getLogger(__name__)

TEST_PREFIX_URN = "beam:transforms:xlang:test:prefix"
TEST_MULTI_URN = "beam:transforms:xlang:test:multi"
TEST_GBK_URN = "beam:transforms:xlang:test:gbk"
TEST_CGBK_URN = "beam:transforms:xlang:test:cgbk"
TEST_COMGL_URN = "beam:transforms:xlang:test:comgl"
TEST_COMPK_URN = "beam:transforms:xlang:test:compk"
TEST_FLATTEN_URN = "beam:transforms:xlang:test:flatten"
TEST_PARTITION_URN = "beam:transforms:xlang:test:partition"


@ptransform.PTransform.register_urn('beam:transforms:xlang:count', None)
class CountPerElementTransform(ptransform.PTransform):
  def expand(self, pcoll):
    return pcoll | combine.Count.PerElement()

  def to_runner_api_parameter(self, unused_context):
    return 'beam:transforms:xlang:count', None

  @staticmethod
  def from_runner_api_parameter(
      unused_ptransform, unused_parameter, unused_context):
    return CountPerElementTransform()


@ptransform.PTransform.register_urn(
    'beam:transforms:xlang:filter_less_than_eq', bytes)
class FilterLessThanTransform(ptransform.PTransform):
  def __init__(self, payload):
    self._payload = payload

  def expand(self, pcoll):
    return (
        pcoll | beam.Filter(
            lambda elem, target: elem <= target, int(ord(self._payload[0]))))

  def to_runner_api_parameter(self, unused_context):
    return (
        'beam:transforms:xlang:filter_less_than_eq',
        self._payload.encode('utf8'))

  @staticmethod
  def from_runner_api_parameter(unused_ptransform, payload, unused_context):
    return FilterLessThanTransform(payload.decode('utf8'))


@ptransform.PTransform.register_urn(TEST_PREFIX_URN, None)
@beam.typehints.with_output_types(unicode)
class PrefixTransform(ptransform.PTransform):
  def __init__(self, payload):
    self._payload = payload

  def expand(self, pcoll):
    return pcoll | 'TestLabel' >> beam.Map(
        lambda x: '{}{}'.format(self._payload, x))

  def to_runner_api_parameter(self, unused_context):
    return TEST_PREFIX_URN, ImplicitSchemaPayloadBuilder(
        {'data': self._payload}).payload()

  @staticmethod
  def from_runner_api_parameter(unused_ptransform, payload, unused_context):
    return PrefixTransform(parse_string_payload(payload)['data'])


@ptransform.PTransform.register_urn(TEST_MULTI_URN, None)
class MultiTransform(ptransform.PTransform):
  def expand(self, pcolls):
    return {
        'main': (pcolls['main1'], pcolls['main2'])
                | beam.Flatten()
                | beam.Map(lambda x, s: x + s,
                           beam.pvalue.AsSingleton(
                               pcolls['side'])).with_output_types(unicode),
        'side': pcolls['side']
                | beam.Map(lambda x: x + x).with_output_types(unicode),
    }

  def to_runner_api_parameter(self, unused_context):
    return TEST_MULTI_URN, None

  @staticmethod
  def from_runner_api_parameter(
      unused_ptransform, unused_parameter, unused_context):
    return MultiTransform()


@ptransform.PTransform.register_urn(TEST_GBK_URN, None)
class GBKTransform(ptransform.PTransform):
  def expand(self, pcoll):
    return pcoll | 'TestLabel' >> beam.GroupByKey()

  def to_runner_api_parameter(self, unused_context):
    return TEST_GBK_URN, None

  @staticmethod
  def from_runner_api_parameter(
      unused_ptransform, unused_parameter, unused_context):
    return GBKTransform()


@ptransform.PTransform.register_urn(TEST_CGBK_URN, None)
class CoGBKTransform(ptransform.PTransform):
  class ConcatFn(beam.DoFn):
    def process(self, element):
      (k, v) = element
      return [(k, v['col1'] + v['col2'])]

  def expand(self, pcoll):
    return pcoll \
        | beam.CoGroupByKey() \
        | beam.ParDo(self.ConcatFn()).with_output_types(
            typing.Tuple[int, typing.Iterable[unicode]])

  def to_runner_api_parameter(self, unused_context):
    return TEST_CGBK_URN, None

  @staticmethod
  def from_runner_api_parameter(
      unused_ptransform, unused_parameter, unused_context):
    return CoGBKTransform()


@ptransform.PTransform.register_urn(TEST_COMGL_URN, None)
class CombineGloballyTransform(ptransform.PTransform):
  def expand(self, pcoll):
    return pcoll \
        | beam.CombineGlobally(sum).with_output_types(int)

  def to_runner_api_parameter(self, unused_context):
    return TEST_COMGL_URN, None

  @staticmethod
  def from_runner_api_parameter(
      unused_ptransform, unused_parameter, unused_context):
    return CombineGloballyTransform()


@ptransform.PTransform.register_urn(TEST_COMPK_URN, None)
class CombinePerKeyTransform(ptransform.PTransform):
  def expand(self, pcoll):
    return pcoll \
        | beam.CombinePerKey(sum).with_output_types(
            typing.Tuple[unicode, int])

  def to_runner_api_parameter(self, unused_context):
    return TEST_COMPK_URN, None

  @staticmethod
  def from_runner_api_parameter(
      unused_ptransform, unused_parameter, unused_context):
    return CombinePerKeyTransform()


@ptransform.PTransform.register_urn(TEST_FLATTEN_URN, None)
class FlattenTransform(ptransform.PTransform):
  def expand(self, pcoll):
    return pcoll.values() | beam.Flatten().with_output_types(int)

  def to_runner_api_parameter(self, unused_context):
    return TEST_FLATTEN_URN, None

  @staticmethod
  def from_runner_api_parameter(
      unused_ptransform, unused_parameter, unused_context):
    return FlattenTransform()


@ptransform.PTransform.register_urn(TEST_PARTITION_URN, None)
class PartitionTransform(ptransform.PTransform):
  def expand(self, pcoll):
    col1, col2 = pcoll | beam.Partition(
        lambda elem, n: 0 if elem % 2 == 0 else 1, 2)
    typed_col1 = col1 | beam.Map(lambda x: x).with_output_types(int)
    typed_col2 = col2 | beam.Map(lambda x: x).with_output_types(int)
    return {'0': typed_col1, '1': typed_col2}

  def to_runner_api_parameter(self, unused_context):
    return TEST_PARTITION_URN, None

  @staticmethod
  def from_runner_api_parameter(
      unused_ptransform, unused_parameter, unused_context):
    return PartitionTransform()


@ptransform.PTransform.register_urn('payload', bytes)
class PayloadTransform(ptransform.PTransform):
  def __init__(self, payload):
    self._payload = payload

  def expand(self, pcoll):
    return pcoll | beam.Map(lambda x, s: x + s, self._payload)

  def to_runner_api_parameter(self, unused_context):
    return 'payload', self._payload.encode('ascii')

  @staticmethod
  def from_runner_api_parameter(unused_ptransform, payload, unused_context):
    return PayloadTransform(payload.decode('ascii'))


@ptransform.PTransform.register_urn('fib', bytes)
class FibTransform(ptransform.PTransform):
  def __init__(self, level):
    self._level = level

  def expand(self, p):
    if self._level <= 2:
      return p | beam.Create([1])
    else:
      a = p | 'A' >> beam.ExternalTransform(
          'fib',
          str(self._level - 1).encode('ascii'),
          expansion_service.ExpansionServiceServicer())
      b = p | 'B' >> beam.ExternalTransform(
          'fib',
          str(self._level - 2).encode('ascii'),
          expansion_service.ExpansionServiceServicer())
      return ((a, b)
              | beam.Flatten()
              | beam.CombineGlobally(sum).without_defaults())

  def to_runner_api_parameter(self, unused_context):
    return 'fib', str(self._level).encode('ascii')

  @staticmethod
  def from_runner_api_parameter(unused_ptransform, level, unused_context):
    return FibTransform(int(level.decode('ascii')))


def parse_string_payload(input_byte):
  payload = ExternalConfigurationPayload()
  payload.ParseFromString(input_byte)
  coder = StrUtf8Coder()
  return {
      k: coder.decode_nested(v.payload)
      for k, v in payload.configuration.items()
  }


server = None


def cleanup(unused_signum, unused_frame):
  _LOGGER.info('Shutting down expansion service.')
  server.stop(None)


def main(unused_argv):
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '-p', '--port', type=int, help='port on which to serve the job api')
  options = parser.parse_args()
  global server
  server = grpc.server(UnboundedThreadPoolExecutor())
  beam_expansion_api_pb2_grpc.add_ExpansionServiceServicer_to_server(
      expansion_service.ExpansionServiceServicer(
          PipelineOptions(
              ["--experiments", "beam_fn_api", "--sdk_location", "container"])),
      server)
  server.add_insecure_port('localhost:{}'.format(options.port))
  server.start()
  _LOGGER.info('Listening for expansion requests at %d', options.port)

  signal.signal(signal.SIGTERM, cleanup)
  signal.signal(signal.SIGINT, cleanup)
  # blocking main thread forever.
  signal.pause()


if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  main(sys.argv)
32.654088
92
0.74557
6,134
0.590716
0
0
6,915
0.665928
0
0
1,776
0.171032
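A minimal usage sketch for the expansion-service module above, assuming it is saved as expansion_service_test.py (a hypothetical local module name) with apache_beam installed; PrefixTransform is an ordinary PTransform, so it can be exercised directly in a local pipeline without starting the gRPC service:

import apache_beam as beam
from apache_beam.testing.util import assert_that, equal_to

from expansion_service_test import PrefixTransform  # hypothetical module name

with beam.Pipeline() as p:
    out = p | beam.Create(['a', 'b']) | PrefixTransform('0')
    assert_that(out, equal_to(['0a', '0b']))  # each element gets the '0' prefix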
6a7bee943837f03f68168bdd6b1277bb1e2654a4
268
py
Python
db.py
RunnerPro/RunnerProApi
2e0aba17cba2a019b6d102bc4eac2fd60f164156
[ "MIT" ]
null
null
null
db.py
RunnerPro/RunnerProApi
2e0aba17cba2a019b6d102bc4eac2fd60f164156
[ "MIT" ]
null
null
null
db.py
RunnerPro/RunnerProApi
2e0aba17cba2a019b6d102bc4eac2fd60f164156
[ "MIT" ]
null
null
null
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker

from settings import DB_URI

# Session factory bound to an engine built from the configured DB_URI.
Session = sessionmaker(autocommit=False, autoflush=False,
                       bind=create_engine(DB_URI))
# Thread-local session registry, safe to share across request handlers.
session = scoped_session(Session)
33.5
85
0.850746
0
0
0
0
0
0
0
0
0
0
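A short sketch of using the scoped session above (assumes the file is importable as db, a hypothetical import path, and that DB_URI points at a reachable database):

from sqlalchemy import text

from db import session  # hypothetical import path

print(session.execute(text("SELECT 1")).scalar())  # -> 1
session.remove()  # release the thread-local session when the unit of work ends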
6a7c09860b07db2134a799e024cf2d3ffbf7dc17
11,429
py
Python
python/tvm/contrib/nvcc.py
ntanhbk44/tvm
f89a929f09f7a0b0ccd0f4d46dc2b1c562839087
[ "Zlib", "Unlicense", "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0" ]
null
null
null
python/tvm/contrib/nvcc.py
ntanhbk44/tvm
f89a929f09f7a0b0ccd0f4d46dc2b1c562839087
[ "Zlib", "Unlicense", "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0" ]
null
null
null
python/tvm/contrib/nvcc.py
ntanhbk44/tvm
f89a929f09f7a0b0ccd0f4d46dc2b1c562839087
[ "Zlib", "Unlicense", "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0" ]
null
null
null
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# pylint: disable=invalid-name
"""Utility to invoke nvcc compiler in the system"""
from __future__ import absolute_import as _abs

import subprocess
import os
import warnings

import tvm._ffi
from tvm.runtime import ndarray as nd

from . import utils
from .._ffi.base import py_str


def compile_cuda(code, target="ptx", arch=None, options=None, path_target=None):
    """Compile cuda code with NVCC from env.

    Parameters
    ----------
    code : str
        The cuda code.

    target : str
        The target format

    arch : str
        The architecture

    options : str or list of str
        The additional options

    path_target : str, optional
        Output file.

    Returns
    -------
    cubin : bytearray
        The bytearray of the cubin
    """
    temp = utils.tempdir()
    if target not in ["cubin", "ptx", "fatbin"]:
        raise ValueError("target must be in cubin, ptx, fatbin")
    temp_code = temp.relpath("my_kernel.cu")
    temp_target = temp.relpath("my_kernel.%s" % target)

    with open(temp_code, "w") as out_file:
        out_file.write(code)

    if arch is None:
        if nd.gpu(0).exist:
            # auto detect the compute arch argument
            arch = "sm_" + "".join(nd.gpu(0).compute_version.split("."))
        else:
            raise ValueError("arch(sm_xy) is not passed, and we cannot detect it from env")

    file_target = path_target if path_target else temp_target
    cmd = ["nvcc"]
    cmd += ["--%s" % target, "-O3"]
    if isinstance(arch, list):
        cmd += arch
    else:
        cmd += ["-arch", arch]

    if options:
        if isinstance(options, str):
            cmd += [options]
        elif isinstance(options, list):
            cmd += options
        else:
            raise ValueError("options must be str or list of str")

    cmd += ["-o", file_target]
    cmd += [temp_code]

    # NOTE: ccbin option can be used to tell nvcc where to find the c++ compiler
    # just in case it is not in the path. On Windows it is not in the path by default.
    # However, we cannot use TVM_CXX_COMPILER_PATH because it is a build-time
    # setting that may not be present in the runtime env.
    # Because it is hard to do runtime compiler detection, we require that nvcc
    # is configured correctly by default.
    # if cxx_compiler_path != "":
    #    cmd += ["-ccbin", cxx_compiler_path]

    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

    (out, _) = proc.communicate()

    if proc.returncode != 0:
        msg = code
        msg += "\nCompilation error:\n"
        msg += py_str(out)
        raise RuntimeError(msg)

    with open(file_target, "rb") as compiled_file:
        data = bytearray(compiled_file.read())
    if not data:
        raise RuntimeError("Compilation error: empty result is generated")
    return data


def find_cuda_path():
    """Utility function to find cuda path

    Returns
    -------
    path : str
        Path to cuda root.
    """
    if "CUDA_PATH" in os.environ:
        return os.environ["CUDA_PATH"]
    cmd = ["which", "nvcc"]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    (out, _) = proc.communicate()
    out = py_str(out)
    if proc.returncode == 0:
        return os.path.realpath(os.path.join(str(out).strip(), "../.."))
    cuda_path = "/usr/local/cuda"
    if os.path.exists(os.path.join(cuda_path, "bin/nvcc")):
        return cuda_path
    raise RuntimeError("Cannot find cuda path")


def get_cuda_version(cuda_path):
    """Utility function to get cuda version

    Parameters
    ----------
    cuda_path : str
        Path to cuda root.

    Returns
    -------
    version : float
        The cuda version
    """
    version_file_path = os.path.join(cuda_path, "version.txt")
    if not os.path.exists(version_file_path):
        # Debian/Ubuntu repackaged CUDA path
        version_file_path = os.path.join(cuda_path, "lib", "cuda", "version.txt")
    try:
        with open(version_file_path) as f:
            version_str = f.readline().replace("\n", "").replace("\r", "")
            # keep major.minor, e.g. "CUDA Version 10.2.89" -> 10.2
            return float(".".join(version_str.split(" ")[2].split(".")[:2]))
    except FileNotFoundError:
        pass

    cmd = [os.path.join(cuda_path, "bin", "nvcc"), "--version"]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    (out, _) = proc.communicate()
    out = py_str(out)

    if proc.returncode == 0:
        release_line = [l for l in out.split("\n") if "release" in l][0]
        release_fields = [s.strip() for s in release_line.split(",")]
        release_version = [f[1:] for f in release_fields if f.startswith("V")][0]
        major_minor = ".".join(release_version.split(".")[:2])
        return float(major_minor)
    raise RuntimeError("Cannot read cuda version file")


@tvm._ffi.register_func("tvm_callback_libdevice_path")
def find_libdevice_path(arch):
    """Utility function to find libdevice

    Parameters
    ----------
    arch : int
        The compute architecture in int

    Returns
    -------
    path : str
        Path to libdevice.
    """
    cuda_path = find_cuda_path()
    lib_path = os.path.join(cuda_path, "nvvm/libdevice")
    if not os.path.exists(lib_path):
        # Debian/Ubuntu repackaged CUDA path
        lib_path = os.path.join(cuda_path, "lib/nvidia-cuda-toolkit/libdevice")
    selected_ver = 0
    selected_path = None
    cuda_ver = get_cuda_version(cuda_path)
    if cuda_ver in (9.0, 9.1, 10.0, 10.1, 10.2, 11.0, 11.1, 11.2):
        path = os.path.join(lib_path, "libdevice.10.bc")
    else:
        for fn in os.listdir(lib_path):
            if not fn.startswith("libdevice"):
                continue
            ver = int(fn.split(".")[-3].split("_")[-1])
            if selected_ver < ver <= arch:
                selected_ver = ver
                selected_path = fn
        if selected_path is None:
            raise RuntimeError("Cannot find libdevice for arch {}".format(arch))
        path = os.path.join(lib_path, selected_path)
    return path


def callback_libdevice_path(arch):
    try:
        return find_libdevice_path(arch)
    except RuntimeError:
        warnings.warn("Cannot find libdevice path")
        return ""


def get_target_compute_version(target=None):
    """Utility function to get compute capability of compilation target.

    Looks for the arch in three different places, first in the target attributes,
    then the global scope, and finally the GPU device (if it exists).

    Parameters
    ----------
    target : tvm.target.Target, optional
        The compilation target

    Returns
    -------
    compute_version : str
        compute capability of a GPU (e.g. "8.0")
    """
    # 1. Target
    if target:
        if "arch" in target.attrs:
            compute_version = target.attrs["arch"]
            major, minor = compute_version.split("_")[1]
            return major + "." + minor

    # 2. Global scope
    from tvm.autotvm.env import AutotvmGlobalScope  # pylint: disable=import-outside-toplevel

    if AutotvmGlobalScope.current.cuda_target_arch:
        major, minor = AutotvmGlobalScope.current.cuda_target_arch.split("_")[1]
        return major + "." + minor

    # 3. GPU
    if tvm.gpu(0).exist:
        return tvm.gpu(0).compute_version

    warnings.warn(
        "No CUDA architecture was specified or GPU detected. "
        "Try specifying it by adding '-arch=sm_xx' to your target."
    )
    return None


def parse_compute_version(compute_version):
    """Parse compute capability string to divide major and minor version

    Parameters
    ----------
    compute_version : str
        compute capability of a GPU (e.g. "6.0")

    Returns
    -------
    major : int
        major version number
    minor : int
        minor version number
    """
    split_ver = compute_version.split(".")
    try:
        major = int(split_ver[0])
        minor = int(split_ver[1])
        return major, minor
    except (IndexError, ValueError) as err:  # pylint: disable=raise-missing-from
        raise RuntimeError("Compute version parsing error: " + str(err))


def have_fp16(compute_version):
    """Whether fp16 support is provided in the compute capability or not

    Parameters
    ----------
    compute_version: str
        compute capability of a GPU (e.g. "6.0")
    """
    major, minor = parse_compute_version(compute_version)
    # fp 16 support in reference to:
    # https://docs.nvidia.com/cuda/cuda-c-programming-guide/#arithmetic-instructions
    if major == 5 and minor == 3:
        return True
    if major >= 6:
        return True

    return False


def have_int8(compute_version):
    """Whether int8 support is provided in the compute capability or not

    Parameters
    ----------
    compute_version : str
        compute capability of a GPU (e.g. "6.1")
    """
    major, _ = parse_compute_version(compute_version)
    if major >= 6:
        return True

    return False


def have_tensorcore(compute_version=None, target=None):
    """Whether TensorCore support is provided in the compute capability or not

    Parameters
    ----------
    compute_version : str, optional
        compute capability of a GPU (e.g. "7.0").

    target : tvm.target.Target, optional
        The compilation target, will be used to determine arch if compute_version
        isn't specified.
    """
    if compute_version is None:
        if tvm.gpu(0).exist:
            compute_version = tvm.gpu(0).compute_version
        else:
            if target is None or "arch" not in target.attrs:
                warnings.warn(
                    "Tensorcore will be disabled due to no CUDA architecture specified. "
                    "Try specifying it by adding '-arch=sm_xx' to your target."
                )
                return False
            compute_version = target.attrs["arch"]
            # Compute version will be in the form "sm_{major}{minor}"
            major, minor = compute_version.split("_")[1]
            compute_version = major + "." + minor
    major, _ = parse_compute_version(compute_version)
    if major >= 7:
        return True

    return False


def have_cudagraph():
    """Whether CUDA Graph support is provided"""
    try:
        cuda_path = find_cuda_path()
        cuda_ver = get_cuda_version(cuda_path)
        if cuda_ver < 10.0:
            return False
        return True
    except RuntimeError:
        return False


def have_bf16(compute_version):
    """Whether bf16 support is provided in the compute capability or not

    Parameters
    ----------
    compute_version : str
        compute capability of a GPU (e.g. "8.0")
    """
    major, _ = parse_compute_version(compute_version)
    if major >= 8:
        return True

    return False
30.155673
97
0.627351
0
0
0
0
1,228
0.107446
0
0
5,237
0.45822
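A hedged example of driving the module above (assumes it is importable as tvm.contrib.nvcc, that nvcc is on PATH, and that a CUDA GPU is visible so the arch can be auto-detected; the kernel is illustrative):

from tvm.contrib import nvcc

kernel = r'''
extern "C" __global__ void add_one(float* x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] += 1.0f;
}
'''
ptx = nvcc.compile_cuda(kernel, target="ptx")  # bytearray of PTX
print(nvcc.parse_compute_version("7.5"))       # -> (7, 5)
print(nvcc.have_fp16("6.0"))                   # -> True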
6a7c3c233cce04ab709c5ec217d50c3347a2a2a8
2,429
py
Python
calc/history/calculations.py
dhruvshah1996/Project3
d87ad37f6cf2de0d3402c71d21b25258946aad69
[ "MIT" ]
null
null
null
calc/history/calculations.py
dhruvshah1996/Project3
d87ad37f6cf2de0d3402c71d21b25258946aad69
[ "MIT" ]
null
null
null
calc/history/calculations.py
dhruvshah1996/Project3
d87ad37f6cf2de0d3402c71d21b25258946aad69
[ "MIT" ]
null
null
null
"""Calculation history Class""" from calc.calculations.addition import Addition from calc.calculations.subtraction import Subtraction from calc.calculations.multiplication import Multiplication from calc.calculations.division import Division class Calculations: """Calculations class manages the history of calculations""" history = [] # pylint: disable=too-few-public-methods @staticmethod def clear_history(): """clear the history of calculations""" Calculations.history.clear() return True @staticmethod def count_history(): """get number of items in history""" return len(Calculations.history) @staticmethod def get_last_calculation_object(): """get last calculation""" return Calculations.history[-1] @staticmethod def get_last_calculation_result_value(): """get last calculation""" calculation = Calculations.get_last_calculation_object() return calculation.get_result() @staticmethod def get_first_calculation(): """get first calculation""" return Calculations.history[0] @staticmethod def get_calculation(num): """ get a specific calculation from history""" return Calculations.history[num] @staticmethod def add_calculation(calculation): """ get a generic calculation from history""" return Calculations.history.append(calculation) @staticmethod def add_addition_calculation_to_history(values): """create an addition and add object to history using factory method create""" Calculations.add_calculation(Addition.create(values)) #Get the result of the calculation return True @staticmethod def add_subtraction_calculation_to_history(values): """create a subtraction object to history using factory method create""" Calculations.add_calculation(Subtraction.create(values)) return True @staticmethod def add_multiplication_calculation_to_history(values): """Add a multiplication object to history using factory method create""" Calculations.add_calculation(Multiplication.create(values)) return True @staticmethod def add_division_calculation_to_history(values): "Add a division object to history using factory method create" Calculations.add_calculation(Division.create(values)) return True
40.483333
86
0.712227
2,187
0.900371
0
0
1,986
0.81762
0
0
694
0.285714
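A small usage sketch for the history class above (assumes the repository's calc package is on the path and that Addition.create(values).get_result() sums its values, as the surrounding code suggests):

from calc.history.calculations import Calculations

Calculations.clear_history()
Calculations.add_addition_calculation_to_history((1, 2, 3))
print(Calculations.count_history())                      # -> 1
print(Calculations.get_last_calculation_result_value())  # -> 6, if Addition sums its inputs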
6a7d299369e55fc318f13ff176616da2592dab8c
526
py
Python
Python/17 - 081 - extraindo dados de uma lista.py
matheusguerreiro/python
f39a1b92409f11cbe7fef5d9261f863f9e0fac0d
[ "MIT" ]
null
null
null
Python/17 - 081 - extraindo dados de uma lista.py
matheusguerreiro/python
f39a1b92409f11cbe7fef5d9261f863f9e0fac0d
[ "MIT" ]
null
null
null
Python/17 - 081 - extraindo dados de uma lista.py
matheusguerreiro/python
f39a1b92409f11cbe7fef5d9261f863f9e0fac0d
[ "MIT" ]
null
null
null
# Lesson 17 (Lists (Part 1))
valores = []
while True:
    valor = int(input('Enter a value or -1 to finish: '))
    if valor < 0:
        print('\nFinishing...')
        break
    else:
        valores.append(valor)
print(f'{len(valores)} numbers were entered')
valores.sort(reverse=True)
print(f'List sorted in descending order: {valores}')
if 5 in valores:
    valores.reverse()
    print(f'The value 5 was entered and is at position {valores.index(5)}.')
else:
    print('Value 5 not found in the list.')
26.3
74
0.652091
0
0
0
0
0
0
0
0
278
0.52354
6a7d44f1e562967fd6fedbdfc2867ad65df6f217
2,163
py
Python
yekpay/migrations/0014_auto_20181120_1453.py
maryam-afzp/django-yekpay
f7b9d7914035ea4f27238eba9e0c70227cc65046
[ "MIT" ]
3
2020-05-17T18:33:22.000Z
2021-12-06T08:31:42.000Z
yekpay/migrations/0014_auto_20181120_1453.py
Glyphack/django-yekpay
8c4a44853207be4ff0b1711c0524fb0201859b19
[ "MIT" ]
null
null
null
yekpay/migrations/0014_auto_20181120_1453.py
Glyphack/django-yekpay
8c4a44853207be4ff0b1711c0524fb0201859b19
[ "MIT" ]
4
2019-11-14T14:16:49.000Z
2021-12-06T08:31:44.000Z
# Generated by Django 2.0.9 on 2018-11-20 11:23 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('yekpay', '0013_auto_20181030_1911'), ] operations = [ migrations.RenameField( model_name='transaction', old_name='authorityStart', new_name='authority_start', ), migrations.RenameField( model_name='transaction', old_name='authorityVerify', new_name='authority_verify', ), migrations.RenameField( model_name='transaction', old_name='failureReason', new_name='failure_reason', ), migrations.RenameField( model_name='transaction', old_name='firstName', new_name='first_name', ), migrations.RenameField( model_name='transaction', old_name='fromCurrencyCode', new_name='from_currency_code', ), migrations.RenameField( model_name='transaction', old_name='lastName', new_name='last_name', ), migrations.RenameField( model_name='transaction', old_name='orderNumber', new_name='order_number', ), migrations.RenameField( model_name='transaction', old_name='postalCode', new_name='postal_code', ), migrations.RenameField( model_name='transaction', old_name='toCurrencyCode', new_name='to_currency_code', ), migrations.AddField( model_name='transaction', name='simulation', field=models.BooleanField(default=False), ), migrations.AddField( model_name='transaction', name='user', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), ]
30.041667
121
0.57374
2,004
0.926491
0
0
0
0
0
0
508
0.234859
6a7e1204635ee097ea34ac3eae2b9d121e4f7471
203
py
Python
polus-cell-nuclei-segmentation/src/dsb2018_topcoders/albu/src/pytorch_zoo/inplace_abn/modules/__init__.py
nishaq503/polus-plugins-dl
511689e82eb29a84761538144277d1be1af7aa44
[ "MIT" ]
null
null
null
polus-cell-nuclei-segmentation/src/dsb2018_topcoders/albu/src/pytorch_zoo/inplace_abn/modules/__init__.py
nishaq503/polus-plugins-dl
511689e82eb29a84761538144277d1be1af7aa44
[ "MIT" ]
1
2021-09-09T23:22:16.000Z
2021-09-09T23:22:16.000Z
polus-cell-nuclei-segmentation/src/dsb2018_topcoders/albu/src/pytorch_zoo/inplace_abn/modules/__init__.py
nishaq503/polus-plugins-dl
511689e82eb29a84761538144277d1be1af7aa44
[ "MIT" ]
4
2021-06-22T13:54:52.000Z
2022-01-26T19:23:39.000Z
from .bn import ABN, InPlaceABN, InPlaceABNWrapper, InPlaceABNSync, InPlaceABNSyncWrapper from .misc import GlobalAvgPool2d from .residual import IdentityResidualBlock from .dense import DenseModule
40.6
90
0.842365
0
0
0
0
0
0
0
0
0
0
6a7e7d0b939c716cda0bb6e7629a5a7ce8b56ac7
10,911
py
Python
python/pyarrow/tests/test_compute.py
kylebrandt/arrow
515197dfe6e83d6fa6fe82bfec134f41b222b748
[ "Apache-2.0" ]
null
null
null
python/pyarrow/tests/test_compute.py
kylebrandt/arrow
515197dfe6e83d6fa6fe82bfec134f41b222b748
[ "Apache-2.0" ]
null
null
null
python/pyarrow/tests/test_compute.py
kylebrandt/arrow
515197dfe6e83d6fa6fe82bfec134f41b222b748
[ "Apache-2.0" ]
null
null
null
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import numpy as np
import pytest

import pyarrow as pa
import pyarrow.compute

all_array_types = [
    ('bool', [True, False, False, True, True]),
    ('uint8', np.arange(5)),
    ('int8', np.arange(5)),
    ('uint16', np.arange(5)),
    ('int16', np.arange(5)),
    ('uint32', np.arange(5)),
    ('int32', np.arange(5)),
    ('uint64', np.arange(5, 10)),
    ('int64', np.arange(5, 10)),
    ('float', np.arange(0, 0.5, 0.1)),
    ('double', np.arange(0, 0.5, 0.1)),
    ('string', ['a', 'b', None, 'ddd', 'ee']),
    ('binary', [b'a', b'b', b'c', b'ddd', b'ee']),
    (pa.binary(3), [b'abc', b'bcd', b'cde', b'def', b'efg']),
    (pa.list_(pa.int8()), [[1, 2], [3, 4], [5, 6], None, [9, 16]]),
    (pa.large_list(pa.int16()), [[1], [2, 3, 4], [5, 6], None, [9, 16]]),
    (pa.struct([('a', pa.int8()), ('b', pa.int8())]), [
        {'a': 1, 'b': 2}, None, {'a': 3, 'b': 4}, None, {'a': 5, 'b': 6}]),
]

numerical_arrow_types = [
    pa.int8(),
    pa.int16(),
    pa.int64(),
    pa.uint8(),
    pa.uint16(),
    pa.uint64(),
    pa.float32(),
    pa.float64()
]


@pytest.mark.parametrize('arrow_type', numerical_arrow_types)
def test_sum_array(arrow_type):
    arr = pa.array([1, 2, 3, 4], type=arrow_type)
    assert arr.sum() == 10
    assert pa.compute.sum(arr) == 10

    arr = pa.array([], type=arrow_type)
    assert arr.sum() == None  # noqa: E711
    assert pa.compute.sum(arr) == None  # noqa: E711


@pytest.mark.parametrize('arrow_type', numerical_arrow_types)
def test_sum_chunked_array(arrow_type):
    arr = pa.chunked_array([pa.array([1, 2, 3, 4], type=arrow_type)])
    assert pa.compute.sum(arr) == 10

    arr = pa.chunked_array([
        pa.array([1, 2], type=arrow_type),
        pa.array([3, 4], type=arrow_type)
    ])
    assert pa.compute.sum(arr) == 10

    arr = pa.chunked_array([
        pa.array([1, 2], type=arrow_type),
        pa.array([], type=arrow_type),
        pa.array([3, 4], type=arrow_type)
    ])
    assert pa.compute.sum(arr) == 10

    arr = pa.chunked_array((), type=arrow_type)
    assert arr.num_chunks == 0
    assert pa.compute.sum(arr) == None  # noqa: E711


@pytest.mark.parametrize(('ty', 'values'), all_array_types)
def test_take(ty, values):
    arr = pa.array(values, type=ty)
    for indices_type in [pa.int8(), pa.int64()]:
        indices = pa.array([0, 4, 2, None], type=indices_type)
        result = arr.take(indices)
        result.validate()
        expected = pa.array([values[0], values[4], values[2], None], type=ty)
        assert result.equals(expected)

        # empty indices
        indices = pa.array([], type=indices_type)
        result = arr.take(indices)
        result.validate()
        expected = pa.array([], type=ty)
        assert result.equals(expected)

    indices = pa.array([2, 5])
    with pytest.raises(IndexError):
        arr.take(indices)

    indices = pa.array([2, -1])
    with pytest.raises(IndexError):
        arr.take(indices)


def test_take_indices_types():
    arr = pa.array(range(5))

    for indices_type in ['uint8', 'int8', 'uint16', 'int16',
                         'uint32', 'int32', 'uint64', 'int64']:
        indices = pa.array([0, 4, 2, None], type=indices_type)
        result = arr.take(indices)
        result.validate()
        expected = pa.array([0, 4, 2, None])
        assert result.equals(expected)

    for indices_type in [pa.float32(), pa.float64()]:
        indices = pa.array([0, 4, 2], type=indices_type)
        with pytest.raises(NotImplementedError):
            arr.take(indices)


@pytest.mark.parametrize('ordered', [False, True])
def test_take_dictionary(ordered):
    arr = pa.DictionaryArray.from_arrays([0, 1, 2, 0, 1, 2], ['a', 'b', 'c'],
                                         ordered=ordered)
    result = arr.take(pa.array([0, 1, 3]))
    result.validate()
    assert result.to_pylist() == ['a', 'b', 'a']
    assert result.dictionary.to_pylist() == ['a', 'b', 'c']
    assert result.type.ordered is ordered


@pytest.mark.parametrize(('ty', 'values'), all_array_types)
def test_filter(ty, values):
    arr = pa.array(values, type=ty)

    mask = pa.array([True, False, False, True, None])
    result = arr.filter(mask, null_selection_behavior='drop')
    result.validate()
    assert result.equals(pa.array([values[0], values[3]], type=ty))
    result = arr.filter(mask, null_selection_behavior='emit_null')
    result.validate()
    assert result.equals(pa.array([values[0], values[3], None], type=ty))

    # non-boolean dtype
    mask = pa.array([0, 1, 0, 1, 0])
    with pytest.raises(NotImplementedError):
        arr.filter(mask)

    # wrong length
    mask = pa.array([True, False, True])
    with pytest.raises(ValueError, match="must all be the same length"):
        arr.filter(mask)


def test_filter_chunked_array():
    arr = pa.chunked_array([["a", None], ["c", "d", "e"]])
    expected_drop = pa.chunked_array([["a"], ["e"]])
    expected_null = pa.chunked_array([["a"], [None, "e"]])

    for mask in [
            # mask is array
            pa.array([True, False, None, False, True]),
            # mask is chunked array
            pa.chunked_array([[True, False, None], [False, True]]),
            # mask is python object
            [True, False, None, False, True]
    ]:
        result = arr.filter(mask)
        assert result.equals(expected_drop)

        result = arr.filter(mask, null_selection_behavior="emit_null")
        assert result.equals(expected_null)


def test_filter_record_batch():
    batch = pa.record_batch(
        [pa.array(["a", None, "c", "d", "e"])], names=["a'"])

    # mask is array
    mask = pa.array([True, False, None, False, True])
    result = batch.filter(mask)
    expected = pa.record_batch([pa.array(["a", "e"])], names=["a'"])
    assert result.equals(expected)

    result = batch.filter(mask, null_selection_behavior="emit_null")
    expected = pa.record_batch([pa.array(["a", None, "e"])], names=["a'"])
    assert result.equals(expected)


def test_filter_table():
    table = pa.table([pa.array(["a", None, "c", "d", "e"])], names=["a"])
    expected_drop = pa.table([pa.array(["a", "e"])], names=["a"])
    expected_null = pa.table([pa.array(["a", None, "e"])], names=["a"])

    for mask in [
            # mask is array
            pa.array([True, False, None, False, True]),
            # mask is chunked array
            pa.chunked_array([[True, False], [None, False, True]]),
            # mask is python object
            [True, False, None, False, True]
    ]:
        result = table.filter(mask)
        assert result.equals(expected_drop)

        result = table.filter(mask, null_selection_behavior="emit_null")
        assert result.equals(expected_null)


def test_filter_errors():
    arr = pa.chunked_array([["a", None], ["c", "d", "e"]])
    batch = pa.record_batch(
        [pa.array(["a", None, "c", "d", "e"])], names=["a'"])
    table = pa.table([pa.array(["a", None, "c", "d", "e"])], names=["a"])

    for obj in [arr, batch, table]:
        # non-boolean dtype
        mask = pa.array([0, 1, 0, 1, 0])
        with pytest.raises(NotImplementedError):
            obj.filter(mask)

        # wrong length
        mask = pa.array([True, False, True])
        with pytest.raises(pa.ArrowInvalid,
                           match="must all be the same length"):
            obj.filter(mask)


@pytest.mark.parametrize("typ", ["array", "chunked_array"])
def test_compare_array(typ):
    if typ == "array":
        def con(values):
            return pa.array(values)
    else:
        def con(values):
            return pa.chunked_array([values])

    arr1 = con([1, 2, 3, 4, None])
    arr2 = con([1, 1, 4, None, 4])

    result = arr1 == arr2
    assert result.equals(con([True, False, False, None, None]))

    result = arr1 != arr2
    assert result.equals(con([False, True, True, None, None]))

    result = arr1 < arr2
    assert result.equals(con([False, False, True, None, None]))

    result = arr1 <= arr2
    assert result.equals(con([True, False, True, None, None]))

    result = arr1 > arr2
    assert result.equals(con([False, True, False, None, None]))

    result = arr1 >= arr2
    assert result.equals(con([True, True, False, None, None]))


@pytest.mark.parametrize("typ", ["array", "chunked_array"])
def test_compare_scalar(typ):
    if typ == "array":
        def con(values):
            return pa.array(values)
    else:
        def con(values):
            return pa.chunked_array([values])

    arr = con([1, 2, 3, None])
    # TODO this is a hacky way to construct a scalar ..
    scalar = pa.array([2]).sum()

    result = arr == scalar
    assert result.equals(con([False, True, False, None]))

    result = arr != scalar
    assert result.equals(con([True, False, True, None]))

    result = arr < scalar
    assert result.equals(con([True, False, False, None]))

    result = arr <= scalar
    assert result.equals(con([True, True, False, None]))

    result = arr > scalar
    assert result.equals(con([False, False, True, None]))

    result = arr >= scalar
    assert result.equals(con([False, True, True, None]))


def test_compare_chunked_array_mixed():
    arr = pa.array([1, 2, 3, 4, None])
    arr_chunked = pa.chunked_array([[1, 2, 3], [4, None]])
    arr_chunked2 = pa.chunked_array([[1, 2], [3, 4, None]])

    expected = pa.chunked_array([[True, True, True, True, None]])

    for result in [
        arr == arr_chunked,
        arr_chunked == arr,
        arr_chunked == arr_chunked2,
    ]:
        assert result.equals(expected)


def test_arithmetic_add():
    left = pa.array([1, 2, 3, 4, 5])
    right = pa.array([0, -1, 1, 2, 3])
    result = pa.compute.add(left, right)
    expected = pa.array([1, 1, 4, 6, 8])
    assert result.equals(expected)


def test_arithmetic_subtract():
    left = pa.array([1, 2, 3, 4, 5])
    right = pa.array([0, -1, 1, 2, 3])
    result = pa.compute.subtract(left, right)
    expected = pa.array([1, 3, 2, 2, 2])
    assert result.equals(expected)


def test_arithmetic_multiply():
    left = pa.array([1, 2, 3, 4, 5])
    right = pa.array([0, -1, 1, 2, 3])
    result = pa.compute.multiply(left, right)
    expected = pa.array([0, -2, 3, 8, 15])
    assert result.equals(expected)
32.281065
77
0.59967
0
0
0
0
4,802
0.440106
0
0
1,715
0.157181
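An illustrative REPL-style companion to the tests above, showing the two null_selection_behavior modes they assert on (assumes a pyarrow build that ships pyarrow.compute, as imported in the test file):

import pyarrow as pa
import pyarrow.compute

arr = pa.array([1, 2, 3, 4])
mask = pa.array([True, False, None, True])
print(pa.compute.sum(arr))                                                # 10
print(arr.filter(mask, null_selection_behavior='drop').to_pylist())       # [1, 4]
print(arr.filter(mask, null_selection_behavior='emit_null').to_pylist())  # [1, None, 4]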
6a7ebe45370c220d4cb3303c8715bdc2a5f264ae
7,074
py
Python
python/sdk/client/api/log_api.py
ashwinath/merlin
087a7fa6fb21e4c771d64418bd58873175226ca1
[ "Apache-2.0" ]
null
null
null
python/sdk/client/api/log_api.py
ashwinath/merlin
087a7fa6fb21e4c771d64418bd58873175226ca1
[ "Apache-2.0" ]
null
null
null
python/sdk/client/api/log_api.py
ashwinath/merlin
087a7fa6fb21e4c771d64418bd58873175226ca1
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 """ Merlin API Guide for accessing Merlin's model management, deployment, and serving functionalities # noqa: E501 OpenAPI spec version: 0.7.0 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import re # noqa: F401 # python 2 and python 3 compatibility library import six from client.api_client import ApiClient class LogApi(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def logs_get(self, name, pod_name, namespace, cluster, **kwargs): # noqa: E501 """Retrieve log from a container # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.logs_get(name, pod_name, namespace, cluster, async_req=True) >>> result = thread.get() :param async_req bool :param str name: (required) :param str pod_name: (required) :param str namespace: (required) :param str cluster: (required) :param str follow: :param str limit_bytes: :param str pretty: :param str previous: :param str since_seconds: :param str since_time: :param str tail_lines: :param str timestamps: :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.logs_get_with_http_info(name, pod_name, namespace, cluster, **kwargs) # noqa: E501 else: (data) = self.logs_get_with_http_info(name, pod_name, namespace, cluster, **kwargs) # noqa: E501 return data def logs_get_with_http_info(self, name, pod_name, namespace, cluster, **kwargs): # noqa: E501 """Retrieve log from a container # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.logs_get_with_http_info(name, pod_name, namespace, cluster, async_req=True) >>> result = thread.get() :param async_req bool :param str name: (required) :param str pod_name: (required) :param str namespace: (required) :param str cluster: (required) :param str follow: :param str limit_bytes: :param str pretty: :param str previous: :param str since_seconds: :param str since_time: :param str tail_lines: :param str timestamps: :return: None If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'pod_name', 'namespace', 'cluster', 'follow', 'limit_bytes', 'pretty', 'previous', 'since_seconds', 'since_time', 'tail_lines', 'timestamps'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method logs_get" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params or params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `logs_get`") # noqa: E501 # verify the required parameter 'pod_name' is set if ('pod_name' not in params or params['pod_name'] is None): raise ValueError("Missing the required parameter `pod_name` when calling `logs_get`") # noqa: E501 # verify the required parameter 'namespace' is set if ('namespace' not in params or params['namespace'] is None): raise ValueError("Missing the required parameter `namespace` when calling `logs_get`") # noqa: E501 # verify the required parameter 'cluster' is set if ('cluster' not in params or params['cluster'] is None): raise ValueError("Missing the required parameter `cluster` when calling `logs_get`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] if 'name' in params: query_params.append(('name', params['name'])) # noqa: E501 if 'pod_name' in params: query_params.append(('pod_name', params['pod_name'])) # noqa: E501 if 'namespace' in params: query_params.append(('namespace', params['namespace'])) # noqa: E501 if 'cluster' in params: query_params.append(('cluster', params['cluster'])) # noqa: E501 if 'follow' in params: query_params.append(('follow', params['follow'])) # noqa: E501 if 'limit_bytes' in params: query_params.append(('limit_bytes', params['limit_bytes'])) # noqa: E501 if 'pretty' in params: query_params.append(('pretty', params['pretty'])) # noqa: E501 if 'previous' in params: query_params.append(('previous', params['previous'])) # noqa: E501 if 'since_seconds' in params: query_params.append(('since_seconds', params['since_seconds'])) # noqa: E501 if 'since_time' in params: query_params.append(('since_time', params['since_time'])) # noqa: E501 if 'tail_lines' in params: query_params.append(('tail_lines', params['tail_lines'])) # noqa: E501 if 'timestamps' in params: query_params.append(('timestamps', params['timestamps'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # Authentication setting auth_settings = ['Bearer'] # noqa: E501 return self.api_client.call_api( '/logs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
39.3
185
0.607577
6,653
0.940486
0
0
0
0
0
0
3,846
0.543681
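A hedged sketch of calling the generated client above; the parameter values are placeholders and the imports assume the generated client package layout implied by the file path:

from client.api_client import ApiClient
from client.api.log_api import LogApi

api = LogApi(ApiClient())  # default ApiClient configuration; host comes from the spec
api.logs_get(
    name="my-model",        # all values here are illustrative
    pod_name="my-model-0",
    namespace="default",
    cluster="dev",
    tail_lines="100",
)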
6a7ef047892a808f5b9e319a809d26915f83c93f
2,207
py
Python
openmdao/solvers/nonlinear/nonlinear_block_jac.py
bollwyvl/OpenMDAO
4d7a31b2bb39674e2be0d6a13cbe22de3f5353af
[ "Apache-2.0" ]
null
null
null
openmdao/solvers/nonlinear/nonlinear_block_jac.py
bollwyvl/OpenMDAO
4d7a31b2bb39674e2be0d6a13cbe22de3f5353af
[ "Apache-2.0" ]
null
null
null
openmdao/solvers/nonlinear/nonlinear_block_jac.py
bollwyvl/OpenMDAO
4d7a31b2bb39674e2be0d6a13cbe22de3f5353af
[ "Apache-2.0" ]
1
2018-07-27T06:39:15.000Z
2018-07-27T06:39:15.000Z
"""Define the NonlinearBlockJac class.""" from openmdao.recorders.recording_iteration_stack import Recording from openmdao.solvers.solver import NonlinearSolver from openmdao.utils.mpi import multi_proc_fail_check class NonlinearBlockJac(NonlinearSolver): """ Nonlinear block Jacobi solver. """ SOLVER = 'NL: NLBJ' def _single_iteration(self): """ Perform the operations in the iteration loop. """ system = self._system self._solver_info.append_subsolver() system._transfer('nonlinear', 'fwd') with Recording('NonlinearBlockJac', 0, self) as rec: # If this is a parallel group, check for analysis errors and reraise. if len(system._subsystems_myproc) != len(system._subsystems_allprocs): with multi_proc_fail_check(system.comm): for subsys in system._subsystems_myproc: subsys._solve_nonlinear() else: for subsys in system._subsystems_myproc: subsys._solve_nonlinear() system._check_child_reconf() rec.abs = 0.0 rec.rel = 0.0 self._solver_info.pop() def _mpi_print_header(self): """ Print header text before solving. """ if (self.options['iprint'] > 0): pathname = self._system.pathname if pathname: nchar = len(pathname) prefix = self._solver_info.prefix header = prefix + "\n" header += prefix + nchar * "=" + "\n" header += prefix + pathname + "\n" header += prefix + nchar * "=" print(header) def _run_apply(self): """ Run the apply_nonlinear method on the system. """ system = self._system # If this is a parallel group, check for analysis errors and reraise. if len(system._subsystems_myproc) != len(system._subsystems_allprocs): with multi_proc_fail_check(system.comm): super(NonlinearBlockJac, self)._run_apply() else: super(NonlinearBlockJac, self)._run_apply()
32.940299
82
0.584957
1,990
0.901676
0
0
0
0
0
0
491
0.222474
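A minimal sketch of selecting this solver on a model (assumes a reasonably recent OpenMDAO installation; SellarDerivatives is the example model shipped in openmdao.test_suite):

import openmdao.api as om
from openmdao.test_suite.components.sellar import SellarDerivatives

prob = om.Problem(model=SellarDerivatives())
prob.model.nonlinear_solver = om.NonlinearBlockJac()  # block Jacobi on the coupled group
prob.setup()
prob.run_model()
print(prob['y1'], prob['y2'])  # converged coupling variables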