Dataset schema (⌀ marks a nullable column):

| column | dtype | min | max |
|---|---|---|---|
| hexsha | stringlengths | 40 | 40 |
| size | int64 | 5 | 2.06M |
| ext | stringclasses | 11 values | — |
| lang | stringclasses | 1 value | — |
| max_stars_repo_path | stringlengths | 3 | 251 |
| max_stars_repo_name | stringlengths | 4 | 130 |
| max_stars_repo_head_hexsha | stringlengths | 40 | 78 |
| max_stars_repo_licenses | sequencelengths | 1 | 10 |
| max_stars_count | int64 ⌀ | 1 | 191k |
| max_stars_repo_stars_event_min_datetime | stringlengths ⌀ | 24 | 24 |
| max_stars_repo_stars_event_max_datetime | stringlengths ⌀ | 24 | 24 |
| max_issues_repo_path | stringlengths | 3 | 251 |
| max_issues_repo_name | stringlengths | 4 | 130 |
| max_issues_repo_head_hexsha | stringlengths | 40 | 78 |
| max_issues_repo_licenses | sequencelengths | 1 | 10 |
| max_issues_count | int64 ⌀ | 1 | 116k |
| max_issues_repo_issues_event_min_datetime | stringlengths ⌀ | 24 | 24 |
| max_issues_repo_issues_event_max_datetime | stringlengths ⌀ | 24 | 24 |
| max_forks_repo_path | stringlengths | 3 | 251 |
| max_forks_repo_name | stringlengths | 4 | 130 |
| max_forks_repo_head_hexsha | stringlengths | 40 | 78 |
| max_forks_repo_licenses | sequencelengths | 1 | 10 |
| max_forks_count | int64 ⌀ | 1 | 105k |
| max_forks_repo_forks_event_min_datetime | stringlengths ⌀ | 24 | 24 |
| max_forks_repo_forks_event_max_datetime | stringlengths ⌀ | 24 | 24 |
| content | stringlengths | 1 | 1.05M |
| avg_line_length | float64 | 1 | 1.02M |
| max_line_length | int64 | 3 | 1.04M |
| alphanum_fraction | float64 | 0 | 1 |
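The raw records below follow this schema, with each record's `content` field inlined verbatim. As a hedged illustration (the dataset name, data_dir, and split are assumptions; nothing in this dump states the actual source), records with this column layout could be streamed with the `datasets` library:

```python
# Sketch: stream records matching the schema above.
# "bigcode/the-stack" and data_dir are placeholders -- substitute the real source.
from datasets import load_dataset

ds = load_dataset("bigcode/the-stack", data_dir="data/python",
                  split="train", streaming=True)
for row in ds.take(2):
    print(row["hexsha"], row["size"], row["max_stars_repo_name"])
```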
4b9cdb57c833e7e628efc0c75d61d7090e29a276 | 393 | py | Python | exercicios/Lista6/Q5.py | AlexandrePeBrito/CursoUdemyPython | 3de58cb30c9f333b32078309847179ff3f9d7e22 | [
"MIT"
] | null | null | null | exercicios/Lista6/Q5.py | AlexandrePeBrito/CursoUdemyPython | 3de58cb30c9f333b32078309847179ff3f9d7e22 | [
"MIT"
] | null | null | null | exercicios/Lista6/Q5.py | AlexandrePeBrito/CursoUdemyPython | 3de58cb30c9f333b32078309847179ff3f9d7e22 | [
"MIT"
] | null | null | null | """ 5. Faa um programa que receba do usurio um arquivo texto e um caracter. Mostre na tela
quantas vezes aquele caractere ocorre dentro do arquivo.
"""
# open the file with a context manager so it is closed automatically
with open('CursoUdemyPython/exercicios/Lista6/arq.txt') as arquivo:
    texto = arquivo.read()
carac = input('Enter a character: ')
ca = 0
for c in texto:
    if c == carac:
        ca += 1
print(f"Found {ca} occurrences of this character")
| 28.071429 | 92 | 0.725191 |
4b9e62db340ea51b4cda5971027dcd23a1f17c3d | 3,704 | py | Python | superset_config.py | mikiec84/incubator-superset | 3a1c32ae2378902a26873113d98bd55d290233ca | [
"Apache-2.0"
] | 1 | 2020-08-07T16:30:54.000Z | 2020-08-07T16:30:54.000Z | superset_config.py | mikiec84/incubator-superset | 3a1c32ae2378902a26873113d98bd55d290233ca | [
"Apache-2.0"
] | null | null | null | superset_config.py | mikiec84/incubator-superset | 3a1c32ae2378902a26873113d98bd55d290233ca | [
"Apache-2.0"
] | 1 | 2020-08-07T16:30:58.000Z | 2020-08-07T16:30:58.000Z | #---------------------------------------------------------
# Superset specific config
#---------------------------------------------------------
ROW_LIMIT = 5000
SUPERSET_WEBSERVER_PORT = 8088
#---------------------------------------------------------
#---------------------------------------------------------
# Flask App Builder configuration
#---------------------------------------------------------
# Your App secret key
SECRET_KEY = '\2\1ulan123456\1\2\e\y\y\h'
# The SQLAlchemy connection string to your database backend
# This connection defines the path to the database that stores your
# superset metadata (slices, connections, tables, dashboards, ...).
# Note that the connection information to connect to the datasources
# you want to explore are managed directly in the web UI
#SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://superset:superset@localhost:5432/superset'
# ------------------------------
# GLOBALS FOR APP Builder
# ------------------------------
# Uncomment to setup Your App name
APP_NAME = 'Insights'
# Uncomment to setup an App icon
APP_ICON = '/static/assets/images/qmatic_insights-logo.png'
# Extract and use X-Forwarded-For/X-Forwarded-Proto headers?
ENABLE_PROXY_FIX = True
ENABLE_JAVASCRIPT_CONTROLS = True
'''
import os
from flask_appbuilder.security.manager import AUTH_OID, AUTH_REMOTE_USER, AUTH_DB, AUTH_LDAP, AUTH_OAUTH
basedir = os.path.abspath(os.path.dirname(__file__))
SUPERSET_WORKERS = 8
CSRF_ENABLED = True
AUTH_TYPE = AUTH_OAUTH
AUTH_USER_REGISTRATION = False
AUTH_USER_REGISTRATION_ROLE = "Gamma" #"Public"
OAUTH_PROVIDERS = [
{
'name': 'google',
'icon': 'fa-google',
'token_key': 'access_token',
'remote_app': {
'base_url': 'https://www.googleapis.com/oauth2/v2/',
'request_token_params': {
'scope': 'email profile'
},
'request_token_url': None,
'access_token_url': 'https://accounts.google.com/o/oauth2/token',
'authorize_url': 'https://accounts.google.com/o/oauth2/auth',
'consumer_key': '996225546131-1qd2alfrrp1scf6gvkeg63mg2ku85lka.apps.googleusercontent.com',
'consumer_secret': '3fxwT-a8YA1akyuUYFfakMCz'
}
},
{
'name': 'slatest.qmaticcloud.com',
'icon': 'fa-google',
'token_key': 'access_token',
'remote_app': {
#'base_url': 'https://slatest.qmaticcloud.com/oauth2server/oauth/',
'base_url': None,
'request_token_params': {
'scope': 'user_info',
'state': '123'
},
'request_token_url': None,
'access_token_url': 'https://slatest.qmaticcloud.com/oauth2server/oauth/token',
'authorize_url': 'https://slatest.qmaticcloud.com/oauth2server/oauth/authorize',
'consumer_key': 'businessintelligence',
'consumer_secret': 'fSmI0K1uSvnORBk3'
}
},
{
'name': 'msdemo.qmatic.cloud',
'icon': 'fa-google',
'token_key': 'access_token',
'remote_app': {
'base_url': None,
'request_token_params': {
'scope': 'user_info',
'state': '123'
},
'request_token_url': None,
'access_token_url': 'https://msdemo.qmatic.cloud/oauth2server/oauth/token',
'authorize_url': 'https://msdemo.qmatic.cloud/oauth2server/oauth/authorize',
'consumer_key': 'businessintelligence',
'consumer_secret': 'fSmI0K1uSvnORBk3'
}
}
]
'''
| 35.615385 | 107 | 0.551836 |
4b9ee2812f3c3d983291b0a7f5a83dcf6f853ee4 | 5,038 | py | Python | python_code/yolo/extract_car_num.py | mukulbhave/tensorflow | 848b16fa32cd0f180ab80a98254edd2147ea3948 | [
"CNRI-Python"
] | null | null | null | python_code/yolo/extract_car_num.py | mukulbhave/tensorflow | 848b16fa32cd0f180ab80a98254edd2147ea3948 | [
"CNRI-Python"
] | null | null | null | python_code/yolo/extract_car_num.py | mukulbhave/tensorflow | 848b16fa32cd0f180ab80a98254edd2147ea3948 | [
"CNRI-Python"
] | null | null | null | import argparse
import cv2
import re
import numpy as np
import string
import PIL
import os,glob
import ntpath
import time
import matplotlib.pyplot as plt
from PIL import Image
from yad2k.models.keras_yolo import (preprocess_true_boxes, yolo_body,
yolo_eval, yolo_head, yolo_loss)
from yad2k.utils.draw_boxes import draw_boxes
from retrain_yolo import (create_model,get_classes)
import keras.backend as K
from crnn.train_crnn import create_crnn_model
from crnn.crnn_data_gen import *
char_list = string.ascii_letters+string.digits
YOLO_ANCHORS = np.array(
((0.57273, 0.677385), (1.87446, 2.06253), (3.33843, 5.47434),
(7.88282, 3.52778), (9.77052, 9.16828)))
class_names=['plate','no-plate'] | 39.984127 | 133 | 0.639936 |
4ba04308181ebd07871e89cce3a567b034f969f9 | 2,881 | py | Python | examples/time_frequency/plot_tfr_topography.py | Anevar/mne-python | 15b19ed6b9364ae4787f0df2fd7e689b3c0a30bb | [
"BSD-3-Clause"
] | 2 | 2015-09-27T20:33:49.000Z | 2020-04-22T19:10:56.000Z | examples/time_frequency/plot_tfr_topography.py | Anevar/mne-python | 15b19ed6b9364ae4787f0df2fd7e689b3c0a30bb | [
"BSD-3-Clause"
] | null | null | null | examples/time_frequency/plot_tfr_topography.py | Anevar/mne-python | 15b19ed6b9364ae4787f0df2fd7e689b3c0a30bb | [
"BSD-3-Clause"
] | 1 | 2018-09-15T09:45:38.000Z | 2018-09-15T09:45:38.000Z | """
===================================================================
Plot time-frequency representations on topographies for MEG sensors
===================================================================
Both induced power and phase locking values are displayed.
"""
print(__doc__)
# Authors: Alexandre Gramfort <gramfort@nmr.mgh.harvard.edu>
# Denis Engemann <d.engemann@fz-juelich.de>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import fiff
from mne.time_frequency import induced_power
from mne.viz import plot_topo_power, plot_topo_phase_lock
from mne.datasets import sample
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = fiff.Raw(raw_fname)
events = mne.read_events(event_fname)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = fiff.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, include=include, exclude='bads')
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6))
data = epochs.get_data() # as 3D matrix
layout = mne.find_layout(epochs.info, 'meg')
###############################################################################
# Calculate power and phase locking value
frequencies = np.arange(7, 30, 3) # define frequencies of interest
n_cycles = frequencies / float(7) # different number of cycle per frequency
Fs = raw.info['sfreq'] # sampling in Hz
decim = 3
power, phase_lock = induced_power(data, Fs=Fs, frequencies=frequencies,
n_cycles=n_cycles, n_jobs=1, use_fft=False,
decim=decim, zero_mean=True)
###############################################################################
# Prepare topography plots, set baseline correction parameters
baseline = (None, 0) # set the baseline for induced power
mode = 'ratio' # set mode for baseline rescaling
###############################################################################
# Show topography of power.
title = 'Induced power - MNE sample data'
plot_topo_power(epochs, power, frequencies, layout, baseline=baseline,
mode=mode, decim=decim, vmin=0., vmax=14, title=title)
plt.show()
###############################################################################
# Show topography of phase locking value (PLV)
mode = None # no baseline rescaling for PLV
title = 'Phase locking value - MNE sample data'
plot_topo_phase_lock(epochs, phase_lock, frequencies, layout,
baseline=baseline, mode=mode, decim=decim, title=title)
plt.show()
| 35.567901 | 79 | 0.591461 |
4ba39b6087c75616c2877cb61a2b0736b03e97e4 | 2,201 | py | Python | A1/greenHouseBluetooth.py | rmit-s3559384-andrew-alvaro/IoT | ec444d0b037ddbd2e3aab01c34ea57fd2bd51d5f | [
"MIT"
] | null | null | null | A1/greenHouseBluetooth.py | rmit-s3559384-andrew-alvaro/IoT | ec444d0b037ddbd2e3aab01c34ea57fd2bd51d5f | [
"MIT"
] | 1 | 2021-06-01T23:39:58.000Z | 2021-06-01T23:39:58.000Z | A1/greenHouseBluetooth.py | AndrewAlvaro/IoT | ec444d0b037ddbd2e3aab01c34ea57fd2bd51d5f | [
"MIT"
] | null | null | null | import bluetooth
import sys, os
import subprocess as sp
import datetime
from pushBulletForBluetooth import pushNotification
from makeReminderforBluetooth import Reminder
import csv
if __name__ == "__main__":
main()
| 31 | 103 | 0.52567 |
4ba3e0dab8146008256a0da74d6aec2d33aa11e9 | 127 | py | Python | appending_to_files.py | jaiminjerry/Python | eb7013c7560b09d37849d653516257d939e143aa | [
"bzip2-1.0.6"
] | null | null | null | appending_to_files.py | jaiminjerry/Python | eb7013c7560b09d37849d653516257d939e143aa | [
"bzip2-1.0.6"
] | null | null | null | appending_to_files.py | jaiminjerry/Python | eb7013c7560b09d37849d653516257d939e143aa | [
"bzip2-1.0.6"
] | 1 | 2021-08-17T03:46:56.000Z | 2021-08-17T03:46:56.000Z | appendMe = '\nNew bit of information'
appendFile = open('example.txt','a')
appendFile.write(appendMe)
appendFile.close()
| 21.166667 | 38 | 0.716535 |
4ba4c531fc5b73ca047fb0191f3bbb5ca13cf62d | 209 | py | Python | udacity/cloud-native-application-architecture/3-message-passing/lesson-3-implementing-message-passing/kafka-python-demo/producer.py | thomasrobertz/mooc | cb87365bfcbe8ccf972f36d70a251c73b3c15a7b | [
"MIT"
] | null | null | null | udacity/cloud-native-application-architecture/3-message-passing/lesson-3-implementing-message-passing/kafka-python-demo/producer.py | thomasrobertz/mooc | cb87365bfcbe8ccf972f36d70a251c73b3c15a7b | [
"MIT"
] | 13 | 2021-12-14T20:59:34.000Z | 2022-03-02T11:09:34.000Z | udacity/cloud-native-application-architecture/3-message-passing/lesson-3-implementing-message-passing/kafka-python-demo/producer.py | thomasrobertz/mooc | cb87365bfcbe8ccf972f36d70a251c73b3c15a7b | [
"MIT"
] | 1 | 2020-08-20T12:53:43.000Z | 2020-08-20T12:53:43.000Z | from kafka import KafkaProducer
TOPIC_NAME = 'items'
KAFKA_SERVER = 'localhost:9092'
producer = KafkaProducer(bootstrap_servers=KAFKA_SERVER)
producer.send(TOPIC_NAME, b'Test Message!!!')
producer.flush()
| 19 | 56 | 0.789474 |
4ba509cf1a05cf33bf195b861b6306f41e7b81ea | 954 | py | Python | myfitnesspal_to_sqlite/cli.py | seeM/myfitnesspal-to-sqlite | ce4c133009cbeacd5fa5410016f81f5eb45e7a64 | [
"Apache-2.0"
] | 4 | 2021-07-14T17:31:40.000Z | 2021-12-03T21:50:09.000Z | myfitnesspal_to_sqlite/cli.py | seeM/myfitnesspal-to-sqlite | ce4c133009cbeacd5fa5410016f81f5eb45e7a64 | [
"Apache-2.0"
] | null | null | null | myfitnesspal_to_sqlite/cli.py | seeM/myfitnesspal-to-sqlite | ce4c133009cbeacd5fa5410016f81f5eb45e7a64 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
import myfitnesspal
import sqlite_utils
import click
from . import utils
| 21.2 | 82 | 0.705451 |
4ba51c782d7e269031d5abf6080e2a03357844fd | 849 | py | Python | mcir/t1_hist.py | omritomer/mcir | 1554d352172464c6314339195d6ea9a5e00824af | [
"MIT"
] | null | null | null | mcir/t1_hist.py | omritomer/mcir | 1554d352172464c6314339195d6ea9a5e00824af | [
"MIT"
] | null | null | null | mcir/t1_hist.py | omritomer/mcir | 1554d352172464c6314339195d6ea9a5e00824af | [
"MIT"
] | null | null | null | import numpy as np
| 30.321429 | 82 | 0.61013 |
4ba5d882b2fc5de31e1705b7b18a845f264237e7 | 209 | py | Python | main.py | Javert899/pm4py-tool-plugin-skeleton | cfc4aefd02499b323ae60e33f059a6b90e48a95f | [
"MIT"
] | null | null | null | main.py | Javert899/pm4py-tool-plugin-skeleton | cfc4aefd02499b323ae60e33f059a6b90e48a95f | [
"MIT"
] | null | null | null | main.py | Javert899/pm4py-tool-plugin-skeleton | cfc4aefd02499b323ae60e33f059a6b90e48a95f | [
"MIT"
] | null | null | null | import pluginpackageRENAME
import preload
import os
if __name__ == "__main__":
preload.preload()
app = pluginpackageRENAME.app
app.static_folder = os.path.join(os.getcwd(), "html")
app.run()
| 19 | 57 | 0.703349 |
4ba61c47eb12a3d8f57c257b4b752059384399df | 6,948 | py | Python | plot_curve.py | wenhuchen/Hierarchical-DSA | 2dbdacde25ee82c9d42fe980694673d285b1f7f3 | [
"MIT"
] | 45 | 2019-02-27T02:04:08.000Z | 2022-03-21T04:49:22.000Z | plot_curve.py | wenhuchen/Hierarchical-DSA | 2dbdacde25ee82c9d42fe980694673d285b1f7f3 | [
"MIT"
] | 2 | 2019-08-18T03:05:11.000Z | 2020-07-26T13:45:05.000Z | plot_curve.py | wenhuchen/Hierarchical-DSA | 2dbdacde25ee82c9d42fe980694673d285b1f7f3 | [
"MIT"
] | 4 | 2019-03-12T17:40:12.000Z | 2021-06-10T07:59:39.000Z | import matplotlib.pyplot as plt
import numpy as np
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import json
from scipy.interpolate import interp1d
from data_utils import *
#draw_curve()
# (y_pred + 1.9600 * sigma)[::-1]]),
# alpha=.5, fc='b', ec='None', label='95% confidence interval')
#draw_uncerntainy_curve()
# (y_pred + 1.9600 * sigma)[::-1]]),
# alpha=.5, fc='b', ec='None', label='95% confidence interval')
#draw_SLU_uncerntainy_curve()
#draw_curve()
compute_score()
| 29.692308 | 105 | 0.598877 |
4ba90d216dd9521bb1b314598a55d371117b4821 | 8,392 | py | Python | alipay/aop/api/domain/CircleRecommendItemDTO.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/CircleRecommendItemDTO.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/CircleRecommendItemDTO.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AoiInfoDTO import AoiInfoDTO
from alipay.aop.api.domain.ItemStoreDTO import ItemStoreDTO
| 32.401544 | 83 | 0.596282 |
4bab64e23dd52a8b7e2e5474ebad268f962e7d94 | 3,599 | py | Python | vmcasterpub/uploader_dcap.py | hepix-virtualisation/vmcaster | f4ef1c65bbb81b82aa72a0cd1afc1aa6cf13eb51 | [
"Apache-2.0"
] | null | null | null | vmcasterpub/uploader_dcap.py | hepix-virtualisation/vmcaster | f4ef1c65bbb81b82aa72a0cd1afc1aa6cf13eb51 | [
"Apache-2.0"
] | null | null | null | vmcasterpub/uploader_dcap.py | hepix-virtualisation/vmcaster | f4ef1c65bbb81b82aa72a0cd1afc1aa6cf13eb51 | [
"Apache-2.0"
] | null | null | null | import subprocess
import time
import logging
import os
import signal
log = logging.getLogger(__name__)
| 31.025862 | 108 | 0.594054 |
4babaa82bca32126bf21a61b9966b1e6ecb0d62c | 3,923 | py | Python | drones/serializers.py | maprezdev/restfuldrones | 9448a63b148cdf7da8f46d65067ddbb8773e2fd2 | [
"MIT"
] | null | null | null | drones/serializers.py | maprezdev/restfuldrones | 9448a63b148cdf7da8f46d65067ddbb8773e2fd2 | [
"MIT"
] | null | null | null | drones/serializers.py | maprezdev/restfuldrones | 9448a63b148cdf7da8f46d65067ddbb8773e2fd2 | [
"MIT"
] | null | null | null | # drones/serializers.py file
from rest_framework import serializers
from drones.models import DroneCategory, Drone, Pilot, Competition
from django.contrib.auth.models import User
import drones.views
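# The serializer definitions were lost in extraction. A hypothetical sketch of
# one serializer, inferred only from the imports above; the class name and
# field list are assumptions, not the verified original code:
class DroneCategorySerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = DroneCategory
        fields = ('url', 'pk', 'name')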
| 29.946565 | 100 | 0.617894 |
4baef5968ecd4571dc42ca2e3a144059ebfa9562 | 1,471 | py | Python | Calamous.py | Studio-Pasteque/Pokemon | 6b9f457eef8a2dc28cb8b9b69527404b47c9825a | [
"MIT"
] | 2 | 2020-05-27T08:27:58.000Z | 2020-05-27T09:31:45.000Z | Calamous.py | Studio-Pasteque/Pokemon | 6b9f457eef8a2dc28cb8b9b69527404b47c9825a | [
"MIT"
] | null | null | null | Calamous.py | Studio-Pasteque/Pokemon | 6b9f457eef8a2dc28cb8b9b69527404b47c9825a | [
"MIT"
] | null | null | null |
import learnables  # fixed: "import learnables.py" is not valid import syntax
# creating a Calamous object
| 27.754717 | 114 | 0.56968 |
4bb0265f943903e9ce05ffd83240a67916be1de6 | 5,186 | py | Python | scripts/utils.py | alterapars/drought_classification | 585aaed3f00d5835059be1c80ad998189d9726f7 | [
"MIT"
] | 1 | 2022-02-19T11:42:24.000Z | 2022-02-19T11:42:24.000Z | scripts/utils.py | alterapars/drought_classification | 585aaed3f00d5835059be1c80ad998189d9726f7 | [
"MIT"
] | null | null | null | scripts/utils.py | alterapars/drought_classification | 585aaed3f00d5835059be1c80ad998189d9726f7 | [
"MIT"
] | 2 | 2022-02-02T08:24:37.000Z | 2022-02-03T12:27:05.000Z | import random
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
############################ STATS input data ################################################
def return_nan_percentage(input_data):
"""
prints percentage of nan values in max. 3D sized array
Parameters
----------
input_array : array
max 3D array
Returns
-------
None
"""
    total_size = input_data.size
    nan_sum = np.isnan(input_data).sum()
    perc = 100.0 * nan_sum / total_size  # express as an actual percentage, not a fraction
    print("percentage of nan values inside dataset is: %.2f" % perc + " %")
# #4D example:
# for i in Training_data:
# return_nan_percentage(i)
# for i in Training_data_germany:
# describe_with_stats(i)
############################ Derive Labels ###############################################
# back to xarray with:
# label_xarray = xr.DataArray(output_3D_array, dims=['time', 'latitude', 'longitude'] )
# to turn list output into a 3D array use:
# TODO: returns list of 2D arrays now, try to return 3D x array to save as net cdf -SEE BELOW
# TODO: write test
# #Example:
# #create data subset of 10 of a data xarray
# data_10 = data[0:10] #first 10 items to test
# print(data.shape)
# #call the function with a threshold of 0.5
# output_array = binary_image_classification(data_10, T=0.5)
# #show one image of the masked output images
# plt.imshow(output_array[0], origin = 'lower')
# #might need to change 'lower' to 'upper'
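# The binary_image_classification helper used in the example above was lost
# in extraction. A minimal hypothetical sketch of what it likely did
# (threshold each 2-D slice into a binary mask) -- an assumption, not the
# verified original:
def binary_image_classification(input_data, T=0.5):
    output_list = []
    for image in input_data:
        mask = (np.asarray(image) > T).astype(int)  # 1 where the value exceeds T
        output_list.append(mask)
    return output_list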
# TODO:
def save_plots_from_3Darray(
input_array, OUTPUT_PATH, title="drought mask figure Nr:", show_plots=True
):
"""
saves pngs and/or prints images from 3Darrays as png files
Parameters
----------
    input_array : array
3-D input array in the format [num_samples, height, width]
title: str
title of the plots, number will be added according to iteration index
show_plots: boolean
determines if plots will be displayed as output or not
Returns
-------
None
"""
    for k in range(len(input_array)):  # fixed: iterate over the samples, not over input_array[0]
        fig = input_array[k].plot()
        plt.axis("equal")
        # the original set the title twice; only the last call took effect, so keep one
        plt.title("drought mask for SMI, month " + str(k))
        if show_plots:
            plt.show()
        fig.figure.savefig(OUTPUT_PATH + title + str(k) + ".png", dpi=100)
        print(OUTPUT_PATH + "drought_mask_" + str(k) + ".png")
############################ class imbalance ######################################
# option 1, faster, combine these 2 fcts (recommended):
# print(hide_random_values(0))
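# hide_random_values itself was lost in extraction. A hypothetical sketch,
# inferred only from the call above and from the purpose of reduce_class_size
# below (randomly replace a share of labels with -1) -- an assumption, not
# the verified original:
def hide_random_values(value, T=0.78, hidden=-1):
    return hidden if random.random() < T else value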
# option 2, combine these 2 fcts:
def get_indices(dataset, value=0):
"""dataset = str(), 2D-array
value = int(), value to print the indices for"""
result = np.where(dataset == value)
print("Tuple of arrays returned : ", result)
# zip the 2 arrays (array 1: rows, array 2: columns) to get the exact coordinates
listOfCoordinates = list(zip(result[0], result[1]))
# iterate over the list of coordinates
# for cord in listOfCoordinates:
# print(cord)
print(len(listOfCoordinates))
return listOfCoordinates
def reduce_class_size(input_array, indices_list, T=0.78, value=int(-1)):
"""set entries in array to value=x, randomly and within set percentage of array
list = list, list of indices (2D)
T = int() , percentage to be modified
returns:
"""
output_array = np.copy(input_array)
# determine the percentage of the array that will be modified
len_modifier = int(len(indices_list) * T)
# select percentage T randomly from the list
random_coords = random.sample(listOfCoordinates, len_modifier)
# print(random_coords[:10])
# set selected entries to value
print("selected indices will be set to " + str(value))
for i in random_coords:
# print(labels_reshaped[i])
output_array[i] == value
return output_array
| 26.459184 | 94 | 0.633822 |
4bb0f0499ca35cb26e70156806115a77ce9290c6 | 1,382 | py | Python | 2021/day8/2.py | tomhel/AoC_2019 | c76c34235821864bc763f85d43cbcbfb9ed43469 | [
"MIT"
] | 1 | 2021-12-07T13:18:52.000Z | 2021-12-07T13:18:52.000Z | 2021/day8/2.py | tomhel/AoC | c76c34235821864bc763f85d43cbcbfb9ed43469 | [
"MIT"
] | null | null | null | 2021/day8/2.py | tomhel/AoC | c76c34235821864bc763f85d43cbcbfb9ed43469 | [
"MIT"
] | null | null | null |
print(decode_output())
| 28.791667 | 102 | 0.447902 |
4bb35fc82ab5a2d2bc09de6a0496b0c17ea21b52 | 2,469 | py | Python | Grove_Base_Hat_for_RPI/grove.py-master/grove/button/button.py | tcmoore/RPI-Environmental-Controller | 7f28dcdf08c51db8400ccc0369eb049fdce5e901 | [
"Unlicense",
"MIT"
] | 5 | 2019-11-18T02:26:18.000Z | 2021-02-06T20:31:37.000Z | Grove_Base_Hat_for_RPI/grove.py-master/grove/button/button.py | tcmoore/RPI-Environmental-Controller | 7f28dcdf08c51db8400ccc0369eb049fdce5e901 | [
"Unlicense",
"MIT"
] | null | null | null | Grove_Base_Hat_for_RPI/grove.py-master/grove/button/button.py | tcmoore/RPI-Environmental-Controller | 7f28dcdf08c51db8400ccc0369eb049fdce5e901 | [
"Unlicense",
"MIT"
] | 1 | 2020-08-26T10:22:37.000Z | 2020-08-26T10:22:37.000Z | #!/usr/bin/env python
#
# This is the library for Grove Base Hat.
#
# Button Base Class
#
'''
## License
The MIT License (MIT)
Grove Base Hat for the Raspberry Pi, used to connect grove sensors.
Copyright (C) 2018 Seeed Technology Co.,Ltd.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
| 31.253165 | 78 | 0.64439 |
4bb3c9f13001ae9f4765556a61ae26a55cabde2c | 1,402 | py | Python | Data Structures/Linked Lists/reverse-a-linked-list.py | Owngithub10101/Hackerrank-Problem-Solving | 4e35b609c9f5b94c5bda292b9991baa054a944b6 | [
"MIT"
] | 23 | 2020-02-28T16:18:48.000Z | 2021-12-21T11:51:07.000Z | Data Structures/Linked Lists/reverse-a-linked-list.py | ramanagali/Hackerrank-Problem-Solving | 98f654f984013140d52b9a344146e9e38e46fb81 | [
"MIT"
] | null | null | null | Data Structures/Linked Lists/reverse-a-linked-list.py | ramanagali/Hackerrank-Problem-Solving | 98f654f984013140d52b9a344146e9e38e46fb81 | [
"MIT"
] | 16 | 2020-04-08T10:46:39.000Z | 2021-11-15T03:46:56.000Z | # Reverse a linked list
# Developer: Murillo Grubler
# https://www.hackerrank.com/challenges/reverse-a-linked-list/problem
# Time complexity of reverse function: O(n)
# Complete the reverse function below.
#
# For your reference:
#
# SinglyLinkedListNode:
# int data
# SinglyLinkedListNode next
#
def reverse(head):
    # Builds a reversed copy of the list: each visited node's value is
    # copied into a new node that is prepended to the result.
    if head is None:
        return None
    ln = SinglyLinkedListNode(head.data)
    temp_node = head.next
    while temp_node:
        next_ln = ln
        ln = SinglyLinkedListNode(temp_node.data)
        ln.next = next_ln
        temp_node = temp_node.next
    return ln
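# For reference (not part of the original submission): a sketch of the
# conventional in-place reversal, O(n) time and O(1) extra space.
def reverse_in_place(head):
    prev = None
    while head:
        nxt = head.next    # remember the remainder of the list
        head.next = prev   # flip this node's pointer
        prev = head
        head = nxt
    return prev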
if __name__ == '__main__':
tests = int(input())
for tests_itr in range(tests):
llist_count = int(input())
llist = SinglyLinkedList()
for _ in range(llist_count):
llist_item = int(input())
llist.insert_node(llist_item)
result = reverse(llist.head)
while result:
print (result.data, end=' ')
result = result.next
| 24.596491 | 69 | 0.622682 |
4bb64f1dd8e15adacfcfa40dd94e5cebe3d88bea | 1,737 | py | Python | web/api/user/utilities.py | cclrobotics/ARTBot | a0bffabebbc09361bf7748741fe3d30c78af8fbd | [
"MIT"
] | 5 | 2020-12-04T19:28:42.000Z | 2021-12-07T16:14:28.000Z | web/api/user/utilities.py | cclrobotics/ARTBot | a0bffabebbc09361bf7748741fe3d30c78af8fbd | [
"MIT"
] | 50 | 2019-10-08T19:47:24.000Z | 2021-07-26T05:43:37.000Z | web/api/user/utilities.py | cclrobotics/ARTBot | a0bffabebbc09361bf7748741fe3d30c78af8fbd | [
"MIT"
] | 4 | 2019-10-23T04:14:49.000Z | 2021-08-01T01:22:37.000Z | import os
from PIL import Image
import random
from functools import wraps
from flask import jsonify
from flask_jwt_extended import get_current_user
from .artpiece import Artpiece
from .exceptions import InvalidUsage
from web.extensions import cache
#decorator to require admin_access for a route
"""
Return a list of images in the 'gallery' folder and their descriptions
Output is list of tuples (image_location, image_description)
output list is in random order for random display order every time
""" | 31.017857 | 89 | 0.716753 |
4bb717792f0ab03afa44f642bc10364fd9b57993 | 2,528 | py | Python | network/utils.py | Goochaozheng/ChunkFusion | 7458a8e08886cc76cfeb87881c51e23b1d0674c3 | [
"MIT"
] | 3 | 2022-03-15T08:34:15.000Z | 2022-03-15T08:40:06.000Z | network/utils.py | Goochaozheng/ChunkFusion | 7458a8e08886cc76cfeb87881c51e23b1d0674c3 | [
"MIT"
] | null | null | null | network/utils.py | Goochaozheng/ChunkFusion | 7458a8e08886cc76cfeb87881c51e23b1d0674c3 | [
"MIT"
] | null | null | null | import spconv
import torch
from torch import nn
| 36.114286 | 168 | 0.71163 |
4bba68abed889d99f735d0534602287dd744310e | 3,794 | py | Python | hemlock/load_scripts/doc_to_mongo.py | Lab41/Hemlock | 2c53cfc11bfbe1e4f901b519db578090fe7a17dd | [
"Apache-2.0"
] | 4 | 2015-05-14T18:59:44.000Z | 2017-03-09T12:49:36.000Z | hemlock/load_scripts/doc_to_mongo.py | Lab41/Hemlock | 2c53cfc11bfbe1e4f901b519db578090fe7a17dd | [
"Apache-2.0"
] | null | null | null | hemlock/load_scripts/doc_to_mongo.py | Lab41/Hemlock | 2c53cfc11bfbe1e4f901b519db578090fe7a17dd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2013 In-Q-Tel, Inc/Lab41, All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fnmatch, os, sys, time, uuid
from pymongo import MongoClient
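# get_args, process_args, mongo_server and process_doc were lost in
# extraction. A hypothetical reconstruction of mongo_server, inferred only
# from its call site below -- not the verified original:
def mongo_server(server, port, database, collection):
    m_server = MongoClient(server, int(port))   # connect to the MongoDB instance
    m_database = m_server[database]             # select the database
    m_collection = m_database[collection]       # select the collection
    return m_server, m_database, m_collection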
if __name__ == "__main__":
start_time = time.time()
args = get_args()
input, server, port, database, collection = process_args(args)
m_server, m_database, m_collection = mongo_server(server, port, database, collection)
process_doc(input, m_server, m_database, m_collection)
print "Took",time.time() - start_time,"seconds to complete."
| 29.640625 | 89 | 0.552978 |
4bbaecaa33cf5b0c99d08e0e5f803ac656d6dabe | 2,659 | py | Python | unn/models/initializer.py | zongdaoming/TinyTransformer | 8e64f8816117048c388b4b20e3a56760ce149fe3 | [
"Apache-2.0"
] | 2 | 2021-08-08T11:23:14.000Z | 2021-09-16T04:05:23.000Z | unn/models/initializer.py | zongdaoming/TinyTransformer | 8e64f8816117048c388b4b20e3a56760ce149fe3 | [
"Apache-2.0"
] | 1 | 2021-08-08T11:25:47.000Z | 2021-08-08T11:26:15.000Z | unn/models/initializer.py | zongdaoming/TinyTransformer | 8e64f8816117048c388b4b20e3a56760ce149fe3 | [
"Apache-2.0"
] | null | null | null | import copy
import logging
import math
import torch
from torch import nn
logger = logging.getLogger('global')
| 32.036145 | 83 | 0.588943 |
4bbcb3d3943aa14ce46dab08f6f7c37762566694 | 3,000 | py | Python | AudioFile.py | ZZZlax/.Pyrate | 42a85213e0557b2988bf62bb8eac540263e0ce30 | [
"Unlicense"
] | null | null | null | AudioFile.py | ZZZlax/.Pyrate | 42a85213e0557b2988bf62bb8eac540263e0ce30 | [
"Unlicense"
] | null | null | null | AudioFile.py | ZZZlax/.Pyrate | 42a85213e0557b2988bf62bb8eac540263e0ce30 | [
"Unlicense"
] | null | null | null | # This Python file uses the following encoding: utf-8
import os; import sys; import urllib.request; from bs4 import BeautifulSoup; import wikipedia
from PyQt5.QtWebEngineWidgets import *; from PyQt5.QtGui import QIcon; from PyQt5.QtWidgets import *; from PyQt5.QtNetwork import QNetworkProxy
if __name__ == "__main__":
app = QApplication([])
AudioFile().show()
sys.exit(app.exec_())
| 69.767442 | 620 | 0.676667 |
4bbd0476d4a8b8dde9a872c84a83e121621a1703 | 16,649 | py | Python | GeneratorTest.py | Autio/swb_datascraping | ef31fd89c68d86849342495b79985572d0f2fc61 | [
"MIT"
] | null | null | null | GeneratorTest.py | Autio/swb_datascraping | ef31fd89c68d86849342495b79985572d0f2fc61 | [
"MIT"
] | null | null | null | GeneratorTest.py | Autio/swb_datascraping | ef31fd89c68d86849342495b79985572d0f2fc61 | [
"MIT"
] | null | null | null | __author__ = 'petriau'
import requests # for HTTP requests
from bs4 import BeautifulSoup # for HTML parsing
url_SBM_FinanceProgress = 'http://sbm.gov.in/sbmreport/Report/Financial/SBM_StateReleaseAllocationincludingUnapproved.aspx'
# Function to return HTML parsed with BeautifulSoup from a POST request URL and parameters.
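# The function described above was lost in extraction. A hypothetical
# reconstruction based on the imports and on the call
# parsePOSTResponse(url_SBM_FinanceProgress, p) near the end of this file --
# an assumption, not the verified original:
def parsePOSTResponse(url, params=None):
    response = requests.post(url, data=params)          # send the POST request
    return BeautifulSoup(response.text, 'html.parser')  # parse the returned HTML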
listTest = [['__EVENTARGUMENT',''],['__EVENTTARGET', 'ctl00$ContentPlaceHolder1$rptr_state$ctl03$lnkbtn_stName'],['__EVENTVALIDATION',"/wEWTAL2js/IBwLq6fiEBwK4qJKGBgL7uLfDBQLMho26CAKkvMv0BAKrp/OzCAKzzOWcCQLfxNm+CQLZ25fbDALc9b7CDALYxrzSBgLK5tedAQLJrc6KBwLD2Nb1DwKAz9S2BQLD2JrzAgKAz/jyBAKUlKjOAgKvk9PyBQKUlOzLBQKvk5ewAQKNhuujBAK2ocCIDQKNhq+hBwK2oaT3BwLW5PDOBwLdiLPJCQLW5LTMCgLdiPeGBQLPqct8AqCWu5oDAs+pj/oDAqCW/9cOArTAhacLArv/sNsBArTAyaQOArv/1JcBApnX36oFAtbopuwKApnXo6gIAtbo6qkGAv7tmdUPAvHRnK0JAv7t3dICAvHRgJwEAtuMv40FAoTehsMOAtuMg4sIAoTeyoAKAtTR8eIFAquz5dgPAtTRhc0LAquz+e4CAs3DtJABAoKu9fINAs3DyPoGAoKuibEHAp7/hZEKAuGFkd0CAp7/mfsPAuGFpfMFApfxyL4FAriAoXcCl/HcqAsCuIC1tQoC4M/OkQ8Cv46O1Q0C4M/i+wQCv46iawLZlNH1CQKqnLrXDQLZlJXzDAKqnM5tAr6ri/gNAsWF0MkLUJ4OhBgatkYSQhamBAvcsSVIgC8="],
['__VIEWSTATE',"/wEPDwUKMTQwNTE3ODMyMg9kFgJmD2QWAgIDD2QWBAIfDw8WBB4EVGV4dAVMPHNwYW4gY2xhc3M9ImdseXBoaWNvbiBnbHlwaGljb24tY2lyY2xlLWFycm93LWxlZnQiPjwvc3Bhbj4gQmFjayB0byBQcmV2aW91cx4HVmlzaWJsZWcWAh4Hb25jbGljawUoamF2YXNjcmlwdDpoaXN0b3J5LmJhY2soKTsgcmV0dXJuIGZhbHNlO2QCIQ9kFgICAQ9kFggCAw8PFgIfAAUIKENlbnRlcilkZAIFDw8WAh8ABQsoMjAxNi0yMDE3KWRkAgcPFgIfAWgWBAIBDxYCHwFoFgQCAw8QZGQWAWZkAgcPEA8WBh4NRGF0YVRleHRGaWVsZAUHRmluWWVhch4ORGF0YVZhbHVlRmllbGQFB0ZpblllYXIeC18hRGF0YUJvdW5kZ2QQFQIKLS1TZWxlY3QtLQkyMDE2LTIwMTcVAgItMgkyMDE2LTIwMTcUKwMCZ2cWAQIBZAIDD2QWAgIBDw8WBB8ABRRSZWNvcmQgTm90IEZvdW5kICEhIR8BaGRkAgkPFgIeC18hSXRlbUNvdW50AiAWQgIBD2QWBmYPFQEBMWQCAQ8PFgIfAAUMQSAmIE4gSWxhbmRzZGQCBA8VCgYzNDAuMDAEMC4wMAQwLjAwBjM0MC4wMAQwLjAwBTEzLjU5BTEzLjU5BDQuMDAGMzQwLjAwBjMyNi40MWQCAg9kFgZmDxUBATJkAgEPDxYCHwAFDkFuZGhyYSBQcmFkZXNoZGQCBA8VCgc4NTk2LjY5BzY3NzIuODUEMS4xNAgxNTM3MC42OAc1NjQ5LjkzBzMzNDMuNjEHODk5My41NAU1OC41MQc5NzIwLjc1BzYzNzcuMTRkAgMPZBYGZg8VAQEzZAIBDw8WAh8ABRFBcnVuYWNoYWwgUHJhZGVzaGRkAgQPFQoHMTQ2NS44OAY5NjguNTEEMC4wMAcyNDM0LjM5BDAuMDAGMTA4LjAzBjEwOC4wMwQ0LjQ0BzI0MzQuMzkHMjMyNi4zNWQCBA9kFgZmDxUBATRkAgEPDxYCHwAFBUFzc2FtZGQCBA8VCggxNjExMC43OAQwLjAwBDAuMDAIMTYxMTAuNzgGNjg2LjE5BjkxNi4yNwcxNjAyLjQ2BDkuOTUIMTU0MjQuNjAIMTQ1MDguMzJkAgUPZBYGZg8VAQE1ZAIBDw8WAh8ABQVCaWhhcmRkAgQPFQoHNDIwMC4zNQgxMzE4Ni4zNwQwLjAwCDE3Mzg2LjcyBjY4Ni45OAcxMjI2LjgwBzE5MTMuNzgFMTEuMDEIMTY2OTkuNzQIMTU0NzIuOTRkAgYPZBYGZg8VAQE2ZAIBDw8WAh8ABQxDaGhhdHRpc2dhcmhkZAIEDxUKCC01OTYyLjUxBzk5NDcuNTcEMC4wMAczOTg1LjA2BjU3MS4xNgcxODY5LjE5BzI0NDAuMzQFNjEuMjQHMzQxMy45MQcxNTQ0LjcyZAIHD2QWBmYPFQEBN2QCAQ8PFgIfAAUMRCAmIE4gSGF2ZWxpZGQCBA8VCgQxLjQ4BDAuMDAEMC4wMAQxLjQ4BDAuMDAEMC4wMAQwLjAwBDAuMDAEMS40OAQxLjQ4ZAIID2QWBmYPFQEBOGQCAQ8PFgIfAAUDR29hZGQCBA8VCgctMzMzLjk1BDAuMDAEMC4wMActMzMzLjk1BDAuMDAHMjA5NC40OAcyMDk0LjQ4BDAuMDAHLTMzMy45NQgtMjQyOC40M2QCCQ9kFgZmDxUBATlkAgEPDxYCHwAFB0d1amFyYXRkZAIEDxUKCC00Njg4LjA0CDI4MDQ5LjI2BDAuMjMIMjMzNjEuNDUHMjAwNS4zNgc0MTc5LjAzBzYxODQuMzkFMjYuNDcIMjEzNTYuMDgIMTcxNzcuMDZkAgoPZBYGZg8VAQIxMGQCAQ8PFgIfAAUHSGFyeWFuYWRkAgQPFQoGNzc0LjQ5BzY4NzkuMDcEMi4zNQc3NjU1LjkyBjIwOC40MgU5MS42MQYzMDAuMDMEMy45Mgc3NDQ3LjUwBzczNTUuODlkAgsPZBYGZg8VAQIxMWQCAQ8PFgIfAAUQSGltYWNoYWwgUHJhZGVzaGRkAgQPFQoHNTI4My4yOAQwLjAwBTI0LjAzBzUzMDcuMzEGMzEzLjY0BjY2Ni41NgY5ODAuMjAFMTguNDcHNDk5My42Nwc0MzI3LjExZAIMD2QWBmYPFQECMTJkAgEPDxYCHwAFD0phbW11ICYgS2FzaG1pcmRkAgQPFQoHNTM5OS4zNwYyMjkuOTAEMC4wMAc1NjI5LjI3BjEwMS43MQU1MS44NQYxNTMuNTYEMi43Mwc1NTI3LjU1BzU0NzUuNzBkAg0PZBYGZg8VAQIxM2QCAQ8PFgIfAAUJSmhhcmtoYW5kZGQCBA8VCgktMTIyNzYuNjMIMTAzNTguOTYENC4xMggtMTkxMy41NQcxMzkwLjc2BzIzNTcuMjIHMzc0Ny45OAQwLjAwCC0zMzA0LjMxCC01NjYxLjUyZAIOD2QWBmYPFQECMTRkAgEPDxYCHwAFCUthcm5hdGFrYWRkAgQPFQoILTUwNDAuNjQIMTI2NzEuNTAEMC4wMAc3NjMwLjg2Bjk0OS40MwczMzA1LjYyBzQyNTUuMDUFNTUuNzYHNjY4MS40MwczMzc1LjgxZAIPD2QWBmYPFQECMTVkAgEPDxYCHwAFBktlcmFsYWRkAgQPFQoHMjg5MC45MgQwLjAwBDIuODIHMjg5My43NAYxMDcuNjkENS4xMQYxMTIuODAEMy45MAcyNzg2LjA1BzI3ODAuOTRkAhAPZBYGZg8VAQIxNmQCAQ8PFgIfAAUOTWFkaHlhIFByYWRlc2hkZAIEDxUKCS0xNTYzMy43NAgzNDIyMy41MwUyNS4wMAgxODYxNC43OQc5MzYwLjU0BzM0NzIuOTUIMTI4MzMuNDkFNjguOTQHOTI1NC4yNAc1NzgxLjI5ZAIRD2QWBmYPFQECMTdkAgEPDxYCHwAFC01haGFyYXNodHJhZGQCBA8VCggtNDMzMy4xNwgyNjQ0Ny4wOQQwLjAwCDIyMTEzLjkyBjMyNy42OAczNDg5LjAxBzM4MTYuNjkFMTcuMjYIMjE3ODYuMjMIMTgyOTcuMjNkAhIPZBYGZg8VAQIxOGQCAQ8PFgIfAAUHTWFuaXB1cmRkAgQPFQoHLTQ2Ni4yOQcyNzI3LjUwBDAuMDAHMjI2MS4yMQQwLjAwBjE1NS42MwYxNTUuNjMENi44OAcyMjYxLjIxBzIxMDUuNThkAhMPZBYGZg8VAQIxOWQCAQ8PFgIfAAUJTWVnaGFsYXlhZGQCBA8VCgcxNzI3LjY3BzQxMjIuMjQEMC4wMAc1ODQ5LjkxBjIyOS42MAYxMDguMjUGMzM3Ljg1BDUuNzgHNTYyMC4zMQc1NTEyLjA2ZAIUD2QWBmYPFQECMjBkAgEPDxYCHwAFB01pem9yYW1kZAIEDxUKBjM2NC4zMwQwLjAwBDA
uMDAGMzY0LjMzBTk1LjExBTczLjgyBjE2OC45MwU0Ni4zNwYyNjkuMjMGMTk1LjQwZAIVD2QWBmYPFQECMjFkAgEPDxYCHwAFCE5hZ2FsYW5kZGQCBA8VCgYzMDIuMDMEMC4wMAQ5Ljg3BjMxMS45MAYxNzMuNDMEMi4yNwYxNzUuNzAFNTYuMzMGMTM4LjQ3BjEzNi4yMGQCFg9kFgZmDxUBAjIyZAIBDw8WAh8ABQZPZGlzaGFkZAIEDxUKCS01MDYzMS40Nwg0NTg1Ni42MQQwLjAwCC00Nzc0Ljg2Bzk1MTAuMzgHMjI4MC4zNQgxMTc5MC43MwQwLjAwCS0xNDI4NS4yNAktMTY1NjUuNTlkAhcPZBYGZg8VAQIyM2QCAQ8PFgIfAAUKUHVkdWNoZXJyeWRkAgQPFQoGNjYzLjEyBDAuMDAEMC4wMAY2NjMuMTIEMC4wMAQwLjAwBDAuMDAEMC4wMAY2NjMuMTIGNjYzLjEyZAIYD2QWBmYPFQECMjRkAgEPDxYCHwAFBlB1bmphYmRkAgQPFQoILTE2NTUuMjkHMjQ4Mi44NQQwLjAwBjgyNy41NgYxNTQuOTIGNTE4LjkwBjY3My44MwU4MS40MgY2NzIuNjMGMTUzLjczZAIZD2QWBmYPFQECMjVkAgEPDxYCHwAFCVJhamFzdGhhbmRkAgQPFQoJLTMwNTk3LjUwCDYyNzMwLjA1BDAuMDAIMzIxMzIuNTUHNjQxNC45Mwc1ODA4LjUyCDEyMjIzLjQ1BTM4LjA0CDI1NzE3LjYyCDE5OTA5LjEwZAIaD2QWBmYPFQECMjZkAgEPDxYCHwAFBlNpa2tpbWRkAgQPFQoGNTE1LjM5BjQ4MC45NgQwLjAwBjk5Ni4zNQQwLjAwBDAuMDAEMC4wMAQwLjAwBjk5Ni4zNQY5OTYuMzVkAhsPZBYGZg8VAQIyN2QCAQ8PFgIfAAUKVGFtaWwgTmFkdWRkAgQPFQoJLTI0MTEwLjAxCDI2ODUwLjk0BDAuNjIHMjc0MS41NgY0NzguMTEGMTU3Ljg5BjYzNi4wMQUyMy4yMAcyMjYzLjQ0BzIxMDUuNTVkAhwPZBYGZg8VAQIyOGQCAQ8PFgIfAAUJVGVsYW5nYW5hZGQCBA8VCgc1ODE0LjI1BDAuMDAEMC4wMAc1ODE0LjI1BjY1Ni43OAc0NjgwLjI0BzUzMzcuMDIFOTEuNzkHNTE1Ny40NwY0NzcuMjNkAh0PZBYGZg8VAQIyOWQCAQ8PFgIfAAUHVHJpcHVyYWRkAgQPFQoHMzYwNy40OAQwLjAwBDAuMDAHMzYwNy40OAU0MC4yMgYxMjguOTEGMTY5LjEzBDQuNjkHMzU2Ny4yNgczNDM4LjM2ZAIeD2QWBmYPFQECMzBkAgEPDxYCHwAFDVV0dGFyIFByYWRlc2hkZAIEDxUKCDI2OTIyLjIyCDE3ODY3LjQ0BDYuNjYINDQ3OTYuMzIHMzg1Ni44MAczNTE4LjMwBzczNzUuMTAFMTYuNDYINDA5MzkuNTIIMzc0MjEuMjFkAh8PZBYGZg8VAQIzMWQCAQ8PFgIfAAULVXR0YXJha2hhbmRkZAIEDxUKCC0xNjU4LjI3Bzg1MjkuMTMEMC4wMAc2ODcwLjg2BzEyMjAuMDkGNjQwLjc5BzE4NjAuODgFMjcuMDgHNTY1MC43Nwc1MDA5Ljk3ZAIgD2QWBmYPFQECMzJkAgEPDxYCHwAFC1dlc3QgQmVuZ2FsZGQCBA8VCgktMTYyMTkuNjcIMzI4NzUuNjAEMC4wMAgxNjY1NS45MwcxNzI3LjgxBzY2NDYuMTkHODM3NC4wMAU1MC4yOAgxNDkyOC4xMgc4MjgxLjkzZAIhD2QWAgICDxUKCS04ODYyNy40NQkzNTQyNTcuOTMFNzYuODQJMjY1NzA3LjM0CDQ2OTE3LjY3CDUxOTEwLjk5CDk4ODI4LjY3BTM3LjE5CTIxODc4OS42NAkxNjY4NzguNjRkGAEFHl9fQ29udHJvbHNSZXF1aXJlUG9zdEJhY2tLZXlfXxYDBQ9jdGwwMCRpY29uX3dvcmQFEGN0bDAwJGljb25fZXhjZWwFEmN0bDAwJGljb25fcHJpbnRlcqLkin/PLgDvwcsQ6/a18eF5HbFe"]
]
paramDictionary = {key: str(value) for key, value in listTest}
def merge_two_dicts(x, y):
'''Given two dicts, merge them into a new dict as a shallow copy.'''
z = x.copy()
z.update(y)
return z
postParams = {
# '__EVENTARGUMENT': '',
# '__EVENTTARGET': 'ctl00$ContentPlaceHolder1$rptr_state$ctl03$lnkbtn_stName',
# '__EVENTVALIDATION': "/wEWTAL2js/IBwLq6fiEBwK4qJKGBgL7uLfDBQLMho26CAKkvMv0BAKrp/OzCAKzzOWcCQLfxNm+CQLZ25fbDALc9b7CDALYxrzSBgLK5tedAQLJrc6KBwLD2Nb1DwKAz9S2BQLD2JrzAgKAz/jyBAKUlKjOAgKvk9PyBQKUlOzLBQKvk5ewAQKNhuujBAK2ocCIDQKNhq+hBwK2oaT3BwLW5PDOBwLdiLPJCQLW5LTMCgLdiPeGBQLPqct8AqCWu5oDAs+pj/oDAqCW/9cOArTAhacLArv/sNsBArTAyaQOArv/1JcBApnX36oFAtbopuwKApnXo6gIAtbo6qkGAv7tmdUPAvHRnK0JAv7t3dICAvHRgJwEAtuMv40FAoTehsMOAtuMg4sIAoTeyoAKAtTR8eIFAquz5dgPAtTRhc0LAquz+e4CAs3DtJABAoKu9fINAs3DyPoGAoKuibEHAp7/hZEKAuGFkd0CAp7/mfsPAuGFpfMFApfxyL4FAriAoXcCl/HcqAsCuIC1tQoC4M/OkQ8Cv46O1Q0C4M/i+wQCv46iawLZlNH1CQKqnLrXDQLZlJXzDAKqnM5tAr6ri/gNAsWF0MkLUJ4OhBgatkYSQhamBAvcsSVIgC8=",
# '__VIEWSTATE': "/wEPDwUKMTQwNTE3ODMyMg9kFgJmD2QWAgIDD2QWBAIfDw8WBB4EVGV4dAVMPHNwYW4gY2xhc3M9ImdseXBoaWNvbiBnbHlwaGljb24tY2lyY2xlLWFycm93LWxlZnQiPjwvc3Bhbj4gQmFjayB0byBQcmV2aW91cx4HVmlzaWJsZWcWAh4Hb25jbGljawUoamF2YXNjcmlwdDpoaXN0b3J5LmJhY2soKTsgcmV0dXJuIGZhbHNlO2QCIQ9kFgICAQ9kFggCAw8PFgIfAAUIKENlbnRlcilkZAIFDw8WAh8ABQsoMjAxNi0yMDE3KWRkAgcPFgIfAWgWBAIBDxYCHwFoFgQCAw8QZGQWAWZkAgcPEA8WBh4NRGF0YVRleHRGaWVsZAUHRmluWWVhch4ORGF0YVZhbHVlRmllbGQFB0ZpblllYXIeC18hRGF0YUJvdW5kZ2QQFQIKLS1TZWxlY3QtLQkyMDE2LTIwMTcVAgItMgkyMDE2LTIwMTcUKwMCZ2cWAQIBZAIDD2QWAgIBDw8WBB8ABRRSZWNvcmQgTm90IEZvdW5kICEhIR8BaGRkAgkPFgIeC18hSXRlbUNvdW50AiAWQgIBD2QWBmYPFQEBMWQCAQ8PFgIfAAUMQSAmIE4gSWxhbmRzZGQCBA8VCgYzNDAuMDAEMC4wMAQwLjAwBjM0MC4wMAQwLjAwBTEzLjU5BTEzLjU5BDQuMDAGMzQwLjAwBjMyNi40MWQCAg9kFgZmDxUBATJkAgEPDxYCHwAFDkFuZGhyYSBQcmFkZXNoZGQCBA8VCgc4NTk2LjY5BzY3NzIuODUEMS4xNAgxNTM3MC42OAc1NjQ5LjkzBzMzNDMuNjEHODk5My41NAU1OC41MQc5NzIwLjc1BzYzNzcuMTRkAgMPZBYGZg8VAQEzZAIBDw8WAh8ABRFBcnVuYWNoYWwgUHJhZGVzaGRkAgQPFQoHMTQ2NS44OAY5NjguNTEEMC4wMAcyNDM0LjM5BDAuMDAGMTA4LjAzBjEwOC4wMwQ0LjQ0BzI0MzQuMzkHMjMyNi4zNWQCBA9kFgZmDxUBATRkAgEPDxYCHwAFBUFzc2FtZGQCBA8VCggxNjExMC43OAQwLjAwBDAuMDAIMTYxMTAuNzgGNjg2LjE5BjkxNi4yNwcxNjAyLjQ2BDkuOTUIMTU0MjQuNjAIMTQ1MDguMzJkAgUPZBYGZg8VAQE1ZAIBDw8WAh8ABQVCaWhhcmRkAgQPFQoHNDIwMC4zNQgxMzE4Ni4zNwQwLjAwCDE3Mzg2LjcyBjY4Ni45OAcxMjI2LjgwBzE5MTMuNzgFMTEuMDEIMTY2OTkuNzQIMTU0NzIuOTRkAgYPZBYGZg8VAQE2ZAIBDw8WAh8ABQxDaGhhdHRpc2dhcmhkZAIEDxUKCC01OTYyLjUxBzk5NDcuNTcEMC4wMAczOTg1LjA2BjU3MS4xNgcxODY5LjE5BzI0NDAuMzQFNjEuMjQHMzQxMy45MQcxNTQ0LjcyZAIHD2QWBmYPFQEBN2QCAQ8PFgIfAAUMRCAmIE4gSGF2ZWxpZGQCBA8VCgQxLjQ4BDAuMDAEMC4wMAQxLjQ4BDAuMDAEMC4wMAQwLjAwBDAuMDAEMS40OAQxLjQ4ZAIID2QWBmYPFQEBOGQCAQ8PFgIfAAUDR29hZGQCBA8VCgctMzMzLjk1BDAuMDAEMC4wMActMzMzLjk1BDAuMDAHMjA5NC40OAcyMDk0LjQ4BDAuMDAHLTMzMy45NQgtMjQyOC40M2QCCQ9kFgZmDxUBATlkAgEPDxYCHwAFB0d1amFyYXRkZAIEDxUKCC00Njg4LjA0CDI4MDQ5LjI2BDAuMjMIMjMzNjEuNDUHMjAwNS4zNgc0MTc5LjAzBzYxODQuMzkFMjYuNDcIMjEzNTYuMDgIMTcxNzcuMDZkAgoPZBYGZg8VAQIxMGQCAQ8PFgIfAAUHSGFyeWFuYWRkAgQPFQoGNzc0LjQ5BzY4NzkuMDcEMi4zNQc3NjU1LjkyBjIwOC40MgU5MS42MQYzMDAuMDMEMy45Mgc3NDQ3LjUwBzczNTUuODlkAgsPZBYGZg8VAQIxMWQCAQ8PFgIfAAUQSGltYWNoYWwgUHJhZGVzaGRkAgQPFQoHNTI4My4yOAQwLjAwBTI0LjAzBzUzMDcuMzEGMzEzLjY0BjY2Ni41NgY5ODAuMjAFMTguNDcHNDk5My42Nwc0MzI3LjExZAIMD2QWBmYPFQECMTJkAgEPDxYCHwAFD0phbW11ICYgS2FzaG1pcmRkAgQPFQoHNTM5OS4zNwYyMjkuOTAEMC4wMAc1NjI5LjI3BjEwMS43MQU1MS44NQYxNTMuNTYEMi43Mwc1NTI3LjU1BzU0NzUuNzBkAg0PZBYGZg8VAQIxM2QCAQ8PFgIfAAUJSmhhcmtoYW5kZGQCBA8VCgktMTIyNzYuNjMIMTAzNTguOTYENC4xMggtMTkxMy41NQcxMzkwLjc2BzIzNTcuMjIHMzc0Ny45OAQwLjAwCC0zMzA0LjMxCC01NjYxLjUyZAIOD2QWBmYPFQECMTRkAgEPDxYCHwAFCUthcm5hdGFrYWRkAgQPFQoILTUwNDAuNjQIMTI2NzEuNTAEMC4wMAc3NjMwLjg2Bjk0OS40MwczMzA1LjYyBzQyNTUuMDUFNTUuNzYHNjY4MS40MwczMzc1LjgxZAIPD2QWBmYPFQECMTVkAgEPDxYCHwAFBktlcmFsYWRkAgQPFQoHMjg5MC45MgQwLjAwBDIuODIHMjg5My43NAYxMDcuNjkENS4xMQYxMTIuODAEMy45MAcyNzg2LjA1BzI3ODAuOTRkAhAPZBYGZg8VAQIxNmQCAQ8PFgIfAAUOTWFkaHlhIFByYWRlc2hkZAIEDxUKCS0xNTYzMy43NAgzNDIyMy41MwUyNS4wMAgxODYxNC43OQc5MzYwLjU0BzM0NzIuOTUIMTI4MzMuNDkFNjguOTQHOTI1NC4yNAc1NzgxLjI5ZAIRD2QWBmYPFQECMTdkAgEPDxYCHwAFC01haGFyYXNodHJhZGQCBA8VCggtNDMzMy4xNwgyNjQ0Ny4wOQQwLjAwCDIyMTEzLjkyBjMyNy42OAczNDg5LjAxBzM4MTYuNjkFMTcuMjYIMjE3ODYuMjMIMTgyOTcuMjNkAhIPZBYGZg8VAQIxOGQCAQ8PFgIfAAUHTWFuaXB1cmRkAgQPFQoHLTQ2Ni4yOQcyNzI3LjUwBDAuMDAHMjI2MS4yMQQwLjAwBjE1NS42MwYxNTUuNjMENi44OAcyMjYxLjIxBzIxMDUuNThkAhMPZBYGZg8VAQIxOWQCAQ8PFgIfAAUJTWVnaGFsYXlhZGQCBA8VCgcxNzI3LjY3BzQxMjIuMjQEMC4wMAc1ODQ5LjkxBjIyOS42MAYxMDguMjUGMzM3Ljg1BDUuNzgHNTYyMC4zMQc1NTEyLjA2ZAIUD2QWBmYPFQECMjBkAgEPDxYCHwAFB01pem9yYW1kZAIEDxUKBjM2NC4zMwQwLjAwB
DAuMDAGMzY0LjMzBTk1LjExBTczLjgyBjE2OC45MwU0Ni4zNwYyNjkuMjMGMTk1LjQwZAIVD2QWBmYPFQECMjFkAgEPDxYCHwAFCE5hZ2FsYW5kZGQCBA8VCgYzMDIuMDMEMC4wMAQ5Ljg3BjMxMS45MAYxNzMuNDMEMi4yNwYxNzUuNzAFNTYuMzMGMTM4LjQ3BjEzNi4yMGQCFg9kFgZmDxUBAjIyZAIBDw8WAh8ABQZPZGlzaGFkZAIEDxUKCS01MDYzMS40Nwg0NTg1Ni42MQQwLjAwCC00Nzc0Ljg2Bzk1MTAuMzgHMjI4MC4zNQgxMTc5MC43MwQwLjAwCS0xNDI4NS4yNAktMTY1NjUuNTlkAhcPZBYGZg8VAQIyM2QCAQ8PFgIfAAUKUHVkdWNoZXJyeWRkAgQPFQoGNjYzLjEyBDAuMDAEMC4wMAY2NjMuMTIEMC4wMAQwLjAwBDAuMDAEMC4wMAY2NjMuMTIGNjYzLjEyZAIYD2QWBmYPFQECMjRkAgEPDxYCHwAFBlB1bmphYmRkAgQPFQoILTE2NTUuMjkHMjQ4Mi44NQQwLjAwBjgyNy41NgYxNTQuOTIGNTE4LjkwBjY3My44MwU4MS40MgY2NzIuNjMGMTUzLjczZAIZD2QWBmYPFQECMjVkAgEPDxYCHwAFCVJhamFzdGhhbmRkAgQPFQoJLTMwNTk3LjUwCDYyNzMwLjA1BDAuMDAIMzIxMzIuNTUHNjQxNC45Mwc1ODA4LjUyCDEyMjIzLjQ1BTM4LjA0CDI1NzE3LjYyCDE5OTA5LjEwZAIaD2QWBmYPFQECMjZkAgEPDxYCHwAFBlNpa2tpbWRkAgQPFQoGNTE1LjM5BjQ4MC45NgQwLjAwBjk5Ni4zNQQwLjAwBDAuMDAEMC4wMAQwLjAwBjk5Ni4zNQY5OTYuMzVkAhsPZBYGZg8VAQIyN2QCAQ8PFgIfAAUKVGFtaWwgTmFkdWRkAgQPFQoJLTI0MTEwLjAxCDI2ODUwLjk0BDAuNjIHMjc0MS41NgY0NzguMTEGMTU3Ljg5BjYzNi4wMQUyMy4yMAcyMjYzLjQ0BzIxMDUuNTVkAhwPZBYGZg8VAQIyOGQCAQ8PFgIfAAUJVGVsYW5nYW5hZGQCBA8VCgc1ODE0LjI1BDAuMDAEMC4wMAc1ODE0LjI1BjY1Ni43OAc0NjgwLjI0BzUzMzcuMDIFOTEuNzkHNTE1Ny40NwY0NzcuMjNkAh0PZBYGZg8VAQIyOWQCAQ8PFgIfAAUHVHJpcHVyYWRkAgQPFQoHMzYwNy40OAQwLjAwBDAuMDAHMzYwNy40OAU0MC4yMgYxMjguOTEGMTY5LjEzBDQuNjkHMzU2Ny4yNgczNDM4LjM2ZAIeD2QWBmYPFQECMzBkAgEPDxYCHwAFDVV0dGFyIFByYWRlc2hkZAIEDxUKCDI2OTIyLjIyCDE3ODY3LjQ0BDYuNjYINDQ3OTYuMzIHMzg1Ni44MAczNTE4LjMwBzczNzUuMTAFMTYuNDYINDA5MzkuNTIIMzc0MjEuMjFkAh8PZBYGZg8VAQIzMWQCAQ8PFgIfAAULVXR0YXJha2hhbmRkZAIEDxUKCC0xNjU4LjI3Bzg1MjkuMTMEMC4wMAc2ODcwLjg2BzEyMjAuMDkGNjQwLjc5BzE4NjAuODgFMjcuMDgHNTY1MC43Nwc1MDA5Ljk3ZAIgD2QWBmYPFQECMzJkAgEPDxYCHwAFC1dlc3QgQmVuZ2FsZGQCBA8VCgktMTYyMTkuNjcIMzI4NzUuNjAEMC4wMAgxNjY1NS45MwcxNzI3LjgxBzY2NDYuMTkHODM3NC4wMAU1MC4yOAgxNDkyOC4xMgc4MjgxLjkzZAIhD2QWAgICDxUKCS04ODYyNy40NQkzNTQyNTcuOTMFNzYuODQJMjY1NzA3LjM0CDQ2OTE3LjY3CDUxOTEwLjk5CDk4ODI4LjY3BTM3LjE5CTIxODc4OS42NAkxNjY4NzguNjRkGAEFHl9fQ29udHJvbHNSZXF1aXJlUG9zdEJhY2tLZXlfXxYDBQ9jdGwwMCRpY29uX3dvcmQFEGN0bDAwJGljb25fZXhjZWwFEmN0bDAwJGljb25fcHJpbnRlcqLkin/PLgDvwcsQ6/a18eF5HbFe",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl01$hfd_StateId':"26",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl02$hfd_StateId':"1",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl03$hfd_StateId':"2",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl04$hfd_StateId':"3",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl05$hfd_StateId':"4",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl06$hfd_StateId':"34",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl07$hfd_StateId':"28",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl08$hfd_StateId':"5",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl09$hfd_StateId':"6",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl10$hfd_StateId':"7",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl11$hfd_StateId':"8",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl12$hfd_StateId':"9",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl13$hfd_StateId':"35",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl14$hfd_StateId':"10",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl15$hfd_StateId':"11",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl16$hfd_StateId':"12",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl17$hfd_StateId':"13",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl18$hfd_StateId':"14",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl19$hfd_StateId':"15",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl20$hfd_StateId':"16",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl21$hfd_StateId':"17",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl22$hfd_StateId':"18",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl23$hfd_StateId':"32",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl24$hfd_StateId':"19",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl25$hfd_StateId':"20",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl26$hfd_StateId':"21",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl27$hfd_StateId':"22",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl28$hfd_StateId':"36",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl29$hfd_StateId':"23",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl30$hfd_StateId':"24",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl31$hfd_StateId':"33",
'ctl00$ContentPlaceHolder1$rptr_cen$ctl32$hfd_StateId':"25"
}
p = merge_two_dicts(postParams, paramDictionary)
componentPage = parsePOSTResponse(url_SBM_FinanceProgress, p)
print(componentPage)
x = 'what'
| 228.068493 | 5,731 | 0.910145 |
4bbd48777ac0547cad592918b3cc57a1efffc54a | 677 | py | Python | project euler/q50.py | milkmeat/thomas | fbc72af34267488d931a4885d4e19fce22fea582 | [
"MIT"
] | null | null | null | project euler/q50.py | milkmeat/thomas | fbc72af34267488d931a4885d4e19fce22fea582 | [
"MIT"
] | null | null | null | project euler/q50.py | milkmeat/thomas | fbc72af34267488d931a4885d4e19fce22fea582 | [
"MIT"
] | null | null | null | b=listprimenumber(100)
print b
count=3
for x in range(len(b)-count):
sum=0
for y in range(count):
sum+=b[x+y]
if sum in b:
print sum
#if b[x+0]+b[x+1]+b[x+2] in b:
# print b[x],b[x+1],b[x+2] | 22.566667 | 39 | 0.499261 |
4bbd5d337a02e7405c19f8ae7746f2dbce197b3b | 4,189 | py | Python | s09_files_and_random/solutions/random_walking_simple.py | silverfield/pythonsessions | bf5d82dded7616a5d6998da4eb445708c728794f | [
"MIT"
] | null | null | null | s09_files_and_random/solutions/random_walking_simple.py | silverfield/pythonsessions | bf5d82dded7616a5d6998da4eb445708c728794f | [
"MIT"
] | null | null | null | s09_files_and_random/solutions/random_walking_simple.py | silverfield/pythonsessions | bf5d82dded7616a5d6998da4eb445708c728794f | [
"MIT"
] | null | null | null | __author__ = 'ferrard'
# ---------------------------------------------------------------
# Imports
# ---------------------------------------------------------------
import scipy as sp
import random
import time
# ---------------------------------------------------------------
# Class - Graph
# ---------------------------------------------------------------
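# The WalkableGraph class used in main() was lost in extraction. A minimal,
# hypothetical stub inferred only from the call sites below; the "ghana.txt"
# format (one line per city: "City Neighbour1 Neighbour2 ...") is an
# assumption, not the verified original.
class WalkableGraph:
    def __init__(self, path):
        self._adj = {}
        with open(path) as f:
            for line in f:
                parts = line.split()
                if parts:
                    self._adj[parts[0]] = parts[1:]

    def print(self):
        for city in sorted(self._adj):
            print(city, '->', ', '.join(self._adj[city]))

    def random_walk(self, start, steps, sleep_between_steps=0):
        city = start
        for _ in range(steps):
            print(city)
            city = random.choice(self._adj[city])
            time.sleep(sleep_between_steps)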
# ---------------------------------------------------------------
# Main
# ---------------------------------------------------------------
def main():
random.seed()
g = WalkableGraph('ghana.txt')
g.print()
print()
print("Let's do some walking")
k = 1000
g.random_walk("CapeCoast", k, 0)
# g.probs_after_k_steps(k)
if __name__ == '__main__':
main()
| 34.336066 | 116 | 0.467176 |
4bbef0cf8a34a2518357110d8f321604ae40180d | 337 | py | Python | 51_60/day-60/forms/main.py | srakhe/100-days-py | 4d99ab35eb1376d2f8722c42e0bf98acc18fba20 | [
"MIT"
] | null | null | null | 51_60/day-60/forms/main.py | srakhe/100-days-py | 4d99ab35eb1376d2f8722c42e0bf98acc18fba20 | [
"MIT"
] | null | null | null | 51_60/day-60/forms/main.py | srakhe/100-days-py | 4d99ab35eb1376d2f8722c42e0bf98acc18fba20 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request
app = Flask(__name__)
if __name__ == '__main__':
app.run()
| 18.722222 | 95 | 0.652819 |
4bbf1d6eb8af61adb06a84718e97dce8dddb1ac0 | 6,094 | py | Python | spartify/settings.py | cl1ckname/Spartify | 3c45236e3f8803af9d01ac638e3d10a834ab7b7d | [
"Apache-2.0"
] | 3 | 2021-07-26T15:43:20.000Z | 2022-02-11T17:22:31.000Z | spartify/settings.py | cl1ckname/Spartify | 3c45236e3f8803af9d01ac638e3d10a834ab7b7d | [
"Apache-2.0"
] | 2 | 2021-07-08T14:25:22.000Z | 2021-08-19T18:17:14.000Z | spartify/settings.py | cl1ckname/Spartify | 3c45236e3f8803af9d01ac638e3d10a834ab7b7d | [
"Apache-2.0"
] | 1 | 2021-08-19T18:17:48.000Z | 2021-08-19T18:17:48.000Z | """
Django settings for spartify project.
Generated by 'django-admin startproject' using Django 2.2.12.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
from dotenv import load_dotenv
load_dotenv()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = int(os.environ.get("DEBUG", default=1))
ALLOWED_HOSTS = os.environ.get("DJANGO_ALLOWED_HOSTS",'').split(" ") + ['*', '192.168.43.72', '192.168.0.53', '0.0.0.0']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'social_core.backends.spotify.SpotifyOAuth2',
)
AUTH_USER_MODEL = 'backend.User'
SOCIAL_AUTH_USER_MODEL = 'backend.User'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'channels',
'social_django',
'backend',
'lobby'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# 'backend.middlewares.ApiMiddleware',
]
ROOT_URLCONF = 'spartify.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'spartify.wsgi.application'
ASGI_APPLICATION = 'spartify.routing.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
"ENGINE": os.environ.get("SQL_ENGINE", "django.db.backends.sqlite3"),
"NAME": os.environ.get("SQL_DATABASE", os.path.join(BASE_DIR, "db.sqlite3")),
"USER": os.environ.get("SQL_USER", "user"),
"PASSWORD": os.environ.get("SQL_PASSWORD", "password"),
"HOST": os.environ.get("SQL_HOST", "localhost"),
"PORT": os.environ.get("SQL_PORT", "5432"),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
STATIC_URL = 'staticfiles/'
STATIC_ROOT = os.path.join(BASE_DIR, STATIC_URL)
SOCIAL_AUTH_SPOTIFY_KEY = os.environ['SOCIAL_AUTH_SPOTIFY_KEY']
SOCIAL_AUTH_SPOTIFY_SECRET = os.environ['SOCIAL_AUTH_SPOTIFY_SECRET']
SOCIAL_AUTH_URL_NAMESPACE = 'social'
SOCIAL_AUTH_SPOTIFY_SCOPE = ['user-read-email','user-read-private', 'user-read-playback-state', 'user-modify-playback-state']
# SOCIAL_AUTH_LOGIN_REDIRECT_URL = 'http://{}/complete/spotify/' % os.getenv('HOST')
LOGIN_REDIRECT_URL = 'dashboard'
LOGIN_URL = 'login'
DEFAULT_AUTO_FIELD='django.db.models.AutoField'
SOCIAL_AUTH_PIPELINE = (
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.auth_allowed',
'social_core.pipeline.social_auth.social_user',
'social_core.pipeline.user.get_username',
'social_core.pipeline.user.create_user',
'social_core.pipeline.social_auth.associate_user',
'social_core.pipeline.social_auth.load_extra_data',
'social_core.pipeline.user.user_details',
'backend.pipeline.save_access_token', #save token on login,
)
QUEUE_SESSION_ID = 'queue'
SESSION_EXPIRE_AT_BROWSER_CLOSE = 15
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'api_formatter': {
'format': '{username} -- {endpoint} -- {status_code:d}: {message}',
'style': '{',
},
'lobby_formatter': {
'format': '{id}--{username}: {message} -- {asctime}',
'style': '{',
},
},
'handlers': {
'api_errors': {
'class': 'logging.FileHandler',
'filename': 'logs/api_errors.log',
'formatter': 'api_formatter',
'level': 'ERROR',
},
},
'loggers':{
'backend': {
'handlers': ['api_errors'],
},
},
}
REDIS_HOST = os.environ.get("REDIS_HOST", '127.0.0.1')
REDIS_PORT = 6379
REDIS_DB = 0
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
"CONFIG": {
"hosts": [(REDIS_HOST, REDIS_PORT)],
},
}
} | 29.019048 | 125 | 0.675583 |
4bc09df45d93aefe38be329327bcf363df1f3d3e | 7,018 | py | Python | bin/tdgwgeo2csv.py | Bauble/bauble.api | 183c97fda076ea870e21e70ecf89a2a94a7f5722 | [
"BSD-3-Clause"
] | null | null | null | bin/tdgwgeo2csv.py | Bauble/bauble.api | 183c97fda076ea870e21e70ecf89a2a94a7f5722 | [
"BSD-3-Clause"
] | 1 | 2015-02-05T13:15:00.000Z | 2015-02-05T13:15:00.000Z | bin/tdgwgeo2csv.py | Bauble/bauble.api | 183c97fda076ea870e21e70ecf89a2a94a7f5722 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#
# tdwggeo2csv.py
#
# Description: convert TDWG plant distribution files out of the box to a single
# CSV file
#
# TODO: should create new id's for each entry and have a tdwg_code for
# each so we can maintain as much data as possible
# TODO: we should probably include the original text files in bauble
# and run the conversion script on build
# TODO: add a notes column to geography so we carry over the extra
# geography data(kew regions, notes, etc.) and so that we can add
# notes to them in bauble
import codecs
import os
import re
from optparse import OptionParser
# l1 - Continent, tblLevel1.txt, UTF-8
# l2 - Region, tblLevel2.txt, UTF-8
# l3 - BotanicalCountry, tblLevel3.txt, ISO-8859-15
# l4 - BaseUnit, tblLevel4.txt, ISO-8859-15
# gazette (places), tblGazette.txt, ISO-8859-15
parser = OptionParser()
parser.add_option('-d', '--directory', dest='directory',
help='directory of WGS txt files', metavar='DIR')
(options, args) = parser.parse_args()
if not options.directory:
parser.error('directory required')
cwd, _dummy = os.path.split(__file__)
src_dir = options.directory
# converted rows organized by tdwg_code so we can resolve parents
converted_rows = {}
id_ctr = 1
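# NOTE: the conversion helpers and main() from the original script were lost in
# extraction, although main() is still called below. A minimal, clearly
# hypothetical sketch that at least opens one of the tables listed above with
# its declared encoding (the row handling is an assumption, not the original
# logic):
def main():
    infile = codecs.open(os.path.join(src_dir, 'tblLevel1.txt'),
                         encoding='utf-8', errors='replace')
    for line in infile:
        pass  # the original script parses each row and fills converted_rows
    infile.close()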
if __name__ == "__main__":
main()
| 31.470852 | 98 | 0.578655 |
4bc28189f37d50450206554fd6ab1753bd171778 | 7,386 | py | Python | getters/haproxy_haproxylogs.py | gunny26/datalogger | 7bd29ab88f2e2749284d80a6a834c94c0955a7e0 | ["Apache-2.0"] | null | null | null | getters/haproxy_haproxylogs.py | gunny26/datalogger | 7bd29ab88f2e2749284d80a6a834c94c0955a7e0 | ["Apache-2.0"] | null | null | null | getters/haproxy_haproxylogs.py | gunny26/datalogger | 7bd29ab88f2e2749284d80a6a834c94c0955a7e0 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
"""
Program to read and parse haproxy logs to put them in shape to upload to DataLogger.
The input data should be sorted by date, and finished:
the uploaded data is immediately split into a TimeseriesArray, so no further data
for that day can be appended.
"""
import os
import sys
import gzip
import logging
logging.basicConfig(level=logging.DEBUG)
import datetime
import zlib
import requests
import StringIO
import argparse
# own modules
from datalogger import DataLoggerWeb as DataLoggerWeb
import tilak_haproxylog
def generate_datalogger_csv(logdir, datestring, keys, values, ts_keyname):
"""
create CSV like file with StringIO
"""
if datestring == datetime.date.today().isoformat():
logging.error("todays Logs are actually written and cannot used in datalogger")
return
headers = [ts_keyname, ] + list(keys) + list(values)
linebuffer = []
linebuffer.append("\t".join(headers))
filename = os.path.join(logdir, "haproxylog_%s.gz" % datestring)
logging.info("parsing file %s", filename)
try:
parser = parser_generator(keys, values, gzip.open(filename, "rb"))
for line in aggregator(keys, values, ts_keyname, parser):
linebuffer.append(line)
except IOError as exc:
logging.exception(exc)
return StringIO.StringIO("\n".join(linebuffer))
def datestring_to_date(datestring):
"""
convert string in format YYYY-MM-DD into date object
"""
year, month, day = datestring.split("-")
date = datetime.date(year=int(year), month=int(month), day=int(day))
return date
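# NOTE: parser_generator, aggregator and datewalk are used in this file, but
# their definitions were lost in extraction. A minimal datewalk sketch under
# assumed semantics (yield every datestring from start to stop, inclusive):
def datewalk(startdate, stopdate):
    date = datestring_to_date(startdate)
    stop = datestring_to_date(stopdate)
    while date <= stop:
        yield date.isoformat()
        date += datetime.timedelta(1)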
def main():
"""
what do you think, what main should do
"""
yesterday_datestring = (datetime.date.today() - datetime.timedelta(1)).isoformat()
parser = argparse.ArgumentParser(description='generate TimeseriesArrays on local backend')
parser.add_argument('--url', default="https://datalogger-api.tirol-kliniken.cc/DataLogger", help="url of DataLogger Webapplication")
parser.add_argument('--logdir', default="/data1/haproxy_daily/", help="directory where to find day sorted haproxylogs")
parser.add_argument("-b", '--back', help="how many days back from now")
parser.add_argument("-s", '--startdate', help="start date in isoformat YYY-MM-DD")
parser.add_argument("-e", '--enddate', default=yesterday_datestring, help="stop date in isoformat YYY-MM-DD")
parser.add_argument("-q", '--quiet', action='store_true', help="set to loglevel ERROR")
parser.add_argument("-v", '--verbose', action='store_true', help="set to loglevel DEBUG")
args = parser.parse_args()
if args.quiet is True:
logging.getLogger("").setLevel(logging.ERROR)
if args.verbose is True:
logging.getLogger("").setLevel(logging.DEBUG)
if (args.back is not None) == (args.startdate is not None):
logging.error("option -b and -e are mutual exclusive, use only one")
sys.exit(1)
startdate = None
if args.back is not None:
startdate = (datetime.date.today() - datetime.timedelta(int(args.back))).isoformat()
elif args.startdate is not None:
startdate = args.startdate
else:
logging.error("you have to provide either -b or -s")
sys.exit(1)
# lets get started
datalogger = DataLoggerWeb(args.url)
project = "haproxy"
tablename = "http_host"
baseurl = "%s/upload_raw_file/" % args.url
logdir = args.logdir # where to find haproxy logs
keys = ("http_host", )
values = ("bytes_read", "rsp_1xx", "rsp_2xx", "rsp_3xx", "rsp_4xx", "rsp_5xx", "rsp_other", "srv_queue", "backend_queue", "actconn", "feconn", "beconn", "srv_conn", "retries", "tq", "tw", "tc", "tr", "tt", "hits")
ts_keyname = "ts"
for datestring in datewalk(startdate, args.enddate):
caches = datalogger.get_caches(project, tablename, datestring)
if caches["tsa"]["raw"] is not None:
logging.info("Skipping this datestring, raw data is already available")
continue
try:
stringio = generate_datalogger_csv(logdir, datestring, keys, values, ts_keyname)
#upload data
files = {'myfile': stringio}
url = "/".join((baseurl, project, tablename, datestring))
logging.info("calling %s", url)
response = requests.post(url, files=files)
print response.content
except StandardError as exc:
logging.error("Exception on file datestring %si, skipping this date", datestring)
except zlib.error as exc:
logging.error(exc)
if __name__ == "__main__":
main()
| 40.80663 | 217 | 0.627268 |
4bc441d80eb90948270b0c67a69acd09e054bf96 | 793 | py | Python | packages/jobs/statuses.py | NHSDigital/list-reconciliation | 37b1ebe99a64275e23b0e7fb6a89415b92d14306 | ["MIT"] | 4 | 2021-06-25T08:28:54.000Z | 2021-12-16T11:03:42.000Z | packages/jobs/statuses.py | NHSDigital/list-reconciliation | 37b1ebe99a64275e23b0e7fb6a89415b92d14306 | ["MIT"] | 184 | 2021-06-24T15:27:08.000Z | 2022-03-17T12:44:28.000Z | packages/jobs/statuses.py | NHSDigital/list-reconciliation | 37b1ebe99a64275e23b0e7fb6a89415b92d14306 | ["MIT"] | 3 | 2021-11-05T10:21:44.000Z | 2022-03-04T14:29:24.000Z |
from enum import Enum
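# NOTE: the enum definitions of this module were lost in extraction. A purely
# hypothetical sketch of the kind of job-status enum the module name suggests:
# class JobStatus(Enum):
#     PENDING = "PENDING"
#     COMPLETE = "COMPLETE"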
| 18.880952 | 61 | 0.675914 |
4bc544909f30548d56d19ceee6f586966f0cd714 | 843 | py | Python | DevOps/Data_Science_in_Production/cap02/15_scikit-learn.py | unimauro/Courses | 81e5b9c4cbc9b875eff82f96bda7d21ec4f258b2 | ["Apache-2.0"] | 1 | 2020-07-25T04:56:55.000Z | 2020-07-25T04:56:55.000Z | DevOps/Data_Science_in_Production/cap02/15_scikit-learn.py | unimauro/Courses | 81e5b9c4cbc9b875eff82f96bda7d21ec4f258b2 | ["Apache-2.0"] | 2 | 2020-06-15T04:42:00.000Z | 2021-08-29T03:48:28.000Z | DevOps/Data_Science_in_Production/cap02/15_scikit-learn.py | unimauro/Courses | 81e5b9c4cbc9b875eff82f96bda7d21ec4f258b2 | ["Apache-2.0"] | null | null | null |
import pandas as pd
from sklearn.linear_model import LogisticRegression
import mlflow
import mlflow.sklearn
import flask
model_path = "models/logit_games_v1"
model = mlflow.sklearn.load_model(model_path)
app = flask.Flask(__name__)
# NOTE: the route decorator and function signature below were reconstructed;
# the original handler definition was lost in extraction, so the "/predict"
# endpoint name is an assumption.
@app.route("/predict", methods=["GET", "POST"])
def predict():
    data = {"success": False}
    params = flask.request.args
    if "G1" in params.keys():
        new_row = {"G1": params.get("G1"), "G2": params.get("G2"),
                   "G3": params.get("G3"), "G4": params.get("G4"),
                   "G5": params.get("G5"), "G6": params.get("G6"),
                   "G7": params.get("G7"), "G8": params.get("G8"),
                   "G9": params.get("G9"), "G10": params.get("G10")}
        new_x = pd.DataFrame.from_dict(new_row,
                                       orient="index").transpose()
        data["response"] = str(model.predict_proba(new_x)[0][1])
        data["success"] = True
    return flask.jsonify(data)
if __name__ == '__main__':
app.run(host='0.0.0.0')
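# Illustrative request against the endpoint reconstructed above (route name is
# an assumption; G3..G9 are elided here for brevity, the real call needs all ten):
#   curl "http://localhost:5000/predict?G1=1&G2=0&G10=1"
# which returns JSON of the form {"response": "<probability>", "success": true}.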
| 31.222222 | 58 | 0.688019 |
298a78605da6ac7b5a8526389d33bd97829a9e95 | 12,607 | py | Python | tornado_sockets/views/timeseries.py | willjschmitt/joulia-webserver | 712decb749c2d1bda71af49ecab245378bf30078 | ["FTL"] | null | null | null | tornado_sockets/views/timeseries.py | willjschmitt/joulia-webserver | 712decb749c2d1bda71af49ecab245378bf30078 | ["FTL"] | 95 | 2016-08-04T01:59:37.000Z | 2021-06-10T18:41:46.000Z | tornado_sockets/views/timeseries.py | willjschmitt/joulia-webserver | 712decb749c2d1bda71af49ecab245378bf30078 | ["FTL"] | null | null | null |
"""Handles websockets and asynchronous endpoints provided by Tornado instead
of Django, but use the Django model framework for a database ORM.
"""
import datetime
import functools
import json
import logging
import tornado.escape
from tornado.ioloop import IOLoop
import tornado.web
import tornado.websocket
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
from rest_framework.authtoken.models import Token
from rest_framework.utils import model_meta
from brewery.models import AssetSensor
from brewery.models import RecipeInstance
from brewery.models import TimeSeriesDataPoint
from brewery.serializers import TimeSeriesDataPointSerializer
from joulia.random import random_string
from tornado_sockets.views.django import DjangoAuthenticatedWebSocketHandler
LOGGER = logging.getLogger(__name__)
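# NOTE: in the original module, new_data() below is a method of a websocket
# handler class built on DjangoAuthenticatedWebSocketHandler; the enclosing
# class definition was lost in extraction, which is why the function takes
# `self` and references self.source_id.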
def new_data(self, parsed_message):
"""Handles a new data point request.
Args:
parsed_message: Data received from websocket.
"""
LOGGER.debug('New data received from %s: %s.', self.get_current_user(),
parsed_message)
data = parsed_message
data["source"] = self.source_id
serializer = TimeSeriesDataPointSerializer(data=data)
serializer.is_valid(raise_exception=True)
serializer.save()
| 39.396875 | 80 | 0.649005 |
298cc1131b4fa0f1be5f4becb01286a44e8b0a9d | 66 | py | Python | report/create_db.py | Kellel/reports | 975e99396301e87176a38dd440a273c9319b3e22 | ["BSD-3-Clause"] | null | null | null | report/create_db.py | Kellel/reports | 975e99396301e87176a38dd440a273c9319b3e22 | ["BSD-3-Clause"] | null | null | null | report/create_db.py | Kellel/reports | 975e99396301e87176a38dd440a273c9319b3e22 | ["BSD-3-Clause"] | null | null | null |
from models import Base, engine
Base.metadata.create_all(engine)
| 16.5 | 32 | 0.818182 |
298ede4e030cbedbbcf9ef9a22b8209288395ba1 | 1,751 | py | Python | plugins/train/model/dfaker.py | aaman123/faceswap | a5825c3457b062c1824ef3f8b02e4f3fa4c2217f | ["MIT"] | 2 | 2021-11-11T08:29:01.000Z | 2021-11-11T08:34:50.000Z | plugins/train/model/dfaker.py | aaman123/faceswap | a5825c3457b062c1824ef3f8b02e4f3fa4c2217f | ["MIT"] | null | null | null | plugins/train/model/dfaker.py | aaman123/faceswap | a5825c3457b062c1824ef3f8b02e4f3fa4c2217f | ["MIT"] | null | null | null |
#!/usr/bin/env python3
""" DFaker Model
Based on the dfaker model: https://github.com/dfaker """
from keras.initializers import RandomNormal
from keras.layers import Input
from lib.model.nn_blocks import Conv2DOutput, UpscaleBlock, ResidualBlock
from .original import Model as OriginalModel, KerasModel
| 38.911111 | 85 | 0.656768 |
29902382e677a01b98fcb79346e95e0a9cada7e6 | 2,459 | py | Python | classify/train.py | gallupliu/QA | 0e284dd17e27ea9384a1e4d7a4c206eb95e4bf7f | ["Apache-2.0"] | 3 | 2017-09-06T07:10:05.000Z | 2019-08-01T03:27:39.000Z | classify/train.py | gallupliu/QA | 0e284dd17e27ea9384a1e4d7a4c206eb95e4bf7f | ["Apache-2.0"] | 2 | 2018-01-25T14:46:40.000Z | 2018-01-25T14:53:13.000Z | classify/train.py | gallupliu/QA | 0e284dd17e27ea9384a1e4d7a4c206eb95e4bf7f | ["Apache-2.0"] | null | null | null |
# encoding: utf-8
"""
@author: gallupliu
@contact: gallup-liu@hotmail.com
@version: 1.0
@license: Apache Licence
@file: train.py
@time: 2018/3/5 22:58
"""
import tensorflow as tf
from classify.dataset import data_utils
from sklearn.model_selection import train_test_split
from classify.model import TextCNN
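# NOTE: dataset_input_fn is called below but its definition was lost in
# extraction. A minimal sketch under assumed semantics (slice the id/label
# arrays, shuffle and batch them as a TF 1.x tf.data pipeline):
def dataset_input_fn(ids, labels, batch_size):
    """Hypothetical reconstruction of the missing input pipeline."""
    dataset = tf.data.Dataset.from_tensor_slices((ids, labels))
    return dataset.shuffle(buffer_size=len(ids)).batch(batch_size)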
if __name__ == "__main__":
text,labels = data_utils.loadfile('./data_with_label.csv')
word2idx, vocab = data_utils.load_embedding('./dataset/test_cut.txt', './dataset/wiki_50.model')
ids = data_utils.get_sentence_ids(text, word2idx)
train_ids,test_ids,train_labels,test_labels = train_test_split(ids,labels,test_size=0.1)
# print(len(text),type(text))
# max_length = count_length(text)
# print(max_length)
# train_word2vec()
# print(type(text))
# print(list(word2idx.keys()))
# dataset = tf.data.Dataset.from_tensor_slices((ids, train_labels))
# iterator = dataset.make_initializable_iterator()
# next_element = iterator.get_next()
train_dataset = dataset_input_fn(train_ids, train_labels, 100)
val_dataset = dataset_input_fn(train_ids, train_labels, 100)
iterator = tf.data.Iterator.from_structure(train_dataset.output_types,train_dataset.output_shapes)
next_element,labels = iterator.get_next()
train_iterator_init_op = iterator.make_initializer(train_dataset)
val_iterator_init_op = iterator.make_initializer(val_dataset)
with tf.Session() as sess:
# sess.run(iterator.initializer)
# print(sess.run(next_element))
model = TextCNN(next_element,labels,vocab,120,3,[1,2,3,5],512)
sess.run(tf.global_variables_initializer())
# _,acc,loss = sess.run([model.train_op,model.accuracy,model.loss])
# print(acc,loss)
for _ in range(10):
#
sess.run(train_iterator_init_op)
feed_dict = {model.dropout_keep_prob:1.0}
while True:
try:
_, acc, loss = sess.run([model.train_op, model.accuracy, model.loss],feed_dict=feed_dict)
print(acc,loss)
# print(sess.run(next_element),sess.run(labels))
except tf.errors.OutOfRangeError:
break
| 33.684932 | 109 | 0.684831 |
2991579a0641f47ea260ec96e0a53c12f4df3dbf | 342 | py | Python | authors/apps/author_follows/urls.py | andela/ah-backend-dojo | f2b14f15c4af906da846cafe722f13868d58371f | ["BSD-3-Clause"] | 3 | 2019-05-01T10:41:09.000Z | 2021-04-25T22:17:20.000Z | authors/apps/author_follows/urls.py | andela/ah-backend-dojo | f2b14f15c4af906da846cafe722f13868d58371f | ["BSD-3-Clause"] | 24 | 2019-04-23T14:56:21.000Z | 2021-12-13T19:58:37.000Z | authors/apps/author_follows/urls.py | andela/ah-backend-dojo | f2b14f15c4af906da846cafe722f13868d58371f | ["BSD-3-Clause"] | 4 | 2019-06-29T10:40:32.000Z | 2022-01-04T11:44:53.000Z |
from django.urls import path
from .views import FollowStatsViews, AuthorFollowViews
urlpatterns = [
# /authors/followers/ or ../following/
path("<str:follow_state>/", FollowStatsViews.as_view(), name="follows"),
# /authors/<author_username>/follow
path("<str:username>/follow/", AuthorFollowViews.as_view(), name="follow")
]
 | 34.2 | 78 | 0.719298 |
2992bbf3c6e1e4c6fcb24c568c080fff0f59e86b | 2,299 | py | Python | src/cone/app/tests/test_browser_login.py | lenadax/cone.app | b25c55aedb85e45a962003d2767a22a927cc61c0 | ["BSD-3-Clause"] | 1 | 2022-03-13T17:51:09.000Z | 2022-03-13T17:51:09.000Z | src/cone/app/tests/test_browser_login.py | lenadax/cone.app | b25c55aedb85e45a962003d2767a22a927cc61c0 | ["BSD-3-Clause"] | 1 | 2021-08-06T08:12:00.000Z | 2021-08-06T08:12:00.000Z | src/cone/app/tests/test_browser_login.py | lenadax/cone.app | b25c55aedb85e45a962003d2767a22a927cc61c0 | ["BSD-3-Clause"] | null | null | null |
from cone.app import get_root
from cone.app import security
from cone.app import testing
from cone.app.browser.login import login_view
from cone.app.browser.login import logout_view
from cone.tile import render_tile
from cone.tile.tests import TileTestCase
from webob.response import Response
from webob.exc import HTTPFound
| 40.333333 | 94 | 0.660722 |
29934fcb2bb4b9dd5b0dcf07accd0d89e7187b95 | 752 | py | Python | View/telaEditarControle.py | IuriBritoDev/TKINO | 3c689788324bd5badc84c7969f331b076046c211 | ["MIT"] | null | null | null | View/telaEditarControle.py | IuriBritoDev/TKINO | 3c689788324bd5badc84c7969f331b076046c211 | ["MIT"] | null | null | null | View/telaEditarControle.py | IuriBritoDev/TKINO | 3c689788324bd5badc84c7969f331b076046c211 | ["MIT"] | null | null | null |
from tkinter import *
 | 32.695652 | 96 | 0.694149 |
29955858830022ac8b0ab1ecf8622aef64dde5f8 | 395 | py | Python | yao_framework/__init__.py | QuantumBFS/yao-python | c877b3c4f920e76858021b6af8728f839d88fc1d | ["Apache-2.0"] | 3 | 2019-12-04T16:40:55.000Z | 2021-12-16T04:28:59.000Z | yao_framework/__init__.py | QuantumBFS/yao-python | c877b3c4f920e76858021b6af8728f839d88fc1d | ["Apache-2.0"] | null | null | null | yao_framework/__init__.py | QuantumBFS/yao-python | c877b3c4f920e76858021b6af8728f839d88fc1d | ["Apache-2.0"] | 2 | 2021-05-07T01:17:50.000Z | 2021-12-16T04:32:31.000Z |
# workaround static linked python
from julia.api import Julia
__julia__ = Julia(compiled_modules=False)
import os
import sys
import subprocess
from .wrappers import apply
script_dir = os.path.dirname(os.path.realpath(__file__))
def install():
"""
Install Julia packages required for yao-framework.
"""
subprocess.check_call(['julia', os.path.join(script_dir, 'install.jl')])
| 21.944444 | 76 | 0.744304 |
29964b779c4f66694fdf10686261f2a4a69976ee | 4,531 | py | Python | src/multiuserpad/twitchutil.py | codingwithsomeguy/multiuserpad | caca02bb3f98e855a0980b8ac9947c05d5b89463 | ["MIT"] | 4 | 2020-04-14T03:25:06.000Z | 2020-11-03T14:30:20.000Z | src/multiuserpad/twitchutil.py | codingwithsomeguy/multiuserpad | caca02bb3f98e855a0980b8ac9947c05d5b89463 | ["MIT"] | null | null | null | src/multiuserpad/twitchutil.py | codingwithsomeguy/multiuserpad | caca02bb3f98e855a0980b8ac9947c05d5b89463 | ["MIT"] | null | null | null |
# TODO: Generalize this with the discordutil module, factor out oauth
import logging
from urllib.parse import urlencode
import requests
import json
from flask import request, redirect, session
from creds import get_creds
from config import config
from sessionutil import invalidate_session
# TODO: this should be cached until expiration
# The server can reuse this for API requests
# TODO: factor this out with discord auth to oauth..auth
| 31.685315 | 82 | 0.63827 |
2996df27209f1d350199a6a54bcf14fae9ad1a1a | 6,173 | py | Python | src/pixel_sorting.py | in3rtial/imgsrt | 2dec237b7d797d9964ed874c4e4d72f7eb23eaf0 | ["CC0-1.0"] | 2 | 2015-11-08T09:22:30.000Z | 2020-10-15T03:42:24.000Z | src/pixel_sorting.py | in3rtial/imgsrt | 2dec237b7d797d9964ed874c4e4d72f7eb23eaf0 | ["CC0-1.0"] | null | null | null | src/pixel_sorting.py | in3rtial/imgsrt | 2dec237b7d797d9964ed874c4e4d72f7eb23eaf0 | ["CC0-1.0"] | null | null | null |
#!/usr/bin/python3
"""transliteration of Kim Asendorf's pixel sorting script"""
from copy import copy
from random import random, gauss
from PIL import Image
from numpy import int32
from argparse import ArgumentParser
# PROGRAM CONSTANTS
# rgb(103, 105, 128)
BLACK_VALUE = int32(-10000000)
# rgb(164, 114, 128)
WHITE_VALUE = int32((255 << 24) + (230 << 16) + (230 << 8) + 230)
BRIGHTNESS_VALUE = int32(30)
# PIXEL CONVERSION FUNCTIONS
def get_pixel_value(pixel):
"""rgb pixel to int32 processing representation"""
return(int32((((255 << 8) | pixel[0]) << 8 | pixel[1]) << 8 | pixel[2]))
def get_pixel_brightness(pixel):
"""rgb pixel to brightness value"""
return(max((pixel[0], pixel[1], pixel[2])) / 255 * 100)
# PIXEL FINDING FUNCTIONS
def get_next_satisfying(vector, starting_position, condition_fun):
"""find next pixel in the vector after starting position
that satisfies the condition (boolean)
return -1 if not found"""
position = starting_position
while(position < len(vector) and
not(condition_fun(vector[position]))):
position += 1
if(position == (len(vector) - 1) and
not(condition_fun(vector[position]))):
position = - 1
return(position)
# black mode
def get_next_black(vector, starting_position):
"""next black pixel"""
condition = lambda x: int32(get_pixel_value(x)) > BLACK_VALUE
return get_next_satisfying(vector, starting_position, condition)
def get_next_not_black(vector, starting_position):
"""next non black pixel"""
condition = lambda x: int32(get_pixel_value(x)) < BLACK_VALUE
return get_next_satisfying(vector, starting_position, condition)
# bright mode
def get_next_bright(vector, starting_position):
"""next bright pixel"""
condition = lambda x: int32(get_pixel_brightness(x)) < BRIGHTNESS_VALUE
return get_next_satisfying(vector, starting_position, condition)
def get_next_dark(vector, starting_position):
"""next dark pixel"""
condition = lambda x: int32(get_pixel_brightness(x)) > BRIGHTNESS_VALUE
return get_next_satisfying(vector, starting_position, condition)
# white mode
def get_next_white(vector, starting_position):
"""next white pixel"""
condition = lambda x: int32(get_pixel_value(x)) < WHITE_VALUE
return get_next_satisfying(vector, starting_position, condition)
def get_next_not_white(vector, starting_position):
"""next not white pixel"""
condition = lambda x: int32(get_pixel_value(x)) > WHITE_VALUE
return get_next_satisfying(vector, starting_position, condition)
FIND_FUNCTIONS = ((get_next_black, get_next_not_black), # black
(get_next_bright, get_next_dark), # bright
(get_next_white, get_next_not_white)) # white
# PIXEL SORTING FUNCTIONS
def sort_pixels(vector, mode=0, find=FIND_FUNCTIONS):
"""sort pixel in the given vector"""
assert(mode in (0, 1, 2)), "invalid use case"
vector = copy(vector)
position = 0
pos_end = None
while(position < len(vector)):
if((position == -1) or (pos_end == -1)):
break
position = find[mode][0](vector, position)
pos_end = find[mode][1](vector, position)
vector[position:pos_end] = sorted(vector[position:pos_end],
key=lambda x: get_pixel_value(x))
position = pos_end + 1
return(vector)
# IMAGE TRANSFORMATIONS
def to_vectors(rgb_image, row_or_col):
"""rgb image -> list of lists of RGB tuples"""
assert(rgb_image.mode == "RGB"), "must be a RGB image"""
assert(row_or_col in (0, 1)), "row = 0, col = 1"
vectors = []
x_size, y_size = rgb_image.size
if(row_or_col == 0):
for y_coord in range(0, y_size):
row = []
for x_coord in range(0, x_size):
row.append(rgb_image.getpixel((x_coord, y_coord)))
vectors.append(row)
else:
for x_coord in range(0, x_size):
col = []
for y_coord in range(0, y_size):
col.append(rgb_image.getpixel((x_coord, y_coord)))
vectors.append(col)
return(vectors)
# COMPLETE FUNCTIONS
def sort_image(image, row_or_col, mode=0, prob=1, avg_band_size=1):
"""input: (rgb image, row or column, sort mode, probability of sorting,
average band size for sorting)
output: sorted out image)"""
x_size, y_size = image.size
sigma = avg_band_size / 4
vectors = to_vectors(image, row_or_col)
new_vectors = []
position = 0
while(position < len(vectors)):
if(random() < prob):
# calculate the indices of the rows to sort
to_sort = []
coarseness = int(gauss(avg_band_size, sigma))
for index in range(position, position + coarseness):
if(index >= len(vectors)):
break
else:
to_sort.append(index)
for index in to_sort:
new_vectors.append(sort_pixels(vectors[index], mode))
position += coarseness
else:
new_vectors.append(vectors[position])
position += 1
new_image = []
if(row_or_col == 0):
for vector in new_vectors:
for (red, green, blue) in vector:
new_image.append(int(red))
new_image.append(int(green))
new_image.append(int(blue))
else:
for i in range(0, y_size):
for vector in new_vectors:
(red, green, blue) = vector[i]
new_image.append(int(red))
new_image.append(int(green))
new_image.append(int(blue))
    return(Image.frombytes('RGB', (x_size, y_size), bytes(new_image)))  # Image.fromstring was removed from Pillow; frombytes is the current API
__all__ = ["sort_image"]
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-i", dest="input_image_file", required=True, type=str, help="input")
parser.add_argument("-o", dest="output_image_file", required=True, type=str, help="output")
args = parser.parse_args()
image = Image.open(args.input_image_file)
sort_image(image, 0).save(args.output_image_file)
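    # Example invocation (illustrative file names):
    #   python3 pixel_sorting.py -i input.png -o sorted.png
    # sorts every row (row_or_col=0) of input.png in "black" mode and saves it.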
| 32.661376 | 95 | 0.638749 |
29978909888062a7973e1bdbe5b82311fd8d9b27 | 6,173 | py | Python | main.py | ml4design/text-processing-module | f1bfe1a49d58156e9e48e5ef69b980f89a5981ea | ["MIT"] | null | null | null | main.py | ml4design/text-processing-module | f1bfe1a49d58156e9e48e5ef69b980f89a5981ea | ["MIT"] | null | null | null | main.py | ml4design/text-processing-module | f1bfe1a49d58156e9e48e5ef69b980f89a5981ea | ["MIT"] | null | null | null |
import pandas as pd
from preprocessing import preprocess
from wordclouds import wordcloud, find_by_word
from sentiment_analysis import calculate_sentiment, find_by_sentiment
import nltk
import os
import tempfile
from topic_modelling import lda_topic_model, show_topics, show_example_sentences_by_topic
os.environ["MPLCONFIGDIR"] = tempfile.gettempdir()
nltk.download('punkt')
nltk.download('stopwords')
pd.set_option('display.max_columns', None)
#####################################################
# READING THE DATA #
#####################################################
# In this tutorial we will mostly deal with comma separated files (CSV) (similar to the structure of Excel files). Each line of the file is a data record. Each record consists of one or more fields, separated by commas. Check here for more information https://en.wikipedia.org/wiki/Comma-separated_values
# reads the file named "students_eng.csv".
# If you want to read a different file you need to (1) upload it in replit and (2) change "students_eng.csv" to the name of the newly uploaded file. Here we use the Pandas library ("pd") to read our file and in return we get a Pandas Dataframe. For faster processing and experimentation you can also select different subsets of the file's content through the nrows parameter -> number of lines to read.
students_data = pd.read_csv("data/students_eng.csv")
# With the next line you can print the data you just read and see what a Pandas DataFrame looks like (it is quite similar to Excel)
print(students_data.head(3))
# As you can see the data is separated in columns. Let's see how we can get the data from a specific column. The following line allows us to get only the data inside the column named "student_needs". Other options are: study_programme, degree_programme, planned_grad_year, today_feeling, physical_health, student_needs, students_sugg_to_improve_wellbeing
students_data = students_data['student_needs']
#################################################
# TEXT PREPROCESSING #
#################################################
# Here we will pre-process our entire text collection.
# First, we need to merge all the different lines of the "comments" into one big corpus, so that we can later analyze it.
corpus = students_data.to_list()
print(corpus[0:5])
# Then we need to "preprocess" our text. To do so we use the following line of code (more details on what happens under the hood could be found in the "preprocessing.py" file - feel free to take a look at it).
# The following code makes all words lowercase, creates word tokens, removes stopwords, punctuation, and digits, and reduces inflected words to their word stem (stemming). Feel free to experiment by turning any of the following values from True to False. In addition, you can add extra words which you do not want to include in your analysis by adding them within the extra_stopwords brackets, e.g. extra_stopwords=["people"] would remove the word people from everywhere in the document. Hint: don't forget to use the quotes!
# tokens = [preprocess(sentence, lower=True, rem_punc=True, word_tokenization=True, rem_numb=True, rem_stopwords=True, stem=True, extra_stopwords = []) for sentence in students_data.to_list()]
# print(tokens)
#############################################
# WORD FREQUENCIES #
#############################################
# Word frequencies calculation is the most basic tool in text processing yet it gives a comprehensive picture of the content in your text collection. One the most ways to visualize word frequencies is WordCloud (which you've already seen if you opened Voyant)
# This function needs two things from you:
# 1. tokens -- the result of our preprocessing step
# 2. the name of the picture it will generate and save to your directory
# 3. Number of words to show
# wordcloud(words = tokens, name_of_output = 'wordcloud', num = 10)
# Text processing often requires working with examples, because words are often contextual and it is difficult to understand what is happening in your text collection. For this purpose, you can find documents by pieces of texts.
# This function needs two things from you:
# 1. tokens -- the result of our preprocessing step (it will look for examples in this collection)
# 2. a word or a phrase the text should include
# test = find_by_word(tokens, 'studi')
#print(test)
#############################################
# Sentiment analysis #
#############################################
# The aim of sentiment analysis is to calculate how emotional your texts are and what is the valence of these texts. In our example we use VADER (Valence Aware Dictionary and sEntiment Reasoner) but you can find other various sentiment analysis tools in the internet.
# VADER calculated how positive, neutral, and negative a text is. It also calculates a compound score which considers all three metrics to give you a precise measurement of the sentiment.
# This function requires only the preprocessed collection of texts
# sent_result = calculate_sentiment(tokens)
# print(sent_result)
# Now, when the sentiment scores are calculated, you can find the most interesting texts by looking at the documents with highest scores (in this example, we look at the 5 most positive documents).
# This function requires three things:
# 1. The result of sentiment calculation
# 2. What score you're interested in
# 3. Number of examples you want to get
# res = find_by_sentiment(df_with_scores = sent_result, score_type = 'pos', num_of_examples = 5)
# print(res)
#############################################
# TOPIC MODELING #
#############################################
# num_of_topics = 4
# word_num_per_topic = 5
# lda_model = lda_topic_model(tokens, topic_num=num_of_topics)
# show_topics(lda_model, word_num_per_topic )
# Check examples assigned to a particular topic ####
# num_of_examples = 5
# show_example_sentences_by_topic(corpus, tokens, lda_model, word_num_per_topic,topic_to_check=1, num_of_examp_to_show = num_of_examples)
 | 55.116071 | 524 | 0.706302 |
299875f6900cd7a8b095fbe70057acd505857f31 | 4,796 | py | Python | finetune/target_models/multifield.py | IndicoDataSolutions/finetune-transformer-lm | 3534658e5de281e5634c8481b0fb37635b0cb3af | ["MIT"] | null | null | null | finetune/target_models/multifield.py | IndicoDataSolutions/finetune-transformer-lm | 3534658e5de281e5634c8481b0fb37635b0cb3af | ["MIT"] | null | null | null | finetune/target_models/multifield.py | IndicoDataSolutions/finetune-transformer-lm | 3534658e5de281e5634c8481b0fb37635b0cb3af | ["MIT"] | null | null | null |
import copy
from finetune.errors import FinetuneError
from finetune.target_models.classifier import Classifier, ClassificationPipeline
from finetune.target_models.regressor import Regressor, RegressionPipeline
from finetune.base import BaseModel
| 38.677419 | 116 | 0.66347 |
2998b411809973174ac82478a06ef6fa40c371df | 157 | py | Python | db_s3_backup/db_interface/dump_protocol.py | saurabhariyan/db-s3-backup | 5b67737f43814f0841d47033c92825206a24e1a1 | ["MIT"] | 9 | 2015-08-04T00:54:46.000Z | 2021-08-29T04:21:13.000Z | db_s3_backup/db_interface/dump_protocol.py | saurabhariyan/db-s3-backup | 5b67737f43814f0841d47033c92825206a24e1a1 | ["MIT"] | 7 | 2015-05-28T15:57:15.000Z | 2017-01-25T11:29:28.000Z | db_s3_backup/db_interface/dump_protocol.py | saurabhariyan/db-s3-backup | 5b67737f43814f0841d47033c92825206a24e1a1 | ["MIT"] | 9 | 2015-05-28T13:09:25.000Z | 2021-02-12T04:57:04.000Z |
from exceptions import ValueError
| 26.166667 | 53 | 0.751592 |
299a6f26561daff35ded494f622e2b673df00a7d | 24,679 | py | Python | LPES-video/01.01-podstawy-terminala/01.01.04-pagery.py | opcode-eu-org-website/LPES-wyklady-src | dd4d413f2bb5560790b6365fa7e68e8f1a8a65b0 | ["MIT"] | null | null | null | LPES-video/01.01-podstawy-terminala/01.01.04-pagery.py | opcode-eu-org-website/LPES-wyklady-src | dd4d413f2bb5560790b6365fa7e68e8f1a8a65b0 | ["MIT"] | null | null | null | LPES-video/01.01-podstawy-terminala/01.01.04-pagery.py | opcode-eu-org-website/LPES-wyklady-src | dd4d413f2bb5560790b6365fa7e68e8f1a8a65b0 | ["MIT"] | 1 | 2021-07-03T16:43:47.000Z | 2021-07-03T16:43:47.000Z |
# Copyright (c) 2020-2021 Matematyka dla Ciekawych Świata (http://ciekawi.icm.edu.pl/)
# Copyright (c) 2020-2021 Robert Ryszard Paciorek <rrp@opcode.eu.org>
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
try: clipData
except NameError: clipData = []
clipData += [
{ 'section': 'displaying files\n split into pages' },
{
'console': [
[0.070374, "o", eduMovie.clear + eduMovie.prompt()],
[1.067533, "o", "m"],
[1.259512, "o", "a"],
[1.41949, "o", "n"],
[1.611479, "o", " "],
[1.92349, "o", "m"],
[2.115325, "o", "a"],
[2.243465, "o", "n"],
[2.435497, "o", " "],
[3.475488, "o", "\r\n"],
[3.54519, "o", "\u001b[?1049h\u001b[22;0;0t\u001b[?1h\u001b=\r"],
[3.585094, "o", "MAN(1) Manual pager utils MAN(1)\u001b[m\r\n\u001b[m\r\n\u001b[1mNAME\u001b[0m\u001b[m\r\n man - an interface to the on-line reference manuals\u001b[m\r\n\u001b[m\r\n\u001b[1mSYNOPSIS\u001b[0m\u001b[m\r\n \u001b[1mman\u001b[0m [\u001b[1m-C\u001b[0m \u001b[4mfile\u001b[24m] [\u001b[1m-d\u001b[0m] [\u001b[1m-D\u001b[0m] [\u001b[1m--warnings\u001b[0m[=\u001b[4mwarnings\u001b[24m]] [\u001b[1m-R\u001b[0m \u001b[4mencoding\u001b[24m] [\u001b[1m-L\u001b[0m \u001b[4mlo\u001b[24m\u001b[m\r\n \u001b[4mcale\u001b[24m] [\u001b[1m-m\u001b[0m \u001b[4msystem\u001b[24m[,...]] [\u001b[1m-M\u001b[0m \u001b[4mpath\u001b[24m] [\u001b[1m-S\u001b[0m \u001b[4mlist\u001b[24m] [\u001b[1m-e\u001b[0m \u001b[4mextension\u001b[24m] [\u001b[1m-i\u001b[0m|\u001b[1m-I\u001b[0m]\u001b[m\r\n [\u001b[1m--regex\u001b[0m|\u001b[1m--wildcard\u001b[0m] [\u001b[1m--names-only\u001b[0m] [\u001b[1m-a\u001b[0m] [\u001b[1m-u\u001b[0m] [\u001b[1m--no-subpages\u001b[0m] [\u001b[1m-P\u001b[0m\u001b[m\r\n \u001b[4mpager\u001b[24m] [\u001b[1m-r\u001b[0m \u001b[4mprompt\u001b[24m] [\u001b[1m-7\u001b[0m] [\u001b[1m-E\u001b[0m \u001b[4mencoding\u001b[24m] [\u001b[1m--no-hyphenation\u001b[0m] [\u001b[1m--no-justifi\u001b[0m\u001b[m\r\n \u001b[1mcation\u001b[0m] [\u001b[1m-p\u001b[0m \u001b[4mstring\u001b[24m] [\u001b[1m-t\u001b[0m] [\u001b[1m-T\u001b[0m[\u001b[4mdevice\u001b[24m]] [\u001b[1m-H\u001b[0m[\u001b[4mbrowser\u001b[24m]] [\u001b[1m-X\u001b[0m[\u001b[4mdpi\u001b[24m]] [\u001b[1m-Z\u001b[0m]\u001b[m\r\n [[\u001b["],
[3.585479, "o", "4msection\u001b"],
[3.585788, "o", "[24m] \u001b[4mpage\u001b[24m[.\u001b[4msection\u001b[24m] ...] ...\u001b[m\r\n \u001b[1mman\u001b[0m \u001b[1m-k\u001b[0m [\u001b[4mapropos\u001b[24m \u001b[4moptions\u001b[24m] \u001b[4mregexp\u001b[24m ...\u001b[m\r\n \u001b[1mman\u001b[0m \u001b[1m-K\u001b[0m [\u001b[1m-w\u001b[0m|\u001b[1m-W\u001b[0m] [\u001b[1m-S\u001b[0m \u001b[4mlist\u001b[24m] [\u001b[1m-i\u001b[0m|\u001b[1m-I\u001b[0m] [\u001b[1m--regex\u001b[0m] [\u001b[4msection\u001b[24m] \u001b[4mterm\u001b[24m ...\u001b[m\r\n \u001b[1mman\u001b[0m \u001b[1m-f\u001b[0m [\u001b[4mwhatis\u001b[24m \u001b[4moptions\u001b[24m] \u001b[4mpage\u001b[24m ...\u001b[m\r\n \u001b[1mman\u001b[0m \u001b[1m-l\u001b[0m [\u001b[1m-C\u001b[0m \u001b[4mfile\u001b[24m] [\u001b[1m-d\u001b[0m] [\u001b[1m-D\u001b[0m] [\u001b[1m--warnings\u001b[0m[=\u001b[4mwarnings\u001b[24m]] [\u001b[1m-R\u001b[0m \u001b[4mencoding\u001b[24m] [\u001b[1m-L\u001b[0m\u001b[m\r\n \u001b[4mlocale\u001b[24m] [\u001b[1m-P\u001b[0m \u001b[4mpager\u001b[24m] [\u001b[1m-r\u001b[0m \u001b[4mprompt\u001b[24m] [\u001b[1m-7\u001b[0m] [\u001b[1m-E\u001b[0m \u001b[4mencoding\u001b[24m] [\u001b[1m-p\u001b[0m \u001b[4mstring\u001b[24m] [\u001b[1m-t\u001b[0m]\u001b[m\r\n [\u001b[1m-T\u001b[0m[\u001b[4mdevice\u001b[24m]] [\u001b[1m-H\u001b[0m[\u001b[4mbrowser\u001b[24m]] [\u001b[1m-X\u001b[0m[\u001b[4mdpi\u001b[24m]] [\u001b[1m-Z\u001b[0m] \u001b[4mfile\u001b[24m ...\u001b[m\r\n \u001b[1mman\u001b[0m \u001b[1m-w\u001b[0m|\u001b[1m-W\u001b[0m [\u001b[1m-C\u001b[0m \u001b[4mfile\u001b[24m] [\u001b[1m-d\u001b[0m] [\u001b[1m-D\u001b[0m] \u001b[4mpage\u001b[24m ...\u001b[m\r\n \u001b[1mman\u001b[0m \u001b[1m-c\u001b[0m [\u001b[1m"],
[3.586087, "o", "-C\u001b[0m \u001b[4mfile\u001b[24m] [\u001b[1m-d\u001b[0m] [\u001b[1m-D\u001b[0m] \u001b[4mpage\u001b[24m ...\u001b[m\r\n \u001b[1mman\u001b[0m [\u001b[1m-?V\u001b[0m]\u001b[m\r\n\u001b[m\r\n\u001b[1mDESCRIPTION\u001b[0m\u001b[m\r\n\u001b[7m Manual page man(1) line 1 (press h for help or q to quit)\u001b[27m\u001b[K"],
[5.099652, "o", "\r\u001b[K"],
[5.100412, "o", " \u001b[1mman\u001b[0m is the system's manual pager. Each \u001b[4mpage\u001b[24m argument given to \u001b[1mman\u001b[0m is\u001b[m\r\n normally the name of a program, utility or function. The \u001b[4mmanual\u001b[24m \u001b[4mpage\u001b[24m\u001b[m\r\n associated with each of these arguments is then found and displayed. A\u001b[m\r\n \u001b[4msection\u001b[24m, if provided, will direct \u001b[1mman\u001b[0m to look only in that \u001b[4msection\u001b[24m of\u001b[m\r\n the manual. The default action is to search in all of the available\u001b[m\r\n \u001b[4msections\u001b[24m following a pre-defined order (\"1 n l 8 3 2 3posix 3pm 3perl\u001b[m\r\n 3am 5 4 9 6 7\" by default, unless overridden by the \u001b[1mSECTION\u001b[0m directive\u001b[m\r\n in \u001b[4m/etc/manpath.config\u001b[24m), and to show only the first \u001b[4mpage\u001b[24m found, even if\u001b[m\r\n \u001b[4mpage\u001b[24m exists in several \u001b[4msections\u001b[24m.\u001b[m\r\n\u001b[m\r\n The table below shows the \u001b[4msection\u001b[24m numbers of the manual followed by the\u001b[m\r\n types of pages they contain.\u001b[m\r\n\u001b[m\r\n 1 Executable programs or shell commands\u001b[m\r"],
[5.10077, "o", "\n 2 System calls (functions provided by the kernel)\u001b[m\r\n 3 Library calls (functions within program libraries)\u001b[m\r\n 4 Special files (usually found in \u001b[4m/dev\u001b[24m)\u001b[m\r\n 5 File formats and conventions eg \u001b[4m/etc/passwd\u001b[24m\u001b[m\r\n 6 Games\u001b[m\r\n 7 Miscellaneous (including macro packages and conventions), e.g.\u001b[m\r\n \u001b[1mman\u001b[0m(7), \u001b[1mgroff\u001b[0m(7)\u001b[m\r\n 8 System administration commands (usually only for root)\u001b[m\r\n 9 Kernel routines [Non standard]\u001b[m\r\n\u001b[7m Manual page man(1) line 24 (press h for help or q to quit)\u001b[27m\u001b[K"],
[10.259702, "o", "\r\u001b[K"],
[10.260051, "o", "\u001b[m\r\n A manual \u001b[4mpage\u001b[24m consists of several sections.\u001b[m\r\n\u001b[m\r\n Conventional section names include \u001b[1mNAME\u001b[0m, \u001b[1mSYNOPSIS\u001b[0m, \u001b[1mCONFIGURATION\u001b[0m, \u001b[1mDE\u001b[0m\u001b[m\r\n \u001b[1mSCRIPTION\u001b[0m, \u001b[1mOPTIONS\u001b[0m, \u001b[1mEXIT\u001b[0m \u001b[1mSTATUS\u001b[0m, \u001b[1mRETURN\u001b[0m \u001b[1mVALUE\u001b[0m, \u001b[1mERRORS\u001b[0m, \u001b[1mENVIRONMENT\u001b[0m,\u001b[m\r\n"],
[10.260674, "o", " \u001b[1mFILES\u001b[0m, \u001b[1mVERSIONS\u001b[0m, \u001b[1mCONFORMING\u001b[0m \u001b[1mTO\u001b[0m, \u001b[1mNOTES\u001b[0m, \u001b[1mBUGS\u001b[0m, \u001b[1mEXAMPLE\u001b[0m, \u001b[1mAUTHORS\u001b[0m, and\u001b[m\r\n \u001b[1mSEE\u001b[0m \u001b[1mALSO\u001b[0m.\u001b[m\r\n\u001b[m\r\n The following conventions apply to the \u001b[1mSYNOPSIS\u001b[0m section and can be used\u001b[m\r\n as a guide in other sections.\u001b[m\r\n\u001b[m\r\n \u001b[1mbold\u001b[0m \u001b[1mtext\u001b[0m type exactly as shown.\u001b[m\r\n \u001b[4mitalic\u001b[24m \u001b[4mtext\u001b[24m replace with appropriate argument.\u001b[m\r\n [\u001b[1m-abc\u001b[0m] any or all arguments within [ ] are optional.\u001b[m\r\n \u001b[1m-a\u001b[0m|\u001b[1m-b\u001b[0m options delimited by | cannot be used together.\u001b[m\r\n\u001b[m\r\n"],
[10.261539, "o", " \u001b[4margument\u001b[24m ... \u001b[4margument\u001b[24m is repeatable.\u001b[m\r\n [\u001b[4mexpression\u001b[24m] ... entire \u001b[4mexpression\u001b[24m within [ ] is repeatable.\u001b[m\r\n\u001b[m\r\n Exact rendering may vary depending on the output device. For instance,\u001b[m\r\n man will usually not be able to render italics when running in a termi\u001b[m\r\n nal, and will typically use underlined or coloured text instead.\u001b[m\r\n\u001b[m\r\n\u001b[7m Manual page man(1) line 47 (press h for help or q to quit)\u001b[27m\u001b[K"],
["search + 0.467738", "o", "\r\u001b[K/"],
["search + 2.427696", "o", "\u001b[Km\bm"],
["search + 2.811736", "o", "\u001b[Ka\ba"],
["search + 3.187628", "o", "\u001b[Kn\bn"],
["search + 4.835791", "o", "\r\u001b[K"],
["search + 4.838372", "o", "\u001b[1;1H\u001b[m\r\n\u001b[2;1H A manual \u001b[4mpage\u001b[24m consists of several sections.\u001b[m\r\n\u001b[3;1H\u001b[m\r\n\u001b[4;1H Conventional section names include \u001b[1mNAME\u001b[0m, \u001b[1mSYNOPSIS\u001b[0m, \u001b[1mCONFIGURATION\u001b[0m, \u001b[1mDE\u001b[0m\u001b[m\r\n\u001b[5;1H \u001b[1mSCRIPTION\u001b[0m, \u001b[1mOPTIONS\u001b[0m, \u001b[1mEXIT\u001b[0m \u001b[1mSTATUS\u001b[0m, \u001b[1mRETURN\u001b[0m \u001b[1mVALUE\u001b[0m, \u001b[1mERRORS\u001b[0m, \u001b[1mENVIRONMENT\u001b[0m,\u001b[m\r\n\u001b[6;1H \u001b[1mFILES\u001b[0m, \u001b[1mVERSIONS\u001b[0m, \u001b[1mCONFORMING\u001b[0m \u001b[1mTO\u001b[0m, \u001b[1mNOTES\u001b[0m, \u001b[1mBUGS\u001b[0m, \u001b[1mEXAMPLE\u001b[0m, \u001b[1mAUTHORS\u001b[0m, and\u001b[m\r\n\u001b[7;1H \u001b[1mSEE\u001b[0m \u001b[1mALSO\u001b[0m.\u001b[m\r\n\u001b[8;1H\u001b[m\r\n\u001b[9;1H The following conventions apply to the \u001b[1mSYNOPSIS\u001b[0m section and can be used\u001b[m\r\n\u001b[10;1H as a guide in other sections.\u001b[m\r\n\u001b[11;1H\u001b[m\r\n\u001b[12;1H \u001b[1mbold\u001b[0m \u001b[1mtext\u001b[0m type exactly as shown.\u001b[m\r\n\u001b[13;1H \u001b[4mitalic\u001b[24m \u001b[4mtext\u001b[24m replace with appropriate argument.\u001b[m\r\n\u001b[14;1H [\u001b[1m-abc\u001b[0m] any or all arguments within [ ] are optional.\u001b[m\r\n\u001b[15;1H \u001b[1m-a\u001b[0m|\u001b[1m-b\u001b[0m"],
["search + 4.838671", "o", " "],
["search + 4.840168", "o", " options delimited by | cannot be used together.\u001b[m\r\n\u001b[16;1H\u001b[m\r\n\u001b[17;1H \u001b[4margument\u001b[24m ... \u001b[4margument\u001b[24m is repeatable.\u001b[m\r\n\u001b[18;1H [\u001b[4mexpression\u001b[24m] ... entire \u001b[4mexpression\u001b[24m within [ ] is repeatable.\u001b[m\r\n\u001b[19;1H\u001b[m\r\n\u001b[20;1H Exact rendering may vary depending on the output device. For instance,\u001b[m\r\n\u001b[21;1H man will usually not be able to render italics when running in a termi\u001b[m\r\n\u001b[22;1H nal, and will typically use underlined or coloured text instead.\u001b[m\r\n\u001b[23;1H\u001b[m\r\n\u001b[24;1H\u001b[1;1H\u001b[m\r\n\u001b[2;1H A \u001b[7mman\u001b[27mual \u001b[4mpage\u001b[24m consists of several sections.\u001b[m\r\n\u001b[3;1H\u001b[m\r\n\u001b[4;1H Conventional section names include \u001b[1mNAME\u001b[0m, \u001b[1mSYNOPSIS\u001b[0m, \u001b[1mCONFIGURATION\u001b[0m, \u001b[1mDE\u001b[0m\u001b[m\r\n\u001b[5;1H \u001b[1mSCRIPTION\u001b[0m, \u001b[1mOPTIONS\u001b[0m, \u001b[1mEXIT\u001b[0m \u001b[1mSTATUS\u001b[0m, \u001b[1mRETURN\u001b[0m \u001b[1mVALUE\u001b[0m, \u001b[1mERRORS\u001b[0m, \u001b[1mENVIRONMENT\u001b[0m,\u001b[m\r\n\u001b[6;1H \u001b[1mFILES\u001b[0m, \u001b[1mVERSIONS\u001b[0m, \u001b[1mCONFORMING\u001b[0m \u001b[1mTO\u001b[0m, \u001b[1mNOTES\u001b[0m, \u001b[1mBUGS\u001b[0m, "],
["search + 4.840432", "o", " \u001b[1mEXAMPLE\u001b"],
["search + 4.841096", "o", "[0m, \u001b[1mAUTHORS\u001b[0m, and\u001b[m\r\n\u001b[7;1H \u001b[1mSEE\u001b[0m \u001b[1mALSO\u001b[0m.\u001b[m\r\n\u001b[8;1H\u001b[m\r\n\u001b[9;1H The following conventions apply to the \u001b[1mSYNOPSIS\u001b[0m section and can be used\u001b[m\r\n\u001b[10;1H as a guide in other sections.\u001b[m\r\n\u001b[11;1H\u001b[m\r\n\u001b[12;1H \u001b[1mbold\u001b[0m \u001b[1mtext\u001b[0m type exactly as shown.\u001b[m\r\n\u001b[13;1H \u001b[4mitalic\u001b[24m \u001b[4mtext\u001b[24m replace with appropriate argument.\u001b[m\r\n\u001b[14;1H [\u001b[1m-abc\u001b[0m] any or all arguments within [ ] are optional.\u001b[m\r\n\u001b[15;1H \u001b[1m-a\u001b[0m|\u001b[1m-b\u001b[0m options delimited by | cannot be used together.\u001b[m\r\n\u001b[16;1H\u001b[m\r\n\u001b[17;1H \u001b[4margument\u001b[24m ... \u001b[4margument\u001b[24m is repeatable.\u001b[m\r\n\u001b[18;1H [\u001b[4mexpression\u001b[24m] ... entire \u001b[4mexpression\u001b[24m within [ ] is repeatable.\u001b[m\r\n\u001b[19;1H\u001b[m\r\n\u001b[20;1H Exact rendering may vary depending on the output device. For instance,\u001b[m\r\n\u001b[21;1H \u001b[7mman\u001b[27m will usually not be able to render italics when running in a termi\u001b[m\r\n\u001b[22;1H nal, and will ty"],
["search + 4.841183", "o", "pically use und"],
["search + 4.842076", "o", "erlined or coloured text instead.\u001b[m\r\n\u001b[23;1H\u001b[m\r\n\u001b[24;1H The com\u001b[7mman\u001b[27md or function illustration is a pattern that should match all\u001b[m\r\n\u001b[7m Manual page man(1) line 48 (press h for help or q to quit)\u001b[27m\u001b[K"],
["search + 9.491816", "o", "\r\u001b[K/\r\u001b[K"],
["search + 9.492785", "o", " possible invocations. In some cases it is advisable to illustrate sev\u001b[m\r\n eral exclusive invocations as is shown in the \u001b[1mSYNOPSIS\u001b[0m section of this\u001b[m\r\n \u001b[7mman\u001b[27mual page.\u001b[m\r\n\u001b[m\r\n\u001b[1mEXAMPLES\u001b[0m\u001b[m\r\n \u001b[1m\u001b[7mman\u001b[27m\u001b[0m \u001b[4mls\u001b[24m\u001b[m\r\n Display the \u001b[7mman\u001b[27mual page for the \u001b[4mitem\u001b[24m (program) \u001b[4mls\u001b[24m.\u001b[m\r\n\u001b[m\r\n \u001b[1m\u001b[7mman\u001b[27m\u001b[0m \u001b[4m\u001b[7mman\u001b[27m\u001b[24m.\u001b[4m7\u001b[24m\u001b[m\r\n Display the \u001b[7mman\u001b[27mual page for macro package \u001b[4m\u001b[7mman\u001b[27m\u001b[24m from section \u001b[4m7\u001b[24m.\u001b[m\r\n\u001b[m\r\n \u001b[1m\u001b[7mman\u001b[27m\u001b[0m \u001b[1m-a\u001b[0m \u001b[4mintro\u001b[24m\u001b[m\r\n Display, in succession, all of the available \u001b[4mintro\u001b[24m \u001b[7mman\u001b[27mual pages\u001b[m\r\n contained within the \u001b[7mman\u001b[27mual. It is possible to quit between suc\u001b[m\r\n cessive displays or skip any of them.\u001b[m\r\n\u001b[m\r\n \u001b[1m\u001b[7mman\u001b[27m\u001b[0m \u001b[1m-t\u001b[0m \u001b[4malias\u001b[24m | \u001b[4mlpr\u001b[24m \u001b[4m-Pps\u001b[24m\u001b[m\r\n Format the \u001b[7mman\u001b[27mual page referenced by `\u001b[4mali"],
["search + 9.493205", "o", "as\u001b[24m', usually a shell \u001b[7mman\u001b[27m\u001b[m\r\n ual page, into the default \u001b[1mtroff\u001b[0m or \u001b[1mgroff\u001b[0m format and pipe it to the\u001b[m\r\n\u001b[7m Manual page man(1) line 67 (press h for help or q to quit)\u001b[27m\u001b[K"],
["search + 10.755837", "o", "\r\u001b[K/\r\u001b[K"],
["search + 10.75625", "o", " printer named \u001b[4mps\u001b[24m. The default output for \u001b[1mgroff\u001b[0m is usually Post\u001b[m\r\n Script. \u001b[1m\u001b[7mman\u001b[27m\u001b[0m \u001b[1m--help\u001b[0m should advise as to which processor is bound to\u001b[m\r\n the \u001b[1m-t\u001b[0m option.\u001b[m\r\n\u001b[7m Manual page man(1) line 70 (press h for help or q to quit)\u001b[27m\u001b[K"],
["search + 11.707833", "o", "\r\u001b[K/\r\u001b[K"],
["search + 11.708289", "o", "\u001b[m\r\n \u001b[1m\u001b[7mman\u001b[27m\u001b[0m \u001b[1m-l\u001b[0m \u001b[1m-T\u001b[0m\u001b[4mdvi\u001b[24m \u001b[4m./foo.1x.gz\u001b[24m \u001b[1m>\u001b[0m \u001b[4m./foo.1x.dvi\u001b[24m\u001b[m\r\n This com\u001b[7mman\u001b[27md will decompress and format the nroff source \u001b[7mman\u001b[27mual\u001b[m\r\n\u001b[7m Manual page man(1) line 73 (press h for help or q to quit)\u001b[27m\u001b[K"],
["search + 12.75573", "o", "\r\u001b[K/\r\u001b[K"],
["search + 12.75613", "o", " page \u001b[4m./foo.1x.gz\u001b[24m into a \u001b[1mdevice\u001b[0m \u001b[1mindependent\u001b[0m \u001b[1m(dvi)\u001b[0m file. The redi\u001b[m\r\n rection is necessary as the \u001b[1m-T\u001b[0m flag causes output to be directed to\u001b[m\r\n \u001b[1mstdout\u001b[0m with no pager. The output could be viewed with a program\u001b[m\r\n\u001b[7m Manual page man(1) line 76 (press h for help or q to quit)\u001b[27m\u001b[K"],
["up + 0.547791", "o", "\r\u001b[K \u001b[KESC\b\b\bESC\u001b[KO\bO"],
["up + 0.54827", "o", "\u001b[KA\bA\r\u001b[K"],
["up + 0.548773", "o", "\u001b[H\u001bM\u001b[1mEXAMPLES\u001b[0m\u001b[m\r\n\u001b[24;1H\r\u001b[K\u001b[7m Manual page man(1) line 75 (press h for help or q to quit)\u001b[27m\u001b[K"],
["up + 1.339862", "o", "\r\u001b[K \u001b[KESC\b\b\bESC\u001b[KO\bO"],
["up + 1.340289", "o", "\u001b[KA\bA\r\u001b[K\u001b[H\u001bM\u001b[m\r\n\u001b[24;1H\r\u001b[K\u001b[7m Manual page man(1) line 74 (press h for help or q to quit)\u001b[27m\u001b[K"],
["up + 2.035805", "o", "\r\u001b[K \u001b[KESC\b\b\bESC"],
["up + 2.036168", "o", "\u001b[KO\bO\u001b[KA\bA\r\u001b[K\u001b[H\u001bM \u001b[7mman\u001b[27mual page.\u001b[m\r\n\u001b[24;1H\r\u001b[K\u001b[7m Manual page man(1) line 73 (press h for help or q to quit)\u001b[27m\u001b[K"],
["up + 2.827918", "o", "\r\u001b[K \u001b[KESC\b\b\bESC\u001b[KO\bO"],
["up + 2.82833", "o", "\u001b[KA\bA\r\u001b[K\u001b[H\u001bM eral exclusive invocations as is shown in the \u001b[1mSYNOPSIS\u001b[0m section of this\u001b[m\r\n\u001b[24;1H\r\u001b[K\u001b[7m Manual page man(1) line 72 (press h for help or q to quit)\u001b[27m\u001b[K"],
["up + 3.459969", "o", "\r\u001b[K \u001b[KESC\b\b\bESC\u001b[KO\bO"],
["up + 3.46038", "o", "\u001b[KA\bA\r\u001b[K\u001b[H\u001bM possible invocations. In some cases it is advisable to illustrate sev\u001b[m\r\n\u001b[24;1H\r\u001b[K\u001b[7m Manual page man(1) line 71 (press h for help or q to quit)\u001b[27m\u001b[K"],
["up + 4.059878", "o", "\r\u001b[K \u001b[KESC\b\b\bESC\u001b[KO\bO"],
["up + 4.060296", "o", "\u001b[KA\bA\r\u001b[K\u001b[H\u001bM The com\u001b[7mman\u001b[27md or function illustration is a pattern that should match all\u001b[m\r\n\u001b[24;1H\r\u001b[K\u001b[7m Manual page man(1) line 70 (press h for help or q to quit)\u001b[27m\u001b[K"],
["up + 4.731672", "o", "\r\u001b[K \u001b[KESC\b\b\bESC\u001b[KO\bO"],
["up + 4.73192", "o", "\u001b[KA\bA\r\u001b[K\u001b[H\u001bM\u001b[m\r\n\u001b[24;1H\r\u001b[K\u001b[7m Manual page man(1) line 69 (press h for help or q to quit)\u001b[27m\u001b[K"],
["up + 5.331857", "o", "\r\u001b[K \u001b[KESC\b\b\bESC\u001b[KO\bO\u001b[KA\bA\r\u001b[K"],
["up + 5.332293", "o", "\u001b[H\u001bM nal, and will typically use underlined or coloured text instead.\u001b[m\r\n\u001b[24;1H\r\u001b[K\u001b[7m Manual page man(1) line 68 (press h for help or q to quit)\u001b[27m\u001b[K"],
],
'text' : [
# SCREEN: man - paging through the documentation
'You have probably noticed that we browsed the man documentation screen by screen, <m> while the information printed by invoking a command <m> with the help option was dumped onto the screen all at once. <m>'
'This is because the man command uses a program <m> called a pager to split its text into pages. <m>'
'This role is usually played by more or less, and successive pages <m> can be displayed using the space bar. <mark name="search" />'
# SCREEN: man - searching
'Both of these programs also allow searching, with the slash key <m> starting the entry of the text to search for <m> and the n key finding the next occurrence. <m>'
'Quitting, in turn, is done with the q key. <m>'
'Both this way of searching and this way of quitting a program <m> are worth remembering, as they are commonly encountered conventions. <mark name="up" />'
# SCREEN: man - scrolling up line by line
'Less is more feature-rich than more and also allows <m> scrolling line by line, scrolling in both directions <m> using the arrow keys as well as page up and page down, <m> and searching backwards using shift n <m>'
]
},
{
'console': [
[0.058688, "o", eduMovie.clear + eduMovie.prompt()],
[0.69478, "o", "l"],
[0.886743, "o", "e"],
[1.142775, "o", "s"],
[1.294708, "o", "s"],
[1.454712, "o", " "],
[1.678724, "o", "/"],
[1.838742, "o", "e"],
[2.086702, "o", "t"],
[2.452806, "o", "c/"],
[2.94277, "o", "p"],
[3.478765, "o", "a"],
[3.702748, "o", "s"],
[3.940034, "o", "\u0007swd"],
[4.270745, "o", "\r\n"],
[4.279067, "o", "\u001b[?1049h\u001b[22;0;0t\u001b[?1h\u001b=\r"],
[4.279456, "o", "root:x:0:0:root:/root:/bin/bash\r\ndaemon:x:1:1:daemon:/usr/sbin:/usr/sbin/nologin\r\nbin:x:2:2:bin:/bin:/usr/sbin/nologin\r\nsys:x:3:3:sys:/dev:/usr/sbin/nologin\r\nsync:x:4:65534:sync:/bin:/bin/sync\r\ngames:x:5:60:games:/usr/games:/usr/sbin/nologin\r\nman:x:6:12:man:/var/cache/man:/usr/sbin/nologin\r\nlp:x:7:7:lp:/var/spool/lpd:/usr/sbin/nologin\r\nmail:x:8:8:mail:/var/mail:/usr/sbin/nologin\r\nnews:x:9:9:news:/var/spool/news:/usr/sbin/nologin\r\nuucp:x:10:10:uucp:/var/spool/uucp:/usr/sbin/nologin\r\nproxy:x:13:13:proxy:/bin:/usr/sbin/nologin\r\nwww-data:x:33:33:www-data:/var/www:/usr/sbin/nologin\r\nbackup:x:34:34:backup:/var/backups:/usr/sbin/nologin\r\nlist:x:38:38:Mailing List Manager:/var/list:/usr/sbin/nologin\r\nirc:x:39:39:ircd:/var/run/ircd:/usr/sbin/nologin\r\ngnats:x:41:41:Gnats Bug-Reporting System (admin):/var/lib/gnats:/usr/sbin/nologi \bn\r\nnobody:x:65534:65534:nobody:/nonexistent:/usr/sbin/nologin\r\nrrp:x:1000:1000:Robert Paciorek,,,:/rrp:/bin/bash"],
[4.27982, "o", "\r\nmessagebus:x:101:104::/var/run/dbus:/bin/false\r\nsshd:x:102:65534::/var/run/sshd:/usr/sbin/nologin\r\nsystemd-timesync:x:103:111:systemd Time Synchronization,,,:/run/systemd:/bin/fal \b\u001b[7m/etc/passwd\u001b[27m\u001b[K"],
[5.254821, "o", "\r\u001b[K \u001b[KESC\b\b\bESC\u001b[KO\bO"],
[5.255221, "o", "\u001b[KB\bB\r\u001b[Kse\r\n:\u001b[K"],
[5.638786, "o", "\r\u001b[K \u001b[KESC\b\b\bESC\u001b[KO\bO"],
[5.639159, "o", "\u001b[KB\bB\r\u001b[Ksystemd-network:x:105:113:systemd Network Management,,,:/run/systemd/netif:/bin/ \b:\u001b[K"],
[6.83884, "o", "\r\u001b[K \u001b[KESC\b\b\bESC\u001b[KO\bO"],
[6.839274, "o", "\u001b[KA\bA\r\u001b[K\u001b[H\u001bMdaemon:x:1:1:daemon:/usr/sbin:/usr/sbin/nologin\r\n\u001b[24;1H\r\u001b[K:\u001b[K"],
[7.126802, "o", "\r\u001b[K \u001b[KESC\b\b\bESC\u001b[KO\bO"],
[7.127176, "o", "\u001b[KA\bA\r\u001b[K\u001b[H\u001bMroot:x:0:0:root:/root:/bin/bash\r\n\u001b[24;1H\r\u001b[K:\u001b[K"],
],
'text' : [
'These commands can also be used to display files <m> and, thanks to stream redirection, <m> to paginate the output of other commands. <m>'
]
},
]
| 173.795775 | 1,738 | 0.678066 |
299c02cae606323659e0dd5bb1c799eaccfa8b0a | 2,109 | py | Python | setup.py | tilakpatidar/pytest-snowflake_bdd | db49f0a6d844828b607a2717b96bba517995cf72 | ["MIT"] | null | null | null | setup.py | tilakpatidar/pytest-snowflake_bdd | db49f0a6d844828b607a2717b96bba517995cf72 | ["MIT"] | null | null | null | setup.py | tilakpatidar/pytest-snowflake_bdd | db49f0a6d844828b607a2717b96bba517995cf72 | ["MIT"] | 1 | 2022-01-24T08:26:08.000Z | 2022-01-24T08:26:08.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import os
from setuptools import setup
gh_run_number = os.environ.get("BUILD_NUMBER", None)
build_number = None if gh_run_number is None or gh_run_number == "" else gh_run_number
version = '0.2.2'
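# NOTE: read() is used below but its definition was lost in extraction; the
# codecs import above suggests the conventional helper, sketched here under
# that assumption:
def read(fname):
    with codecs.open(os.path.join(os.path.dirname(__file__), fname),
                     encoding='utf-8') as fobj:
        return fobj.read()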
setup(
name='pytest-snowflake_bdd',
version=f"{version}-{build_number}" if build_number else version,
author='Tilak Patidar',
author_email='tilakpatidar@gmail.com',
maintainer='Tilak Patidar',
maintainer_email='tilakpatidar@gmail.com',
license='MIT',
url='https://github.com/tilakpatidar/pytest-snowflake_bdd',
description='Setup test data and run tests on snowflake in BDD style!',
long_description=read('README.rst'),
py_modules=['pytest_snowflake_bdd'],
python_requires='>=3.6.7',
install_requires=['pytest>=6.2.0', 'pytest-bdd>=3.2.1', 'snowflake-sqlalchemy>=1.3.2', 'SQLAlchemy>=1.4.27', \
'pandas>=0.25.3', 'python-dateutil>=2.8.2'],
tests_require=[
'tox',
],
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Pytest',
'Intended Audience :: Developers',
'Topic :: Software Development :: Testing',
'Topic :: Database',
'Topic :: Software Development :: Testing :: BDD',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Framework :: Pytest',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
],
packages=["pytest_snowflake_bdd"],
entry_points={
'pytest11': [
'pytest-snowflake-bdd = pytest_snowflake_bdd.plugin',
],
},
)
| 33.47619 | 114 | 0.625889 |
299c9b32319909c8f36fc5af498db57a782db34f | 437 | py | Python | integration_tests/test_12_dmaap.py | krasm/python-onapsdk | 87cd3017fc542a8afd3be51fbd89934ed87ed3a7 | ["Apache-2.0"] | 4 | 2020-06-13T04:51:27.000Z | 2021-01-06T15:00:51.000Z | integration_tests/test_12_dmaap.py | krasm/python-onapsdk | 87cd3017fc542a8afd3be51fbd89934ed87ed3a7 | ["Apache-2.0"] | 10 | 2021-09-20T15:42:47.000Z | 2021-09-23T12:49:51.000Z | integration_tests/test_12_dmaap.py | krasm/python-onapsdk | 87cd3017fc542a8afd3be51fbd89934ed87ed3a7 | ["Apache-2.0"] | 8 | 2020-08-28T10:56:02.000Z | 2022-02-11T17:06:03.000Z |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2020 Nokia
import pytest
import logging
import os
from onapsdk.dmaap.dmaap import Dmaap
logging.basicConfig(level=os.environ.get("LOGLEVEL", "DEBUG"))
| 20.809524 | 95 | 0.729977 |
299d93368abb2922353eb3246c80b4d0b6d61d48 | 390 | py | Python | awx/main/migrations/0112_deployhistory_date.py | Pavloid21/awx | 224827f6060013b996eb8210597bca68cda65d40 | ["Apache-2.0"] | null | null | null | awx/main/migrations/0112_deployhistory_date.py | Pavloid21/awx | 224827f6060013b996eb8210597bca68cda65d40 | ["Apache-2.0"] | null | null | null | awx/main/migrations/0112_deployhistory_date.py | Pavloid21/awx | 224827f6060013b996eb8210597bca68cda65d40 | ["Apache-2.0"] | null | null | null |
# Generated by Django 2.2.8 on 2020-03-25 13:50
from django.db import migrations, models
| 20.526316 | 54 | 0.602564 |
299f9a135fb0ac674c3200f9214021f3cf9fd561 | 920 | py | Python | policy.py | shantanusingh16/Pytorch-DQN | b7d3270e9e345e85e5c5a5216109529879ab77bd | [
"MIT"
] | 4 | 2018-09-23T19:58:24.000Z | 2022-03-22T20:32:36.000Z | policy.py | shantanusingh16/Pytorch-DQN | b7d3270e9e345e85e5c5a5216109529879ab77bd | [
"MIT"
] | null | null | null | policy.py | shantanusingh16/Pytorch-DQN | b7d3270e9e345e85e5c5a5216109529879ab77bd | [
"MIT"
] | 2 | 2019-05-22T06:02:38.000Z | 2019-10-18T17:08:24.000Z | import numpy as np
import torch
from utils.helpers import process_state, device
def make_epsilon_greedy_policy(estimator, nA):
"""
    :param estimator: model that returns q values for a given state/action pair
:param nA: number of actions in the environment
:return: A function that takes in a state and an epsilon and returns probs for each
action in the form of a numpy array of length nA
"""
def policy_fn(state, epsilon):
"""
:param state: tensor of b x 1 x 84 x 84
:param epsilon:
:return: action probabilities, of size b x nA
"""
A = torch.ones(nA) * epsilon / nA
state = torch.from_numpy(state).float().to(device).unsqueeze(0) / 255.0
q_vals = estimator.forward(state)
best_action = torch.argmax(q_vals, dim=0).unsqueeze(-1) # b
A[best_action] += (1.0 - epsilon)
return A
return policy_fn | 36.8 | 87 | 0.646739 |
29a1b5f087c1d14e9f6ed91d094e1aa061d5a041 | 2,798 | py | Python | phonotactics/onsets/onsets.py | shlomo-Kallner/coventreiya | aa0773693220025f8d2c23644a2c5d9d884773e9 | ["Apache-2.0"] | null | null | null | phonotactics/onsets/onsets.py | shlomo-Kallner/coventreiya | aa0773693220025f8d2c23644a2c5d9d884773e9 | ["Apache-2.0"] | null | null | null | phonotactics/onsets/onsets.py | shlomo-Kallner/coventreiya | aa0773693220025f8d2c23644a2c5d9d884773e9 | ["Apache-2.0"] | null | null | null |
__name__ = 'onsets'
__version__ = '1.5.1'
__package__ = 'phonotactics'
# imports
#some import machinery checking and manipulations...
#import sys
#import os
#from os import path
#if '__file__' in dir():
# __mod_path = path.dirname(__file__)
# if __mod_path not in sys.path:
# sys.path.append(__mod_path)
# __pack_path = path.dirname(__mod_path)
# if __pack_path not in sys.path:
# sys.path.append(__pack_path)
from coventreiya.utils.ver import ver
from coventreiya.utils.ver import gen_ver
from coventreiya.utils.ver import Version_Registry
from coventreiya.phonotactics.abc import abc
########################################################
#
#
# Generating the Onsets
#
#
################################################################################
#
# Version Information Control & UnExported [but Versioned] Object Instantiation
#
#
__versions = Version_Registry( Onsets() )
###################################################################################
#
# Getting/Setting the default/current version...
#
###################################################################################
#
# The original default version -- used for the (now obsolete and removed)
#    "default" gen_*_ functions and the pre-generated lists...
# Note: the *COMPATIBILITY_ONLY* default gen_*_ functions will self-update to
#       accommodate resets (they call into *THE_CURRENT_VERSION_OBJECT*!!)
#       the PRE-GENERATED LISTS will not be updated at all..
# Note: VERSION 2_0: the *OLD* gen_*_ functions no longer self-update as
# they are now directly linked to version 1.5.1 only.
#
# from ver_1_5_1 import *
# __versions.current(gen_ver(1,5,1))
| 30.086022 | 84 | 0.592566 |
29a35e6f75f695c4d26d13d7a9c5d6dff08f119d | 6,228 | py | Python | aptitudetech_private/aptitudetech_private/doctype/simplified_time_reporting/simplified_time_reporting.py | CloudGround/aptitudetech_private | d4d150226bd33ea0c76086264286ae7cae52457f | ["MIT"] | null | null | null | aptitudetech_private/aptitudetech_private/doctype/simplified_time_reporting/simplified_time_reporting.py | CloudGround/aptitudetech_private | d4d150226bd33ea0c76086264286ae7cae52457f | ["MIT"] | null | null | null | aptitudetech_private/aptitudetech_private/doctype/simplified_time_reporting/simplified_time_reporting.py | CloudGround/aptitudetech_private | d4d150226bd33ea0c76086264286ae7cae52457f | ["MIT"] | 1 | 2019-05-17T00:04:05.000Z | 2019-05-17T00:04:05.000Z |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Aptitudetech and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
| 34.793296 | 312 | 0.701028 |
29a405435385e49ddae23458da015f3ba0c567e1 | 442 | py | Python | 6 - Python/Collections/7 - Piling Up!.py | Terence-Guan/Python.HackerRank | 165a5f0e739c7678dfac7eae95443018e2167c3d | ["MIT"] | 88 | 2016-10-23T16:41:14.000Z | 2019-12-30T23:51:47.000Z | HackerRank/6 - Python/Collections/7 - Piling Up!.py | natalie-o-perret/coding-challenges | 9a242e0ec54488f59be82592822b31ff51af1633 | ["MIT"] | 1 | 2018-10-13T14:31:54.000Z | 2018-10-13T14:31:54.000Z | HackerRank/6 - Python/Collections/7 - Piling Up!.py | natalie-o-perret/coding-challenges | 9a242e0ec54488f59be82592822b31ff51af1633 | ["MIT"] | 82 | 2017-02-01T17:02:56.000Z | 2020-02-01T11:45:58.000Z |
from collections import deque
T = int(input())
for t in range(T):
n = int(input())
lengths = deque(map(int, input().split()))
top = max(lengths)
while len(lengths) > 0:
left = lengths[0]
right = lengths[-1]
if (right >= left) and (right <= top):
top = right
lengths.pop()
elif (left >= right) and (left <= top):
top = left
lengths.popleft()
else:
break
if len(lengths) == 0:
print("YES")
else:
print("NO")
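# Worked trace (added for clarity): lengths = [4, 3, 2, 1, 3, 4]
# picks 4 (right), 4 (left), 3 (right), 3 (left), 2, 1 -- the picked cubes
# never increase, so the deque empties and the verdict is "YES".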
| 19.217391 | 43 | 0.595023 |
29a40a64c821d23b2e28418293629df23986810c | 6,343 | bzl | Python | tools/bzl/deps.bzl | xshaun/compiler-programl | f90bcd84700d0f245c80440a3d5fd29370d2f973 | ["Apache-2.0"] | null | null | null | tools/bzl/deps.bzl | xshaun/compiler-programl | f90bcd84700d0f245c80440a3d5fd29370d2f973 | ["Apache-2.0"] | null | null | null | tools/bzl/deps.bzl | xshaun/compiler-programl | f90bcd84700d0f245c80440a3d5fd29370d2f973 | ["Apache-2.0"] | null | null | null |
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
| 35.836158 | 153 | 0.760366 |
29a7a6484cb6277e0cdd34fa9a54d64187a477f7 | 1,082 | py | Python | matrix_multiplication_evolution_example.py | bobbywlindsey/stokepy | e7f4ad0c27d8fc22129558db6ae0dbbab0627ace | ["MIT"] | 9 | 2017-05-09T20:00:10.000Z | 2020-07-02T18:00:22.000Z | matrix_multiplication_evolution_example.py | bobbywlindsey/stokepy | e7f4ad0c27d8fc22129558db6ae0dbbab0627ace | ["MIT"] | null | null | null | matrix_multiplication_evolution_example.py | bobbywlindsey/stokepy | e7f4ad0c27d8fc22129558db6ae0dbbab0627ace | ["MIT"] | 2 | 2017-08-10T14:47:07.000Z | 2019-01-25T02:37:34.000Z |
import stokepy as sp
import numpy as np
# instantiate class
fmc = sp.FiniteMarkovChain()
# create initial distribution vector
phi = np.array([0, 0, 1, 0, 0])
# generate Markov chain with no boundary conditions
fmc.gen_from_params(phi, p = 0.6, num_states = 5, dim = 1)
# apply boundary condition: absorbing, reflecting, semi-reflecting
# only works for 1 dimension Markov chains at the moment
fmc.apply_boundary_condition(condition='absorbing')
# choose solution method like Matrix Multiplication Evolution
matrx_mult_evo = sp.MatrixMultiplicationEvolution(fmc, phi, steps = 2000,\
rec_class_states = [])
# run the solution
matrx_mult_evo.run()
# get data from the run
average_distribution = matrx_mult_evo.pi
tpdf = matrx_mult_evo.tpdf
absorption_proportions = matrx_mult_evo.absorption_proportions
apbrc = matrx_mult_evo.recurrent_class_absorbed_proportions
mean_absorption_time = matrx_mult_evo.mean_absorption_time
# plot absorption times for recurrent classes
matrx_mult_evo.plot_absorption()
| 33.8125 | 76 | 0.750462 |
29a7cf6d7a2997edf3ae4f28829f450e4f401145 | 1,225 | py | Python | tests/__main__.py | nickswebsite/nickswebsite-serializer | 2c131a04a4105afae439670f96b5b72bdfe65854 | ["Unlicense"] | 2 | 2017-09-26T16:38:36.000Z | 2018-08-09T15:09:51.000Z | tests/__main__.py | nickswebsite/nickswebsite-serializer | 2c131a04a4105afae439670f96b5b72bdfe65854 | ["Unlicense"] | 8 | 2015-02-20T13:16:11.000Z | 2016-12-20T14:55:43.000Z | tests/__main__.py | nickswebsite/nickswebsite-serializer | 2c131a04a4105afae439670f96b5b72bdfe65854 | ["Unlicense"] | 6 | 2015-05-20T21:26:40.000Z | 2018-08-08T10:33:04.000Z |
import doctest
import sys
import unittest
import r2dto
from tests.test_acceptance import AcceptanceTests
from tests.test_base_serializer import BaseSerializerTests
__all__ = ["doctest", "sys", "unittest", "r2dto", "AcceptanceTests", "BaseSerializerTests"]
try:
import pep8
except ImportError:
    print("WARNING: pep8 not installed. Style will not be checked and therefore your build may fail when integrated "
          "with the main branch.")
pep8 = None
PEP8_SOURCES = [
"r2dto/__init__.py",
"r2dto/base.py",
"r2dto/fields.py",
"r2dto/validators.py",
"tests/__init__.py",
"tests/__main__.py",
"tests/test_acceptance.py",
"tests/test_base_serializer.py",
]
if __name__ == "__main__":
if pep8 is not None:
sg = pep8.StyleGuide(max_line_length=120)
res = sg.check_files(PEP8_SOURCES)
if res.total_errors != 0:
print("pep8 failed")
sys.exit(1)
doctest_ctx = {
"Serializer": r2dto.Serializer,
"fields": r2dto.fields,
"ValidationError": r2dto.ValidationError,
}
results = doctest.testfile("../README.md", globs=doctest_ctx)
if results.failed != 0:
sys.exit(1)
unittest.main()
| 26.06383 | 117 | 0.660408 |
29a7fecfec58a37e5770387c0619949240d50800 | 10,697 | py | Python | manager/jobs/jobs.py | jlbrewe/hub | c737669e6493ad17536eaa240bed3394b20c6b7d | ["Apache-2.0"] | 30 | 2016-03-26T12:08:04.000Z | 2021-12-24T14:48:32.000Z | manager/jobs/jobs.py | jlbrewe/hub | c737669e6493ad17536eaa240bed3394b20c6b7d | ["Apache-2.0"] | 1,250 | 2016-03-23T04:56:50.000Z | 2022-03-28T02:27:58.000Z | manager/jobs/jobs.py | jlbrewe/hub | c737669e6493ad17536eaa240bed3394b20c6b7d | ["Apache-2.0"] | 11 | 2016-07-14T17:04:20.000Z | 2021-07-01T16:19:09.000Z |
"""
Module that defines the interface between the `manager` (i.e. Django) and the `broker` (i.e. RabbitMQ).
Defines the functions involved in a job's lifecycle:
- `dispatch_job` - send a job to a queue
- `update_job` - update the status of a job by checking its (intermediate) result
- `check_job` - for a parent job, trigger any child jobs, and / or update its status
- `cancel_job` - remove job from the queue, or terminate it if already started
"""
import datetime
import logging
import time
from celery import Celery, signature
from celery.result import AsyncResult
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.utils import timezone
from jobs.models import Job, JobMethod, JobStatus, Queue, Worker
logger = logging.getLogger(__name__)
# Setup the Celery app
app = Celery("manager", broker=settings.BROKER_URL, backend=settings.CACHE_URL)
app.conf.update(
# By default Celery will keep on trying to connect to the broker forever
# This overrides that. Initially try again immediately, then add 0.5 seconds for each
# subsequent try (with a maximum of 3 seconds).
# See https://github.com/celery/celery/issues/4296
broker_transport_options={
"max_retries": 10,
"interval_start": 0,
"interval_step": 0.5,
"interval_max": 3,
},
# Needed to ensure STARTED state is emitted
task_track_started=True,
)
def dispatch_job(job: Job) -> Job:
"""
Send a job to a queue.
Decides which queue a job should be sent to and sends it.
The queue can depend upon both the project and the account (either the
account that the project is linked to, or the default account of the job
creator).
"""
if not JobMethod.is_member(job.method):
raise ValueError("Unknown job method '{}'".format(job.method))
if job.method in settings.JOB_METHODS_STAFF_ONLY and (
not job.creator or not job.creator.is_staff
):
raise PermissionDenied
if JobMethod.is_compound(job.method):
children = job.children.all().order_by("id")
if len(children) == 0:
# If there are no children (e.g. a pull job for a project with no sources)
# then job is immediately finished
job.runtime = 0
job.is_active = False
job.status = JobStatus.SUCCESS.value
else:
if job.method == JobMethod.parallel.value:
# Dispatch all child jobs simultaneously
for child in children:
dispatch_job(child)
else:
# Dispatch the first child; subsequent children
# will be status WAITING and will get dispatched later
# on update of the parent.
for index, child in enumerate(children):
if index == 0:
dispatch_job(child)
else:
child.is_active = True
child.status = JobStatus.WAITING.value
child.save()
job.is_active = True
job.status = JobStatus.DISPATCHED.value
else:
# Find queues that have active workers on them
# order by descending priority
queues = list(
Queue.objects.filter(
workers__in=Worker.objects.filter(
# Has not finished
finished__isnull=True,
# Has been updated in the last x minutes
updated__gte=timezone.now() - datetime.timedelta(minutes=15),
),
).order_by("priority")
)
# Fallback to the default Stencila queue
# Apart from anything else having this fallback is useful in development
# because if means that the `overseer` service does not need to be running
# in order keep track of the numbers of workers listening on each queue
# (during development `worker`s listen to the default queue)
if len(queues) == 0:
logger.warning("No queues found with active workers")
queue, _ = Queue.get_or_create(
account_name="stencila", queue_name="default"
)
else:
if job.creator is None or job.project is None:
# Jobs created by anonymous users go on the lowest
# priority queue
priority = 1
else:
# The priority of other jobs is determined by the
# account tier of the project
priority = job.project.account.tier.id
queue = queues[min(len(queues), priority) - 1]
# Add the job's project id, key and secrets to it's kwargs.
# Doing this here ensures it is done for all jobs
# and avoids putting the secrets in the job's `params` field.
kwargs = dict(**job.params) if job.params else {}
kwargs["project"] = job.project.id if job.project else None
kwargs["key"] = job.key
kwargs["secrets"] = job.secrets
# Send the job to the queue
task = signature(
job.method, kwargs=kwargs, queue=queue.name, task_id=str(job.id), app=app,
)
task.apply_async()
job.queue = queue
job.is_active = True
job.status = JobStatus.DISPATCHED.value
job.save()
return job
def update_job(job: Job, data={}, force: bool = False) -> Job:
"""
Update a job.
This method is triggered by a PATCH request from the
`overseer` service. It updates the status, and other fields of
    the job, and if the job has a parent, updates its status too.
See https://stackoverflow.com/a/38267978 for important considerations
in using AsyncResult.
"""
# Avoid unnecessary update
if not job.is_active and not force:
return job
was_active = job.is_active
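    _cached_result = []
    def async_result():
        # Hedged reconstruction (added): the local helper wrapping Celery's
        # AsyncResult was lost in extraction. The docstring's note about
        # AsyncResult (see the Stack Overflow link above) is honoured by
        # memoising the handle so repeated calls reuse one backend object.
        if not _cached_result:
            _cached_result.append(AsyncResult(str(job.id), app=app))
        return _cached_result[0]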
if JobMethod.is_compound(job.method):
# Update the status of compound jobs based on children
status = job.status
is_active = False
all_previous_succeeded = True
any_previous_failed = False
for child in job.get_children():
# If the child has a 'higher' status then update the
# status of the compound job
status = JobStatus.highest([status, child.status])
# If the child is still waiting then...
if child.status == JobStatus.WAITING.value:
# If all previous have succeeded, dispatch it
if all_previous_succeeded:
dispatch_job(child)
# If any previous have failed, cancel it
elif any_previous_failed:
cancel_job(child)
if child.status != JobStatus.SUCCESS.value:
all_previous_succeeded = False
if child.status == JobStatus.FAILURE.value:
any_previous_failed = True
# If the child is still active then the compound job is active
if child.is_active:
is_active = True
job.is_active = is_active
job.status = JobStatus.RUNNING.value if is_active else status
else:
status = data.get("status")
assert status
# Do not do anything if the new status is lower rank than the
        # existing status. This can happen, for example, when a job is
# terminated (the SUCCESS state is sent after TERMINATED)
if JobStatus.rank(status) < JobStatus.rank(job.status):
return job
# Update fields sent by `overseer` service, including `status`
for key, value in data.items():
setattr(job, key, value)
# If job succeeded then get the result if we haven't already
if status == JobStatus.SUCCESS.value and job.result is None:
response = None
attempts = 0
while not response and attempts < 5:
try:
response = async_result().get(timeout=30)
except Exception:
# Catch all errors, but log them. Occasional
# errors encountered in prod include ResponseError and TimeoutError
logger.warning(
"Error getting async result",
exc_info=True,
extra=dict(id=job.id, method=job.method, attempts=attempts),
)
time.sleep(1)
attempts += 1
if response:
job.result = response.get("result")
job.log = response.get("log")
else:
logger.error(
"Unable to get async result",
extra=dict(id=job.id, method=job.method, attempts=attempts),
)
job.status = JobStatus.FAILURE.value
job.error = dict(
type="RuntimeError", message="Unable to get result of job"
)
# If job failed then get the error
# For FAILURE, `info` is the raised Exception
elif status == JobStatus.FAILURE.value:
info = async_result().info
if info:
job.error = dict(type=type(info).__name__, message=str(info))
# If the job has just ended then mark it as inactive
if JobStatus.has_ended(status):
job.is_active = False
# If the job is no longer active clear its secrets and run its callback
if was_active and not job.is_active:
job.secrets = None
job.run_callback()
# Save before updating parent (and then this again)
job.save()
# If the job has a parent then update it too
if job.parent:
update_job(job.parent)
return job
def cancel_job(job: Job) -> Job:
"""
Cancel a job.
This uses Celery's terminate options which will kill the worker child process.
This is not normally recommended but in this case is OK because there is only
one task per process.
See `worker/worker.py` for the reasoning for using `SIGUSR1`.
See https://docs.celeryproject.org/en/stable/userguide/workers.html#revoke-revoking-tasks
"""
if job.is_active:
if JobMethod.is_compound(job.method):
for child in job.children.all():
cancel_job(child)
else:
app.control.revoke(str(job.id), terminate=True, signal="SIGUSR1")
job.status = JobStatus.CANCELLED.value
job.is_active = False
job.secrets = None
job.save()
return job
| 37.013841 | 102 | 0.598299 |
29a916eb7d2d8321665bd4ae8b4fed316f3bc30f | 217 | py | Python | sklearn-nlp/utils/data_utils.py | fmailhot/sklearn-nlp | 3de76cb71fc85bc1231bdfa9cd78b5f98a0f14f7 | [
"BSD-3-Clause"
] | null | null | null | sklearn-nlp/utils/data_utils.py | fmailhot/sklearn-nlp | 3de76cb71fc85bc1231bdfa9cd78b5f98a0f14f7 | [
"BSD-3-Clause"
] | null | null | null | sklearn-nlp/utils/data_utils.py | fmailhot/sklearn-nlp | 3de76cb71fc85bc1231bdfa9cd78b5f98a0f14f7 | [
"BSD-3-Clause"
] | null | null | null | """Data loading/munging utilities.
This will need to leverage a lot of existing stuff
(e.g. numpy.genfromtxt)...
"""
import logging
| 15.5 | 50 | 0.700461 |
29aa089f836846e2e53f80e15d88b7aa8aa740d4 | 12,785 | py | Python | assignment2/ptb-lm-loss-compute.py | adijo/ift6135-rnn | 88ebcd621cea4042f5ada688f2452ce25d02b761 | [
"Apache-2.0"
] | null | null | null | assignment2/ptb-lm-loss-compute.py | adijo/ift6135-rnn | 88ebcd621cea4042f5ada688f2452ce25d02b761 | [
"Apache-2.0"
] | null | null | null | assignment2/ptb-lm-loss-compute.py | adijo/ift6135-rnn | 88ebcd621cea4042f5ada688f2452ce25d02b761 | [
"Apache-2.0"
] | null | null | null | #!/bin/python
# coding: utf-8
import argparse
import time
import collections
import os
import sys
import torch
import torch.nn
from torch.autograd import Variable
import torch.nn as nn
import numpy as np
from models_grad import RNN, GRU
from models_grad import make_model as TRANSFORMER
parser = argparse.ArgumentParser(description='PyTorch Penn Treebank Language Modeling')
# Arguments you may need to set to run different experiments in 4.1 & 4.2.
parser.add_argument('--data', type=str, default='data',
help='location of the data corpus')
parser.add_argument('--model', type=str, default='TRANSFORMER',
help='type of recurrent net (RNN, GRU, TRANSFORMER)')
parser.add_argument('--optimizer', type=str, default='SGD_LR_SCHEDULE',
help='optimization algo to use; SGD, SGD_LR_SCHEDULE, ADAM')
parser.add_argument('--seq_len', type=int, default=35,
help='number of timesteps over which BPTT is performed')
parser.add_argument('--batch_size', type=int, default=20,
help='size of one minibatch')
parser.add_argument('--initial_lr', type=float, default=20.0,
help='initial learning rate')
parser.add_argument('--hidden_size', type=int, default=512,
help='size of hidden layers. IMPORTANT: for the transformer\
this must be a multiple of 16.')
parser.add_argument('--save_best', action='store_true',
help='save the model for the best validation performance')
parser.add_argument('--num_layers', type=int, default=2,
help='number of hidden layers in RNN/GRU, or number of transformer blocks in TRANSFORMER')
# Other hyperparameters you may want to tune in your exploration
parser.add_argument('--emb_size', type=int, default=200,
help='size of word embeddings')
parser.add_argument('--num_epochs', type=int, default=40,
help='number of epochs to stop after')
parser.add_argument('--dp_keep_prob', type=float, default=0.35,
help='dropout *keep* probability. drop_prob = 1-dp_keep_prob \
(dp_keep_prob=1 means no dropout)')
# Arguments that you may want to make use of / implement more code for
parser.add_argument('--debug', action='store_true')
parser.add_argument('--save_dir', type=str, default='',
help='path to save the experimental config, logs, model \
This is automatically generated based on the command line \
arguments you pass and only needs to be set if you want a \
custom dir name')
parser.add_argument('--evaluate', action='store_true',
help="use this flag to run on the test set. Only do this \
ONCE for each model setting, and only after you've \
completed ALL hyperparameter tuning on the validation set.\
Note we are not requiring you to do this.")
# DO NOT CHANGE THIS (setting the random seed makes experiments deterministic,
# which helps for reproducibility)
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
args = parser.parse_args()
argsdict = args.__dict__
argsdict['code_file'] = sys.argv[0]
# Use the model, optimizer, and the flags passed to the script to make the
# name for the experimental dir
print("\n########## Setting Up Experiment ######################")
flags = [flag.lstrip('--') for flag in sys.argv[1:]]
current_script_path = os.path.dirname(os.path.realpath(__file__))
experiment_path = os.path.join(os.path.sep, current_script_path, args.save_dir, '_'.join([argsdict['model'], argsdict['optimizer']] + flags))
# Increment a counter so that previous results with the same args will not
# be overwritten. Comment out the next four lines if you only want to keep
# the most recent results.
i = 0
while os.path.exists(experiment_path + "_" + str(i)):
i += 1
experiment_path = experiment_path + "_" + str(i)
# Creates an experimental directory and dumps all the args to a text file
os.makedirs(experiment_path)
print("\nPutting log in %s" % experiment_path)
argsdict['save_dir'] = experiment_path
with open(os.path.join(experiment_path, 'exp_config.txt'), 'w') as f:
for key in sorted(argsdict):
f.write(key+' '+str(argsdict[key])+'\n')
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
# Use the GPU if you have one
if torch.cuda.is_available():
print("Using the GPU")
device = torch.device("cuda")
else:
print("WARNING: You are about to run on cpu, and this will likely run out \
of memory. \n You can try setting batch_size=1 to reduce memory usage")
device = torch.device("cpu")
###############################################################################
#
# DATA LOADING & PROCESSING
#
###############################################################################
# HELPER FUNCTIONS
# Processes the raw data from text files
# Yields minibatches of data
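# ---------------------------------------------------------------------------
# Hedged reconstruction (added): the helper definitions were lost in
# extraction; only the two comments above survived. The standard PTB helpers
# from the TensorFlow tutorial / IFT6135 assignment are assumed here, and the
# "ptb.{train,valid,test}.txt" file names are assumptions as well.
# ---------------------------------------------------------------------------
def _read_words(filename):
    # Whitespace-tokenize the file, mapping newlines to an <eos> token.
    with open(filename, "r", encoding="utf-8") as f:
        return f.read().replace("\n", "<eos>").split()

def _build_vocab(filename):
    # Assign ids by descending frequency (ties broken alphabetically).
    counter = collections.Counter(_read_words(filename))
    count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
    words, _ = list(zip(*count_pairs))
    word_to_id = dict(zip(words, range(len(words))))
    id_2_word = {v: k for k, v in word_to_id.items()}
    return word_to_id, id_2_word

def _file_to_word_ids(filename, word_to_id):
    return [w_id for w_id in (word_to_id[w] for w in _read_words(filename) if w in word_to_id)]

def ptb_raw_data(data_path=None, prefix="ptb"):
    # Load the three splits plus both vocabulary mappings.
    train_path = os.path.join(data_path, prefix + ".train.txt")
    valid_path = os.path.join(data_path, prefix + ".valid.txt")
    test_path = os.path.join(data_path, prefix + ".test.txt")
    word_to_id, id_2_word = _build_vocab(train_path)
    train_data = _file_to_word_ids(train_path, word_to_id)
    valid_data = _file_to_word_ids(valid_path, word_to_id)
    test_data = _file_to_word_ids(test_path, word_to_id)
    return train_data, valid_data, test_data, word_to_id, id_2_word

def ptb_iterator(raw_data, batch_size, num_steps):
    # Yield (x, y) pairs of shape (batch_size, num_steps), y shifted by one.
    raw_data = np.array(raw_data, dtype=np.int32)
    batch_len = len(raw_data) // batch_size
    data = np.zeros([batch_size, batch_len], dtype=np.int32)
    for i in range(batch_size):
        data[i] = raw_data[batch_len * i:batch_len * (i + 1)]
    epoch_size = (batch_len - 1) // num_steps
    if epoch_size == 0:
        raise ValueError("epoch_size == 0: decrease batch_size or num_steps")
    for i in range(epoch_size):
        x = data[:, i * num_steps:(i + 1) * num_steps]
        y = data[:, i * num_steps + 1:(i + 1) * num_steps + 1]
        yield (x, y)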
# LOAD DATA
print('Loading data from '+args.data)
raw_data = ptb_raw_data(data_path=args.data)
train_data, valid_data, test_data, word_to_id, id_2_word = raw_data
vocab_size = len(word_to_id)
print(' vocabulary size: {}'.format(vocab_size))
###############################################################################
#
# MODEL SETUP
#
###############################################################################
# NOTE ==============================================
# This is where your model code will be called. You may modify this code
# if required for your implementation, but it should not typically be necessary,
# and you must let the TAs know if you do so.
if args.model == 'RNN':
print("seq_length", args.seq_len)
print("batch_size", args.batch_size)
model = RNN(emb_size=args.emb_size, hidden_size=args.hidden_size,
seq_len=args.seq_len, batch_size=args.batch_size,
vocab_size=vocab_size, num_layers=args.num_layers,
dp_keep_prob=args.dp_keep_prob)
elif args.model == 'GRU':
model = GRU(emb_size=args.emb_size, hidden_size=args.hidden_size,
seq_len=args.seq_len, batch_size=args.batch_size,
vocab_size=vocab_size, num_layers=args.num_layers,
dp_keep_prob=args.dp_keep_prob)
elif args.model == 'TRANSFORMER':
if args.debug: # use a very small model
model = TRANSFORMER(vocab_size=vocab_size, n_units=16, n_blocks=2)
else:
# Note that we're using num_layers and hidden_size to mean slightly
# different things here than in the RNNs.
# Also, the Transformer also has other hyper-parameters
# (such as the number of attention heads) which can change it's behavior.
model = TRANSFORMER(vocab_size=vocab_size, n_units=args.hidden_size,
n_blocks=args.num_layers, dropout=1.-args.dp_keep_prob)
# these 3 attributes don't affect the Transformer's computations;
# they are only used in run_epoch
model.batch_size = args.batch_size
model.seq_len = args.seq_len
model.vocab_size = vocab_size
else:
print("Model type not recognized.")
model = model.to(device)
# LOSS FUNCTION
loss_fn = nn.CrossEntropyLoss()
if args.optimizer == 'ADAM':
optimizer = torch.optim.Adam(model.parameters(), lr=args.initial_lr)
# LEARNING RATE SCHEDULE
lr = args.initial_lr
lr_decay_base = 1 / 1.15
m_flat_lr = 14.0 # we will not touch lr for the first m_flat_lr epochs
###############################################################################
#
# DEFINE COMPUTATIONS FOR PROCESSING ONE EPOCH
#
###############################################################################
def repackage_hidden(h):
"""
Wraps hidden states in new Tensors, to detach them from their history.
This prevents Pytorch from trying to backpropagate into previous input
sequences when we use the final hidden states from one mini-batch as the
initial hidden states for the next mini-batch.
Using the final hidden states in this way makes sense when the elements of
the mini-batches are actually successive subsequences in a set of longer sequences.
This is the case with the way we've processed the Penn Treebank dataset.
"""
if isinstance(h, Variable):
return h.detach_()
else:
return tuple(repackage_hidden(v) for v in h)
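class Batch:
    # Hedged reconstruction (added): the original class was lost in
    # extraction. It packages a batch of token ids together with the
    # "hide the future" attention mask used by the transformer branch of
    # run_epoch() below, in the style of "The Annotated Transformer".
    def __init__(self, x, pad=-1):
        self.data = x
        self.mask = self.make_mask(self.data, pad)

    @staticmethod
    def make_mask(data, pad):
        def subsequent_mask(size):
            # Upper-triangular mask: position i may only attend to <= i.
            attn_shape = (1, size, size)
            mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
            return torch.from_numpy(mask) == 0
        mask = (data != pad).unsqueeze(-2)
        mask = mask & Variable(subsequent_mask(data.size(-1)).type_as(mask.data))
        return mask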
def run_epoch(model, data):
"""
    One evaluation epoch: load the saved model weights and accumulate the per-timestep loss (no training is performed).
"""
model.eval()
state_dict = torch.load('saved_model.pt', map_location="cpu")
model.load_state_dict(state_dict)
total_loss = np.zeros(model.seq_len)
steps = 0
# LOOP THROUGH MINI BATCHES
for step, (x, y) in enumerate(ptb_iterator(data, model.batch_size, model.seq_len)):
steps += 1
if args.model != 'TRANSFORMER':
hidden = model.init_hidden()
hidden = hidden.to(device)
if args.model == 'TRANSFORMER':
batch = Batch(torch.from_numpy(x).long().to(device))
model.zero_grad()
outputs = model.forward(batch.data, batch.mask).transpose(1, 0)
# print ("outputs.shape", outputs.shape)
else:
inputs = torch.from_numpy(x.astype(np.int64)).transpose(0, 1).contiguous().to(device)#.cuda()
model.zero_grad()
hidden = repackage_hidden(hidden)
outputs, hidden = model(inputs, hidden)
targets = torch.from_numpy(y.astype(np.int64)).transpose(0, 1).contiguous().to(device)#.cuda()
total_loss += np.array([loss_fn(outputs[i], targets[i]).item() for i in range(len(outputs))])
total_loss /= float(steps)
print(total_loss)
###############################################################################
#
# RUN MAIN LOOP (TRAIN AND VAL)
#
###############################################################################
print("\n########## Running Main Loop ##########################")
# Loss computation (a single evaluation pass)
num_epochs = 1
# MAIN LOOP
for epoch in range(num_epochs):
# RUN MODEL ON VALID DATA
run_epoch(model, valid_data)
| 38.509036 | 141 | 0.630504 |
29aa6576959454006572496dfd5c5ae886a2c7c2 | 78 | py | Python | Configuration/Eras/python/Modifier_run3_nanoAOD_devel_cff.py | malbouis/cmssw | 16173a30d3f0c9ecc5419c474bb4d272c58b65c8 | ["Apache-2.0"] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | Configuration/Eras/python/Modifier_run3_nanoAOD_devel_cff.py | gartung/cmssw | 3072dde3ce94dcd1791d778988198a44cde02162 | ["Apache-2.0"] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | Configuration/Eras/python/Modifier_run3_nanoAOD_devel_cff.py | gartung/cmssw | 3072dde3ce94dcd1791d778988198a44cde02162 | ["Apache-2.0"] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z |
import FWCore.ParameterSet.Config as cms
run3_nanoAOD_devel = cms.Modifier()
| 19.5 | 40 | 0.820513 |
29aa65c529d8ece9233ccff13d236d4bc2a7ac6d | 4,892 | py | Python | python-3.4.4.amd64/Lib/site-packages/idlexlib/extensions/ClearWindow.py | CSnap/photogate | 208272ef39f4e86f40d431da2ca523e21701f789 | ["CC0-1.0"] | 2 | 2018-12-29T13:47:40.000Z | 2018-12-29T13:47:49.000Z | Build/External/WPy3710/python-3.7.1/Lib/site-packages/idlexlib/extensions/ClearWindow.py | Heono/Turtle-IDE | aa42dd8f658284601b1a8d3ffb92f157de5022e2 | ["MIT"] | 1 | 2022-03-17T16:46:04.000Z | 2022-03-17T16:46:04.000Z | Lib/site-packages/idlexlib/extensions/ClearWindow.py | JWerbrouck/RWTH_M1_Projekt | 7ae63a2277361fa3273cf0677b297379482b8240 | ["bzip2-1.0.6"] | null | null | null |
# IDLEX EXTENSION
## """
## Copyright(C) 2011-2012 The Board of Trustees of the University of Illinois.
## All rights reserved.
##
## Developed by: Roger D. Serwy
## University of Illinois
##
## Permission is hereby granted, free of charge, to any person obtaining
## a copy of this software and associated documentation files (the
## "Software"), to deal with the Software without restriction, including
## without limitation the rights to use, copy, modify, merge, publish,
## distribute, sublicense, and/or sell copies of the Software, and to
## permit persons to whom the Software is furnished to do so, subject to
## the following conditions:
##
## + Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimers.
## + Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimers in the
## documentation and/or other materials provided with the distribution.
## + Neither the names of Roger D. Serwy, the University of Illinois, nor
## the names of its contributors may be used to endorse or promote
## products derived from this Software without specific prior written
## permission.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
## OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
## IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR
## ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
## CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
## THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.
##
##
##
##
## Clear Window Extension
##
## About:
##
## It provides "Clear Shell Window" under "Options"
## with ability to undo.
##
## Part of Issue 6143
##
## """
config_extension_def = """
[ClearWindow]
enable=1
enable_editor=0
enable_shell=1
[ClearWindow_cfgBindings]
clear-window=<Control-Key-l>
"""
jn = lambda x,y: '%i.%i' % (x,y) # join integers to text coordinates
sp = lambda x: tuple(map(int, x.split('.'))) # convert tkinter Text coordinate to a line and column tuple
import sys
import re
from idlelib.UndoDelegator import DeleteCommand
ansi_re = re.compile(r'\x01?\x1b\[(.*?)m\x02?')
| 33.737931 | 107 | 0.634914 |
29aa9a45456c6db0c06ce0852d48191f56cbe430 | 104 | py | Python | src/hardware/GPIO_Map.py | lbowes/ascii-pong | 00e5a5f4b33a360f14299e6fc33f862880e5fb8f | ["MIT"] | null | null | null | src/hardware/GPIO_Map.py | lbowes/ascii-pong | 00e5a5f4b33a360f14299e6fc33f862880e5fb8f | ["MIT"] | null | null | null | src/hardware/GPIO_Map.py | lbowes/ascii-pong | 00e5a5f4b33a360f14299e6fc33f862880e5fb8f | ["MIT"] | 1 | 2021-03-10T17:22:00.000Z | 2021-03-10T17:22:00.000Z |
GPIO_CON_1_BUT_1 = 10
GPIO_CON_1_BUT_2 = 9
GPIO_CON_2_BUT_1 = 11
GPIO_CON_2_BUT_2 = 14
GPIO_BUZZER = 15
| 17.333333 | 21 | 0.807692 |
29ac9c03bbaa51b34d7d739bc8607fc9dd0af610 | 309 | py | Python | main.py | yaojenkuo/stockflow | 946609c2fcc1d602032672b57ae7119b4cadae8d | ["MIT"] | 33 | 2015-03-08T00:43:37.000Z | 2021-02-18T23:40:05.000Z | main.py | Asoul/stockflow | 946609c2fcc1d602032672b57ae7119b4cadae8d | ["MIT"] | null | null | null | main.py | Asoul/stockflow | 946609c2fcc1d602032672b57ae7119b4cadae8d | ["MIT"] | 25 | 2015-03-07T15:57:23.000Z | 2021-07-05T01:32:32.000Z |
#!/bin/python
# -*- coding: utf-8 -*-
''''''
import sys
from ctrls.Tester import Tester
from models.exampleModel import exampleModel
if __name__ == '__main__':
sys.exit(main())
| 17.166667 | 50 | 0.647249 |
29ad5c4ad4e9d3f8e84eb705d16ecf7d414f2aac | 4,025 | py | Python | tests/test_preprocessing_evaluation_pipelines.py | CLARIN-PL/embeddings | 49fb59b796475ca92bc262ec2bc6def1d89a10e0 | ["MIT"] | 33 | 2021-06-15T12:09:29.000Z | 2022-03-26T14:34:16.000Z | tests/test_preprocessing_evaluation_pipelines.py | CLARIN-PL/embeddings | 49fb59b796475ca92bc262ec2bc6def1d89a10e0 | ["MIT"] | 201 | 2021-03-23T05:50:23.000Z | 2022-03-31T22:56:04.000Z | tests/test_preprocessing_evaluation_pipelines.py | CLARIN-PL/embeddings | 49fb59b796475ca92bc262ec2bc6def1d89a10e0 | ["MIT"] | null | null | null |
from tempfile import TemporaryDirectory
from typing import Any, Dict, Tuple
import datasets
import flair
import numpy as np
import pytest
import torch
from flair.data import Corpus
from numpy import typing as nptyping
from embeddings.data.data_loader import HuggingFaceDataLoader
from embeddings.data.dataset import HuggingFaceDataset
from embeddings.pipeline.evaluation_pipeline import (
FlairSequenceLabelingEvaluationPipeline,
ModelEvaluationPipeline,
)
from embeddings.pipeline.preprocessing_pipeline import PreprocessingPipeline
from embeddings.transformation.flair_transformation.column_corpus_transformation import (
ColumnCorpusTransformation,
)
from embeddings.transformation.flair_transformation.downsample_corpus_transformation import (
DownsampleFlairCorpusTransformation,
)
from embeddings.transformation.flair_transformation.split_sample_corpus_transformation import (
SampleSplitsFlairCorpusTransformation,
)
from embeddings.utils.flair_corpus_persister import FlairConllPersister
def test_sequence_labeling_preprocessing_pipeline(
result_path: "TemporaryDirectory[str]",
embedding_name: str,
ner_dataset_name: str,
hidden_size: int,
task_train_kwargs: Dict[str, int],
sequence_labeling_preprocessing_pipeline: Tuple[
PreprocessingPipeline[str, datasets.DatasetDict, Corpus], "TemporaryDirectory[str]"
],
sequence_labeling_evaluation_pipeline: Tuple[
ModelEvaluationPipeline[str, Corpus, Dict[str, nptyping.NDArray[Any]], Dict[str, Any]],
"TemporaryDirectory[str]",
],
) -> None:
flair.set_seed(441)
flair.device = torch.device("cpu")
preprocessing_pipeline, path = sequence_labeling_preprocessing_pipeline
preprocessing_pipeline.run()
evaluation_pipeline, _ = sequence_labeling_evaluation_pipeline
result = evaluation_pipeline.run()
np.testing.assert_almost_equal(
result["seqeval__mode_None__scheme_None"]["overall_accuracy"], 0.7881773
)
np.testing.assert_almost_equal(result["seqeval__mode_None__scheme_None"]["overall_f1"], 0)
path.cleanup()
| 31.692913 | 99 | 0.766957 |
29adb65f2ba3f76e7586b891107a612d5e21f5e3 | 672 | py | Python | Exercises/Exercises_01/06_exercise.py | Szymon-Budziak/ASD_exercises_solutions | 36ccbdae03a6c7e4ad141a2b7b01bef9353574ee | ["MIT"] | 7 | 2021-12-28T23:38:42.000Z | 2022-03-29T16:36:16.000Z | Exercises/Exercises_01/06_exercise.py | Szymon-Budziak/ASD_exercises_solutions | 36ccbdae03a6c7e4ad141a2b7b01bef9353574ee | ["MIT"] | null | null | null | Exercises/Exercises_01/06_exercise.py | Szymon-Budziak/ASD_exercises_solutions | 36ccbdae03a6c7e4ad141a2b7b01bef9353574ee | ["MIT"] | 4 | 2021-06-29T20:21:52.000Z | 2022-03-12T10:04:17.000Z |
# Implement a function that receives a non-decreasingly sorted array A of size n
# and a number x, and checks whether x occurs in A. If it does, return the
# smallest index at which x occurs.
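def binary_search(A, left, right, x):
    # Hedged reconstruction (added): the definition was lost in extraction,
    # but the task statement above pins down the contract, so a classic
    # leftmost binary search is assumed. Returns None when x is absent.
    result = None
    while left <= right:
        mid = (left + right) // 2
        if A[mid] == x:
            result = mid
            right = mid - 1  # keep looking left for an earlier occurrence
        elif A[mid] < x:
            left = mid + 1
        else:
            right = mid - 1
    return result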
T = [0, 1, 2, 3, 4, 5, 5, 5, 6]
for i in range(len(T)):
print(i, binary_search(T, 0, len(T) - 1, T[i]))
| 28 | 103 | 0.574405 |
29ae59f7491eb508b08d30811e2ad409b6a63558 | 4,508 | py | Python | lib/sentencers/RuleBasedSentencer.py | gucorpling/GumDrop | 06e705adc5b78b048f199a3d6f50d911fed398e2 | ["Apache-2.0"] | null | null | null | lib/sentencers/RuleBasedSentencer.py | gucorpling/GumDrop | 06e705adc5b78b048f199a3d6f50d911fed398e2 | ["Apache-2.0"] | null | null | null | lib/sentencers/RuleBasedSentencer.py | gucorpling/GumDrop | 06e705adc5b78b048f199a3d6f50d911fed398e2 | ["Apache-2.0"] | null | null | null |
import re, io, os, sys
from nltk import word_tokenize
from argparse import ArgumentParser
# Allow package level imports in module
script_dir = os.path.dirname(os.path.realpath(__file__))
lib = os.path.abspath(script_dir + os.sep + "..")
sys.path.append(lib)
from conll_reader import space_join, text2conllu
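class RuleBasedSplitter:
    # Hedged stand-in (added): the original class body was lost in extraction,
    # so this is NOT the real GumDrop splitter -- only a minimal rule-based
    # placeholder with the same interface (a `lang` keyword argument and a
    # `predict(tokens)` method returning one 0/1 sentence-start flag per token).
    EOS = {".", "!", "?", ";", "。", "！", "？"}

    def __init__(self, lang="eng"):
        self.lang = lang

    def predict(self, tokens):
        sent_starts = []
        prev_was_eos = True  # the first token always opens a sentence
        for tok in tokens:
            sent_starts.append(1 if prev_was_eos else 0)
            prev_was_eos = tok in self.EOS
        return sent_starts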
if __name__ == "__main__":
p = ArgumentParser()
p.add_argument("-f", "--file", default=None, help="file to tokenize")
p.add_argument("-l", "--lang", default="eng", help="language 3 letter code",
choices=["eng", "spa", "fra", "deu", "eus", "nld", "rus", "por", "zho", "tur"])
opts = p.parse_args()
infile = opts.file
lang = opts.lang
# Run test
sentencer = RuleBasedSplitter(lang=lang)
if infile is None:
# Some default test tokens if no file provided
if lang == "zho":
tokens = ['', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', "", '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '']
elif lang == "nld":
tokens = ['Een', 'ieder', 'heeft', 'recht', 'op', 'onderwijs', ';', 'het', 'onderwijs', 'zal', 'kosteloos',
'zijn,', 'althans', 'wat', 'het', 'lager', 'en', 'basisonderwijs', 'betreft', '.', 'Het', 'lager',
'onderwijs', 'zal', 'verplicht', 'zijn', '.', 'Ambachtsonderwijs', 'en', 'beroepsopleiding',
'zullen', 'algemeen', 'beschikbaar', 'worden', 'gesteld', '.', 'Hoger', 'onderwijs', 'zal',
'openstaan', 'voor', 'een', 'ieder,', 'die', 'daartoe', 'de', 'begaafdheid', 'bezit', '.',
'Het', 'onderwijs', 'zal', 'gericht', 'zijn', 'op', 'de', 'volle', 'ontwikkeling', 'van', 'de',
'menselijke', 'persoonlijkheid', 'en', 'op', 'de', 'versterking', 'van', 'de', 'eerbied', 'voor',
'de', 'rechten', 'van', 'de', 'mens', 'en', 'de', 'fundamentele', 'vrijheden', '.']
else:
tokens = ['Introduction', 'Research', 'has', 'shown', 'examples', '.', 'But', 'we', 'need', 'more', '.']
else:
text = io.open(infile, encoding="utf8").read()
tokens = word_tokenize(text)
sent_starts = sentencer.predict(tokens)
print([(tok, boundary) for tok, boundary in (zip(tokens, sent_starts))])
| 37.882353 | 110 | 0.562999 |
29b0e35636d971fec8136ffc141e0dd2c3c239b5 | 2,878 | py | Python | pyogp/lib/client/tests/test_appearance.py | grobertson/PyOGP.lib.Client | 681492d95b9a901a79071b70c77bfdd55cdb02db | ["Apache-2.0"] | null | null | null | pyogp/lib/client/tests/test_appearance.py | grobertson/PyOGP.lib.Client | 681492d95b9a901a79071b70c77bfdd55cdb02db | ["Apache-2.0"] | null | null | null | pyogp/lib/client/tests/test_appearance.py | grobertson/PyOGP.lib.Client | 681492d95b9a901a79071b70c77bfdd55cdb02db | ["Apache-2.0"] | null | null | null |
"""
Contributors can be viewed at:
http://svn.secondlife.com/svn/linden/projects/2008/pyogp/lib/base/trunk/CONTRIBUTORS.txt
$LicenseInfo:firstyear=2008&license=apachev2$
Copyright 2009, Linden Research, Inc.
Licensed under the Apache License, Version 2.0.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
or in
http://svn.secondlife.com/svn/linden/projects/2008/pyogp/lib/base/LICENSE.txt
$/LicenseInfo$
"""
# standard python libs
import unittest
from binascii import unhexlify
#related
# pyogp
from pyogp.lib.client.appearance import *
from pyogp.lib.client.settings import Settings
from pyogp.lib.client.agent import Agent
from pyogp.lib.client.region import Region
from pyogp.lib.base.datatypes import *
# pyogp messaging
from pyogp.lib.base.message.udpdeserializer import UDPMessageDeserializer
# pyogp tests
import pyogp.lib.base.tests.config
def test_suite():
from unittest import TestSuite, makeSuite
suite = TestSuite()
suite.addTest(makeSuite(TestAppearance))
return suite
| 32.704545 | 89 | 0.723767 |
29b119e99bde0832d57541650801a62ec77c42f6 | 1,017 | py | Python | jisho_api/word/cfg.py | finia2NA/jisho-api | c80beb44a7b70f24e799cd2a7d579356c58f8625 | ["Apache-2.0"] | 26 | 2021-10-05T03:54:33.000Z | 2022-03-26T10:46:31.000Z | jisho_api/word/cfg.py | finia2NA/jisho-api | c80beb44a7b70f24e799cd2a7d579356c58f8625 | ["Apache-2.0"] | 7 | 2021-11-22T00:43:30.000Z | 2022-01-12T00:34:22.000Z | jisho_api/word/cfg.py | finia2NA/jisho-api | c80beb44a7b70f24e799cd2a7d579356c58f8625 | ["Apache-2.0"] | 4 | 2021-12-08T13:41:07.000Z | 2022-03-25T20:54:07.000Z |
from enum import Enum
from typing import List, Optional
from pydantic import BaseModel, HttpUrl
| 19.941176 | 60 | 0.647984 |
29b134fd22e0ec5acfe0ea6bb8fddd3eb700cbd7 | 1,018 | py | Python | tests/validators/test_symbol_required.py | Ennkua/wtforms | c08ec7840c5a78ae8784139f7ee70f9627cf1ab8 | ["BSD-3-Clause"] | null | null | null | tests/validators/test_symbol_required.py | Ennkua/wtforms | c08ec7840c5a78ae8784139f7ee70f9627cf1ab8 | ["BSD-3-Clause"] | null | null | null | tests/validators/test_symbol_required.py | Ennkua/wtforms | c08ec7840c5a78ae8784139f7ee70f9627cf1ab8 | ["BSD-3-Clause"] | null | null | null |
import pytest
from wtforms.validators import symbol_required
from wtforms.validators import ValidationError
| 28.277778 | 79 | 0.674853 |
29b245fab6ed28cf6c359207c9c4af61c43d22d1 | 102 | py | Python | ch7/exercises/parrot.py | hewittaj/python_crash_course | 52a3341eec79c2eb6c7f9f1cb7f0806c3b2d61aa | ["MIT"] | null | null | null | ch7/exercises/parrot.py | hewittaj/python_crash_course | 52a3341eec79c2eb6c7f9f1cb7f0806c3b2d61aa | ["MIT"] | null | null | null | ch7/exercises/parrot.py | hewittaj/python_crash_course | 52a3341eec79c2eb6c7f9f1cb7f0806c3b2d61aa | ["MIT"] | null | null | null |
# using the input() function
message = input("Tell me something, and I'll repeat it!")
print(message) | 25.5 | 57 | 0.72549 |
29b2e2e2b5e0b11ab0a21e7a356d8c2fabd4abe1 | 1,028 | py | Python | src/Nodes/WhileOp.py | gabrielzezze/z-lang | 89be471fd5618a9d1c9e3eb955608cdc888511c2 | ["MIT"] | null | null | null | src/Nodes/WhileOp.py | gabrielzezze/z-lang | 89be471fd5618a9d1c9e3eb955608cdc888511c2 | ["MIT"] | null | null | null | src/Nodes/WhileOp.py | gabrielzezze/z-lang | 89be471fd5618a9d1c9e3eb955608cdc888511c2 | ["MIT"] | null | null | null |
from src.Node import Node
from src.Nodes import Block
from src.SymbolTable import SymbolTable
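class WhileOp(Node):
    # Hedged reconstruction (added): the class body was lost in extraction.
    # The shape is inferred from the imports above -- a while node keeps a
    # condition child and a Block child -- and the base-class constructor
    # signature is an assumption, not the repository's confirmed API.
    def __init__(self, value, children):
        super().__init__(value, children)

    def Evaluate(self, symbol_table: SymbolTable):
        condition, block = self.children[0], self.children[1]
        while condition.Evaluate(symbol_table):
            block.Evaluate(symbol_table)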
| 35.448276 | 82 | 0.694553 |
29b5b93fcc93149c869189a925d3bab4277eed76 | 748 | py | Python | googledevices/cli/commands/info.py | vlebourl/googledevices | 5d8604ad48d94170127d1da9f01106a4d3bc4845 | ["MIT"] | 19 | 2018-11-24T03:09:59.000Z | 2021-02-11T09:20:11.000Z | googledevices/cli/commands/info.py | vlebourl/googledevices | 5d8604ad48d94170127d1da9f01106a4d3bc4845 | ["MIT"] | 13 | 2018-11-24T13:16:38.000Z | 2022-02-22T17:27:08.000Z | googledevices/cli/commands/info.py | vlebourl/googledevices | 5d8604ad48d94170127d1da9f01106a4d3bc4845 | ["MIT"] | 4 | 2018-11-26T16:14:42.000Z | 2021-10-20T14:20:40.000Z |
"""Get information about this package."""
def info(system):
"""Get information about this package."""
import googledevices.utils.const as package
    print("Project name:     ", package.NAME)
print("Version: ", package.VERSION)
print("GitHub link: ", package.URLS.get("github"))
print("PyPi link: ", package.URLS.get("pypi"))
print("Maintainers:")
for maintainer in package.MAINTAINERS:
print(" ", maintainer.get("name"), "(", maintainer.get("github"), ")")
print("")
if system:
import platform
print("")
print("System: ", platform.system())
print("Version: ", platform.version())
print("Python version: ", platform.python_version())
| 32.521739 | 81 | 0.593583 |
29b61776c27c79d1d7092a2b9bd2ee11a295186e | 251 | py | Python | Check_if_subarray_with_0_sum_is_exists_or_not.py | KiranPesarlanka9/data-structures-and-algorithms-Problems | 557e3ca7f04b37fa5a709295f455b6338815486e | ["MIT"] | 1 | 2019-11-28T12:21:51.000Z | 2019-11-28T12:21:51.000Z | Check_if_subarray_with_0_sum_is_exists_or_not.py | KiranPesarlanka9/data-structures-and-algorithms-Problems | 557e3ca7f04b37fa5a709295f455b6338815486e | ["MIT"] | null | null | null | Check_if_subarray_with_0_sum_is_exists_or_not.py | KiranPesarlanka9/data-structures-and-algorithms-Problems | 557e3ca7f04b37fa5a709295f455b6338815486e | ["MIT"] | 1 | 2019-12-06T09:18:41.000Z | 2019-12-06T09:18:41.000Z |
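def check(A):
    # Hedged reconstruction (added): the definition was lost in extraction.
    # A zero-sum subarray exists iff some prefix sum repeats (or equals zero),
    # so a single O(n) pass with a set of seen prefix sums suffices.
    seen = set()
    prefix = 0
    for value in A:
        prefix += value
        if prefix == 0 or prefix in seen:
            return True
        seen.add(prefix)
    return False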
arr = [1, 0, -2, 5, -4, 1, 9, -2]
print(check(arr))
| 15.6875 | 34 | 0.49004 |
29b90065070b5025868557255475b9c600fb78b4 | 1,588 | py | Python | scripts/join_completed.py | shannonfenn/data-tools | c730c2f88b8443f3c84a41467a40b2cc59dd8e87 | ["MIT"] | null | null | null | scripts/join_completed.py | shannonfenn/data-tools | c730c2f88b8443f3c84a41467a40b2cc59dd8e87 | ["MIT"] | null | null | null | scripts/join_completed.py | shannonfenn/data-tools | c730c2f88b8443f3c84a41467a40b2cc59dd8e87 | ["MIT"] | null | null | null |
#! /usr/bin/env python
import argparse
import pandas as pd
import numpy as np
if __name__ == '__main__':
main()
| 31.137255 | 72 | 0.632872 |
29b95a7e7b6ab6d04a7196faa187fadcabb8c0e4 | 9,859 | py | Python | pet/preprocessor.py | YerongLi/pet | 8323080e9033c38c234431aecacad154ed477472 | ["Apache-2.0"] | null | null | null | pet/preprocessor.py | YerongLi/pet | 8323080e9033c38c234431aecacad154ed477472 | ["Apache-2.0"] | null | null | null | pet/preprocessor.py | YerongLi/pet | 8323080e9033c38c234431aecacad154ed477472 | ["Apache-2.0"] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import List, Optional
import numpy as np
from pet.utils import InputFeatures, InputExample, PLMInputFeatures, GenerativeInputFeatures, GenerativeInputExample
from pet.pvp import PVPS, PVP
| 48.566502 | 120 | 0.674713 |
29ba20d9bd3f8cb7d67a41fe698ce4a315481ebd | 21 | py | Python | test/__init__.py | mzappitello/http_monitor | 6a20e41bdbab480090de3c8d760bc7c425b9c899 | ["MIT"] | null | null | null | test/__init__.py | mzappitello/http_monitor | 6a20e41bdbab480090de3c8d760bc7c425b9c899 | ["MIT"] | null | null | null | test/__init__.py | mzappitello/http_monitor | 6a20e41bdbab480090de3c8d760bc7c425b9c899 | ["MIT"] | null | null | null |
# test __init__ file
| 10.5 | 20 | 0.761905 |
29ba94b0967bd5341e441dd394da5100f547c093 | 3,542 | py | Python | xbrl/const.py | blinkace/pxp | 9155103dc166674137bd0e2fddb609ca44875761 | ["MIT"] | 1 | 2022-01-27T14:53:23.000Z | 2022-01-27T14:53:23.000Z | xbrl/const.py | blinkace/pxp | 9155103dc166674137bd0e2fddb609ca44875761 | ["MIT"] | null | null | null | xbrl/const.py | blinkace/pxp | 9155103dc166674137bd0e2fddb609ca44875761 | ["MIT"] | null | null | null |
import re
PREFIX = {}
NSMAP = {}
OIM_COMMON_RESERVED_PREFIXES = {}
OIM_COMMON_RESERVED_PREFIX_MAP = {}
buildPrefixMaps()
LINK_RESERVED_URI_MAP = {
"_": LinkGroup.default,
"footnote": LinkType.footnote,
"explanatoryFact": LinkType.explanatoryFact,
}
| 37.680851 | 78 | 0.660926 |
29bcfd631b01019c349e3bbedaeeb2cbda9283d5 | 2,832 | py | Python | src/cogs/xpevent.py | nsde/lhxp | ef6d1004c704c1156b9b01172e4748634b31b541 | ["MIT"] | 2 | 2021-12-18T11:44:31.000Z | 2022-01-07T23:27:00.000Z | src/cogs/xpevent.py | nsde/lhxp | ef6d1004c704c1156b9b01172e4748634b31b541 | ["MIT"] | null | null | null | src/cogs/xpevent.py | nsde/lhxp | ef6d1004c704c1156b9b01172e4748634b31b541 | ["MIT"] | null | null | null |
try:
from .helpers import config, management, xp, spam
except ImportError:
import helpers.config, helpers.management, helpers.xp, helpers.spam
import time
import discord
from discord.ext import commands
from discord.commands import slash_command | 32.551724 | 92 | 0.628884 |
29be043b68e9b14821af31619772ea7a817c2a7b | 2,199 | py | Python | utilities/utils.py | jluech/PGAcloud_Manager | 9008fac26f9d762b2ab527034e46d467b5b0c26f | ["MIT"] | null | null | null | utilities/utils.py | jluech/PGAcloud_Manager | 9008fac26f9d762b2ab527034e46d467b5b0c26f | ["MIT"] | null | null | null | utilities/utils.py | jluech/PGAcloud_Manager | 9008fac26f9d762b2ab527034e46d467b5b0c26f | ["MIT"] | null | null | null |
import logging
import os
import subprocess
import sys
import yaml
files_dir = ""
# --- General util commands ---
# --- File and path handling commands ---
| 23.393617 | 76 | 0.637108 |
29c079a0baef167378f06f75800a84013625dfce | 7,958 | py | Python | Scripts Daily/재무정보수집.py | oms1226/msbot | 4c141502ef6899f9e4bb3fe8e03c7eb866487d5e | ["MIT"] | 1 | 2020-05-01T07:50:49.000Z | 2020-05-01T07:50:49.000Z | Scripts Daily/재무정보수집.py | oms1226/msbot | 4c141502ef6899f9e4bb3fe8e03c7eb866487d5e | ["MIT"] | 1 | 2021-06-01T22:36:14.000Z | 2021-06-01T22:36:14.000Z | Scripts Daily/재무정보수집.py | oms1226/msbot | 4c141502ef6899f9e4bb3fe8e03c7eb866487d5e | ["MIT"] | 8 | 2019-10-26T03:30:53.000Z | 2022-03-26T08:06:25.000Z |
# -*- coding: utf-8 -*-
import re
import calendar
import datetime, time
from datetime import timedelta
import urllib.request
import requests, json
from http.cookiejar import CookieJar
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
from pandas import DataFrame
import pandas.io.sql as pdsql
from matplotlib import dates
import sqlite3
DATABASE = '..\\DATA\\mymoneybot.sqlite'
if __name__ == "__main__":
# -
build_fundamental_data()
| 37.895238 | 199 | 0.539834 |
29c2e1b7e5523be19b17e937a85dde93fdb45fab | 24,237 | py | Python | apps/recurring_donations/management/commands/process_monthly_donations.py | gannetson/sportschooldeopenlucht | 0c78e5a95b22a963244112e478119ba60c572141 | ["BSD-3-Clause"] | 1 | 2019-01-19T06:58:39.000Z | 2019-01-19T06:58:39.000Z | apps/recurring_donations/management/commands/process_monthly_donations.py | gannetson/sportschooldeopenlucht | 0c78e5a95b22a963244112e478119ba60c572141 | ["BSD-3-Clause"] | null | null | null | apps/recurring_donations/management/commands/process_monthly_donations.py | gannetson/sportschooldeopenlucht | 0c78e5a95b22a963244112e478119ba60c572141 | ["BSD-3-Clause"] | null | null | null |
import csv
import os
import math
import logging
import traceback
import requests
import sys
from collections import namedtuple
from optparse import make_option
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand
from django.db import connection
from django.db import transaction
from django.utils import timezone
from apps.cowry_docdata.adapters import WebDirectDocDataDirectDebitPaymentAdapter
from apps.cowry_docdata.exceptions import DocDataPaymentException
from apps.cowry_docdata.models import DocDataPaymentOrder
from apps.fund.models import RecurringDirectDebitPayment, Order, OrderStatuses, Donation, OrderItem
from apps.projects.models import Project, ProjectPhases
from ...mails import mail_monthly_donation_processed_notification
logger = logging.getLogger(__name__)
#
# Run with:
# ./manage.py process_monthly_donations -v 2 --settings=bluebottle.settings.local (or .production etc.)
#
def update_last_donation(donation, remaining_amount, popular_projects):
"""
Updates the last donation with the remaining amount of the payment. If the donation is more than the project
needs, the project will be filled and the balance will be used to fill the popular projects recursively.
"""
project = Project.objects.get(id=donation.project_id)
# Base case.
if project.projectcampaign.money_donated + remaining_amount <= project.projectcampaign.money_asked or \
len(popular_projects) == 0:
# The remaining amount won't fill up the project or we have no more projects to try. We're done.
logger.debug(u"Donation is less than project '{0}' needs. No further adjustments are needed.".format(project.title))
donation.amount = remaining_amount
donation.donation_type = Donation.DonationTypes.recurring
donation.save()
return
# Recursive case.
else:
# Fill up the project.
logger.debug(u"Donation is more than project '{0}' needs. Filling up project and creating new donation.".format(project.title))
donation.amount = project.projectcampaign.money_asked - project.projectcampaign.money_donated
donation.donation_type = Donation.DonationTypes.recurring
donation.save()
# Create a new Donation and recursively update it with the remaining amount.
ct = ContentType.objects.get_for_model(donation)
order = OrderItem.objects.get(content_type=ct, content_object=donation)
new_project = popular_projects.pop(0)
new_donation = Donation.objects.create(user=donation.user, project=new_project, amount=0, currency='EUR',
donation_type=Donation.DonationTypes.recurring)
OrderItem.objects.create(content_object=new_donation, order=order)
update_last_donation(new_donation, remaining_amount - donation.amount, popular_projects)
def create_recurring_order(user, projects, order=None):
"""
Creates a recurring Order with donations to the supplied projects.
"""
if not order:
order = Order.objects.create(status=OrderStatuses.recurring, user=user, recurring=True)
for p in projects:
project = Project.objects.get(id=p.id)
if project.phase == ProjectPhases.campaign:
donation = Donation.objects.create(user=user, project=project, amount=0, currency='EUR',
donation_type=Donation.DonationTypes.recurring)
OrderItem.objects.create(content_object=donation, order=order)
return order
def correct_donation_amounts(popular_projects, recurring_order, recurring_payment):
"""
Divides the total amount for the monthly donation across all projects. This method deals with the case of a
donation filling up a project.
"""
remaining_amount = recurring_payment.amount
num_donations = recurring_order.donations.count()
amount_per_project = math.floor(recurring_payment.amount / num_donations)
donations = recurring_order.donations
for i in range(0, num_donations - 1):
donation = donations[i]
project = Project.objects.get(id=donation.project_id)
if project.projectcampaign.money_donated + amount_per_project > project.projectcampaign.money_asked:
donation.amount = project.projectcampaign.money_asked - project.projectcampaign.money_donated
else:
donation.amount = amount_per_project
donation.donation_type = Donation.DonationTypes.recurring
donation.save()
remaining_amount -= donation.amount
# Update the last donation with the remaining amount.
update_last_donation(donations[num_donations - 1], remaining_amount, popular_projects)
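# Worked example (hypothetical figures, not from the original module): for a
# payment of 100 split across 3 donations, the loop assigns floor(100/3) = 33
# to each of the first two donations (less if that would overfill a project),
# and update_last_donation() receives the remaining 34, so rounding never
# drops a cent of the payment.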
def set_order_created_datetime(recurring_order, order_created_datetime):
""" Uses custom SQL to set the created time of Order to a consistent value. """
db_table = recurring_order._meta.db_table
pk_name = recurring_order._meta.pk.name
logger.debug("Setting created and updated to {0} on Order {1}.".format(order_created_datetime, recurring_order.id))
cursor = connection.cursor()
sql_statement = "UPDATE {0} SET created = '{1}' WHERE {2} = {3}".format(db_table, order_created_datetime,
pk_name, recurring_order.pk)
cursor.execute(sql_statement)
sql_statement = "UPDATE {0} SET updated = '{1}' WHERE {2} = {3}".format(db_table, order_created_datetime,
pk_name, recurring_order.pk)
cursor.execute(sql_statement)
transaction.commit_unless_managed()
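# Editorial note (assumption, not original code): raw SQL is presumably needed
# here because auto_now/auto_now_add timestamp fields ignore values assigned
# through save(), so a consistent created/updated time can only be forced with
# a direct UPDATE.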
def process_monthly_donations(recurring_payments_queryset, send_email):
""" The starting point for creating DocData payments for the monthly donations. """
recurring_donation_errors = []
RecurringDonationError = namedtuple('RecurringDonationError', 'recurring_payment error_message')
skipped_recurring_payments = []
SkippedRecurringPayment = namedtuple('SkippedRecurringPayment', 'recurring_payment orders')
donation_count = 0
# The adapter is used after the recurring Order and donations have been adjusted. It's created here so that we can
# reuse it to process all recurring donations.
webdirect_payment_adapter = WebDirectDocDataDirectDebitPaymentAdapter()
# A consistent created time to use for the created recurring Orders.
order_created_datetime = timezone.now()
# Fixed lists of the popular projects.
popular_projects_all = list(Project.objects.filter(phase=ProjectPhases.campaign).order_by('-popularity'))
top_three_projects = popular_projects_all[:3]
popular_projects_rest = popular_projects_all[3:]
logger.info("Config: Using these projects as 'Top Three':")
for project in top_three_projects:
logger.info(" {0}".format(project.title))
# The main loop that processes each monthly donation.
for recurring_payment in recurring_payments_queryset:
top_three_donation = False
user_selected_projects = []
# Skip payment if there has been a recurring Order recently.
ten_days_ago = timezone.now() + timezone.timedelta(days=-10)
recent_closed_recurring_orders = Order.objects.filter(user=recurring_payment.user, status=OrderStatuses.closed,
recurring=True, updated__gt=ten_days_ago)
if recent_closed_recurring_orders.count() > 0:
skipped_recurring_payments.append(SkippedRecurringPayment(recurring_payment, list(recent_closed_recurring_orders)))
logger.warn(
"Skipping '{0}' because it looks like it has been processed recently with one of these Orders:".format(
recurring_payment))
for closed_order in recent_closed_recurring_orders:
logger.warn(" Order Number: {0}".format(closed_order.order_number))
continue
# Check if there is a monthly shopping cart (Order status is 'recurring') for this recurring_payment user.
try:
recurring_order = Order.objects.get(user=recurring_payment.user, status=OrderStatuses.recurring)
logger.debug("Using existing recurring Order for user: {0}.".format(recurring_payment.user))
except Order.DoesNotExist:
# There is no monthly shopping cart. The user is supporting the top three projects so we need to create an
# Order with Donations for the top three projects.
logger.debug("Creating new 'Top Three' recurring Order for user {0}.".format(recurring_payment.user))
recurring_order = create_recurring_order(recurring_payment.user, top_three_projects)
top_three_donation = True
except Order.MultipleObjectsReturned:
error_message = "Multiple Orders with status 'recurring' returned for '{0}'. Not processing this recurring donation.".format(
recurring_payment)
logger.error(error_message)
recurring_donation_errors.append(RecurringDonationError(recurring_payment, error_message))
continue
        # Skip this payment if it is below the DocData minimum for direct debit.
if recurring_payment.amount < 113:
# Cleanup the Order if there's an error.
if top_three_donation:
recurring_order.delete()
error_message = "Payment amount for '{0}' is less than the DocData minimum for direct debit (113). Skipping.".format(
recurring_payment)
logger.error(error_message)
recurring_donation_errors.append(RecurringDonationError(recurring_payment, error_message))
continue
# Remove donations to projects that are no longer in the campaign phase.
for donation in recurring_order.donations:
project = Project.objects.get(id=donation.project.id)
if project.phase != ProjectPhases.campaign:
ctype = ContentType.objects.get_for_model(donation)
order_item = OrderItem.objects.get(object_id=donation.id, content_type=ctype)
order_item.delete()
donation.delete()
if recurring_order.donations.count() > 0:
# There are donations in the recurring Order and we need to redistribute / correct the donation amounts.
# Save a copy of the projects that have been selected by the user so that the monthly shopping cart can
            # be recreated after the payment has been successfully started.
for donation in recurring_order.donations:
user_selected_projects.append(donation.project)
correct_donation_amounts(popular_projects_all, recurring_order, recurring_payment)
else:
# There are no donations in the recurring Order so we need to create a monthly shopping cart to support the
# top three projects and redistribute / correct the donation amounts.
create_recurring_order(recurring_payment.user, top_three_projects, recurring_order)
if recurring_order.donations.count() == 0:
logger.debug("The top three donations are full. Using next three projects for top three.")
top_three_projects = popular_projects_rest[:3]
popular_projects_rest = popular_projects_rest[3:]
create_recurring_order(recurring_payment.user, top_three_projects, recurring_order)
correct_donation_amounts(popular_projects_rest, recurring_order, recurring_payment)
top_three_donation = True
# At this point the order should be correctly setup and ready for the DocData payment.
if top_three_donation:
donation_type_message = "supporting the 'Top Three' projects"
else:
donation_type_message = "with {0} donations".format(recurring_order.donations.count())
logger.info("Starting payment for '{0}' {1}.".format(recurring_payment, donation_type_message))
        # Safety check to ensure the modifications to the donations in the recurring Order result in a total that
# matches the RecurringDirectDebitPayment.
if recurring_payment.amount != recurring_order.total:
# Cleanup the Order if there's an error.
if top_three_donation:
recurring_order.delete()
error_message = "RecurringDirectDebitPayment amount: {0} does not equal recurring Order amount: {1} for '{2}'. Not processing this recurring donation.".format(
recurring_payment.amount, recurring_order.total, recurring_payment)
logger.error(error_message)
recurring_donation_errors.append(RecurringDonationError(recurring_payment, error_message))
continue
        # Check if the IBAN / BIC is stored correctly on the RecurringDirectDebitPayment. The last comparison
        # verifies that the BIC's bank code (its first four characters) matches the bank code embedded in a
        # Dutch IBAN (characters five through eight).
if recurring_payment.iban == '' or recurring_payment.bic == '' or \
not recurring_payment.iban.endswith(recurring_payment.account) or \
recurring_payment.bic[:4] != recurring_payment.iban[4:8]:
# Cleanup the Order if there's an error.
if top_three_donation:
recurring_order.delete()
error_message = "Cannot create payment because the IBAN and/or BIC are not available."
logger.error(error_message)
recurring_donation_errors.append(RecurringDonationError(recurring_payment, error_message))
continue
# Create and fill in the DocDataPaymentOrder.
payment = DocDataPaymentOrder()
payment.order = recurring_order
payment.payment_method_id = 'dd-webdirect'
payment.amount = recurring_payment.amount
payment.currency = recurring_payment.currency
payment.customer_id = recurring_payment.user.id
payment.email = recurring_payment.user.email
# Use the recurring payment name (bank account name) to set the first and last name if they're not set.
if not recurring_payment.user.first_name:
if ' ' in recurring_payment.name:
payment.first_name = recurring_payment.name.split(' ')[0]
else:
payment.first_name = recurring_payment.name
else:
payment.first_name = recurring_payment.user.first_name
if not recurring_payment.user.last_name:
if ' ' in recurring_payment.name:
payment.last_name = recurring_payment.name[recurring_payment.name.index(' ') + 1:]
else:
payment.last_name = recurring_payment.name
else:
payment.last_name = recurring_payment.user.last_name
# Try to use the address from the profile if it's set.
address = recurring_payment.user.address
if not address:
# Cleanup the Order if there's an error.
if top_three_donation:
recurring_order.delete()
error_message = "Cannot create a payment for '{0}' because user does not have an address set.".format(recurring_payment)
logger.error(error_message)
recurring_donation_errors.append(RecurringDonationError(recurring_payment, error_message))
continue
# Set a default value for the pieces of the address that we don't have.
unknown_value = u'Unknown'
if not address.line1:
logger.warn("User '{0}' does not have their street and street number set. Using '{1}'.".format(recurring_payment.user, unknown_value))
payment.address = unknown_value
else:
payment.address = address.line1
if not address.city:
logger.warn("User '{0}' does not have their city set. Using '{1}'.".format(recurring_payment.user, unknown_value))
payment.city = unknown_value
else:
payment.city = address.city
if not address.postal_code:
logger.warn("User '{0}' does not have their postal code set. Using '{1}'.".format(recurring_payment.user, unknown_value))
payment.postal_code = unknown_value
else:
payment.postal_code = address.postal_code
# Assume the Netherlands when country not set.
if address.country:
payment.country = address.country.alpha2_code
else:
payment.country = 'NL'
# Try to use the language from the User settings if it's set.
if recurring_payment.user.primary_language:
payment.language = recurring_payment.user.primary_language[:2] # Cut off locale.
else:
payment.language = 'nl'
payment.save()
# Start the WebDirect payment.
try:
webdirect_payment_adapter.create_remote_payment_order(payment)
except DocDataPaymentException as e:
# Cleanup the Order if there's an error.
if top_three_donation:
recurring_order.delete()
error_message = "Problem creating remote payment order."
logger.error(error_message)
recurring_donation_errors.append(
RecurringDonationError(recurring_payment, "{0} {1}".format(error_message, e.message)))
continue
else:
recurring_order.status = OrderStatuses.closed
recurring_order.save()
try:
webdirect_payment_adapter.start_payment(payment, recurring_payment)
except DocDataPaymentException as e:
# Cleanup the Order if there's an error.
if top_three_donation:
recurring_order.delete()
else:
recurring_order.status = OrderStatuses.recurring
recurring_order.save()
error_message = "Problem starting payment."
logger.error(error_message)
recurring_donation_errors.append(
RecurringDonationError(recurring_payment, "{0} {1}".format(error_message, e.message)))
continue
logger.debug("Payment for '{0}' started.".format(recurring_payment))
donation_count += 1
# Send an email to the user.
if send_email:
mail_monthly_donation_processed_notification(recurring_payment, recurring_order)
# Create a new recurring Order (monthly shopping cart) for donations that are not to the 'Top Three'.
if not top_three_donation and len(user_selected_projects) > 0:
new_recurring_order = create_recurring_order(recurring_payment.user, user_selected_projects)
# Adjust donation amounts in a simple way for the recurring Order (the monthly donations shopping cart).
num_donations = new_recurring_order.donations.count()
amount_per_project = math.floor(recurring_payment.amount / num_donations)
donations = new_recurring_order.donations
for i in range(0, num_donations - 1):
donation = donations[i]
donation.amount = amount_per_project
donation.donation_type = Donation.DonationTypes.recurring
donation.save()
# Update the last donation with the remaining amount.
donation = donations[num_donations - 1]
donation.amount = recurring_payment.amount - (amount_per_project * (num_donations - 1))
donation.donation_type = Donation.DonationTypes.recurring
donation.save()
set_order_created_datetime(recurring_order, order_created_datetime)
logger.info("")
logger.info("Recurring Donation Processing Summary")
logger.info("=====================================")
logger.info("")
logger.info("Total number of recurring donations: {0}".format(recurring_payments_queryset.count()))
logger.info("Number of recurring Orders successfully processed: {0}".format(donation_count))
logger.info("Number of errors: {0}".format(len(recurring_donation_errors)))
logger.info("Number of skipped payments: {0}".format(len(skipped_recurring_payments)))
if len(recurring_donation_errors) > 0:
logger.info("")
logger.info("")
logger.info("Detailed Error List")
logger.info("===================")
logger.info("")
for error in recurring_donation_errors:
logger.info("RecurringDirectDebitPayment: {0} {1}".format(error.recurring_payment.id, error.recurring_payment))
logger.info("Error: {0}".format(error.error_message))
logger.info("--")
if len(skipped_recurring_payments) > 0:
logger.info("")
logger.info("")
logger.info("Skipped Recurring Payments")
logger.info("==========================")
logger.info("")
for skipped_payment in skipped_recurring_payments:
logger.info("RecurringDirectDebitPayment: {0} {1}".format(skipped_payment.recurring_payment.id, skipped_payment.recurring_payment))
for closed_order in skipped_payment.orders:
logger.info("Order Number: {0}".format(closed_order.order_number))
logger.info("--")
| 49.564417 | 171 | 0.67562 |
29c3750914f24305e5c021af40b18b30bd0ff4d0 | 5,387 | py | Python | information_extraction/Preprocessor.py | shatha2014/Fashion_Rec | 5f4dd4f1c7c2d18a9364b02f1798125c259e6598 | [
"BSD-2-Clause"
] | 11 | 2018-08-30T10:52:35.000Z | 2021-11-08T06:04:22.000Z | information_extraction/Preprocessor.py | shatha2014/Fashion_Rec | 5f4dd4f1c7c2d18a9364b02f1798125c259e6598 | [
"BSD-2-Clause"
] | 1 | 2020-09-08T19:53:48.000Z | 2021-11-08T13:29:42.000Z | information_extraction/Preprocessor.py | shatha2014/Fashion_Rec | 5f4dd4f1c7c2d18a9364b02f1798125c259e6598 | [
"BSD-2-Clause"
] | 8 | 2018-08-30T10:52:37.000Z | 2022-02-20T09:13:40.000Z | # Author: Kim Hammar <kimham@kth.se> KTH 2018
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import TweetTokenizer
from nltk.tag.perceptron import PerceptronTagger
import nltk
import emoji
nltk.download('averaged_perceptron_tagger')
nltk.download('stopwords')
nltk.download('wordnet')
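# The class body of this module was truncated in the dump above. A minimal
# sketch of the normalisation pipeline these imports suggest -- the function
# name and exact steps are assumptions, not the original Fashion_Rec code:
def preprocess(text):
    tokenizer = TweetTokenizer()
    lemmatizer = WordNetLemmatizer()
    stop_words = set(stopwords.words('english'))
    # Lower-case, tokenize tweet-style text, and drop stop words.
    tokens = [t for t in tokenizer.tokenize(text.lower()) if t not in stop_words]
    # Reduce each surviving token to its lemma.
    return [lemmatizer.lemmatize(token) for token in tokens]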
| 45.652542 | 131 | 0.667347 |
29c3fd69e8b7142e1bb7b65ea92363c60fad4735 | 47 | py | Python | searl/__init__.py | automl/SEARL | bac75d8c9540ff4f0b5b340c612ec384b189bd84 | [
"Apache-2.0"
] | 25 | 2021-03-10T09:10:53.000Z | 2022-03-28T09:11:16.000Z | searl/__init__.py | automl/SEARL | bac75d8c9540ff4f0b5b340c612ec384b189bd84 | [
"Apache-2.0"
] | null | null | null | searl/__init__.py | automl/SEARL | bac75d8c9540ff4f0b5b340c612ec384b189bd84 | [
"Apache-2.0"
] | 4 | 2021-03-17T15:00:02.000Z | 2021-07-24T13:35:39.000Z | from .utils.handler.config import ConfigHandler | 47 | 47 | 0.87234 |
29c4a45e5143815cb47c3724fcaecb30960fac72 | 475 | py | Python | src/kotify/fabric/procfile.py | kotify/kotify.fabric | 5ce50a38210217f643c81438b53466b60fc16cb1 | [
"MIT"
] | null | null | null | src/kotify/fabric/procfile.py | kotify/kotify.fabric | 5ce50a38210217f643c81438b53466b60fc16cb1 | [
"MIT"
] | null | null | null | src/kotify/fabric/procfile.py | kotify/kotify.fabric | 5ce50a38210217f643c81438b53466b60fc16cb1 | [
"MIT"
] | null | null | null | from ._core import Collection, local, task
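# The task definitions were truncated in this dump. Minimal invoke-style
# placeholders (signatures and bodies are guesses, not the real project) so
# the add_task() calls below resolve:
@task
def start_all(c):
    pass  # real body unknown; presumably starts every Procfile process

@task
def start_main(c):
    pass  # real body unknown

@task
def start_minimal(c):
    pass  # real body unknown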
ns = Collection("start")
ns.add_task(start_all)
ns.add_task(start_main)
ns.add_task(start_minimal)
| 20.652174 | 84 | 0.692632 |
29c698fcf98da3c177cd1347dd70acef351370fb | 888 | py | Python | backend/src/feature_extraction/rolloff.py | AnXi-TieGuanYin-Tea/MusicGenreClassifiaction | a0b9f621b0a5d2451180b12af7681756c5abd138 | [
"MIT"
] | 7 | 2018-05-01T19:39:17.000Z | 2020-01-02T17:11:05.000Z | backend/src/feature_extraction/rolloff.py | AnXi-TieGuanYin-Tea/MusicGenreClassifiaction | a0b9f621b0a5d2451180b12af7681756c5abd138 | [
"MIT"
] | 10 | 2018-12-10T22:16:43.000Z | 2020-08-27T18:23:45.000Z | backend/src/feature_extraction/rolloff.py | AnXi-TieGuanYin-Tea/MusicGenreClassifiaction | a0b9f621b0a5d2451180b12af7681756c5abd138 | [
"MIT"
] | 2 | 2021-04-16T08:20:17.000Z | 2022-01-06T14:06:44.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 17 23:14:28 2018
@author: Akihiro Inui
"""
def rolloff(input_power_spectrum: list, param: float=0.85) -> float:
"""
Spectral Rolloff
:param input_power_spectrum: power spectrum in list
    :param param: threshold for roll off
:return Spectral Rolloff
"""
    assert 0 < param < 1, "parameter must be between 0 and 1"
    # Initialize cumulative energy and bin count
    energy = 0
    count = 0
    # Calculate total energy (written to work on plain lists as well as numpy arrays)
    total_energy = sum(x ** 2 for x in input_power_spectrum)
    # Advance count while the cumulative energy is below param * total_energy
while energy <= param*total_energy and count < len(input_power_spectrum):
energy = pow(input_power_spectrum[count], 2) + energy
count += 1
# Normalise Spectral Rolloff
return count/len(input_power_spectrum)
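# Example usage (illustrative, not part of the original module): for this toy
# 8-bin power spectrum the default param=0.85 returns 0.625, i.e. roughly 85%
# of the spectral energy lies below 5/8 of the frequency range.
if __name__ == "__main__":
    toy_spectrum = [0.1, 0.4, 0.8, 1.0, 0.7, 0.3, 0.1, 0.05]
    print(rolloff(toy_spectrum))  # 0.625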
| 28.645161 | 83 | 0.667793 |
29c6aa30b18efa3ef99f8685007919d2bfcf3019 | 112 | py | Python | webStorm-APICloud/python_tools/Tools/Scripts/2to3.py | zzr925028429/androidyianyan | 8967fdba92473e8e65ee222515dfc54cdae5bb0b | [
"MIT"
] | null | null | null | webStorm-APICloud/python_tools/Tools/Scripts/2to3.py | zzr925028429/androidyianyan | 8967fdba92473e8e65ee222515dfc54cdae5bb0b | [
"MIT"
] | null | null | null | webStorm-APICloud/python_tools/Tools/Scripts/2to3.py | zzr925028429/androidyianyan | 8967fdba92473e8e65ee222515dfc54cdae5bb0b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from lib2to3.main import main
import sys
import os
sys.exit(main("lib2to3.fixes"))
| 16 | 32 | 0.714286 |
29c79f364e1d41c68e19d472b3c1d55bd0b5b9e5 | 1,070 | py | Python | afterglow_core/schemas/api/v1/jobs/field_cal_job.py | SkynetRTN/afterglow-access-server | 3d8d62f622577fdd1ae7b0076cb536251f7bf0cd | [
"Apache-2.0"
] | 2 | 2021-05-24T15:12:07.000Z | 2022-02-17T19:58:16.000Z | afterglow_core/schemas/api/v1/jobs/field_cal_job.py | SkynetRTN/afterglow-access-server | 3d8d62f622577fdd1ae7b0076cb536251f7bf0cd | [
"Apache-2.0"
] | 1 | 2022-02-27T03:01:06.000Z | 2022-02-27T03:01:06.000Z | afterglow_core/schemas/api/v1/jobs/field_cal_job.py | SkynetRTN/afterglow-access-server | 3d8d62f622577fdd1ae7b0076cb536251f7bf0cd | [
"Apache-2.0"
] | 2 | 2021-06-08T18:16:40.000Z | 2021-07-09T14:19:49.000Z | """
Afterglow Core: photometric calibration job schemas
"""
from typing import List as TList
from marshmallow.fields import Integer, List, Nested
from ..job import JobSchema, JobResultSchema
from ..field_cal import FieldCalSchema, FieldCalResultSchema
from ..photometry import PhotSettingsSchema
from .source_extraction_job import SourceExtractionSettingsSchema
__all__ = ['FieldCalJobResultSchema', 'FieldCalJobSchema']
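# The schema classes were truncated in this dump. A hypothetical sketch of the
# two names exported by __all__, assembled only from the imports above -- the
# fields and defaults are guesses, not Afterglow's real API:
class FieldCalJobResultSchema(JobResultSchema):
    data: TList[FieldCalResultSchema] = List(Nested(FieldCalResultSchema), default=[])


class FieldCalJobSchema(JobSchema):
    result: FieldCalJobResultSchema = Nested(FieldCalJobResultSchema, default={})
    file_ids: TList[int] = List(Integer(), default=[])
    field_cal: FieldCalSchema = Nested(FieldCalSchema, default={})
    source_extraction_settings: SourceExtractionSettingsSchema = Nested(SourceExtractionSettingsSchema, default=None)
    photometry_settings: PhotSettingsSchema = Nested(PhotSettingsSchema, default=None)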
| 31.470588 | 72 | 0.774766 |
29c7ff7b0f45d2d5b8a537d89fbcc9e55ee8907c | 2,692 | py | Python | Python/addRow.py | alexwu2021/practice | ff786d4d16afdef3e031002d22b58a976c8ed16b | [
"MIT"
] | null | null | null | Python/addRow.py | alexwu2021/practice | ff786d4d16afdef3e031002d22b58a976c8ed16b | [
"MIT"
] | 1 | 2021-11-22T05:54:33.000Z | 2021-11-22T05:54:33.000Z | Python/addRow.py | alexwu2021/practice | ff786d4d16afdef3e031002d22b58a976c8ed16b | [
"MIT"
] | null | null | null | #import unittest
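# The Node class and the helpers used below were truncated in this dump.
# Minimal sketches (assumed, matching how the test code uses them) so the
# script can run end to end:
class Node:
    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None

def pre_order_print(node):
    # Visit root, then left subtree, then right subtree.
    if node is not None:
        print(node.value)
        pre_order_print(node.left)
        pre_order_print(node.right)

def addRow(root, d, v):
    # Insert a full row of nodes holding v at depth d (the root is depth 1);
    # existing subtrees become children of the inserted nodes.
    if d == 1:
        new_root = Node(v)
        new_root.left = root
        return new_root
    if root is None:
        return None
    if d == 2:
        new_left, new_right = Node(v), Node(v)
        new_left.left, new_right.right = root.left, root.right
        root.left, root.right = new_left, new_right
    else:
        addRow(root.left, d - 1, v)
        addRow(root.right, d - 1, v)
    return root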
#case 1
t = Node(4)
#binary_insert(t, Node(2))
#binary_insert(t, Node(7))
#binary_insert(t, Node(3))
#binary_insert(t, Node(6))
#binary_insert(t, Node(2))
#binary_insert(t, Node(5))
#insertIntoBinaryTreeWithAGivenIntArray(t, [4, 2, 6, 3, 1, 5])
t.left = Node(2)
t.right = Node(6)
t.left.left = Node(3)
t.left.right = Node(1)
t.right.left = Node(5)
pre_order_print(t)
d = 2
v = 99
addRow(t, d, v)
pre_order_print(t)
#in_order_print(t)
#case 2
| 23.206897 | 154 | 0.556092 |
29c8162014a517194fd9f41815841a6c8677d84e | 4,458 | py | Python | src/genie/libs/parser/iosxe/tests/ShowIpInterfaceBrief/cli/equal/golden_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 204 | 2018-06-27T00:55:27.000Z | 2022-03-06T21:12:18.000Z | src/genie/libs/parser/iosxe/tests/ShowIpInterfaceBrief/cli/equal/golden_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 468 | 2018-06-19T00:33:18.000Z | 2022-03-31T23:23:35.000Z | src/genie/libs/parser/iosxe/tests/ShowIpInterfaceBrief/cli/equal/golden_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 309 | 2019-01-16T20:21:07.000Z | 2022-03-30T12:56:41.000Z | expected_output = {
"interface": {
"GigabitEthernet0/0/0": {
"interface_is_ok": "YES",
"ip_address": "10.105.44.23",
"method": "other",
"protocol": "up",
"status": "up"
},
"GigabitEthernet0/0/1": {
"interface_is_ok": "YES",
"ip_address": "10.174.10.1",
"method": "other",
"protocol": "up",
"status": "up"
},
"GigabitEthernet0/0/2": {
"interface_is_ok": "YES",
"ip_address": "10.64.10.1",
"method": "other",
"protocol": "up",
"status": "up"
},
"GigabitEthernet0/0/3": {
"interface_is_ok": "YES",
"ip_address": "10.186.10.1",
"method": "other",
"protocol": "up",
"status": "up"
},
"Loopback65528": {
"interface_is_ok": "YES",
"ip_address": "192.168.1.1",
"method": "other",
"protocol": "up",
"status": "up"
},
"NVI0": {
"interface_is_ok": "YES",
"ip_address": "unassigned",
"method": "unset",
"protocol": "up",
"status": "up"
},
"Sdwan-system-intf": {
"interface_is_ok": "YES",
"ip_address": "192.168.10.64",
"method": "unset",
"protocol": "up",
"status": "up"
},
"TenGigabitEthernet0/0/4": {
"interface_is_ok": "YES",
"ip_address": "10.121.10.1",
"method": "other",
"protocol": "up",
"status": "up"
},
"TenGigabitEthernet0/0/5": {
"interface_is_ok": "YES",
"ip_address": "unassigned",
"method": "unset",
"protocol": "down",
"status": "down"
},
"Tunnel1": {
"interface_is_ok": "YES",
"ip_address": "10.174.10.1",
"method": "TFTP",
"protocol": "up",
"status": "up"
},
"Tunnel2000000001": {
"interface_is_ok": "YES",
"ip_address": "192.168.2.1",
"method": "unset",
"protocol": "up",
"status": "up"
},
"Tunnel2000000002": {
"interface_is_ok": "YES",
"ip_address": "192.168.166.1",
"method": "unset",
"protocol": "up",
"status": "up"
},
"Tunnel3": {
"interface_is_ok": "YES",
"ip_address": "10.186.10.1",
"method": "TFTP",
"protocol": "up",
"status": "up"
},
"TwoGigabitEthernet0/1/0": {
"interface_is_ok": "YES",
"ip_address": "unassigned",
"method": "unset",
"protocol": "up",
"status": "up"
},
"TwoGigabitEthernet0/2/0": {
"interface_is_ok": "YES",
"ip_address": "unassigned",
"method": "unset",
"protocol": "up",
"status": "up"
},
"Ucse1/0/0": {
"interface_is_ok": "YES",
"ip_address": "10.19.14.1",
"method": "other",
"protocol": "down",
"status": "administratively down"
},
"Ucse1/0/1": {
"interface_is_ok": "YES",
"ip_address": "unassigned",
"method": "unset",
"protocol": "down",
"status": "administratively down"
},
"VirtualPortGroup0": {
"interface_is_ok": "YES",
"ip_address": "192.0.2.1",
"method": "other",
"protocol": "up",
"status": "up"
},
"VirtualPortGroup1": {
"interface_is_ok": "YES",
"ip_address": "192.168.2.1",
"method": "other",
"protocol": "up",
"status": "up"
},
"VirtualPortGroup3": {
"interface_is_ok": "YES",
"ip_address": "192.168.3.1",
"method": "other",
"protocol": "up",
"status": "up"
},
"VirtualPortGroup4": {
"interface_is_ok": "YES",
"ip_address": "192.168.166.1",
"method": "other",
"protocol": "up",
"status": "up"
}
}
}
| 29.328947 | 45 | 0.394796 |
29c8dfdb3c65c5e9847d8ee2d3b8fe9a5f54498a | 1,000 | py | Python | ssh.py | telkomdev/keris | 8451f3d69df174e33003e90e4fd70f602412412a | [
"MIT"
] | 1 | 2020-02-11T16:10:11.000Z | 2020-02-11T16:10:11.000Z | ssh.py | telkomdev/keris | 8451f3d69df174e33003e90e4fd70f602412412a | [
"MIT"
] | null | null | null | ssh.py | telkomdev/keris | 8451f3d69df174e33003e90e4fd70f602412412a | [
"MIT"
] | null | null | null | from common import is_connection_ok
import paramiko
"""
execute_ssh(host, port, username, password, cmd)
"""
| 35.714286 | 143 | 0.601 |
29ca0af350d167975f57568f8d8d244098802dd2 | 376 | py | Python | novel/spider/config.py | rrcgat/novel-info | fcda24f9f6da5a4755e942a520045b7b5a53bef4 | [
"MIT"
] | 4 | 2019-04-02T09:44:18.000Z | 2020-04-15T11:47:49.000Z | novel/spider/config.py | rrcgat/novel-info | fcda24f9f6da5a4755e942a520045b7b5a53bef4 | [
"MIT"
] | 1 | 2019-03-04T17:20:39.000Z | 2019-03-04T17:48:18.000Z | novel/spider/config.py | rrcgat/novel-info | fcda24f9f6da5a4755e942a520045b7b5a53bef4 | [
"MIT"
] | 1 | 2020-04-15T11:47:50.000Z | 2020-04-15T11:47:50.000Z | '''
'''
HEADERS_IPHONE = {'user-agent': (
'Mozilla/5.0 '
'(iPhone; CPU iPhone OS 6_0 like Mac OS X) '
'AppleWebKit/536.26 (KHTML, like Gecko) '
'Version/6.0 Mobile/10A5376e Safari/8536.25'
)}
HEADERS_CHROME = {'user-agent': (
'Mozilla/5.0 (X11; Linux x86_64) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/67.0.3396.99 Safari/537.36'
)}
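# Example usage (illustrative; the original module only defines the presets,
# and requests is not one of its imports):
if __name__ == '__main__':
    import requests
    response = requests.get('https://example.com', headers=HEADERS_CHROME)
    print(response.status_code)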
| 22.117647 | 48 | 0.617021 |
29cb4ed39bb073f7561e68074f27a72bbc5b7c7c | 7,167 | py | Python | tests/test_editor_common.py | jpfxgood/ped | f753ca27e4462c321ed28f00e1ef47fbde62990e | [
"MIT"
] | null | null | null | tests/test_editor_common.py | jpfxgood/ped | f753ca27e4462c321ed28f00e1ef47fbde62990e | [
"MIT"
] | 21 | 2020-07-03T13:14:15.000Z | 2020-07-14T14:27:43.000Z | tests/test_editor_common.py | jpfxgood/ped | f753ca27e4462c321ed28f00e1ef47fbde62990e | [
"MIT"
] | null | null | null | from ped_core import editor_common
import io
import pprint
import os
import curses
import curses.ascii
import time
import re
from ped_core import keymap
from ped_core import keytab
from ped_core import clipboard
from ped_test_util import read_str, match_attr, undo_all, window_pos, play_macro, validate_mark, validate_screen, editor_test_suite
import subprocess
| 40.954286 | 205 | 0.599972 |
29cc2793a730a906cbbcc655e8b03fef329faada | 227 | py | Python | rsmtpd/response/smtp_501.py | alfmel/rsmtpd | 10900876b1f83d6c141070a413f81edf3c98ac51 | [
"Apache-2.0"
] | 1 | 2017-06-12T04:10:07.000Z | 2017-06-12T04:10:07.000Z | rsmtpd/response/smtp_501.py | alfmel/rsmtpd | 10900876b1f83d6c141070a413f81edf3c98ac51 | [
"Apache-2.0"
] | null | null | null | rsmtpd/response/smtp_501.py | alfmel/rsmtpd | 10900876b1f83d6c141070a413f81edf3c98ac51 | [
"Apache-2.0"
] | null | null | null | from rsmtpd.response.action import OK
from rsmtpd.response.base_response import BaseResponse
| 25.222222 | 56 | 0.779736 |
29cd522f460b996800fe0d9f2739255f875ef960 | 14,116 | py | Python | qatrack/qatrack_core/tests/test_core.py | crcrewso/qatrackplus | b9da3bc542d9e3eca8b7291bb631d1c7255d528e | [
"MIT"
] | 20 | 2021-03-11T18:37:32.000Z | 2022-03-23T19:38:07.000Z | qatrack/qatrack_core/tests/test_core.py | crcrewso/qatrackplus | b9da3bc542d9e3eca8b7291bb631d1c7255d528e | [
"MIT"
] | 75 | 2021-02-12T02:37:33.000Z | 2022-03-29T20:56:16.000Z | qatrack/qatrack_core/tests/test_core.py | crcrewso/qatrackplus | b9da3bc542d9e3eca8b7291bb631d1c7255d528e | [
"MIT"
] | 5 | 2021-04-07T15:46:53.000Z | 2021-09-18T16:55:00.000Z | import datetime
import re
from django.contrib.sites.models import Site
from django.core import mail
from django.test import TestCase
from django.urls import reverse
from django.utils import timezone
import numpy as np
import pandas as pd
import pytz
from qatrack.qa.tests import utils
from qatrack.qatrack_core.serializers import QATrackJSONEncoder
from qatrack.qatrack_core.utils import end_of_day, relative_dates, start_of_day
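# Illustrative sketch (not from the original test module) of the round trip
# the imports above point at; it assumes QATrackJSONEncoder renders numpy
# arrays as plain JSON lists:
class TestQATrackJSONEncoderSketch(TestCase):

    def test_numpy_array_round_trip(self):
        import json
        payload = json.dumps({"vals": np.arange(3)}, cls=QATrackJSONEncoder)
        assert json.loads(payload)["vals"] == [0, 1, 2]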
| 45.980456 | 110 | 0.63637 |
29cdd1a0441cda0528b31705900a6564e1af5682 | 179 | py | Python | app/blog/urls.py | AjayHao/AtThirty | 96b2ce00be8f3ed07ee5e6e2b1ca13ab25e9521d | [
"MIT"
] | null | null | null | app/blog/urls.py | AjayHao/AtThirty | 96b2ce00be8f3ed07ee5e6e2b1ca13ab25e9521d | [
"MIT"
] | null | null | null | app/blog/urls.py | AjayHao/AtThirty | 96b2ce00be8f3ed07ee5e6e2b1ca13ab25e9521d | [
"MIT"
] | null | null | null | from django.conf.urls import patterns, url
from app.blog import views as blog_views
urlpatterns = [
#django url
url(r'^$', blog_views.index, name='blog_index'),
]
| 16.272727 | 52 | 0.681564 |
29cde250e9d497ca3e7e9d2169fa12a67aa2c621 | 752 | py | Python | core/recc/system/environ.py | bogonets/answer | 57f892a9841980bcbc35fa1e27521b34cd94bc25 | [
"MIT"
] | 3 | 2021-06-20T02:24:10.000Z | 2022-01-26T23:55:33.000Z | core/recc/system/environ.py | bogonets/answer | 57f892a9841980bcbc35fa1e27521b34cd94bc25 | [
"MIT"
] | null | null | null | core/recc/system/environ.py | bogonets/answer | 57f892a9841980bcbc35fa1e27521b34cd94bc25 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from os import environ
from typing import Optional, Dict, Any, Type
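# The module body was truncated in this dump. A sketch of the typed getter
# these imports suggest (name, signature and behavior are assumptions, not
# the original recc code):
def get_env(key: str, default: Optional[Any] = None, cast: Optional[Type] = None) -> Any:
    value = environ.get(key)
    if value is None:
        return default
    return cast(value) if cast is not None else value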
| 23.5 | 69 | 0.655585 |
29cf16e7358b9161ab9d90ae6bb97701b983418a | 436 | py | Python | InteractiveProgramming/assignment3.3.py | mr-ice/pipython | ea27af520946cb710cb717815be625489fc8a1a3 | [
"MIT"
] | null | null | null | InteractiveProgramming/assignment3.3.py | mr-ice/pipython | ea27af520946cb710cb717815be625489fc8a1a3 | [
"MIT"
] | null | null | null | InteractiveProgramming/assignment3.3.py | mr-ice/pipython | ea27af520946cb710cb717815be625489fc8a1a3 | [
"MIT"
] | null | null | null | try:
s = raw_input("Enter score between 0.0 and 1.0: ")
score = float(s)
if score < 0 or score > 1:
raise Exception
except ValueError:
print "You didn't even enter a number"
except:
print "Not a possible score."
else:
if score >= 0.9:
print "A"
elif score >= 0.8:
print "B"
elif score >= 0.7:
print "C"
elif score >= 0.6:
print "D"
else:
print "F"
| 20.761905 | 54 | 0.529817 |
29cf80f6c6965927720d1b295a0c8b626681599d | 254 | py | Python | Store/robot-test/say.py | Quanta-Robotics/Robot-Blueberry | 7b7e77e09ac5e9ec5afd947e0db1ecc8773e56da | [
"MIT"
] | 25 | 2021-06-08T07:09:30.000Z | 2021-12-30T06:28:35.000Z | Store/robot-test/say.py | ICT-CoU/Robot-Blueberry | d19fd1be037df9d67de64df57a87006d74cd6c43 | [
"MIT"
] | 2 | 2021-05-23T12:54:51.000Z | 2021-06-07T17:47:56.000Z | Store/robot-test/say.py | ICT-CoU/Robot-Blueberry | d19fd1be037df9d67de64df57a87006d74cd6c43 | [
"MIT"
] | 14 | 2021-06-08T13:02:28.000Z | 2021-12-30T20:07:18.000Z | import pyttsx3
engine = pyttsx3.init()
engine.setProperty('rate', 150)
voices = engine.getProperty('voices')
engine.setProperty("voice", 'english_rp+f4')
talk("My name is robot leena")
| 15.875 | 44 | 0.704724 |
29d39c3e482269db7c4ce7b3f24a9b213141989d | 141 | py | Python | api/server/utils/logger.py | ktolstikhin/vision-service | b87f10f5ec3d22b76c06a0e6c0105fd823e60c39 | [
"MIT"
] | null | null | null | api/server/utils/logger.py | ktolstikhin/vision-service | b87f10f5ec3d22b76c06a0e6c0105fd823e60c39 | [
"MIT"
] | null | null | null | api/server/utils/logger.py | ktolstikhin/vision-service | b87f10f5ec3d22b76c06a0e6c0105fd823e60c39 | [
"MIT"
] | null | null | null | import logging
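# The remainder of this module was truncated in the dump. A minimal sketch of
# the logger factory such a utils module typically exposes (name and defaults
# are assumptions):
def get_logger(name, level=logging.INFO):
    logger = logging.getLogger(name)
    logger.setLevel(level)
    if not logger.handlers:
        logger.addHandler(logging.StreamHandler())
    return logger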
| 17.625 | 70 | 0.723404 |