Dataset schema, one row per source file:

| Column | Type | Range / Values |
|---|---|---|
| hexsha | string | length 40 (fixed) |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | sequence | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k (nullable) |
| max_stars_repo_stars_event_min_datetime | string | length 24 (nullable) |
| max_stars_repo_stars_event_max_datetime | string | length 24 (nullable) |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | sequence | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k (nullable) |
| max_issues_repo_issues_event_min_datetime | string | length 24 (nullable) |
| max_issues_repo_issues_event_max_datetime | string | length 24 (nullable) |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | sequence | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k (nullable) |
| max_forks_repo_forks_event_min_datetime | string | length 24 (nullable) |
| max_forks_repo_forks_event_max_datetime | string | length 24 (nullable) |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |

--- file: sdks/python/appcenter_sdk/models/BranchConfigurationToolsets.py | repo: Brantone/appcenter-sdks | license: MIT | size: 5,752 bytes | issues: 6 | forks: 2 ---

# coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class BranchConfigurationToolsets(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'xcode': '',
'javascript': '',
'xamarin': '',
'android': ''
}
attribute_map = {
'xcode': 'xcode',
'javascript': 'javascript',
'xamarin': 'xamarin',
'android': 'android'
}
def __init__(self, xcode=None, javascript=None, xamarin=None, android=None): # noqa: E501
"""BranchConfigurationToolsets - a model defined in Swagger""" # noqa: E501
self._xcode = None
self._javascript = None
self._xamarin = None
self._android = None
self.discriminator = None
if xcode is not None:
self.xcode = xcode
if javascript is not None:
self.javascript = javascript
if xamarin is not None:
self.xamarin = xamarin
if android is not None:
self.android = android
@property
def xcode(self):
"""Gets the xcode of this BranchConfigurationToolsets. # noqa: E501
Build configuration when Xcode is part of the build steps # noqa: E501
:return: The xcode of this BranchConfigurationToolsets. # noqa: E501
:rtype:
"""
return self._xcode
@xcode.setter
def xcode(self, xcode):
"""Sets the xcode of this BranchConfigurationToolsets.
Build configuration when Xcode is part of the build steps # noqa: E501
:param xcode: The xcode of this BranchConfigurationToolsets. # noqa: E501
:type:
"""
self._xcode = xcode
@property
def javascript(self):
"""Gets the javascript of this BranchConfigurationToolsets. # noqa: E501
Build configuration when React Native, or other JavaScript tech, is part of the build steps # noqa: E501
:return: The javascript of this BranchConfigurationToolsets. # noqa: E501
:rtype:
"""
return self._javascript
@javascript.setter
def javascript(self, javascript):
"""Sets the javascript of this BranchConfigurationToolsets.
Build configuration when React Native, or other JavaScript tech, is part of the build steps # noqa: E501
:param javascript: The javascript of this BranchConfigurationToolsets. # noqa: E501
:type:
"""
self._javascript = javascript
@property
def xamarin(self):
"""Gets the xamarin of this BranchConfigurationToolsets. # noqa: E501
Build configuration for Xamarin projects # noqa: E501
:return: The xamarin of this BranchConfigurationToolsets. # noqa: E501
:rtype:
"""
return self._xamarin
@xamarin.setter
def xamarin(self, xamarin):
"""Sets the xamarin of this BranchConfigurationToolsets.
Build configuration for Xamarin projects # noqa: E501
:param xamarin: The xamarin of this BranchConfigurationToolsets. # noqa: E501
:type:
"""
self._xamarin = xamarin
@property
def android(self):
"""Gets the android of this BranchConfigurationToolsets. # noqa: E501
Build configuration for Android projects # noqa: E501
:return: The android of this BranchConfigurationToolsets. # noqa: E501
:rtype:
"""
return self._android
@android.setter
def android(self, android):
"""Sets the android of this BranchConfigurationToolsets.
Build configuration for Android projects # noqa: E501
:param android: The android of this BranchConfigurationToolsets. # noqa: E501
:type:
"""
self._android = android
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BranchConfigurationToolsets):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
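The generated model leaves the four toolset attributes untyped (the swagger_types values are empty strings), so any JSON-serializable payload can be assigned. A minimal round-trip sketch; the payload shapes below are placeholders, not the real App Center schema:

toolsets = BranchConfigurationToolsets(
    xcode={"projectOrWorkspacePath": "App.xcworkspace"},  # hypothetical payload
    android={"buildVariant": "release"},                  # hypothetical payload
)
print(toolsets.to_dict())  # nested plain dicts pass through to_dict() unchanged
print(toolsets)            # __repr__ delegates to pprint via to_str()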
--- file: tests/cli/version_test.py | repo: longhuei/floyd-cli | license: Apache-2.0 | size: 1,156 bytes | stars: 162 | issues: 79 | forks: 43 ---

from click.testing import CliRunner
import unittest
from mock import patch, Mock, PropertyMock
from floyd.cli.version import upgrade
class TestFloydVersion(unittest.TestCase):
"""
Tests cli utils helper functions
"""
def setUp(self):
self.runner = CliRunner()
@patch('floyd.cli.version.pip_upgrade')
@patch('floyd.cli.version.conda_upgrade')
@patch('floyd.cli.utils.sys')
def test_floyd_upgrade_with_standard_python(self, mock_sys, conda_upgrade, pip_upgrade):
mock_sys.version = '2.7.13 (default, Jan 19 2017, 14:48:08) \n[GCC 6.3.0 20170118]'
self.runner.invoke(upgrade)
conda_upgrade.assert_not_called()
pip_upgrade.assert_called_once()
@patch('floyd.cli.version.pip_upgrade')
@patch('floyd.cli.version.conda_upgrade')
@patch('floyd.cli.utils.sys')
def test_floyd_upgrade_with_anaconda_python(self, mock_sys, conda_upgrade, pip_upgrade):
mock_sys.version = '3.6.3 |Anaconda, Inc.| (default, Oct 13 2017, 12:02:49) \n[GCC 7.2.0]'
self.runner.invoke(upgrade)
pip_upgrade.assert_not_called()
conda_upgrade.assert_called_once()
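The two sys.version strings above are the whole point of these tests: Anaconda interpreters advertise themselves there. The detector under test is presumably a substring check along these lines (the actual floyd-cli implementation may differ):

import sys

def is_conda_python():
    # Anaconda/Miniconda builds embed their vendor string in sys.version,
    # e.g. "3.6.3 |Anaconda, Inc.| (default, Oct 13 2017, 12:02:49)".
    return "anaconda" in sys.version.lower() or "continuum" in sys.version.lower()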
--- file: offthedialbot/help.py | repo: DJam98/bot | license: MIT | size: 4,181 bytes | stars: 2 | issues: 17 | forks: 3 ---

"""Contains HelpCommand class."""
import discord
from discord.ext import commands
from offthedialbot import utils
class HelpCommand(commands.DefaultHelpCommand):
"""Set up help command for the bot."""
async def send_bot_help(self, mapping):
"""Send bot command page."""
list_commands = [
command for cog in [
await self.filter_commands(cog_commands)
for cog, cog_commands in mapping.items()
if cog is not None and await self.filter_commands(cog_commands)
] for command in cog
]
embed = self.create_embed(
title="`$help`",
description="All the commands for Off the Dial Bot!",
fields=[{
"name": "Commands:",
"value": "\n".join([
self.short(command)
for command in await self.filter_commands(mapping[None]) if command.help])
}, {
"name": "Misc Commands:",
"value": "\n".join([
self.short(command)
for command in list_commands])
}]
)
await self.get_destination().send(embed=embed)
async def send_cog_help(self, cog):
"""Send cog command page."""
embed = self.create_embed(
title=cog.qualified_name.capitalize(),
description=cog.description,
**({"fields": [{
"name": f"{cog.qualified_name.capitalize()} Commands:",
"value": "\n".join([
self.short(command)
for command in cog.get_commands()])
}]} if cog.get_commands() else {}))
await self.get_destination().send(embed=embed)
async def send_group_help(self, group):
"""Send command group page."""
embed = self.create_embed(
title=self.short(group, False),
description=group.help,
fields=[{
"name": f"Subcommands:",
"value": "\n".join([
self.short(command)
for command in await self.filter_commands(group.commands)
])
}]
)
await self.get_destination().send(embed=embed)
async def send_command_help(self, command):
"""Send command page."""
embed = self.create_embed(
title=self.short(command, False),
description=command.help,
)
await self.get_destination().send(embed=embed)
async def command_not_found(self, string):
"""Returns message when command is not found."""
return f"Command {self.short(string, False)} does not exist."
async def subcommand_not_found(self, command, string):
"""Returns message when subcommand is not found."""
if isinstance(command, commands.Group) and len(command.all_commands) > 0:
return f"Command {self.short(command, False)} has no subcommand named `{string}`."
else:
return f"Command {self.short(command, False)} has no subcommands."
async def send_error_message(self, error):
"""Send error message, override to support sending embeds."""
await self.get_destination().send(
embed=utils.Alert.create_embed(utils.Alert.Style.DANGER,
title="Command/Subcommand not found.", description=error))
def create_embed(self, fields: list = (), **kwargs):
"""Create help embed."""
embed = discord.Embed(color=utils.Alert.Style.DANGER, **kwargs)
for field in fields:
embed.add_field(**field, inline=False)
embed.set_footer(
text=f"Type {self.clean_prefix}help command for more info on a command. You can also type {self.clean_prefix}help category for more info on a category.")
return embed
def short(self, command, doc=True):
"""List the command as a one-liner."""
sig = self.get_command_signature(command) if not doc else f'{self.clean_prefix}{command}'
return f'`{sig[:-1] if sig.endswith(" ") else sig}` {(command.short_doc if doc else "")}'
help_command = HelpCommand()
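A sketch of how the module-level help_command instance would typically be wired into the bot; the prefix is a placeholder, and discord.py 2.x would additionally require an intents argument:

from discord.ext import commands

bot = commands.Bot(command_prefix="$", help_command=help_command)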
--- file: ex10.6.py | repo: Dikaeinstein/Think_Python | license: MIT | size: 255 bytes ---

def is_anagram(word1, word2):
'''
    Returns True if word1 is an anagram of word2, False otherwise.
word1: str
word2: str
'''
return sorted(word1) == sorted(word2)
print(is_anagram("silence", "listen"))
--- file: learning/modules/resnet/resnet_conditional.py | repo: esteng/guiding-multi-step | license: BSD-2-Clause | size: 3,017 bytes ---

import torch
from torch import nn as nn
from learning.modules.blocks import ResBlock, ResBlockConditional
class ResNetConditional(nn.Module):
def __init__(self, embed_size, channels, c_out):
super(ResNetConditional, self).__init__()
self.block1 = ResBlock(channels) # RF: 5x5
self.block1a = ResBlock(channels) # RF: 9x9
self.cblock1 = ResBlockConditional(embed_size, channels) # RF: 9x9
self.block2 = ResBlock(channels) # RF: 13x13
self.block2a = ResBlock(channels) # RF: 17x17
self.cblock2 = ResBlockConditional(embed_size, channels) # RF: 17x17
self.block3 = ResBlock(channels) # RF: 21x21
self.block3a = ResBlock(channels) # RF: 25x25
self.cblock3 = ResBlockConditional(embed_size, channels) # RF: 25x25
self.block4 = ResBlock(channels) # RF: 29x29
self.block4a = ResBlock(channels) # RF: 33x33
self.cblock4 = ResBlockConditional(embed_size, channels) # RF: 33x33
self.block5 = ResBlock(channels) # RF: 37x37
self.block5a = ResBlock(channels) # RF: 41x41
self.cblock5 = ResBlockConditional(embed_size, channels) # RF: 41x41
self.block6 = ResBlock(channels) # RF: 45x45
self.block6a = ResBlock(channels) # RF: 49x49
self.cblock6 = ResBlockConditional(embed_size, channels) # RF: 49x49
self.block7 = ResBlock(channels) # RF: 53x53
self.block7a = ResBlock(channels) # RF: 57x57
self.cblock7 = ResBlockConditional(embed_size, channels) # RF: 57x57
self.block8 = ResBlock(channels) # RF: 61x61
self.block8a = ResBlock(channels) # RF: 65x65
self.cblock8 = ResBlockConditional(embed_size, channels, c_out) # RF: 65x65
def init_weights(self):
for mod in self.modules():
if hasattr(mod, "init_weights") and mod is not self:
mod.init_weights()
def forward(self, inputs, contexts):
x = self.block1(inputs)
x = self.block1a(x)
x = self.cblock1(x, contexts)
x = self.block2(x)
x = self.block2a(x)
x = self.cblock2(x, contexts)
x = self.block3(x)
x = self.block3a(x)
x = self.cblock3(x, contexts)
x = self.block4(x)
x = self.block4a(x)
x = self.cblock4(x, contexts)
x = self.block5(x)
x = self.block5a(x)
x = self.cblock5(x, contexts)
x = self.block6(x)
x = self.block6a(x)
x = self.cblock6(x, contexts)
x = self.block7(x)
x = self.block7a(x)
x = self.cblock7(x, contexts)
x = self.block8(x)
x = self.block8a(x)
x = self.cblock8(x, contexts)
        return x
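blocks.py is not shown, so as a shape sanity check only, assuming ResBlock preserves spatial dimensions and ResBlockConditional consumes a flat embedding of size embed_size:

model = ResNetConditional(embed_size=128, channels=64, c_out=32)
features = torch.randn(2, 64, 48, 48)  # (batch, channels, H, W)
context = torch.randn(2, 128)          # (batch, embed_size)
out = model(features, context)         # expected (2, 32, 48, 48) under the assumptions above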
--- file: cryptovote/cryptovote.wsgi | repo: cryptovoting/cryptovote | license: MIT | size: 110 bytes | stars: 8 | forks: 2 ---

# Used for deploying on Apache with mod_wsgi
from cryptovote.app import create_app
application = create_app()
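For reference, a minimal Apache stanza that would hand this file to mod_wsgi; every path and the server name below are placeholders to adapt:

<VirtualHost *:80>
    ServerName cryptovote.example.com
    WSGIDaemonProcess cryptovote python-home=/var/www/cryptovote/venv
    WSGIProcessGroup cryptovote
    WSGIScriptAlias / /var/www/cryptovote/cryptovote/cryptovote.wsgi
</VirtualHost>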
--- file: problems/203_remove-linked-list-elements.py | repo: okuda-seminar/review_leetcode | license: MIT | size: 802 bytes | issues: 170 ---

#
# @lc app=leetcode id=203 lang=python3
#
# [203] Remove Linked List Elements
#
# @lc code=start
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def removeElements(self, head: ListNode, val: int) -> ListNode:
"""
        time: O(len(head))
        space: O(1), since nodes are unlinked in place
"""
if not head:
return None
current_node = head
while current_node and current_node.next:
if current_node.next.val == val:
current_node.next = current_node.next.next
else:
current_node = current_node.next
if head.val == val:
head = head.next
return head
# @lc code=end
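A quick self-check outside the LeetCode harness; ListNode is re-declared here because the judge normally supplies it (matching the commented definition above):

class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

# Build 1 -> 2 -> 6 -> 3 -> 6, then strip the 6s.
head = ListNode(1, ListNode(2, ListNode(6, ListNode(3, ListNode(6)))))
node = Solution().removeElements(head, 6)
while node:
    print(node.val)  # 1, then 2, then 3
    node = node.next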
--- file: qpricesim/model_code/QLearningAgent.py | repo: ToFeWe/qpricesim | license: MIT | size: 7,178 bytes | stars: 2 ---

"""
A module that defines the QLearning Agent for the pricing game as a class.
Note that we have a numba version (for speed) which inherits everything from
QLearningAgentBase.
"""
import numpy as np
from numba import float64
from numba import int32
from numba import njit
from numba.experimental import jitclass
from .utils_q_learning import numba_argmax
from .utils_q_learning import numba_max
class QLearningAgentBase:
"""
A simple Q-Learning Agent based on numpy. Actions and state are assumed
to be represented by integer numbers/an index and corresponds to the
respective rows / columns in the Q-Matrix.
We assume that the agent can choose every action in every state.
The random seed will be set by a helper function outside this class.
Args:
self.epsilon (float): Exploration probability
self.alpha (float): Learning rate
self.discount (float): Discount rate
self.n_actions (int): Number of actions the agent can pick
"""
def __init__(self, alpha, epsilon, discount, n_actions, n_states):
self.n_actions = n_actions
self.n_states = n_states
self._qvalues = np.random.rand(self.n_states, self.n_actions)
self.alpha = alpha
self.epsilon = epsilon
self.discount = discount
def set_qmatrix(self, new_matrix):
self._qvalues = new_matrix
def get_qvalue(self, state, action):
"""
Returns the Q-value for the given state action
Args:
state (integer): Index representation of a state
action (integer): Index representation of an action
Returns:
float: Q-value for the state-action combination
"""
return self._qvalues[state, action]
def set_qvalue(self, state, action, value):
"""Sets the Qvalue for [state,action] to the given value
Args:
state (integer): Index representation of a state
action (integer): Index representation of an action
value (float): Q-value that is being assigned
"""
self._qvalues[state, action] = value
def get_value(self, state):
"""
Compute the agents estimate of V(s) using current q-values.
Args:
state (integer): Index representation of a state
Returns:
float: Value of the state
"""
value = numba_max(
self._qvalues[
state,
]
)
return value
def get_qmatrix(self):
"""
Returns the qmatrix of the agent
Returns:
array (float): Full Q-Matrix
"""
return self._qvalues
def update(self, state, action, reward, next_state):
"""
Update Q-Value:
Q(s,a) := (1 - alpha) * Q(s,a) + alpha * (r + gamma * V(s'))
Args:
state (integer): Index representation of the current state (Row of the Q-matrix)
action (integer): Index representation of the picked action (Column of the Q-matrix)
reward (float): Reward for picking from picking the action in the given state
next_state (integer): Index representation of the next state (Column of the Q-matrix)
"""
# Calculate the updated Q-value
c_q_value = (1 - self.alpha) * self.get_qvalue(state, action) + self.alpha * (
reward + self.discount * self.get_value(next_state)
)
# Update the Q-values for the next iteration
self.set_qvalue(state, action, c_q_value)
def get_best_action(self, state):
"""
Compute the best action to take in a state (using current q-values).
Args:
state (integer): Index representation of the current state (Row of the Q-matrix)
Returns:
integer: Index representation of the best action (Column of the Q-matrix)
for the given state (Row of the Q-matrix)
"""
# Pick the Action (Row of the Q-matrix) with the highest q-value
best_action = numba_argmax(self._qvalues[state, :])
return best_action
def get_action(self, state):
"""
Compute the action to take in the current state, including exploration.
With probability self.epsilon, we take a random action.
Returns both, the chosen action (with exploration) and the best action (argmax).
If the chosen action is the same as the best action, both returns will be
the same.
Args:
state (integer): Integer representation of the current state (Row of the Q-matrix)
Returns:
tuple: chosen_action, best_action
            chosen_action (integer): Index representation of the actually picked action
(Column of the Q-matrix)
best_action (integer): Index representation of the current best action
(Column of the Q-matrix) in the given state.
"""
# agent parameters:
epsilon = self.epsilon
e_threshold = np.random.random()
# Get the best action.
best_action = self.get_best_action(state)
if e_threshold < epsilon:
# In the numpy.random module randint() is exclusive for the upper
# bound and inclusive for the lower bound -> Actions are array
# indices for us.
chosen_action = np.random.randint(0, self.n_actions)
else:
chosen_action = best_action
return chosen_action, best_action
spec = [
("n_actions", int32),
("n_states", int32),
("_qvalues", float64[:, :]),
("alpha", float64),
("epsilon", float64),
("discount", float64),
]
@jitclass(spec)
class QLearningAgent(QLearningAgentBase):
"""
Wrapper class to create a jitclass for the QLearningAgent.
Not that this class cannot be serialized. Hence, if you want
to save the trained agent as a pickle file, use the base class.
Note that for the random seed to work, you need to do it in
a njit wrapper function. From the numba documentation:
"Calling numpy.random.seed() from non-Numba code (or from object mode code)
will seed the Numpy random generator, not the Numba random generator."
"""
def jitclass_to_baseclass(agent_jit):
"""
A helper function to create a new QLearningAgentBase
object from the jitclass equivalent. This is needed
as we cannot serialize jitclasses in the current
numba version.
The function takes all parameters from the QLearningAgent
*agent_jit* and rewrites it to a new QLearningAgentBase
object.
Args:
agent_jit (QLearningAgent): jitclass instance of agent
Returns:
QLearningAgentBase: Serializable version of the agent
"""
agent_nojit = QLearningAgentBase(
alpha=agent_jit.alpha,
epsilon=agent_jit.epsilon,
discount=agent_jit.discount,
n_actions=agent_jit.n_actions,
n_states=agent_jit.n_states,
)
agent_nojit.set_qmatrix(new_matrix=agent_jit.get_qmatrix())
return agent_nojit
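A minimal training-loop sketch under stated assumptions: env_step is a hypothetical, Numba-compiled transition function returning (reward, next_state), and the whole loop sits in an njit wrapper so that np.random.seed seeds Numba's generator rather than NumPy's, as the module docstring warns:

@njit
def train(agent, env_step, n_steps, seed):
    np.random.seed(seed)  # inside njit code this seeds the Numba RNG
    state = 0
    for _ in range(n_steps):
        action, _best = agent.get_action(state)
        reward, next_state = env_step(state, action)
        agent.update(state, action, reward, next_state)
        state = next_state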
--- file: Code/Test Code/UI Tests/move.py | repo: mwyoung/Cornhole-Robot | license: MIT | size: 2,825 bytes ---

#!/usr/bin/python
# with help from teleop_keyboard.py,
# https://github.com/ros-teleop/teleop_twist_keyboard/blob/master/teleop_twist_keyboard.py
# Graylin Trevor Jay and Austin Hendrix, BSD licensed
import roslib; #roslib.load_manifest('teleop_move')
import rospy
from geometry_msgs.msg import Twist
import sys, select, termios, tty
starting_msg = """Move with:
i
j k l
(or wasd, space to stop)
CTRL-C to quit
"""
movement={
'i':(1,0,0,0),
'j':(0,0,0,1),
'k':(0,0,0,-1),
'l':(-1,0,0,0),
'w':(1,0,0,0),
'a':(0,0,0,1),
's':(0,0,0,-1),
'd':(-1,0,0,0),
' ':(0,0,0,0),
}
def checkForArrowKeys(key):
if (key=='\x1b[A'):
return "i"
elif (key=='\x1b[D'):
return "j"
elif (key=='\x1b[B'):
return "k"
elif (key=='\x1b[C'):
return "l"
else:
return key
def getKey():
    tty.setraw(sys.stdin.fileno())
    select.select([sys.stdin], [], [], 0)
    key = sys.stdin.read(1)
    # Arrow keys arrive as three-byte escape sequences (e.g. '\x1b[A'),
    # so read the rest of the sequence before trying to match it.
    if key == '\x1b':
        key += sys.stdin.read(2)
    key = checkForArrowKeys(key)
    print(key)
    termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
    return key
def current_vel(speed,turn):
return "velocity: speed %s turn %s" % (speed, turn)
def main():
# control terminal printing
global settings
settings = termios.tcgetattr(sys.stdin)
#rospy stuff
#pub = rospy.Publisher('cmd_vel', Twist, queue_size = 1)
#rospy.init_node('teleop_move')
# get battery info?
#speed = rospy.get_param("~speed", 0.5)
#turn = rospy.get_param("~turn", 1.0)
speed = 0.5; turn = 1.0;
x = 0; y = 0; z = 0; th = 0; status = 0
try:
print(starting_msg)
print(current_vel(speed,turn))
#execute always
while(1):
key = getKey()
if key in movement.keys():
x = movement[key][0]
y = movement[key][1]
z = movement[key][2]
th = movement[key][3]
print("x ", x, " y ", y, " z ", z, " th ", th)
else:
x = 0; y = 0; z = 0; th = 0
# if control key
if (key == '\x03'):
break
print("??")
key = ""
#twist = Twist()
#twist.linear.x = x*speed; twist.linear.y = y*speed; twist.linear.z = z*speed
#twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = th*turn
#pub.publish(twist)
except Exception as e:
print(e)
finally:
#twist = Twist()
#twist.linear.x = x*speed; twist.linear.y = y*speed; twist.linear.z = z*speed
#twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = th*turn
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
if __name__=="__main__":
main()
--- file: stanCode_Projects/my_photoshop/shrink.py | repo: wilson51678/sc-projects | license: MIT | size: 1,561 bytes ---

"""
File: shrink.py
Name: Wilson Wang 2020/08/05
-------------------------------
Create a new "out" image half the width and height of the original.
Set pixels at x = 0, 1, 2, 3 in out from x = 0, 2, 4, 6 in the original,
and likewise in the y direction.
"""
from simpleimage import SimpleImage
def shrink(filename):
"""
This function should shrink the 'filename' image into a 1/2 size new image.
:param filename: img, the image of origin size
:return img: new_img, the image of half size of the origin photo
"""
img = SimpleImage(filename)
    # Make a blank image at half the width and height of the original
    new_img = SimpleImage.blank(img.width//2, img.height//2)
for y in range(new_img.height):
for x in range(new_img.width):
            # Sample every other pixel of the original photo: x = 0, 2, 4, 6, ...
img_pixel = img.get_pixel(x*2,y*2)
new_img_pixel = new_img.get_pixel(x,y)
            # Copy the sampled pixel's RGB channels into the new image
new_img_pixel.red = img_pixel.red
new_img_pixel.green = img_pixel.green
new_img_pixel.blue = img_pixel.blue
return new_img
def main():
"""
    This program shrinks any image into a half-size photo (without using make_as_big_as).
"""
original = SimpleImage("images/poppy.png")
original.show()
after_shrink = shrink("images/poppy.png")
after_shrink.show()
if __name__ == '__main__':
main()
--- file: src/genie/libs/parser/iosxe/show_ip_dhcp.py | repo: komurzak-cisco/genieparser | license: Apache-2.0 | size: 15,349 bytes | stars: 1 ---

"""
show ip dhcp database
show ip dhcp snooping database
show ip dhcp snooping database detail
"""
# Python
import re
# Metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import (Schema, Any, Optional,
Or, And, Default, Use)
# Parser Utils
from genie.libs.parser.utils.common import Common
# =======================================
# Schema for 'show ip dhcp database'
# =======================================
class ShowIpDhcpDatabaseSchema(MetaParser):
"""
Schema for show ip dhcp database
"""
schema = {
'url': {
str: {
'read': str,
'written': str,
'status': str,
'delay_in_secs': int,
'timeout_in_secs': int,
'failures': int,
'successes': int
}
}
}
# =======================================
# Parser for 'show ip dhcp database'
# =======================================
class ShowIpDhcpDatabase(ShowIpDhcpDatabaseSchema):
"""
Parser for show ip dhcp database
"""
cli_command = 'show ip dhcp database'
def cli(self, output=None):
if not output:
out = self.device.execute(self.cli_command)
else:
out = output
# URL : ftp://user:password@172.16.4.253/router-dhcp
p1 = re.compile(r'^URL +: +(?P<url>(\S+))$')
# Read : Dec 01 1997 12:01 AM
p2 = re.compile(r'^Read +: +(?P<read>(.+))$')
# Written : Never
p3 = re.compile(r'^Written +: +(?P<written>(\S+))$')
# Status : Last read succeeded. Bindings have been loaded in RAM.
p4 = re.compile(r'^Status +: +(?P<status>(.+))$')
# Delay : 300 seconds
p5 = re.compile(r'^Delay +: +(?P<delay>(\d+))')
# Timeout : 300 seconds
p6 = re.compile(r'^Timeout +: +(?P<timeout>(\d+))')
# Failures : 0
p7 = re.compile(r'^Failures +: +(?P<failures>(\d+))$')
# Successes : 1
p8 = re.compile(r'^Successes +: +(?P<successes>(\d+))$')
ret_dict = {}
for line in out.splitlines():
line.strip()
# URL : ftp://user:password@172.16.4.253/router-dhcp
m = p1.match(line)
if m:
url_dict = ret_dict.setdefault('url', {}).setdefault(m.groupdict()['url'], {})
# ret_dict.update({'url': m.groupdict()['url']})
continue
# Read : Dec 01 1997 12:01 AM
m = p2.match(line)
if m:
url_dict.update({'read': m.groupdict()['read']})
continue
# Written : Never
m = p3.match(line)
if m:
url_dict.update({'written': m.groupdict()['written']})
continue
# Status : Last read succeeded. Bindings have been loaded in RAM.
m = p4.match(line)
if m:
url_dict.update({'status': m.groupdict()['status']})
continue
# Delay : 300 seconds
m = p5.match(line)
if m:
url_dict.update({'delay_in_secs': int(m.groupdict()['delay'])})
continue
# Timeout : 300 seconds
m = p6.match(line)
if m:
url_dict.update({'timeout_in_secs': int(m.groupdict()['timeout'])})
continue
# Failures : 0
m = p7.match(line)
if m:
url_dict.update({'failures': int(m.groupdict()['failures'])})
continue
# Successes : 1
m = p8.match(line)
if m:
url_dict.update({'successes': int(m.groupdict()['successes'])})
continue
return ret_dict
# ===================================================
# Schema for 'show ip dhcp snooping database'
# 'show ip dhcp snooping database detail'
# ===================================================
class ShowIpDhcpSnoopingDatabaseSchema(MetaParser):
"""
Schema for show ip dhcp snooping database
show ip dhcp snooping database detail
"""
schema = {
'agent_url': str,
'write_delay_secs': int,
'abort_timer_secs': int,
'agent_running': str,
'delay_timer_expiry': str,
'abort_timer_expiry': str,
'last_succeeded_time': str,
'last_failed_time': str,
'last_failed_reason': str,
'total_attempts': int,
'startup_failures': int,
'successful_transfers': int,
'failed_transfers': int,
'successful_reads': int,
'failed_reads': int,
'successful_writes': int,
'failed_writes': int,
'media_failures': int,
Optional('detail'): {
'first_successful_access': str,
'last_ignored_bindings_counters': {
'binding_collisions': int,
'expired_leases': int,
'invalid_interfaces': int,
'unsupported_vlans': int,
'parse_failures': int
},
'last_ignored_time': str,
'total_ignored_bindings_counters': {
'binding_collisions': int,
'expired_leases': int,
'invalid_interfaces': int,
'unsupported_vlans': int,
'parse_failures': int
}
}
}
# ===================================================
# Parser for 'show ip dhcp snooping database'
# ===================================================
class ShowIpDhcpSnoopingDatabase(ShowIpDhcpSnoopingDatabaseSchema):
"""
Parser for show ip dhcp snooping database
"""
cli_command = 'show ip dhcp snooping database'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# Initializes the Python dictionary variable
ret_dict = {}
# Agent URL :
p1 = re.compile(r'^Agent URL +: +(?P<agent_url>\S*)$')
# Write delay Timer : 300 seconds
p2 = re.compile(r'^Write delay Timer +: +(?P<write_delay_secs>\d+) seconds$')
# Abort Timer : 300 seconds
p3 = re.compile(r'^Abort Timer +: +(?P<abort_timer_secs>\d+) seconds$')
# Agent Running : No
p4 = re.compile(r'^Agent Running +: +(?P<agent_running>\w+)$')
# Delay Timer Expiry : Not Running
p5 = re.compile(r'^Delay Timer Expiry +: +(?P<delay_timer_expiry>.+)$')
# Abort Timer Expiry : Not Running
p6 = re.compile(r'^Abort Timer Expiry +: +(?P<abort_timer_expiry>.+)$')
# Last Succeded Time : None
p7 = re.compile(r'^Last Succee?ded Time +: +(?P<last_succeeded_time>.+)$')
# Last Failed Time : None
p8 = re.compile(r'^Last Failed Time +: +(?P<last_failed_time>.+)$')
# Last Failed Reason : No failure recorded.
p9 = re.compile(r'^Last Failed Reason +: +(?P<last_failed_reason>[\w ]+)\.?$')
# Total Attempts : 0 Startup Failures : 0
p10 = re.compile(r'^Total Attempts +: +(?P<total_attempts>\d+) +Startup Failures +: +(?P<startup_failures>\d+)$')
# Successful Transfers : 0 Failed Transfers : 0
p11 = re.compile(r'^Successful Transfers +: +(?P<successful_transfers>\d+) +Failed Transfers +: +(?P<failed_transfers>\d+)$')
# Successful Reads : 0 Failed Reads : 0
p12 = re.compile(r'^Successful Reads +: +(?P<successful_reads>\d+) +Failed Reads +: +(?P<failed_reads>\d+)$')
# Successful Writes : 0 Failed Writes : 0
p13 = re.compile(r'^Successful Writes +: +(?P<successful_writes>\d+) +Failed Writes +: +(?P<failed_writes>\d+)$')
# Media Failures : 0
p14 = re.compile(r'^Media Failures +: +(?P<media_failures>\d+)$')
# First successful access: Read
p15 = re.compile(r'^First successful access *: +(?P<first_successful_access>\w+)$')
# Last ignored bindings counters :
p16 = re.compile(r'^Last ignored bindings counters *:$')
# Binding Collisions : 0 Expired leases : 0
p17 = re.compile(r'^Binding Collisions +: +(?P<binding_collisions>\d+) +Expired leases +: +(?P<expired_leases>\d+)$')
# Invalid interfaces : 0 Unsupported vlans : 0
p18 = re.compile(r'^Invalid interfaces +: +(?P<invalid_interfaces>\d+) +Unsupported vlans : +(?P<unsupported_vlans>\d+)$')
# Parse failures : 0
p19 = re.compile(r'^Parse failures +: +(?P<parse_failures>\d+)$')
# Last Ignored Time : None
p20 = re.compile(r'^Last Ignored Time +: +(?P<last_ignored_time>.+)$')
# Total ignored bindings counters :
p21 = re.compile(r'^Total ignored bindings counters *:$')
# Processes the matched patterns
for line in out.splitlines():
line.strip()
# Agent URL :
m = p1.match(line)
if m:
ret_dict['agent_url'] = m.groupdict()['agent_url']
continue
# Write delay Timer : 300 seconds
m = p2.match(line)
if m:
ret_dict['write_delay_secs'] = int(m.groupdict()['write_delay_secs'])
continue
# Abort Timer : 300 seconds
m = p3.match(line)
if m:
ret_dict['abort_timer_secs'] = int(m.groupdict()['abort_timer_secs'])
continue
# Agent Running : No
m = p4.match(line)
if m:
ret_dict['agent_running'] = m.groupdict()['agent_running']
continue
# Delay Timer Expiry : Not Running
m = p5.match(line)
if m:
ret_dict['delay_timer_expiry'] = m.groupdict()['delay_timer_expiry']
continue
# Abort Timer Expiry : Not Running
m = p6.match(line)
if m:
ret_dict['abort_timer_expiry'] = m.groupdict()['abort_timer_expiry']
continue
# Last Succeded Time : None
m = p7.match(line)
if m:
ret_dict['last_succeeded_time'] = m.groupdict()['last_succeeded_time']
continue
# Last Failed Time : None
m = p8.match(line)
if m:
ret_dict['last_failed_time'] = m.groupdict()['last_failed_time']
continue
# Last Failed Reason : No failure recorded.
m = p9.match(line)
if m:
ret_dict['last_failed_reason'] = m.groupdict()['last_failed_reason']
continue
# Total Attempts : 0 Startup Failures : 0
m = p10.match(line)
if m:
ret_dict['total_attempts'] = int(m.groupdict()['total_attempts'])
ret_dict['startup_failures'] = int(m.groupdict()['startup_failures'])
continue
# Successful Transfers : 0 Failed Transfers : 0
m = p11.match(line)
if m:
ret_dict['successful_transfers'] = int(m.groupdict()['successful_transfers'])
ret_dict['failed_transfers'] = int(m.groupdict()['failed_transfers'])
continue
# Successful Reads : 0 Failed Reads : 0
m = p12.match(line)
if m:
ret_dict['successful_reads'] = int(m.groupdict()['successful_reads'])
ret_dict['failed_reads'] = int(m.groupdict()['failed_reads'])
continue
# Successful Writes : 0 Failed Writes : 0
m = p13.match(line)
if m:
ret_dict['successful_writes'] = int(m.groupdict()['successful_writes'])
ret_dict['failed_writes'] = int(m.groupdict()['failed_writes'])
continue
# Media Failures : 0
m = p14.match(line)
if m:
ret_dict['media_failures'] = int(m.groupdict()['media_failures'])
continue
# First successful access: Read
m = p15.match(line)
if m:
detail_dict = ret_dict.setdefault('detail', {})
detail_dict['first_successful_access'] = m.groupdict()['first_successful_access']
continue
# Last ignored bindings counters :
m = p16.match(line)
if m:
bindings_dict = detail_dict.setdefault('last_ignored_bindings_counters', {})
continue
# Binding Collisions : 0 Expired leases : 0
m = p17.match(line)
if m:
bindings_dict['binding_collisions'] = int(m.groupdict()['binding_collisions'])
bindings_dict['expired_leases'] = int(m.groupdict()['expired_leases'])
continue
# Invalid interfaces : 0 Unsupported vlans : 0
m = p18.match(line)
if m:
bindings_dict['invalid_interfaces'] = int(m.groupdict()['invalid_interfaces'])
bindings_dict['unsupported_vlans'] = int(m.groupdict()['unsupported_vlans'])
continue
# Parse failures : 0
m = p19.match(line)
if m:
bindings_dict['parse_failures'] = int(m.groupdict()['parse_failures'])
continue
# Last Ignored Time : None
m = p20.match(line)
if m:
detail_dict['last_ignored_time'] = m.groupdict()['last_ignored_time']
continue
# Total ignored bindings counters :
m = p21.match(line)
if m:
bindings_dict = detail_dict.setdefault('total_ignored_bindings_counters', {})
continue
return ret_dict
# ===================================================
# Parser for 'show ip dhcp snooping database detail'
# ===================================================
class ShowIpDhcpSnoopingDatabaseDetail(ShowIpDhcpSnoopingDatabase):
"""
Parser for show ip dhcp snooping database detail
"""
cli_command = 'show ip dhcp snooping database detail'
def cli(self, output=None):
if output is None:
output = self.device.execute(self.cli_command)
        return super().cli(output=output)
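These parser classes are normally reached through a pyATS/Genie device handle rather than instantiated directly; a typical invocation (the testbed file and device name are placeholders) would be:

from genie.testbed import load

testbed = load("testbed.yaml")
device = testbed.devices["router-1"]
device.connect()
parsed = device.parse("show ip dhcp snooping database detail")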
--- file: backend/flaskr/__init__.py | repo: DakyungAndEunji/2021-ICE-Capstone-Project | license: MIT | size: 1,051 bytes | stars: 1 | issues: 11 ---

### flaskr/__init__.py
import sys
import json
import ipaddress
import paramiko
def func_createcont(br,r,IP):
print("\nMaking containers "+r)
print("sudo docker run -itd --cap-add=NET_ADMIN --name "+r+" main-vm")
os.system("sudo docker run -itd --cap-add=NET_ADMIN --name "+r+" main-vm")
print("ovs-docker add-port "+br+" brock "+r)
os.system("ovs-docker add-port "+br+" brock "+r)
print("sudo docker exec -it "+r+" ip route del default")
os.system("sudo docker exec -it "+r+" ip route del default")
print("sudo docker exec -it "+r+" dhclient brock")
os.system("sudo docker exec -it "+r+" dhclient brock")
print("sudo docker exec -it "+r+" ip addr add "+IP+"/24 dev brock")
os.system("sudo docker exec -it "+r+" ip addr add "+IP+"/24 dev brock")
br=sys.argv[1]
r=sys.argv[2]
IP=sys.argv[3]
func_createcont(br,r,IP)
| 33.92 | 78 | 0.650943 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 482 | 0.568396 |
f84d7afc084777032cfb27a9f3d492736584d51d | 1,051 | py | Python | backend/flaskr/__init__.py | DakyungAndEunji/2021-ICE-Capstone-Project | 71761bf66bd170eae48a8084331ed1d00f9c184b | [
"MIT"
] | 1 | 2021-05-11T04:08:58.000Z | 2021-05-11T04:08:58.000Z | backend/flaskr/__init__.py | DakyungAndEunji/2021-ICE-Capstone-Project | 71761bf66bd170eae48a8084331ed1d00f9c184b | [
"MIT"
] | 11 | 2021-04-06T15:22:47.000Z | 2021-06-01T05:13:43.000Z | backend/flaskr/__init__.py | DakyungAndEunji/2021-ICE-Capstone-Project | 71761bf66bd170eae48a8084331ed1d00f9c184b | [
"MIT"
] | null | null | null | ### flaskr/__init__.py
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
def create_app(test_config = None):
# create and configure the app
app = Flask(__name__, instance_relative_config=True)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:toor!@localhost:3306/tps?charset=utf8'
app.config['SQLALCHEMY_ECHO'] = True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.secret_key = 'manyrandombyte'
if test_config is None:
# Load the instance config, if it exists, when not testing
app.config.from_pyfile('config.py', silent=True)
else:
# Load the test config if passed in
app.config.from_mapping(test_config)
# ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
db.init_app(app)
with app.app_context():
db.create_all()
from flaskr.view import productController
app.register_blueprint(productController.bp)
    return app
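A sketch of driving the factory from an entry point; the test-config keys shown are illustrative:

from flaskr import create_app

app = create_app()  # reads instance/config.py when present

# In tests, pass an override mapping so the instance config is skipped:
test_app = create_app(test_config={"TESTING": True})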
--- file: backend/ec2.py | repo: yubinhong/AutoAws | license: MIT | size: 7,228 bytes | stars: 1 ---

import boto3
import time
class AwsEc2(object):
def __init__(self, access_key, secret_key):
self.access_key = access_key
self.secret_key = secret_key
self.client = boto3.client(service_name='ec2', region_name="ap-northeast-1", aws_access_key_id=self.access_key,
aws_secret_access_key=self.secret_key)
self.resource = boto3.resource(service_name='ec2', region_name="ap-northeast-1",
aws_access_key_id=self.access_key, aws_secret_access_key=self.secret_key)
def get_instance(self, vpc_id, servername):
res = self.client.describe_instances(
Filters=[
{
'Name': 'vpc-id',
'Values': [
vpc_id,
]
},
{
'Name': 'tag:Name',
'Values': [
servername
]
}
],
)
return res
def get_instance_by_resource(self, vpc_id):
instance_list = self.resource.instances.all()
res_list = []
for i in instance_list:
if i.vpc_id == vpc_id:
res_list.append(i)
return res_list
def get_vpc(self):
res = self.client.describe_vpcs()
return res
def get_subnet(self, vpc_id):
res = self.client.describe_subnets(
Filters=[
{
'Name': 'vpc-id',
'Values': [
vpc_id,
]
},
]
)
return res
def get_security_group(self, **kwargs):
filter_dict = {}
if len(kwargs.keys()) > 0:
for key in kwargs.keys():
if kwargs[key] != '':
filter_dict[key] = kwargs[key]
filter_list = [{'Name': key, 'Values': [value]} for key, value in filter_dict.items()]
res = self.client.describe_security_groups(
Filters=filter_list
)
else:
res = self.client.describe_security_groups()
return res
def create_security_group(self, name, vpc_id):
res = self.client.create_security_group(
Description=name,
GroupName=name,
VpcId=vpc_id,
)
return res
def security_group(self, name, vpc_id):
try:
res = self.create_security_group(name, vpc_id)
except Exception as e:
param_dict = {'group-name': name}
res = self.get_security_group(**param_dict)['SecurityGroups'][0]
return res
def modified_security_group(self, instance_id, groups):
try:
res = self.client.modify_instance_attribute(InstanceId=instance_id, Groups=groups)
result = {'code': 0, 'msg': res}
except Exception as e:
print(e)
result = {'code': 1, 'msg': str(e)}
return result
def create_instance_from_template(self, instance_template_list, vpc_id, subnet_id):
res_list = []
for instance_template in instance_template_list:
res1 = self.security_group(instance_template['name'], vpc_id)
res = self.resource.create_instances(
BlockDeviceMappings=[
{
'DeviceName': '/dev/sda1',
'Ebs': {
'DeleteOnTermination': False,
'VolumeSize': instance_template['disk'],
'VolumeType': 'gp2',
'Encrypted': False
}
},
],
ImageId=instance_template['image_id'],
InstanceType=instance_template['instance_type'],
KeyName=instance_template['key_name'],
NetworkInterfaces=[
{
'AssociatePublicIpAddress': True,
'DeleteOnTermination': True,
'DeviceIndex': 0,
'Groups': [
res1['GroupId'],
],
'SubnetId': subnet_id,
'InterfaceType': 'interface'
},
],
MaxCount=instance_template['count'],
MinCount=instance_template['count'],
)
for instance in res:
status = instance.state
while status['Code'] != 16:
time.sleep(6)
instance.load()
status = instance.state
if status['Code'] == 16:
instance.create_tags(
Tags=[{
'Key': 'Name',
'Value': instance_template['name']
}]
)
res_list.append(instance)
return res_list
def create_instance(self, instance_dict, vpc_id, subnet_id):
res1 = self.security_group(instance_dict['name'], vpc_id)
try:
res = self.resource.create_instances(
BlockDeviceMappings=[
{
'DeviceName': '/dev/sda1',
'Ebs': {
'DeleteOnTermination': False,
'VolumeSize': instance_dict['disk'],
'VolumeType': 'gp2',
'Encrypted': False
}
},
],
ImageId=instance_dict['image_id'],
InstanceType=instance_dict['instance_type'],
KeyName=instance_dict['key_name'],
NetworkInterfaces=[
{
'AssociatePublicIpAddress': True,
'DeleteOnTermination': True,
'DeviceIndex': 0,
'Groups': [
res1['GroupId'],
],
'SubnetId': subnet_id,
'InterfaceType': 'interface'
},
],
MaxCount=instance_dict['count'],
MinCount=instance_dict['count'],
)
except Exception as e:
result = {'code': 1, 'msg': str(e)}
return result
for instance in res:
status = instance.state
while status['Code'] != 16:
time.sleep(6)
instance.load()
status = instance.state
if status['Code'] == 16:
instance.create_tags(
Tags=[{
'Key': 'Name',
'Value': instance_dict['name']
}]
)
result = {'code': 0}
return result
if __name__ == "__main__":
ec2 = AwsEc2("", "")
res = ec2.get_instance_by_resource('xxxxxx')
for i in res:
print(i.placement)
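The instance_dict contract is implied by create_instance above; a sketch of a full call, with every identifier a placeholder:

ec2 = AwsEc2("<access_key>", "<secret_key>")
result = ec2.create_instance(
    instance_dict={
        "name": "web-1",
        "disk": 30,  # root gp2 volume size in GiB
        "image_id": "ami-0123456789abcdef0",
        "instance_type": "t3.micro",
        "key_name": "my-keypair",
        "count": 1,
    },
    vpc_id="vpc-0123456789abcdef0",
    subnet_id="subnet-0123456789abcdef0",
)
print(result)  # {'code': 0} on success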
--- file: control/voiceControl.py | repo: Lluxent/CorporateClashUtility | license: MIT | size: 2,299 bytes | stars: 2 ---

import control
import speech_recognition as sr
def recognize_speech_from_mic(recognizer, microphone):
"""Transcribe speech from recorded from `microphone`.
Returns a dictionary with three keys:
"success": a boolean indicating whether or not the API request was
successful
"error": `None` if no error occured, otherwise a string containing
an error message if the API could not be reached or
speech was unrecognizable
"transcription": `None` if speech could not be transcribed,
otherwise a string containing the transcribed text
"""
# check that recognizer and microphone arguments are appropriate type
if not isinstance(recognizer, sr.Recognizer):
raise TypeError("`recognizer` must be `Recognizer` instance")
if not isinstance(microphone, sr.Microphone):
raise TypeError("`microphone` must be `Microphone` instance")
# adjust the recognizer sensitivity to ambient noise and record audio
# from the microphone
with microphone as source:
recognizer.adjust_for_ambient_noise(source)
audio = recognizer.listen(source)
# set up the response object
response = {
"success" : True,
"error" : None,
"transcription" : None
}
# try recognizing the speech in the recording
# if a RequestError or UnknownValueError exception is caught, update the response object accordingly
try:
response["transcription"] = recognizer.recognize_google(audio)
except sr.RequestError:
# API was unreachable or unresponsive
response["success"] = False
response["error"] = "API unavailable"
except sr.UnknownValueError:
# speech was unintelligible
response["error"] = "Unable to recognize speech"
return response
r = sr.Recognizer()
m = sr.Microphone()
while(True):
while(True):
print('Listening... ')
arg = recognize_speech_from_mic(r, m)
if arg["transcription"]:
break
if not arg["success"]:
break
if arg["error"]:
print('Error! {}'.format(arg["error"]))
pass
print('Heard: {}'.format(arg["transcription"]))
control.doAction(str.lower(arg["transcription"])) | 33.808824 | 104 | 0.653763 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,215 | 0.528491 |
--- file: lookatweb/rules/objects.py | repo: ivbeg/lookatweb | license: BSD-3-Clause | size: 6,721 bytes | stars: 2 ---

from .consts import *
# Object matching by classid
OBJECTS_CLSID_RULES = [
{'type' : RULETYPE_EQUAL, 'text' : 'clsid:D27CDB6E-AE6D-11cf-96B8-444553540000',
'entities' : [
{'name' : 'web:tech/flash'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'clsid:d27cdb6e-ae6d-11cf-96b8-444553540000',
'entities' : [
{'name' : 'web:tech/flash'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'clsid:-D27CDB6E-AE6D-11cf-96B8-444553540000',
'entities' : [
{'name' : 'web:tech/flash'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'CLSID:22D6F312-B0F6-11D0-94AB-0080C74C7E95',
'entities' : [
{'name' : 'web:tech:activex/wmplayer'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'CLSID:22D6F312-B0F6-11D0-94AB-0080C74C7E95',
'entities' : [
{'name' : 'web:tech:activex/wmplayer'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'clsid:22D6F312-B0F6-11D0-94AB-0080C74C7E95',
'entities' : [
{'name' : 'web:tech:activex/wmplayer'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'clsid:6BF52A52-394A-11D3-B153-00C04F79FAA6',
'entities' : [
{'name' : 'web:tech:activex/wmplayer'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'CLSID:CFCDAA03-8BE4-11cf-B84B-0020AFBBCCFA',
'entities' : [
{'name' : 'web:tech:activex/realplayer'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'clsid:CFCDAA03-8BE4-11cf-B84B-0020AFBBCCFA',
'entities' : [
{'name' : 'web:tech:activex/realplayer'}
]
},
]
# match object tags by type
OBJECTS_TYPE_RULES = [
{'type' : RULETYPE_EQUAL, 'text' : 'application/x-silverlight-2',
'entities' : [
{'name' : 'web:tech/silverlight'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'application/x-shockwave-flash',
'entities' : [
{'name' : 'web:tech/flash'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'application/x-oleobject',
'entities' : [
{'name' : 'web:tech/activex'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'image/svg+xml',
'entities' : [
{'name' : 'web:tech/svg'}
]
},
]
# match object tags by data
OBJECTS_DATA_RULES = [
{'type' : RULETYPE_REGEXP, 'text' : '^http://img\.yandex\.net/i/time/clock\.swf',
'entities' : [
{'name' : 'web:widgets:clock/yandexclock'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://vimeo\.com',
'entities' : [
{'name' : 'web:media:video/vimeo'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://www\.youtube\.com',
'entities' : [
{'name' : 'web:media:video/youtube'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://cdn\.last\.fm/widgets/chart',
'entities' : [
{'name' : 'web:widgets:audio/lastfm'}
]
},
]
# match object tags by embed src
EMBED_SRC_RULES = [
{'type' : RULETYPE_REGEXP, 'text' : '^http://img\.mail\.ru/r/video2/player_v2\.swf',
'entities' : [
{'name' : 'web:media:video/mailru'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://flv\.video\.yandex\.ru',
'entities' : [
{'name' : 'web:media:video/yandex'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://img\.gismeteo\.ru/flash',
'entities' : [
{'name' : 'web:widgets:meteo/gismeteo'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://www\.clocklink\.com/clocks/',
'entities' : [
{'name' : 'web:widgets:time/clocklink'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'http://iii.ru/static/Vishnu.swf',
'entities' : [
{'name' : 'web:widgets:chat/iiiru'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://[a-z0-9]{1,3}\.videos\.sapo\.pt/play',
'entities' : [
{'name' : 'web:media:video/sapovideos'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'http://pub.tvigle.ru/swf/tvigle_single_v2.swf',
'entities' : [
{'name' : 'web:media:video/twigle'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://rpod\.ru/i/b/listen_240x400_01/core\.swf',
'entities' : [
{'name' : 'web:media:audio/rpodru'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://vision\.rambler\.ru/i/e\.swf',
'entities' : [
{'name' : 'web:media:video/ramblervision'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://pics\.smotri\.com/scrubber_custom8\.swf',
'entities' : [
{'name' : 'web:media:video/smotricom'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://www\.russia\.ru/player/main\.swf',
'entities' : [
{'name' : 'web:media:video/russiaru'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://video\.google\.(com|ru|ca|de)/googleplayer.swf',
'entities' : [
{'name' : 'web:media:video/googlevideo'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://www\.youtube\.com/v/',
'entities' : [
{'name' : 'web:media:video/youtube'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^/bitrix/templates/',
'entities' : [
{'name' : 'web:cms/bitrix'},
{'name' : 'web:tech:lang/php'},
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^/bitrix/components/',
'entities' : [
{'name' : 'web:cms/bitrix'},
{'name' : 'web:tech:lang/php'},
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://developer\.truveo\.com/apps/listWidget',
'entities' : [
{'name' : 'web:media:video/truveo'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://pics\.rbc\.ru/informer',
'entities' : [
{'name' : 'web:widgets:fin/rbcinformer'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://video\.rutube\.ru',
'entities' : [
{'name' : 'web:media:video/rutube'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://static\.twitter\.com/flash/widgets/profile/TwitterWidget\.swf',
'entities' : [
{'name' : 'web:widgets:blog/twitter'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://vimeo\.com/moogaloop.swf',
'entities' : [
{'name' : 'web:media:video/vimeo'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://www.1tv.ru/(n|p)video',
'entities' : [
{'name' : 'web:media:video/1tvru'}
]
},
]
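# Illustrative helper (not part of the original module): one plausible way the
# rule lists above could be applied to a candidate value. The function name,
# its arguments and the list-of-entity-names return shape are assumptions.
def match_rules(rules, value):
    """Return the entity names of every rule that matches `value`."""
    import re
    matched = []
    for rule in rules:
        if rule['type'] == RULETYPE_EQUAL:
            hit = (value == rule['text'])
        else:  # RULETYPE_REGEXP rules keep an uncompiled pattern in 'text'
            hit = re.search(rule['text'], value) is not None
        if hit:
            matched.extend(entity['name'] for entity in rule['entities'])
    return matched
# Example: match_rules(EMBED_SRC_RULES, 'http://www.youtube.com/v/abc')
# would return ['web:media:video/youtube'].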
| 30.139013 | 112 | 0.494867 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,591 | 0.534295 |
f856a06399d0483aa5762d750435935c90b3dd55 | 6,020 | py | Python | src/failprint/cli.py | pawamoy/woof | 5c8eccfe5c1343b5a399b5794c486b3c0de67c78 | [
"0BSD"
] | 6 | 2020-10-14T07:22:31.000Z | 2022-02-13T23:17:56.000Z | src/failprint/cli.py | pawamoy/woof | 5c8eccfe5c1343b5a399b5794c486b3c0de67c78 | [
"0BSD"
] | 10 | 2020-04-29T12:29:43.000Z | 2021-07-31T10:35:36.000Z | src/failprint/cli.py | pawamoy/woof | 5c8eccfe5c1343b5a399b5794c486b3c0de67c78 | [
"0BSD"
] | 1 | 2021-08-07T03:23:41.000Z | 2021-08-07T03:23:41.000Z | # Why does this file exist, and why not put this in `__main__`?
#
# You might be tempted to import things from `__main__` later,
# but that will cause problems: the code will get executed twice:
#
# - When you run `python -m failprint` python will execute
# `__main__.py` as a script. That means there won't be any
# `failprint.__main__` in `sys.modules`.
# - When you import `__main__` it will get executed again (as a module) because
# there's no `failprint.__main__` in `sys.modules`.
"""Module that contains the command line application."""
import argparse
from typing import List, Optional, Sequence
from failprint.capture import Capture
from failprint.formats import accept_custom_format, formats
from failprint.runners import run
class ArgParser(argparse.ArgumentParser):
"""A custom argument parser with a helper method to add boolean flags."""
def add_bool_argument(
self,
truthy: Sequence[str],
falsy: Sequence[str],
truthy_help: str = "",
falsy_help: str = "",
**kwargs,
) -> None:
"""
Add a boolean flag/argument to the parser.
Arguments:
truthy: Values that will store true in the destination.
falsy: Values that will store false in the destination.
truthy_help: Help for the truthy arguments.
falsy_help: Help for the falsy arguments.
**kwargs: Remaining keyword arguments passed to `argparse.ArgumentParser.add_argument`.
"""
truthy_kwargs = {**kwargs, "help": truthy_help, "action": "store_true"}
falsy_kwargs = {**kwargs, "help": falsy_help, "action": "store_false"}
mxg = self.add_mutually_exclusive_group()
mxg.add_argument(*truthy, **truthy_kwargs) # type: ignore # mypy is confused by arguments position
mxg.add_argument(*falsy, **falsy_kwargs) # type: ignore
def add_flags(parser, set_defaults=True) -> ArgParser:
"""
Add some boolean flags to the parser.
We made this method separate and public
for its use in [duty](https://github.com/pawamoy/duty).
Arguments:
parser: The parser to add flags to.
set_defaults: Whether to set default values on arguments.
Returns:
The augmented parser.
"""
# IMPORTANT: the arguments destinations should match
# the parameters names of the failprint.runners.run function.
# As long as names are consistent between the two,
# it's very easy to pass CLI args to the function,
# and it also allows to avoid duplicating the parser arguments
# in dependent projects like duty (https://github.com/pawamoy/duty) :)
parser.add_argument(
"-c",
"--capture",
choices=list(Capture),
type=Capture,
help="Which output to capture. Colors are supported with 'both' only, unless the command has a 'force color' option.",
)
parser.add_argument(
"-f",
"--fmt",
"--format",
dest="fmt",
choices=formats.keys(),
type=accept_custom_format,
default=None,
help="Output format. Pass your own Jinja2 template as a string with '-f custom=TEMPLATE'. "
"Available variables: command, title (command or title passed with -t), code (exit status), "
"success (boolean), failure (boolean), number (command number passed with -n), "
"output (command output), nofail (boolean), quiet (boolean), silent (boolean). "
"Available filters: indent (textwrap.indent).",
)
parser.add_bool_argument(
["-y", "--pty"],
["-Y", "--no-pty"],
dest="pty",
default=True if set_defaults else None,
truthy_help="Enable the use of a pseudo-terminal. PTY doesn't allow programs to use standard input.",
falsy_help="Disable the use of a pseudo-terminal. PTY doesn't allow programs to use standard input.",
)
parser.add_bool_argument(
["-p", "--progress"],
["-P", "--no-progress"],
dest="progress",
default=True if set_defaults else None,
truthy_help="Print progress while running a command.",
falsy_help="Don't print progress while running a command.",
)
parser.add_bool_argument(
["-q", "--quiet"],
["-Q", "--no-quiet"],
dest="quiet",
default=False if set_defaults else None,
truthy_help="Don't print the command output, even if it failed.",
falsy_help="Print the command output when it fails.",
)
parser.add_bool_argument(
["-s", "--silent"],
["-S", "--no-silent"],
dest="silent",
default=False if set_defaults else None,
truthy_help="Don't print anything.",
falsy_help="Print output as usual.",
)
parser.add_bool_argument(
["-z", "--zero", "--nofail"],
["-Z", "--no-zero", "--strict"],
dest="nofail",
default=False if set_defaults else None,
truthy_help="Don't fail. Always return a success (0) exit code.",
falsy_help="Return the original exit code.",
)
return parser
def get_parser() -> ArgParser:
"""
Return the CLI argument parser.
Returns:
An argparse parser.
"""
parser = add_flags(ArgParser(prog="failprint"))
parser.add_argument("-n", "--number", type=int, default=1, help="Command number. Useful for the 'tap' format.")
parser.add_argument("-t", "--title", help="Command title. Default is the command itself.")
parser.add_argument("cmd", metavar="COMMAND", nargs="+")
return parser
def main(args: Optional[List[str]] = None) -> int:
"""
Run the main program.
This function is executed when you type `failprint` or `python -m failprint`.
Arguments:
args: Arguments passed from the command line.
Returns:
An exit code.
"""
parser = get_parser()
opts = parser.parse_args(args).__dict__.items() # noqa: WPS609
return run(**{_: value for _, value in opts if value is not None}).code
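# Illustrative usage (not part of the original module): `main` can also be
# driven programmatically, e.g. from another Python program or a test. The
# command below is an arbitrary example.
def _example_usage() -> int:  # pragma: no cover
    return main(["--title", "List files", "--no-progress", "echo", "hello"])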
| 36.707317 | 126 | 0.635382 | 1,141 | 0.189535 | 0 | 0 | 0 | 0 | 0 | 0 | 3,554 | 0.590365 |
f858848401df27fd04f2c1792b618ab879328af0 | 1,112 | py | Python | siqbal/siqbal/doctype/item_label/item_label.py | smehata/siqbal | 8b6a21fb63c050237593c49757065198c0e2c54a | [
"MIT"
] | 1 | 2021-08-07T12:48:02.000Z | 2021-08-07T12:48:02.000Z | siqbal/siqbal/doctype/item_label/item_label.py | smehata/siqbal | 8b6a21fb63c050237593c49757065198c0e2c54a | [
"MIT"
] | null | null | null | siqbal/siqbal/doctype/item_label/item_label.py | smehata/siqbal | 8b6a21fb63c050237593c49757065198c0e2c54a | [
"MIT"
] | 4 | 2021-01-16T06:14:58.000Z | 2022-02-07T06:36:41.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2020, RC and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ItemLabel(Document):
def on_submit(self):
for d in self.get("items"):
			# Check whether an Item Price already exists for this price list; if it doesn't exist, create a new one. # Get the Item Price name
item_price_name = frappe.db.get_value("Item Price", {"item_code": d.item_code,"price_list": d.price_list},"name")
if not item_price_name:
self.make_item_price(d.item_code,d.price_list,d.item_price)
else :
old_item_price = frappe.db.get_value("Item Price", {"name": item_price_name},"price_list_rate")
				# update the Item Price if it already exists and the rate has changed
if(old_item_price != d.item_price):
frappe.db.set_value("Item Price", item_price_name, "price_list_rate", d.item_price)
def make_item_price(self,item, price_list_name, item_price):
frappe.get_doc({
"doctype": "Item Price",
"price_list": price_list_name,
"item_code": item,
"price_list_rate": item_price
}).insert(ignore_permissions=True)
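	# Illustrative call sequence (not part of the original doctype): submitting
	# an ItemLabel document through frappe triggers on_submit above, e.g.:
	#     doc = frappe.get_doc({"doctype": "Item Label", "items": [...]})
	#     doc.insert()
	#     doc.submit()  # runs on_submit -> creates/updates Item Price rows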
| 37.066667 | 116 | 0.732914 | 896 | 0.805755 | 0 | 0 | 0 | 0 | 0 | 0 | 411 | 0.369604 |
f859b964e5f9c3a181c35199baa7176223613982 | 1,308 | py | Python | www.py | MurphyWan/Python_Flask | 7ef61c8242b4edf05a1ce8c688564e7895017a76 | [
"MIT"
] | 1 | 2019-01-05T12:35:51.000Z | 2019-01-05T12:35:51.000Z | www.py | MurphyWan/Python_Flask | 7ef61c8242b4edf05a1ce8c688564e7895017a76 | [
"MIT"
] | null | null | null | www.py | MurphyWan/Python_Flask | 7ef61c8242b4edf05a1ce8c688564e7895017a76 | [
"MIT"
] | null | null | null | # coding:utf-8
# author:MurphyWan
# Blueprints are registered in www.py
""" controller.py/index.py
from flask import Blueprint
route_index = Blueprint('index_page', __name__)
@route_index.route("/")
def index():
return "Hello World"
"""
from application import app
'''
Unified interceptors all live here
'''
from web.interceptors.Authinterceptor import *
'''
Blueprint setup: configure blueprint routing for every URL
'''
from web.controllers.index import route_index
from web.controllers.user.User import route_user
from web.controllers.static import route_static
from web.controllers.account.Account import route_account # account management
from web.controllers.food.Food import route_food # product management
from web.controllers.member.Member import route_member # member management
from web.controllers.finance.Finance import route_finance # finance management
from web.controllers.stat.Stat import route_stat # statistics management
# Register the blueprints
app.register_blueprint(route_index, url_prefix='/')
# Then import www.py from the entry file manager.py, which pulls in everything registered here
app.register_blueprint(route_user, url_prefix='/user')
app.register_blueprint(route_static, url_prefix='/static')
app.register_blueprint(route_account, url_prefix='/account')
app.register_blueprint(route_food, url_prefix='/food')
app.register_blueprint(route_member, url_prefix='/member')
app.register_blueprint(route_finance, url_prefix='/finance')
app.register_blueprint(route_stat, url_prefix='/stat')
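# Illustrative note (not part of the original file): with url_prefix='/user',
# a rule declared inside the blueprint as @route_user.route('/login') is
# served at the combined path '/user/login'.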
| 29.727273 | 65 | 0.798165 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 555 | 0.379617 |
f85a24e0d9a829e5ba4097a173e5c180ffe2795f | 1,410 | py | Python | Summarizing-Data-with-statistics-/code.py | Tushar23dhongade/ga-learner-dsmp-repo | cf5550a36d2f5d3a91940d7ac8a245d5040cd9d1 | [
"MIT"
] | null | null | null | Summarizing-Data-with-statistics-/code.py | Tushar23dhongade/ga-learner-dsmp-repo | cf5550a36d2f5d3a91940d7ac8a245d5040cd9d1 | [
"MIT"
] | null | null | null | Summarizing-Data-with-statistics-/code.py | Tushar23dhongade/ga-learner-dsmp-repo | cf5550a36d2f5d3a91940d7ac8a245d5040cd9d1 | [
"MIT"
] | null | null | null | # --------------
#Header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#path of the data file- path
data=pd.read_csv(path)
data["Gender"].replace("-","Agender",inplace=True)
gender_count=data.Gender.value_counts()
gender_count.plot(kind="bar")
#Code starts here
# --------------
#Code starts here
alignment=data.Alignment.value_counts()
plt.pie(alignment,labels=["good","bad","neutral"])
# --------------
#Code starts here
sc_df=data[["Strength","Combat"]]
sc_covariance=sc_df.cov().iloc[0,1]
sc_strength=sc_df.Strength.std()
sc_combat=sc_df.Combat.std()
sc_pearson=sc_covariance/(sc_strength*sc_combat)
print(sc_pearson)
ic_df=data[["Intelligence","Combat"]]
ic_covariance=ic_df.cov().iloc[0,1]
ic_intelligence=ic_df.Intelligence.std()
ic_combat=ic_df.Combat.std()
ic_pearson=ic_covariance/(ic_intelligence*ic_combat)
print(ic_pearson)
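# Cross-check (illustrative, not part of the original solution): pandas'
# correlation matrix yields the same Pearson coefficients directly.
print(sc_df.corr().iloc[0,1])
print(ic_df.corr().iloc[0,1])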
# --------------
#Code starts here
total_high=data.Total.quantile(0.99)
super_best=data[data.Total>total_high]
super_best_names=list(super_best.Name)
print(super_best_names)
# --------------
#Code starts here
Intelligence, ax_1 = plt.subplots()
ax_1.boxplot(data.Intelligence)
ax_1.set_title('Intelligence')
Speed, ax_2 = plt.subplots()
ax_2.boxplot(data.Speed)
ax_2.set_title('Speed')
Power, ax_3 = plt.subplots()
ax_3.boxplot(data.Power)
ax_3.set_title('Power')
| 20.434783 | 53 | 0.698582 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 328 | 0.232624 |
f85c11db5b31e7e4088a63d0697d91e4986e3c85 | 6,962 | py | Python | soc/python/checkDB.py | idea-fasoc/fasoc | 5a1fc8cf980b24a48b17f4447f13fb50d49e366a | [
"MIT"
] | 48 | 2019-09-16T09:49:54.000Z | 2022-02-09T20:59:10.000Z | soc/python/checkDB.py | idea-fasoc/fasoc | 5a1fc8cf980b24a48b17f4447f13fb50d49e366a | [
"MIT"
] | 18 | 2019-10-15T04:17:35.000Z | 2021-05-25T00:12:52.000Z | soc/python/checkDB.py | idea-fasoc/fasoc | 5a1fc8cf980b24a48b17f4447f13fb50d49e366a | [
"MIT"
] | 8 | 2019-10-15T17:27:41.000Z | 2022-01-26T20:42:07.000Z | #!/usr/bin/env python3
#MIT License
#Copyright (c) 2018 The University of Michigan
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import shutil
import os
import json # json parsing
import zipfile
import sys
from modifyDBFiles import modifyDBFiles
def checkDB(moduleJson,databaseDir,outputDir,ipXactDir,module_number,designName):
genJson = moduleJson['generator']
searchDir = os.path.join(databaseDir,'JSN',genJson)
excluded_name = ['LDO_CONTROLLER','decoder_3to8','mux_8to1','ANALOG_CORE','bu_dco_8stg','dco_8stg','dco_10drv_10cc_30fc_18stg','dco_CC','dco_FC','DCO_MODEL','FUNCTIONS','PLL_CONTROLLER','PLL_CONTROLLER_TDC_COUNTER','SSC_GENERATOR','synth_dco','synth_pll_dco_interp','synth_pll_dco_outbuff','TB_synth_pll','TDC_COUNTER','test_synth_pll','counter','TEMP_ANALOG.nl','TEMP_ANALOG_test.nl','TEMP_AUTO_def','tempsenseInst']
if 'specifications' in moduleJson:
target_specsJson = moduleJson['specifications']
if os.path.exists(searchDir):
if len(os.listdir(searchDir)) != 0:
for file in os.listdir(searchDir):
overlap_tag = True
with open(os.path.join(searchDir,file), 'r') as search_file:
srchJson = json.load(search_file)
if 'specifications' in srchJson:
srch_specifications= srchJson['specifications']
for target_specName, target_specVal in target_specsJson.items():
if target_specVal != "" and isinstance(target_specVal, str) != True:
if target_specName in srch_specifications:
srch_specVal = srch_specifications[target_specName]
if srch_specVal != "" and isinstance(srch_specVal, str) != True:
if isinstance(target_specVal, dict):
if "min" in target_specVal:
if isinstance(srch_specVal, dict):
if srch_specVal["min"] < target_specVal["min"]:
overlap_tag = False
break
else:
if srch_specVal < target_specVal["min"]:
overlap_tag = False
break
if "max" in target_specVal:
if isinstance(srch_specVal, dict):
if srch_specVal["max"] > target_specVal["max"]:
overlap_tag = False
break
else:
if srch_specVal > target_specVal["max"]:
overlap_tag = False
break
else:
if "min" in target_specName:
if isinstance(srch_specVal, dict):
if srch_specVal["min"] < target_specVal:
overlap_tag = False
break
else:
if srch_specVal < target_specVal:
overlap_tag = False
break
elif "max" in target_specName:
if isinstance(srch_specVal, dict):
if srch_specVal["max"] > target_specVal:
overlap_tag = False
break
else:
if srch_specVal > target_specVal:
overlap_tag = False
break
else:
if isinstance(srch_specVal, dict):
if srch_specVal["min"] != target_specVal:
overlap_tag = False
break
if srch_specVal["max"] != target_specVal:
overlap_tag = False
break
else:
if srch_specVal != target_specVal:
overlap_tag = False
break
if overlap_tag:
found_Filename = os.path.join(databaseDir,'ZIP',(file.split('.'))[0]+'.zip')
if os.path.exists(found_Filename):
print(moduleJson['module_name'] + " has been found at the database")
zip_ref = zipfile.ZipFile(found_Filename, 'r')
zip_ref.extractall(outputDir)
zip_ref.close()
for output_file in os.listdir(outputDir):
output_file_name = (output_file.split('.'))[0]
postfix = (output_file.split(output_file_name))[-1]
if (not postfix == '.v') or (postfix == '.v' and output_file_name not in excluded_name):
os.rename(os.path.join(outputDir,output_file),os.path.join(outputDir,moduleJson['module_name'] + postfix))
modifyDBFiles(os.path.join(outputDir,moduleJson['module_name'] + postfix),postfix,moduleJson['module_name'],srchJson["module_name"])
return True
                        else:#When there is no zip file it means the search was unsuccessful
return False
                return False# when the code reaches here it means it could not find a matching file
            else:#if the database is empty => the search was unsuccessful
return False
        else:#If the database does not exist it means the search was unsuccessful
return False
else:#If the target file has no specification, all files are acceptable
if os.path.exists(searchDir):
if len(os.listdir(searchDir)) != 0:
with open(os.path.join(searchDir,os.listdir(searchDir)[0]), 'r') as search_file:
srchJson = json.load(search_file)
found_Filename = os.path.join(databaseDir,'ZIP',(os.listdir(searchDir)[0].split('.'))[0]+'.zip')
if os.path.exists(found_Filename):
print(moduleJson['module_name'] + " has been found at the database")
zip_ref = zipfile.ZipFile(found_Filename, 'r')
zip_ref.extractall(outputDir)
zip_ref.close()
for output_file in os.listdir(outputDir):
output_file_name = (output_file.split('.'))[0]
postfix = (output_file.split(output_file_name))[-1]
if (not postfix == '.v') or (postfix == '.v' and output_file_name not in excluded_name):
os.rename(os.path.join(outputDir,output_file),os.path.join(outputDir,moduleJson['module_name'] + postfix))
modifyDBFiles(os.path.join(outputDir,moduleJson['module_name'] + postfix),postfix,moduleJson['module_name'],srchJson["module_name"])
return True
                else:#When there is no zip file it means the search was unsuccessful
return False
            else:#if the database is empty => the search was unsuccessful
return False
        else:#If the database does not exist it means the search was unsuccessful
            return False
| 44.063291 | 418 | 0.668342 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,382 | 0.342143 |
f85e27ad10e7814b11be2c93c0c4dca76deac4ea | 2,222 | py | Python | Piquant/Debug/script/matlplotlib_pyplot实操代码.py | QuantPengPeng/Piquant | 88047831a3ce4eb5b67fc68c752243084ba90199 | [
"MIT"
] | 9 | 2019-04-07T06:17:50.000Z | 2021-07-11T14:31:36.000Z | Piquant/Debug/script/matlplotlib_pyplot实操代码.py | QuantPengPeng/Piquant | 88047831a3ce4eb5b67fc68c752243084ba90199 | [
"MIT"
] | 1 | 2019-05-17T01:57:07.000Z | 2019-11-19T01:57:05.000Z | Piquant/Debug/script/matlplotlib_pyplot实操代码.py | QuantPengPeng/Piquant | 88047831a3ce4eb5b67fc68c752243084ba90199 | [
"MIT"
] | 6 | 2019-04-15T07:17:26.000Z | 2019-08-04T02:55:36.000Z |
# coding: utf-8
# In[35]:
import matplotlib.pyplot as plt
from pylab import *
import numpy as np
main_image=plt.figure(figsize=(10,10))
subplots_adjust(hspace=0.3,wspace=0.3) # control the row/column spacing between subplots
# Subplot 1 - a single line
x_0=np.linspace(0,2*np.pi,20) # value range of the independent variable X
sub_image_1=plt.subplot(2,2,1)
plt.xlabel('X value')
plt.ylabel('Sin value')
plt.grid(True)
sub_image_1.plot(x_0, np.sin(x_0), 'r--o',label='Sin(x)')
sub_image_1.legend() # show the legend
sub_image_1.annotate('sin wave', xy=(3,0.25), xytext=(4,0.5), arrowprops=dict(facecolor='black',shrink=0.05)) # annotate a specific point with text
sub_image_1.set_title('Sin Waves')
# Subplot 2 - multiple lines
x_1=np.linspace(0,2*np.pi,20)
sub_image_2=plt.subplot(2,2,2)
plt.xlabel('X value')
plt.ylabel('Cos and Sin value')
plt.grid(True)
sub_image_2.plot(x_1, np.cos(x_1), color='blue', linestyle='--',linewidth=1, marker='o', markerfacecolor='red', markersize='6', label='Cos(x)')
sub_image_2.plot(x_1, np.sin(x_1), color='green', linestyle='-.', linewidth=3, marker='^', markerfacecolor='yellow', markersize='8', label='Sin(x)')
sub_image_2.legend()
sub_image_2.set_title('Cos and Sin Waves')
# Subplot 3 - histogram
bins_count=10
mu,sigma=100,20
x_hist=mu+sigma*np.random.randn(1000,1) # randn draws a 1000x1 column of standard-normal samples
sub_image_3=plt.subplot(2,2,3)
plt.xlabel('value')
plt.ylabel('count')
plt.grid(False)
tuple_return=sub_image_3.hist(x_hist, bins=bins_count, facecolor='red', alpha=0.8, edgecolor='black',normed=0) # normed=0 plots raw counts, normed=1 plots relative frequencies
sub_image_3.set_title('Frequency Histogram')
plt.xlim((floor(x_hist.min()),ceil(x_hist.max())))
bar_width=(x_hist.max()-x_hist.min())/bins_count
plt.xticks(np.arange(floor(x_hist.min()),ceil(x_hist.max()),round(bar_width))) # tick placement
for i in range(bins_count):
sub_image_3.text(x_hist.min()+(bar_width*i)+(bar_width/2), tuple_return[0][i], str(tuple_return[0][i]), horizontalalignment='center', verticalalignment='bottom')
# Subplot 4 - piecewise function
x_part_1=np.linspace(-10,-1,10) # discrete sample points of the piecewise function
x_part_2=np.linspace(0,10,11)
sub_image_4=plt.subplot(2,2,4)
plt.xlabel('X value')
plt.ylabel('Y value')
plt.grid(False)
sub_image_4.plot(x_part_1,x_part_1*2+1,'b--o',label='y=2x+1')
sub_image_4.plot(x_part_2,x_part_2**2,'r--o',label='y=x^2')
sub_image_4.legend()
sub_image_4.set_title('PieceWise Function')
# Display the figure
plt.show()
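# Illustrative alternative (not in the original script): save the figure to a
# file instead of opening a window, e.g. for headless environments.
# main_image.savefig('subplots_demo.png', dpi=150)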
| 32.676471 | 165 | 0.729973 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 687 | 0.28365 |
f85ead752c9700ddc5fb73af13b5441235631493 | 2,190 | py | Python | gencode/python/udmi/schema/event_audit.py | johnrandolph/udmi | 5e9de32fc71de8d006cda2eba4d3372eaf24c7c0 | [
"Apache-2.0"
] | 1 | 2022-02-24T22:57:37.000Z | 2022-02-24T22:57:37.000Z | gencode/python/udmi/schema/event_audit.py | johnrandolph/udmi | 5e9de32fc71de8d006cda2eba4d3372eaf24c7c0 | [
"Apache-2.0"
] | 5 | 2022-02-24T21:32:24.000Z | 2022-03-23T15:52:25.000Z | gencode/python/udmi/schema/event_audit.py | johnrandolph/udmi | 5e9de32fc71de8d006cda2eba4d3372eaf24c7c0 | [
"Apache-2.0"
] | null | null | null | """Generated class for event_audit.json"""
class Object0C54FB6D:
"""Generated schema class"""
def __init__(self):
self.subFolder = None
self.subType = None
@staticmethod
def from_dict(source):
if not source:
return None
result = Object0C54FB6D()
result.subFolder = source.get('subFolder')
result.subType = source.get('subType')
return result
@staticmethod
def map_from(source):
if not source:
return None
result = {}
for key in source:
result[key] = Object0C54FB6D.from_dict(source[key])
return result
@staticmethod
def expand_dict(input):
result = {}
for property in input:
result[property] = input[property].to_dict() if input[property] else {}
return result
def to_dict(self):
result = {}
if self.subFolder:
result['subFolder'] = self.subFolder # 5
if self.subType:
result['subType'] = self.subType # 5
return result
from .common import Entry
class AuditEvent:
"""Generated schema class"""
def __init__(self):
self.timestamp = None
self.version = None
self.target = None
self.status = None
@staticmethod
def from_dict(source):
if not source:
return None
result = AuditEvent()
result.timestamp = source.get('timestamp')
result.version = source.get('version')
result.target = Object0C54FB6D.from_dict(source.get('target'))
result.status = Entry.from_dict(source.get('status'))
return result
@staticmethod
def map_from(source):
if not source:
return None
result = {}
for key in source:
result[key] = AuditEvent.from_dict(source[key])
return result
@staticmethod
def expand_dict(input):
result = {}
for property in input:
result[property] = input[property].to_dict() if input[property] else {}
return result
def to_dict(self):
result = {}
if self.timestamp:
result['timestamp'] = self.timestamp # 5
if self.version:
result['version'] = self.version # 5
if self.target:
result['target'] = self.target.to_dict() # 4
if self.status:
result['status'] = self.status.to_dict() # 4
return result
| 23.548387 | 77 | 0.648402 | 2,115 | 0.965753 | 0 | 0 | 1,277 | 0.583105 | 0 | 0 | 228 | 0.10411 |
f85f1ff5fdc55f6eaa86305ff1243afdf2c3c231 | 7,624 | py | Python | colour/models/rgb.py | canavandl/colour | a453cd37b6135a9092d5ea5b2aafb8d19134bdff | [
"BSD-3-Clause"
] | 1 | 2019-06-27T11:32:48.000Z | 2019-06-27T11:32:48.000Z | colour/models/rgb.py | canavandl/colour | a453cd37b6135a9092d5ea5b2aafb8d19134bdff | [
"BSD-3-Clause"
] | null | null | null | colour/models/rgb.py | canavandl/colour | a453cd37b6135a9092d5ea5b2aafb8d19134bdff | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
RGB Colourspace Transformations
===============================
Defines the *RGB* colourspace transformations:
- :func:`XYZ_to_RGB`
- :func:`RGB_to_XYZ`
- :func:`RGB_to_RGB`
See Also
--------
`RGB Colourspaces IPython Notebook
<http://nbviewer.ipython.org/github/colour-science/colour-ipython/blob/master/notebooks/models/rgb.ipynb>`_ # noqa
"""
from __future__ import division, unicode_literals
import numpy as np
from colour.models import xy_to_XYZ
from colour.adaptation import chromatic_adaptation_matrix
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-science@googlegroups.com'
__status__ = 'Production'
__all__ = ['XYZ_to_RGB',
'RGB_to_XYZ',
'RGB_to_RGB']
def XYZ_to_RGB(XYZ,
illuminant_XYZ,
illuminant_RGB,
to_RGB,
chromatic_adaptation_method='CAT02',
transfer_function=None):
"""
Converts from *CIE XYZ* colourspace to *RGB* colourspace using given
*CIE XYZ* colourspace matrix, *illuminants*, *chromatic adaptation* method,
*normalised primary matrix* and *transfer function*.
Parameters
----------
XYZ : array_like, (3,)
*CIE XYZ* colourspace matrix.
illuminant_XYZ : array_like
*CIE XYZ* colourspace *illuminant* *xy* chromaticity coordinates.
illuminant_RGB : array_like
*RGB* colourspace *illuminant* *xy* chromaticity coordinates.
to_RGB : array_like, (3, 3)
*Normalised primary matrix*.
chromatic_adaptation_method : unicode, optional
('XYZ Scaling', 'Bradford', 'Von Kries', 'Fairchild', 'CAT02')
*Chromatic adaptation* method.
transfer_function : object, optional
*Transfer function*.
Returns
-------
ndarray, (3,)
*RGB* colourspace matrix.
Notes
-----
- Input *CIE XYZ* colourspace matrix is in domain [0, 1].
- Input *illuminant_XYZ* *xy* chromaticity coordinates are in domain
[0, 1].
- Input *illuminant_RGB* *xy* chromaticity coordinates are in domain
[0, 1].
- Output *RGB* colourspace matrix is in domain [0, 1].
Examples
--------
>>> XYZ = np.array([0.1151847498, 0.1008, 0.0508937252])
>>> illuminant_XYZ = (0.34567, 0.35850)
>>> illuminant_RGB = (0.31271, 0.32902)
>>> chromatic_adaptation_method = 'Bradford'
>>> to_RGB = np.array([
... [3.24100326, -1.53739899, -0.49861587],
... [-0.96922426, 1.87592999, 0.04155422],
... [0.05563942, -0.2040112, 1.05714897]])
>>> XYZ_to_RGB(
... XYZ,
... illuminant_XYZ,
... illuminant_RGB,
... to_RGB,
... chromatic_adaptation_method) # doctest: +ELLIPSIS
array([ 0.1730350..., 0.0821103..., 0.0567249...])
"""
np.array([
[3.24100326, -1.53739899, -0.49861587],
[-0.96922426, 1.87592999, 0.04155422],
[0.05563942, -0.2040112, 1.05714897]])
cat = chromatic_adaptation_matrix(xy_to_XYZ(illuminant_XYZ),
xy_to_XYZ(illuminant_RGB),
method=chromatic_adaptation_method)
adapted_XYZ = np.dot(cat, XYZ)
RGB = np.dot(to_RGB.reshape((3, 3)), adapted_XYZ.reshape((3, 1)))
if transfer_function is not None:
RGB = np.array([transfer_function(x) for x in np.ravel(RGB)])
return np.ravel(RGB)
def RGB_to_XYZ(RGB,
illuminant_RGB,
illuminant_XYZ,
to_XYZ,
chromatic_adaptation_method='CAT02',
inverse_transfer_function=None):
"""
Converts from *RGB* colourspace to *CIE XYZ* colourspace using given
*RGB* colourspace matrix, *illuminants*, *chromatic adaptation* method,
*normalised primary matrix* and *transfer function*.
Parameters
----------
RGB : array_like, (3,)
*RGB* colourspace matrix.
illuminant_RGB : array_like
*RGB* colourspace *illuminant* chromaticity coordinates.
illuminant_XYZ : array_like
*CIE XYZ* colourspace *illuminant* chromaticity coordinates.
to_XYZ : array_like, (3, 3)
*Normalised primary matrix*.
chromatic_adaptation_method : unicode, optional
('XYZ Scaling', 'Bradford', 'Von Kries', 'Fairchild', 'CAT02')
*Chromatic adaptation* method.
inverse_transfer_function : object, optional
*Inverse transfer function*.
Returns
-------
ndarray, (3,)
*CIE XYZ* colourspace matrix.
Notes
-----
- Input *RGB* colourspace matrix is in domain [0, 1].
- Input *illuminant_RGB* *xy* chromaticity coordinates are in domain
[0, 1].
- Input *illuminant_XYZ* *xy* chromaticity coordinates are in domain
[0, 1].
- Output *CIE XYZ* colourspace matrix is in domain [0, 1].
Examples
--------
>>> RGB = np.array([0.17303501, 0.08211033, 0.05672498])
>>> illuminant_RGB = (0.31271, 0.32902)
>>> illuminant_XYZ = (0.34567, 0.35850)
>>> chromatic_adaptation_method = 'Bradford'
>>> to_XYZ = np.array([
... [0.41238656, 0.35759149, 0.18045049],
... [0.21263682, 0.71518298, 0.0721802],
... [0.01933062, 0.11919716, 0.95037259]])
>>> RGB_to_XYZ(
... RGB,
... illuminant_RGB,
... illuminant_XYZ,
... to_XYZ,
... chromatic_adaptation_method) # doctest: +ELLIPSIS
array([ 0.1151847..., 0.1008 , 0.0508937...])
"""
if inverse_transfer_function is not None:
RGB = np.array([inverse_transfer_function(x)
for x in np.ravel(RGB)])
XYZ = np.dot(to_XYZ.reshape((3, 3)), RGB.reshape((3, 1)))
cat = chromatic_adaptation_matrix(
xy_to_XYZ(illuminant_RGB),
xy_to_XYZ(illuminant_XYZ),
method=chromatic_adaptation_method)
adapted_XYZ = np.dot(cat, XYZ.reshape((3, 1)))
return np.ravel(adapted_XYZ)
def RGB_to_RGB(RGB,
input_colourspace,
output_colourspace,
chromatic_adaptation_method='CAT02'):
"""
Converts from given input *RGB* colourspace to output *RGB* colourspace
using given *chromatic adaptation* method.
Parameters
----------
RGB : array_like, (3,)
*RGB* colourspace matrix.
input_colourspace : RGB_Colourspace
*RGB* input colourspace.
output_colourspace : RGB_Colourspace
*RGB* output colourspace.
chromatic_adaptation_method : unicode, optional
('XYZ Scaling', 'Bradford', 'Von Kries', 'Fairchild', 'CAT02')
*Chromatic adaptation* method.
ndarray, (3,)
*RGB* colourspace matrix.
Notes
-----
- *RGB* colourspace matrices are in domain [0, 1].
Examples
--------
>>> from colour import sRGB_COLOURSPACE, PROPHOTO_RGB_COLOURSPACE
>>> RGB = np.array([0.35521588, 0.41, 0.24177934])
>>> RGB_to_RGB(
... RGB,
... sRGB_COLOURSPACE,
... PROPHOTO_RGB_COLOURSPACE) # doctest: +ELLIPSIS
array([ 0.3579334..., 0.4007138..., 0.2615704...])
"""
cat = chromatic_adaptation_matrix(
xy_to_XYZ(input_colourspace.whitepoint),
xy_to_XYZ(output_colourspace.whitepoint),
chromatic_adaptation_method)
trs_matrix = np.dot(output_colourspace.to_RGB,
np.dot(cat, input_colourspace.to_XYZ))
return np.dot(trs_matrix, RGB)
| 31.766667 | 115 | 0.613458 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,395 | 0.707634 |
f85f4b7c7b491177a0f091a1844ac24655fff102 | 1,768 | py | Python | tests/assign_folds_test.py | turku-rad-ai/pe-detection | d9b49800de45a40030db72db65f4806b23d97a63 | [
"Apache-2.0"
] | null | null | null | tests/assign_folds_test.py | turku-rad-ai/pe-detection | d9b49800de45a40030db72db65f4806b23d97a63 | [
"Apache-2.0"
] | null | null | null | tests/assign_folds_test.py | turku-rad-ai/pe-detection | d9b49800de45a40030db72db65f4806b23d97a63 | [
"Apache-2.0"
] | null | null | null | from typing import List
import pandas as pd
import pytest
from preprocessing.assign_folds import assign_folds
testdata = [
[
[
"patient1",
"patient2",
"patient3",
"patient4",
"patient5",
"patient6",
"patient7",
"patient8",
"patient9",
"patient1", # second 1
"patient3", # second 3
"patient10",
],
[
"image1.dcm",
"image2.dcm",
"image3.dcm",
"image4.dcm",
"image5.dcm",
"image6.dcm",
"image7.dcm",
"image8.dcm",
"image9.dcm",
"image10.dcm",
"image11.dcm",
"image12.dcm",
],
[1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1],
3,
]
]
@pytest.mark.parametrize("patient_ids,dcm_filenames,dataset_labels,folds", testdata)
def test_assign_folds(
patient_ids: List[str],
dcm_filenames: List[str],
dataset_labels: List[int],
folds: int,
):
data = {
"PatientID": patient_ids,
"dcm_filename": dcm_filenames,
"dataset_label": dataset_labels,
}
df = pd.DataFrame(data=data)
df = assign_folds(df, fold_count=folds)
    # the pat_fold column must have been added
assert "pat_fold" in df.columns
# Check that folds are on proper range
assert df["pat_fold"].min() == 0
assert df["pat_fold"].max() == folds - 1
# Test that each patient belongs to one and only one fold
assert min([item.shape[0] for item in list(df.groupby("PatientID")["pat_fold"].unique())]) == 1
assert max([item.shape[0] for item in list(df.groupby("PatientID")["pat_fold"].unique())]) == 1
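    # Equivalent, arguably clearer form of the two checks above (illustrative,
    # not part of the original test): each patient maps to exactly one fold.
    #     assert (df.groupby("PatientID")["pat_fold"].nunique() == 1).all()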
| 24.901408 | 99 | 0.526584 | 0 | 0 | 0 | 0 | 898 | 0.507919 | 0 | 0 | 583 | 0.329751 |
f85fde926cda35a9fc74acc2b0acaa097f44bc32 | 456 | py | Python | src/apps/notes/models.py | mentalnoteapp/backend-django-rest-framework | 82d95fbe1aeb93b85105bf7ae94a3c13534f72cb | [
"MIT"
] | null | null | null | src/apps/notes/models.py | mentalnoteapp/backend-django-rest-framework | 82d95fbe1aeb93b85105bf7ae94a3c13534f72cb | [
"MIT"
] | null | null | null | src/apps/notes/models.py | mentalnoteapp/backend-django-rest-framework | 82d95fbe1aeb93b85105bf7ae94a3c13534f72cb | [
"MIT"
] | null | null | null | from django.contrib.auth.models import User
from django.db import models
from apps.tags.models import Tag
class Note(models.Model):
    owner = models.ForeignKey(User, related_name="notes", on_delete=models.CASCADE)  # on_delete assumed CASCADE (the pre-Django-2.0 default); the argument is required on Django >= 2.0
tags = models.ManyToManyField(Tag, related_name="notes", blank=True)
created = models.DateTimeField(auto_now_add=True)
title = models.CharField(max_length=250, blank=True)
note = models.TextField()
def __str__(self):
return self.title
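    # Illustrative query (not part of the original model): all notes a user
    # owns that carry a given tag, via the related_name reverse accessors
    # (assuming the Tag model has a `name` field):
    #     user.notes.filter(tags__name="ideas")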
| 26.823529 | 72 | 0.736842 | 346 | 0.758772 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.030702 |
f86011337ef051c071ef0fd89e5bf4792bb54439 | 1,116 | py | Python | tests/test_main.py | dadaloop82/viseron | 1c6c446a4856e16c0e2ed6b9323d169fbdcae20f | [
"MIT"
] | 399 | 2020-08-31T21:13:07.000Z | 2022-03-31T18:54:26.000Z | tests/test_main.py | dadaloop82/viseron | 1c6c446a4856e16c0e2ed6b9323d169fbdcae20f | [
"MIT"
] | 157 | 2020-09-01T18:59:56.000Z | 2022-03-25T07:14:19.000Z | tests/test_main.py | dadaloop82/viseron | 1c6c446a4856e16c0e2ed6b9323d169fbdcae20f | [
"MIT"
] | 53 | 2020-09-01T07:35:59.000Z | 2022-03-28T23:21:16.000Z | """Tests for __main__.py."""
# import logging
from unittest.mock import MagicMock, patch
import pytest
import viseron.__main__
@pytest.fixture
def mocked_viseron(mocker):
"""Mock Viseron class."""
mocker.patch("viseron.__main__.Viseron", return_value="Testing")
def test_init(simple_config, mocked_viseron):
"""Test init."""
viseron.__main__.main()
# viseron.__main__.LOGGER.info("testing")
with patch.object(viseron.__main__, "main", MagicMock()) as mock_main:
with patch.object(viseron.__main__, "__name__", "__main__"):
viseron.__main__.init()
mock_main.assert_called_once()
# class TestMyFormatter:
# """Tests for class MyFormatter."""
# def test_format(self):
# """Test formatter."""
# formatter = viseron.__main__.MyFormatter()
# record = logging.makeLogRecord(
# {
# "name": "test_logger",
# "level": 10,
# "pathname": "test_main.py",
# "msg": "Testing, message repeated 2 times",
# }
# )
# formatter.format(record)
| 27.219512 | 74 | 0.606631 | 0 | 0 | 0 | 0 | 142 | 0.12724 | 0 | 0 | 654 | 0.586022 |
f8626522d55b3754f7c28ddbfd44245ded575b28 | 11,950 | py | Python | ironicclient/tests/unit/v1/test_allocation.py | ljmcgann/python-ironicclient | a5485dc29fe551e4cb5feaad52cd93d67b0ab53e | [
"Apache-2.0"
] | 41 | 2015-01-29T20:10:48.000Z | 2022-01-26T10:04:28.000Z | ironicclient/tests/unit/v1/test_allocation.py | ljmcgann/python-ironicclient | a5485dc29fe551e4cb5feaad52cd93d67b0ab53e | [
"Apache-2.0"
] | null | null | null | ironicclient/tests/unit/v1/test_allocation.py | ljmcgann/python-ironicclient | a5485dc29fe551e4cb5feaad52cd93d67b0ab53e | [
"Apache-2.0"
] | 46 | 2015-01-19T17:46:52.000Z | 2021-12-19T01:22:47.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from unittest import mock
import testtools
from ironicclient import exc
from ironicclient.tests.unit import utils
import ironicclient.v1.allocation
ALLOCATION = {'uuid': '11111111-2222-3333-4444-555555555555',
'name': 'Allocation-name',
'owner': None,
'state': 'active',
'node_uuid': '66666666-7777-8888-9999-000000000000',
'last_error': None,
'resource_class': 'baremetal',
'traits': [],
'candidate_nodes': [],
'extra': {}}
ALLOCATION2 = {'uuid': '55555555-4444-3333-2222-111111111111',
'name': 'Allocation2-name',
'owner': 'fake-owner',
'state': 'allocating',
'node_uuid': None,
'last_error': None,
'resource_class': 'baremetal',
'traits': [],
'candidate_nodes': [],
'extra': {}}
CREATE_ALLOCATION = copy.deepcopy(ALLOCATION)
for field in ('state', 'node_uuid', 'last_error'):
del CREATE_ALLOCATION[field]
fake_responses = {
'/v1/allocations':
{
'GET': (
{},
{"allocations": [ALLOCATION, ALLOCATION2]},
),
'POST': (
{},
CREATE_ALLOCATION,
),
},
'/v1/allocations/%s' % ALLOCATION['uuid']:
{
'GET': (
{},
ALLOCATION,
),
'DELETE': (
{},
None,
),
},
'/v1/allocations/?node=%s' % ALLOCATION['node_uuid']:
{
'GET': (
{},
{"allocations": [ALLOCATION]},
),
},
'/v1/allocations/?owner=%s' % ALLOCATION2['owner']:
{
'GET': (
{},
{"allocations": [ALLOCATION2]},
),
},
}
fake_responses_pagination = {
'/v1/allocations':
{
'GET': (
{},
{"allocations": [ALLOCATION],
"next": "http://127.0.0.1:6385/v1/allocations/?limit=1"}
),
},
'/v1/allocations/?limit=1':
{
'GET': (
{},
{"allocations": [ALLOCATION2]}
),
},
'/v1/allocations/?marker=%s' % ALLOCATION['uuid']:
{
'GET': (
{},
{"allocations": [ALLOCATION2]}
),
},
}
fake_responses_sorting = {
'/v1/allocations/?sort_key=updated_at':
{
'GET': (
{},
{"allocations": [ALLOCATION2, ALLOCATION]}
),
},
'/v1/allocations/?sort_dir=desc':
{
'GET': (
{},
{"allocations": [ALLOCATION2, ALLOCATION]}
),
},
}
class AllocationManagerTest(testtools.TestCase):
def setUp(self):
super(AllocationManagerTest, self).setUp()
self.api = utils.FakeAPI(fake_responses)
self.mgr = ironicclient.v1.allocation.AllocationManager(self.api)
def test_allocations_list(self):
allocations = self.mgr.list()
expect = [
('GET', '/v1/allocations', {}, None),
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(2, len(allocations))
expected_resp = ({}, {"allocations": [ALLOCATION, ALLOCATION2]},)
self.assertEqual(expected_resp,
self.api.responses['/v1/allocations']['GET'])
def test_allocations_list_by_node(self):
allocations = self.mgr.list(node=ALLOCATION['node_uuid'])
expect = [
('GET', '/v1/allocations/?node=%s' % ALLOCATION['node_uuid'], {},
None),
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(1, len(allocations))
expected_resp = ({}, {"allocations": [ALLOCATION, ALLOCATION2]},)
self.assertEqual(expected_resp,
self.api.responses['/v1/allocations']['GET'])
def test_allocations_list_by_owner(self):
allocations = self.mgr.list(owner=ALLOCATION2['owner'])
expect = [
('GET', '/v1/allocations/?owner=%s' % ALLOCATION2['owner'], {},
None),
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(1, len(allocations))
expected_resp = ({}, {"allocations": [ALLOCATION, ALLOCATION2]},)
self.assertEqual(expected_resp,
self.api.responses['/v1/allocations']['GET'])
def test_allocations_show(self):
allocation = self.mgr.get(ALLOCATION['uuid'])
expect = [
('GET', '/v1/allocations/%s' % ALLOCATION['uuid'], {}, None),
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(ALLOCATION['uuid'], allocation.uuid)
self.assertEqual(ALLOCATION['name'], allocation.name)
self.assertEqual(ALLOCATION['owner'], allocation.owner)
self.assertEqual(ALLOCATION['node_uuid'], allocation.node_uuid)
self.assertEqual(ALLOCATION['state'], allocation.state)
self.assertEqual(ALLOCATION['resource_class'],
allocation.resource_class)
expected_resp = ({}, ALLOCATION,)
self.assertEqual(
expected_resp,
self.api.responses['/v1/allocations/%s'
% ALLOCATION['uuid']]['GET'])
def test_create(self):
allocation = self.mgr.create(**CREATE_ALLOCATION)
expect = [
('POST', '/v1/allocations', {}, CREATE_ALLOCATION),
]
self.assertEqual(expect, self.api.calls)
self.assertTrue(allocation)
self.assertIn(
ALLOCATION,
self.api.responses['/v1/allocations']['GET'][1]['allocations'])
def test_delete(self):
allocation = self.mgr.delete(allocation_id=ALLOCATION['uuid'])
expect = [
('DELETE', '/v1/allocations/%s' % ALLOCATION['uuid'], {}, None),
]
self.assertEqual(expect, self.api.calls)
self.assertIsNone(allocation)
expected_resp = ({}, ALLOCATION,)
self.assertEqual(
expected_resp,
self.api.responses['/v1/allocations/%s'
% ALLOCATION['uuid']]['GET'])
class AllocationManagerPaginationTest(testtools.TestCase):
def setUp(self):
super(AllocationManagerPaginationTest, self).setUp()
self.api = utils.FakeAPI(fake_responses_pagination)
self.mgr = ironicclient.v1.allocation.AllocationManager(self.api)
def test_allocations_list_limit(self):
allocations = self.mgr.list(limit=1)
expect = [
('GET', '/v1/allocations/?limit=1', {}, None),
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(1, len(allocations))
expected_resp = (
{}, {"next": "http://127.0.0.1:6385/v1/allocations/?limit=1",
"allocations": [ALLOCATION]},)
self.assertEqual(expected_resp,
self.api.responses['/v1/allocations']['GET'])
def test_allocations_list_marker(self):
allocations = self.mgr.list(marker=ALLOCATION['uuid'])
expect = [
('GET', '/v1/allocations/?marker=%s' % ALLOCATION['uuid'],
{}, None),
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(1, len(allocations))
expected_resp = (
{}, {"next": "http://127.0.0.1:6385/v1/allocations/?limit=1",
"allocations": [ALLOCATION]},)
self.assertEqual(expected_resp,
self.api.responses['/v1/allocations']['GET'])
def test_allocations_list_pagination_no_limit(self):
allocations = self.mgr.list(limit=0)
expect = [
('GET', '/v1/allocations', {}, None),
('GET', '/v1/allocations/?limit=1', {}, None)
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(2, len(allocations))
expected_resp = (
{}, {"next": "http://127.0.0.1:6385/v1/allocations/?limit=1",
"allocations": [ALLOCATION]},)
self.assertEqual(expected_resp,
self.api.responses['/v1/allocations']['GET'])
class AllocationManagerSortingTest(testtools.TestCase):
def setUp(self):
super(AllocationManagerSortingTest, self).setUp()
self.api = utils.FakeAPI(fake_responses_sorting)
self.mgr = ironicclient.v1.allocation.AllocationManager(self.api)
def test_allocations_list_sort_key(self):
allocations = self.mgr.list(sort_key='updated_at')
expect = [
('GET', '/v1/allocations/?sort_key=updated_at', {}, None)
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(2, len(allocations))
expected_resp = ({}, {"allocations": [ALLOCATION2, ALLOCATION]},)
self.assertEqual(
expected_resp,
self.api.responses['/v1/allocations/?sort_key=updated_at']['GET'])
def test_allocations_list_sort_dir(self):
allocations = self.mgr.list(sort_dir='desc')
expect = [
('GET', '/v1/allocations/?sort_dir=desc', {}, None)
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(2, len(allocations))
expected_resp = ({}, {"allocations": [ALLOCATION2, ALLOCATION]},)
self.assertEqual(
expected_resp,
self.api.responses['/v1/allocations/?sort_dir=desc']['GET'])
@mock.patch('time.sleep', autospec=True)
@mock.patch('ironicclient.v1.allocation.AllocationManager.get', autospec=True)
class AllocationWaitTest(testtools.TestCase):
def setUp(self):
super(AllocationWaitTest, self).setUp()
self.mgr = ironicclient.v1.allocation.AllocationManager(mock.Mock())
def _fake_allocation(self, state, error=None):
return mock.Mock(state=state, last_error=error)
def test_success(self, mock_get, mock_sleep):
allocations = [
self._fake_allocation('allocating'),
self._fake_allocation('allocating'),
self._fake_allocation('active'),
]
mock_get.side_effect = allocations
result = self.mgr.wait('alloc1')
self.assertIs(result, allocations[2])
self.assertEqual(3, mock_get.call_count)
self.assertEqual(2, mock_sleep.call_count)
mock_get.assert_called_with(
self.mgr, 'alloc1', os_ironic_api_version=None,
global_request_id=None)
def test_error(self, mock_get, mock_sleep):
allocations = [
self._fake_allocation('allocating'),
self._fake_allocation('error'),
]
mock_get.side_effect = allocations
self.assertRaises(exc.StateTransitionFailed,
self.mgr.wait, 'alloc1')
self.assertEqual(2, mock_get.call_count)
self.assertEqual(1, mock_sleep.call_count)
mock_get.assert_called_with(
self.mgr, 'alloc1', os_ironic_api_version=None,
global_request_id=None)
def test_timeout(self, mock_get, mock_sleep):
mock_get.return_value = self._fake_allocation('allocating')
self.assertRaises(exc.StateTransitionTimeout,
self.mgr.wait, 'alloc1', timeout=0.001)
mock_get.assert_called_with(
self.mgr, 'alloc1', os_ironic_api_version=None,
global_request_id=None)
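    def test_success_immediate(self, mock_get, mock_sleep):
        # Illustrative extra case (not in the original suite), assuming `wait`
        # returns as soon as the allocation is already active, as the cases
        # above imply.
        allocation = self._fake_allocation('active')
        mock_get.return_value = allocation
        result = self.mgr.wait('alloc1')
        self.assertIs(result, allocation)
        mock_sleep.assert_not_called()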
| 33.194444 | 78 | 0.573138 | 8,565 | 0.716736 | 0 | 0 | 1,944 | 0.162678 | 0 | 0 | 2,711 | 0.226862 |
f8629eacf541222ae1970586720f609c2d762f08 | 1,105 | py | Python | api/routes/auth.py | rit-sse/api | 4dbd04db98284225510d9ae8249514be80d4706a | [
"MIT"
] | 1 | 2015-07-17T19:20:45.000Z | 2015-07-17T19:20:45.000Z | api/routes/auth.py | rit-sse/api | 4dbd04db98284225510d9ae8249514be80d4706a | [
"MIT"
] | 33 | 2015-07-18T02:31:51.000Z | 2015-08-04T02:07:41.000Z | api/routes/auth.py | rit-sse/api | 4dbd04db98284225510d9ae8249514be80d4706a | [
"MIT"
] | 7 | 2015-07-17T16:29:18.000Z | 2021-08-31T01:03:53.000Z | from flask import session, redirect, url_for
from flask.json import jsonify
from api import app, oauth
from api import models
@app.route("/api/v2/login")
def _get_api_v2_login():
redirect_uri = url_for("_get_api_v2_redirect", _external=True)
return oauth.google.authorize_redirect(redirect_uri)
@app.route("/api/v2/redirect")
def _get_api_v2_redirect():
token = oauth.google.authorize_access_token()
user = oauth.google.parse_id_token(token)
session["user"] = user
return redirect("/api/v2/whoami")
@app.route("/api/v2/logout")
def _get_api_v2_logout():
session.pop("user", None)
return redirect("/")
@app.route("/api/v2/whoami")
def _get_api_v2_whoami():
if not "user" in session:
return jsonify({"error": "not logged in"})
return jsonify(
{
"google": session["user"],
"officer": models.Officer.is_officer(session["user"]["email"]),
"primary": models.Officer.is_primary_officer(session["user"]["email"]),
"rit_student": session["user"]["email"].split("@")[1] == "g.rit.edu",
}
)
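# Illustrative sketch (not part of the original module): the session check in
# _get_api_v2_whoami, factored into a reusable decorator. All names here are
# assumptions.
from functools import wraps
def require_login(view):
    @wraps(view)
    def wrapper(*args, **kwargs):
        if "user" not in session:
            return jsonify({"error": "not logged in"})
        return view(*args, **kwargs)
    return wrapper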
| 27.625 | 83 | 0.656109 | 0 | 0 | 0 | 0 | 967 | 0.875113 | 0 | 0 | 244 | 0.220814 |
f863fdd49bdc9fc91c5a6863a1a6f2c9cb1fed2c | 418 | py | Python | mybatis/column_generator.py | xliangwu/com.caveup.machine_learn | 793131c4767f45d468a813752c07d02f623a7b99 | [
"Apache-2.0"
] | 1 | 2018-09-19T06:27:14.000Z | 2018-09-19T06:27:14.000Z | mybatis/column_generator.py | xliangwu/com.caveup.machine_learn | 793131c4767f45d468a813752c07d02f623a7b99 | [
"Apache-2.0"
] | null | null | null | mybatis/column_generator.py | xliangwu/com.caveup.machine_learn | 793131c4767f45d468a813752c07d02f623a7b99 | [
"Apache-2.0"
] | null | null | null | def column_generator():
with open('columns.csv', encoding='utf-8') as f:
for line in f:
keyword = line.strip('\n')
# <columnOverride column="tid" property="tid"/>
# print(r'<columnOverride column="{}" property="{}"/>'.format(keyword,keyword))
            print(r'<ignoreColumn column="{}"/>'.format(keyword))
if __name__ == '__main__':
column_generator()
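# Example (illustrative, not in the original script): a columns.csv containing
# the single line "tid" prints:
#     <ignoreColumn column="tid"/>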
| 34.833333 | 91 | 0.586124 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 190 | 0.454545 |
f86413e599720995225d5a002a0228bfbc9b7ed7 | 22,250 | py | Python | ttslab/voices/afrikaans_default.py | jkleczar/ttslab | 33fe0c3f88c1533816b2602b52e4162760d9c5f0 | [
"BSD-3-Clause"
] | null | null | null | ttslab/voices/afrikaans_default.py | jkleczar/ttslab | 33fe0c3f88c1533816b2602b52e4162760d9c5f0 | [
"BSD-3-Clause"
] | null | null | null | ttslab/voices/afrikaans_default.py | jkleczar/ttslab | 33fe0c3f88c1533816b2602b52e4162760d9c5f0 | [
"BSD-3-Clause"
] | 1 | 2019-02-25T10:27:41.000Z | 2019-02-25T10:27:41.000Z | # -*- coding: utf-8 -*-
""" This file contains language-specific implementation for an
Afrikaans voice.
The idea is that this file contains subclassed Voice and Phoneset
implementations. This package ttslab/voices may then also contain
speaker specific implementations e.g. "afrikaans_SPEAKER.py"
"""
from __future__ import unicode_literals, division, print_function #Py2
__author__ = "Daniel van Niekerk"
__email__ = "dvn.demitasse@gmail.com"
import re
from collections import OrderedDict
from .. phoneset import Phoneset
from .. defaultvoice import LwaziHTSVoice, LwaziPromHTSVoice
from .. synthesizer_htsme import SynthesizerHTSME
import ttslab.hts_labels_prom as hts_labels_prom
class LwaziAfrikaansPhoneset(Phoneset):
""" The clusters and syllabification are ripped from the English
implementation and should be revisited...
"""
def __init__(self):
#Phoneset.__init__(self)
#syllable_clusters are processed in order, thus a list, not a set...
self.features = {"name": "Lwazi Afrikaans Phoneset",
"syllable_clusters": ["VCV", "VCCV", "VCCCV", "VCCCCV",
"VCGV", "VCCGV", "VCCCGV", "VV"],
"wellformed_plosive_clusters": [["p","l"], ["b","l"], ["k","l"], ["g","l"], ["p","r"],
["b","r"], ["t","r"], ["d","r"], ["k","r"], ["g","r"],
["t","w"], ["d","w"], ["g","w"], ["k","w"]],
"wellformed_fricative_clusters": [["f","l"], ["f","r"], ["f","j"], ["ʃ","j"]],
"wellformed_other_clusters": [["m","j"], ["n","j"]],
"wellformed_s_clusters": [["s","p"], ["s","t"], ["s","k"], ["s","m"], ["s","n"],
["s","f"], ["s","w"], ["s","l"], ["s","p","l"],
["s","p","r"], ["s","t","r"], ["s","k","l"],
["s","k","r"], ["s","k","w"]]
}
self.features["wellformed_clusters"] = (self.features["wellformed_plosive_clusters"] +
self.features["wellformed_fricative_clusters"] +
self.features["wellformed_other_clusters"] +
self.features["wellformed_s_clusters"])
self.features["silence_phone"] = "pau"
self.features["closure_phone"] = "paucl"
self.phones = {"pau" : set(["pause"]),
"paucl" : set(["closure"]),
"ʔ" : set(["glottal-stop"]),
"ə" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_central"]),
"əi" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"a" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_low", "position_back"]),
"ai" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"ɛ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_front"]),
"œ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_front", "articulation_rounded"]),
"əu" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"œy" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"ŋ" : set(["class_sonorant", "class_consonantal", "consonant", "manner_nasal", "place_velar", "voiced"]),
"ɔ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_back", "articulation_rounded"]),
"ɔi" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"ʃ" : set(["class_consonantal", "consonant", "manner_fricative", "place_post-alveolar"]),
"ʒ" : set(["class_consonantal", "consonant", "manner_fricative", "place_post-alveolar", "voiced"]),
"æ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_low", "position_front"]),
"ɑː" : set(["class_sonorant", "class_syllabic", "vowel", "duration_long", "height_low", "position_back"]),
"ɑːi" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"b" : set(["class_consonantal", "consonant", "manner_plosive", "place_bilabial", "voiced"]),
"d" : set(["class_consonantal", "consonant", "manner_plosive", "place_alveolar", "voiced"]),
"iə" : set(["class_sonorant", "class_syllabic", "vowel", "duration_long", "height_mid", "position_front"]),
"øː" : set(["class_sonorant", "class_syllabic", "vowel", "duration_long", "height_mid", "position_front", "articulation_rounded"]),
"f" : set(["class_consonantal", "consonant", "manner_fricative", "manner_strident", "place_labiodental"]),
"g" : set(["class_consonantal", "consonant", "manner_plosive", "place_velar", "voiced"]),
"ɦ" : set(["consonant", "manner_fricative", "place_glottal", "voiced"]),
"i" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_front"]),
"iu" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"j" : set(["class_sonorant", "consonant", "manner_approximant", "manner_glide", "place_palatal", "voiced"]),
"k" : set(["class_consonantal", "consonant", "manner_plosive", "place_velar"]),
"l" : set(["class_sonorant", "class_consonantal", "consonant", "manner_approximant", "manner_liquid", "manner_lateral", "place_alveolar", "voiced"]),
"m" : set(["class_sonorant", "class_consonantal", "consonant", "manner_nasal", "place_bilabial", "voiced"]),
"n" : set(["class_sonorant", "class_consonantal", "consonant", "manner_nasal", "place_alveolar", "voiced"]),
"uə" : set(["class_sonorant", "class_syllabic", "vowel", "duration_long", "height_mid", "position_back", "articulation_rounded"]),
"uəi" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"p" : set(["class_consonantal", "consonant", "manner_plosive", "place_bilabial"]),
"r" : set(["class_sonorant", "class_consonantal", "consonant", "manner_trill", "place_alveolar", "voiced"]),
"s" : set(["class_consonantal", "consonant", "manner_fricative", "manner_strident", "place_alveolar"]),
"t" : set(["class_consonantal", "consonant", "manner_plosive", "place_alveolar"]),
"tʃ" : set(["class_consonantal", "consonant", "manner_affricate", "place_alveolar"]),
"u" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_back"]),
"ui" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"v" : set(["class_consonantal", "consonant", "manner_fricative", "manner_strident", "place_labiodental", "voiced"]),
"w" : set(["class_sonorant", "consonant", "manner_approximant", "manner_glide", "place_labial", "place_velar", "voiced"]),
"x" : set(["class_consonantal", "consonant", "manner_fricative", "place_velar"]),
"y" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_front"]),
"z" : set(["class_consonantal", "consonant", "manner_fricative", "manner_strident", "place_alveolar", "voiced"])
}
self.map = {"pau":"pau",
"paucl":"paucl",
"ʔ":"paugs",
"ə":"q", #sin
"əi":"qi", #wyn
"a":"a", #man
"ai":"ai", #katjie
"ɛ":"E", #ken
"œ":"qoeq", #mus
"əu":"qu", #bou
"œy":"qoeqy", #huis
"ŋ":"N", #sing
"ɔ":"O", #son
"ɔi":"Oi", #potjie
"ʃ":"S", #chef
"ʒ":"Z", #mirage
"æ":"qaeq", #ek
"ɑː":"AA", #aan
"ɑːi":"AAi", #saai
"b":"b",
"d":"d",
"iə":"iq", #seer
"øː":"qooq", #seun
"f":"f",
"g":"g",
"ɦ":"hq",
"i":"i", #sien
"iu":"iu", #meeu
"j":"j",
"k":"k",
"l":"l",
"m":"m",
"n":"n",
"uə":"uq", #room
"uəi":"uqi", #rooi
"p":"p",
"r":"r",
"s":"s",
"t":"t",
"tʃ":"tS", #tjek
"u":"u", #boek
"ui":"ui", #boei
"v":"v", #wens
"w":"w", #twee
"x":"x", #gee
"y":"y", #muur
"z":"z",
"xxx":"xxx"
}
def is_plosive(self, phonename):
return "manner_plosive" in self.phones[phonename]
def is_voiced(self, phonename):
return ("voiced" in self.phones[phonename] or
"vowel" in self.phones[phonename])
def is_obstruent(self, phonename):
return ("class_consonantal" in self.phones[phonename] and
"class_sonorant" not in self.phones[phonename] and
"class_syllabic" not in self.phones[phonename])
def is_vowel(self, phonename):
return "vowel" in self.phones[phonename]
def is_glide(self, phonename):
return "manner_glide" in self.phones[phonename]
def is_liquid(self, phonename):
return "manner_liquid" in self.phones[phonename]
def is_syllabicconsonant(self, phonename):
return "class_syllabic" in self.phones[phonename] and "consonant" in self.phones[phonename]
def is_fricative(self, phonename):
return "manner_fricative" in self.phones[phonename]
def is_nasal(self, phonename):
return "manner_nasal" in self.phones[phonename]
def sonority_level(self, phonename):
""" Assigns levels of sonority to phones based on their nature...
"""
if self.is_vowel(phonename):
if "height_low" in self.phones[phonename]:
return 9
if "height_mid" in self.phones[phonename]:
return 8
if "height_high" in self.phones[phonename]:
return 7
if self.is_liquid(phonename):
return 6
if self.is_nasal(phonename):
return 5
if self.is_fricative(phonename):
if self.is_voiced(phonename):
return 4
else:
return 3
if self.is_plosive(phonename):
if self.is_voiced(phonename):
return 2
else:
return 1
return 0
def _process_cluster(self, cluster, phonelist, match):
""" Break cluster into syllables according to the rules defined by
T.A. Hall, "English syllabification as the interaction of
markedness constraints" in Studia Linguistica, vol. 60, 2006,
pp. 1-33
Need to refactor the if statements to make clearer/simpler...
Implementation for English... needs to be revisited...
"""
phonecluster = phonelist[match.start() : match.end()]
if cluster == "VCV":
#always split -> V.CV:
return "V.CV"
if cluster == "VCCV":
CC = phonecluster[1:3]
#if CC cluster is Tautosyllabic -> V.CCV:
if ((CC in self.features["wellformed_clusters"] and
self.sonority_level(CC[1]) > self.sonority_level(CC[0])) or
(CC[0] == "s" and
self.is_plosive(CC[1]) and
not self.is_voiced(CC[1]))):
return "V.CCV"
#if CC cluster is Heterosyllabic -> VC.CV:
if ((self.sonority_level(CC[1]) < self.sonority_level(CC[0])) or
(self.sonority_level(CC[1]) == self.sonority_level(CC[0])) or
(CC not in self.features["wellformed_clusters"] and
self.sonority_level(CC[1]) > self.sonority_level(CC[0]))):
return "VC.CV"
if cluster == "VCCCV":
CCC = phonecluster[1:4]
C2C3 = CCC[1:]
#if CCC are all obstruents -> VC.CCV:
if all([self.is_obstruent(C) for C in CCC]):
return "VC.CCV"
#if C2C3 are wellformed onsets -> VC.CCV:
if C2C3 in self.features["wellformed_clusters"]:
return "VC.CCV"
else:
return "VCC.CV"
if cluster == "VCCCCV":
#always split -> VC.CCCV:
return "VC.CCCV"
if cluster == "VCGV":
CG = phonecluster[1:3]
if not self.is_plosive(CG[0]): #C not a stop
return "VC.GV"
else:
if CG not in self.features["wellformed_clusters"]: #C a stop and CG not wellformed
return "VC.GV"
else:
return "V.CGV" #C a stop and CG wellformed
if cluster == "VCCGV":
CCG = phonecluster[1:4]
if CCG[0] == "s":
return "V.CCGV"
else:
return "VC.CGV"
if cluster == "VCCCGV":
return "VC.CCGV"
if cluster == "VV": #not described in the Hall paper...
return "V.V"
def syllabify(self, phonelist):
""" Classes:
C -> Consonant,
V -> Short/Long Vowel/Syllabic sonorant/Diphthong
G -> Glide
"""
#make a copy (to be edited internally)
plist = list(phonelist)
#first construct string representing relevant classes...
classstr = ""
for phone in plist:
if self.is_vowel(phone):
classstr += "V"
elif self.is_glide(phone):
classstr += "G"
else:
classstr += "C"
#Begin Aby's hacks:
# - Change the last phoneclass under certain conditions..
try:
if (self.is_syllabicconsonant(plist[-1]) and
self.is_obstruent(plist[-2])):
classstr = classstr[:-1] + "V"
if (self.is_syllabicconsonant(plist[-1]) and
self.is_nasal(plist[-2])):
classstr = classstr[:-1] + "V"
except IndexError:
pass
#End Aby's hacks...
#find syllable_clusters in order and apply syllabification
#process on each...this should be redone... FIXME!!!
for cluster in self.features["syllable_clusters"]:
match = re.search(cluster, classstr)
while match:
#syllabify cluster
clustersylstr = self._process_cluster(cluster, plist, match)
#update classstr...
start, end = match.span()
classstr = clustersylstr.join([classstr[:start], classstr[end:]])
plist = (plist[:match.start() + clustersylstr.index(".")] +
[""] + plist[match.start() + clustersylstr.index("."):])
#next match...
match = re.search(cluster, classstr)
sylls = [[]]
index = 0
for char in classstr:
if char != ".":
sylls[-1].append(phonelist[index])
index += 1
else:
sylls.append([])
return sylls
class LwaziAfrikaans_simpleGPOS_HTSVoice(LwaziPromHTSVoice):
""" GPOS from Festival English example...
"""
PREPOSITIONS = ["in", "van", "vir", "op", "daardie", "met",
"by", "vanaf", "as", "teen", "voor", "onder",
"na", "oor", "terwyl", "sonder", "dat", "deur",
"tussen", "per", "af", "langs", "hierdie", "naas"]
DETERMINERS = ["die", "n", "geen", "nie", "elke", "nog", "al",
"enige", "beide", "baie"]
MODAL = ["sal", "wil", "mag", "sou", "wou", "moet", "wees"]
CONJUNCTIONS = ["en", "maar", "omdat", "want", "of"]
INTERROGATIVE_PRONOUNS = ["wie", "wat", "watter", "waar", "hoe", "wanneer", "hoekom"]
PERSONAL_PRONOUNS = ["haar", "sy", "hulle", "hul", "ons", "syne", "myne", "hare"]
AUXILIARY_VERBS = ["is", "het"]
GPOS = dict([(word, "prep") for word in PREPOSITIONS] +
[(word, "det") for word in DETERMINERS] +
[(word, "md") for word in MODAL] +
[(word, "cc") for word in CONJUNCTIONS] +
[(word, "wp") for word in INTERROGATIVE_PRONOUNS] +
[(word, "pps") for word in PERSONAL_PRONOUNS] +
[(word, "aux") for word in AUXILIARY_VERBS])
def __init__(self, phoneset, g2p, pronundict, pronunaddendum, synthesizer):
LwaziHTSVoice.__init__(self,
phoneset=phoneset,
g2p=g2p,
pronundict=pronundict,
pronunaddendum=pronunaddendum,
synthesizer=synthesizer)
self.processes = {"text-to-words": OrderedDict([("tokenizer", "default"),
("normalizer", "default"),
("gpos", None),
("phrasifier", None)]),
"text-to-segments": OrderedDict([("tokenizer", "default"),
("normalizer", "default"),
("gpos", None),
("phrasifier", None),
("phonetizer", None),
("pauses", None)]),
"text-to-label": OrderedDict([("tokenizer", "default"),
("normalizer", "default"),
("gpos", None),
("phrasifier", None),
("phonetizer", None),
("pauses", None),
("synthesizer", "label_only")]),
"text-to-wave": OrderedDict([("tokenizer", "default"),
("normalizer", "default"),
("gpos", None),
("phrasifier", None),
("phonetizer", None),
("pauses", None),
("synthesizer", "label_and_synth")]),
"utt-to-label": OrderedDict([("synthesizer", "label_only")]),
"utt-to-wave": OrderedDict([("synthesizer", "label_and_synth")])}
def gpos(self, utt, processname):
word_rel = utt.get_relation("Word")
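        # simple binary GPOS: words listed in self.GPOS (function words) are
        # tagged "nc", all other words "c" (presumably non-content/content)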
for word_item in word_rel:
if word_item["name"] in self.GPOS:
word_item["gpos"] = "nc"
else:
word_item["gpos"] = "c"
return utt
class SynthesizerHTSME_Prominence(SynthesizerHTSME):
def hts_label(self, utt, processname):
lab = []
starttime = 0
for phone_item in utt.get_relation("Segment"):
if "end" in phone_item:
endtime = hts_labels_prom.float_to_htk_int(phone_item["end"])
else:
endtime = None
phlabel = [hts_labels_prom.p(phone_item),
hts_labels_prom.a(phone_item),
hts_labels_prom.b(phone_item),
hts_labels_prom.c(phone_item),
hts_labels_prom.d(phone_item),
hts_labels_prom.e(phone_item),
hts_labels_prom.f(phone_item),
hts_labels_prom.g(phone_item),
hts_labels_prom.h(phone_item),
hts_labels_prom.i(phone_item),
hts_labels_prom.j(phone_item)]
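            # the fields p..j are joined into one HTS full-context label;
            # start/end times, when known, are prepended in HTK time units
            # (float_to_htk_int presumably converts seconds to 100 ns units)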
if endtime is not None:
lab.append("%s %s " % (str(starttime).rjust(10), str(endtime).rjust(10)) + "/".join(phlabel))
else:
lab.append("/".join(phlabel))
starttime = endtime
utt["hts_label"] = lab
return utt
| 52.352941 | 177 | 0.465573 | 21,581 | 0.967801 | 0 | 0 | 0 | 0 | 0 | 0 | 8,110 | 0.363693 |
f865843e860d96b7840567719ae0919a197d73ae | 144,813 | py | Python | scripts/Iodide/project_misc.py | tsherwen/sparse2spatial | 6f5240c7641ad7a894476672b78c8184c514bf87 | ["MIT"] | 1 | 2020-01-14T21:40:29.000Z | 2020-01-14T21:40:29.000Z | scripts/Iodide/project_misc.py | tsherwen/sparse2spatial | 6f5240c7641ad7a894476672b78c8184c514bf87 | ["MIT"] | null | null | null | scripts/Iodide/project_misc.py | tsherwen/sparse2spatial | 6f5240c7641ad7a894476672b78c8184c514bf87 | ["MIT"] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This module contains analysis done for the Ocean iodide (Oi!) project
This includes presentation at conferences etc...
"""
import sys
import datetime
import numpy as np
import pandas as pd
import xarray as xr
import sparse2spatial as s2s
import sparse2spatial.utils as utils
import matplotlib
import matplotlib.pyplot as plt
# import AC_tools (https://github.com/tsherwen/AC_tools.git)
import AC_tools as AC
# Get iodide specific functions
import observations as obs
def main():
"""
Run various misc. scripted tasks linked to the "iodide in the ocean" project
"""
pass
# ---- ----- ----- ----- ----- ----- ----- ----- -----
# ----- ----- Misc (associated iodide project tasks)
# These include getting CTM (GEOS-Chem) output for Anoop/Sawalha/TropMet
# --- Make planeflight files for cruise
# mk_pf_files4Iodide_cruise()
# mk_pf_files4Iodide_cruise(mk_column_output_files=True)
# Test the input files for these cruises?
# test_input_files4Iodide_cruise_with_plots()
# Test output files for cruises
# TEST_iodide_cruise_output()
# TEST_AND_PROCESS_iodide_cruise_output()
# TEST_AND_PROCESS_iodide_cruise_output(just_process_surface_data=False)
# Get numbers for data paper (data descriptor paper)
# get_numbers_for_data_paper()
# Get Longhurst province labelled NetCDF for res
# add_LonghurstProvince2NetCDF(res='4x5', ExStr='TEST_VI' )
# add_LonghurstProvince2NetCDF(res='2x2.5', ExStr='TEST_V' )
# add_LonghurstProvince2NetCDF(res='0.125x0.125', ExStr='TEST_VIII' )
# Add Longhurst Province to a lower res NetCDF file
# folder = './'
# filename = 'Oi_prj_output_iodide_field_1x1_deg_0_5_centre.nc'
# filename = 'Oi_prj_output_iodide_field_0_5x0_5_deg_centre.nc'
# ds = xr.open_dataset(folder+filename)
# add_LonghurstProvince2NetCDF(ds=ds, res='0.5x0.5', ExStr='TEST_VIII')
# process this to csv files for Indian' sea-surface paper
# ---------------------------------------------------------------------------
# ---------- Functions to produce output for Iodide obs. paper -------------
# ---------------------------------------------------------------------------
def get_PDF_of_iodide_exploring_data_rootset(show_plot=False,
ext_str=None):
""" Get PDF of plots exploring the iodide dataset """
import seaborn as sns
sns.set(color_codes=True)
# Get the data
df = obs.get_processed_df_obs_mod() # NOTE this df contains values >400nM
#
if ext_str == 'Open_ocean':
# Kludge data
# Kludge_tinel_data=True
# if Kludge_tinel_data:
# new_Data = [ 'He_2014', 'He_2013']
# new_Data += ['Chance_2018_'+i for i in 'I', 'II', 'III']
# df.loc[ df['Data_Key'].isin(new_Data), 'Coastal'] = False
# only take data flagged open ocean
df = df.loc[df[u'Coastal'] == 0.0, :]
elif ext_str == 'Coastal':
df = df.loc[df[u'Coastal'] == 1.0, :]
elif ext_str == 'all':
print('Using entire dataset')
else:
print('Need to set region of data to explore - currently', ext_str)
sys.exit()
# setup PDF
savetitle = 'Oi_prj_data_root_exploration_{}'.format(ext_str)
dpi = 320
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# colours to use?
# current_palette = sns.color_palette()
current_palette = sns.color_palette("colorblind")
# --- --- --- --- --- --- --- ---
    # ---- Add in extra variables
# iodide / iodate
I_div_IO3_var = 'I$^{-}$/IO$_{3}^{-}$ (ratio)'
df[I_div_IO3_var] = df['Iodide'] / df['Iodate']
    # total iodine (iodide + iodate)
I_plus_IO3 = 'I$^{-}$+IO$_{3}^{-}$'
df[I_plus_IO3] = df['Iodide'] + df['Iodate']
# --- Add ocean basin to dataframe
area_var = 'Region'
df[area_var] = None
# setup a dummy column
# --- --- --- --- --- --- --- ---
# --- Plot dataset locations
sns.reset_orig()
# Get lats, lons and size of dataset
lats = df['Latitude'].values
lons = df['Longitude'].values
N_size = df.shape[0]
if ext_str == 'Open_ocean':
title = 'Iodide data (Open Ocean) explored in PDF (N={})'
else:
title = 'Iodide data (all) explored in this PDF (N={})'
# plot up
AC.plot_lons_lats_spatial_on_map(lats=lats, lons=lons,
title=title.format(N_size),
split_title_if_too_long=False,
f_size=10)
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# --- --- --- --- --- --- --- ---
    # --- iodide to iodate ratio
import seaborn as sns
sns.set(color_codes=True)
current_palette = sns.color_palette("colorblind")
# plot up with no limits
df.plot(kind='scatter', y=I_div_IO3_var, x='Latitude')
# beautify
plt.title(I_div_IO3_var + ' ({}, y axis unlimited)'.format(ext_str))
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# plot up with limits at 3
ylimits = 1.5, 0.75, 0.5,
for ylimit in ylimits:
df.plot(kind='scatter', y=I_div_IO3_var, x='Latitude')
# beautify
title = ' ({}, y axis limit: {})'.format(ext_str, ylimit)
plt.title(I_div_IO3_var + title)
plt.ylim(-0.05, ylimit)
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# --- --- --- --- --- --- --- ---
# TODO - update to use proper definitions
# for southern ocean use the files below
# for rest https://www.nodc.noaa.gov/woce/woce_v3/wocedata_1/woce-uot/summary/bound.htm
#
    # --- iodide to iodate ratio (split by region)
    # Between 120E and -80E it's the Pacific
upper_val = 120
lower_val = -80
unit = '$^{o}$E'
bool_1 = df[u'Longitude'] >= upper_val
bool_2 = df[u'Longitude'] < lower_val
bool = (np.column_stack((bool_2, bool_1)).any(axis=1))
varname = 'Pacific Ocean ({} to {}{})'.format(upper_val, lower_val, unit)
df.loc[bool, area_var] = varname
    # Between -80E and 30E it's the Atlantic
upper_val = -80
lower_val = 30
unit = '$^{o}$E'
bool_1 = df[u'Longitude'] >= upper_val
bool_2 = df[u'Longitude'] < lower_val
bool = (np.column_stack((bool_2, bool_1)).all(axis=1))
varname = 'Atlantic Ocean ({} to {}{})'.format(lower_val, upper_val, unit)
df.loc[bool, area_var] = varname
    # Between 30E and 120E it's the Indian Ocean
upper_val = 30
lower_val = 120
unit = '$^{o}$E'
bool_1 = df[u'Longitude'] >= upper_val
bool_2 = df[u'Longitude'] < lower_val
bool = (np.column_stack((bool_2, bool_1)).all(axis=1))
varname = 'Indian Ocean ({} to {}{})'.format(lower_val, upper_val, unit)
df.loc[bool, area_var] = varname
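    # NOTE: np.column_stack((bool_2, bool_1)).all(axis=1) is an elementwise
    # AND of the two masks (and .any(axis=1) an OR); e.g. a longitude of
    # 100E satisfies both (>=30) and (<120), so it is flagged Indian Ocean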
# if latitude below 60S, overwrite to be Southern ocean
varname = 'Southern Ocean'
df.loc[df['Latitude'] < -60, area_var] = varname
# --- --- --- --- --- --- --- ---
# --- locations of data
sns.reset_orig()
# loop regions
for var_ in list(set(df[area_var].tolist())):
# select data for area
df_tmp = df[df[area_var] == var_]
# locations ?
lons = df_tmp[u'Longitude'].tolist()
lats = df_tmp[u'Latitude'].tolist()
# Now plot
AC.plot_lons_lats_spatial_on_map(lons=lons, lats=lats)
# fig=fig, ax=ax , color='blue', label=label, alpha=alpha,
# window=window, axis_titles=axis_titles, return_axis=True,
# p_size=p_size)
plt.title('{} ({})'.format(var_, ext_str))
if show_plot:
plt.show()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# --- --- --- --- --- --- --- ---
    # --- iodide to iodate ratio
import seaborn as sns
sns.set(color_codes=True)
current_palette = sns.color_palette("colorblind")
# loop regions
for var_ in list(set(df[area_var].tolist())):
# select data for area
df_tmp = df[df[area_var] == var_]
# plot up with no limits
df_tmp.plot(kind='scatter', y=I_div_IO3_var, x='Latitude')
# beautify
plt.title(I_div_IO3_var + ' ({}, y axis unlimited)'.format(var_))
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# plot up with limits at 3
ylimits = 1.5, 0.75, 0.5
for ylimit in ylimits:
df_tmp.plot(kind='scatter', y=I_div_IO3_var, x='Latitude')
# beautify
title = ' ({}, y axis limit: {})'.format(var_, ylimit)
plt.title(I_div_IO3_var + title)
plt.ylim(-0.05, ylimit)
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# --- --- --- --- --- --- --- ---
    # --- iodide + iodate
import seaborn as sns
sns.set(color_codes=True)
current_palette = sns.color_palette("colorblind")
# loop regions
for var_ in list(set(df[area_var].tolist())):
# select data for area
df_tmp = df[df[area_var] == var_]
# plot up with no limits
df_tmp.plot(kind='scatter', y=I_plus_IO3, x='Latitude')
# beautify
plt.title(I_plus_IO3 + ' ({}, y axis unlimited)'.format(var_))
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# plot up with limits at 3
# ylimits = 1.5, 0.75, 0.5
# for ylimit in ylimits:
# df.plot(kind='scatter', y=I_plus_IO3, x='Latitude' )
# # beautify
# title= ' ({}, y axis limited to {})'.format(var_, ylimit)
# plt.title( I_plus_IO3 + title )
# plt.ylim(-0.05, ylimit )
# # Save to PDF and close plot
# AC.plot2pdfmulti( pdff, savetitle, dpi=dpi )
# if show_plot: plt.show()
# plt.close()
# plot up with limits on y
ylimits = [100, 600]
# for ylimit in ylimits:
df_tmp.plot(kind='scatter', y=I_plus_IO3, x='Latitude')
# beautify
title = ' ({}, y axis={}-{})'.format(var_, ylimits[0], ylimits[1])
plt.title(I_plus_IO3 + title)
plt.ylim(ylimits[0], ylimits[1])
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# -- Save entire pdf
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
# ---------------------------------------------------------------------------
# ---------- Funcs. to process iodine obs/external data --------------------
# ---------------------------------------------------------------------------
def check_points_for_cruises(target='Iodide', verbose=False, debug=False):
"""
Check the cruise points for the new data (Tinel, He, etc...)
"""
# Get the observational data
df = obs.get_processed_df_obs_mod() # NOTE this df contains values >400nM
# And the metadata
metadata_df = obs.get_iodide_obs_metadata()
# Only consider new datasets
new_cruises = metadata_df[metadata_df['In Chance2014?'] == 'N']
df = df[df['Data_Key'].isin(new_cruises['Data_Key'].tolist())]
# Strings to format printing
ptr_str_I = '- '*5 + 'Cruise: {:<20}'
ptr_str_II = '(Source: {:<20}, Location: {:<15}, N: {}, N(Iodide): {})'
# Print by cruise
for data_key in set(df['Data_Key']):
df_m_tmp = metadata_df[metadata_df['Data_Key'] == data_key]
df_tmp = df[df['Data_Key'] == data_key]
# Extract metadata
Cruise = df_m_tmp['Cruise'].values[0]
Source = df_m_tmp['Source'].values[0]
Location = df_m_tmp['Location'].values[0]
#
N = df_tmp.shape[0]
N_I = df_tmp[target].dropna().shape[0]
print(ptr_str_I.format(Cruise))
print(ptr_str_II.format(Source, Location, N, N_I))
# Points for all cruises
N = df.shape[0]
N_I = df[target].dropna().shape[0]
print(ptr_str_I.format('ALL new data'))
print(ptr_str_II.format('', '', N, N_I))
def plot_threshold_plus_SD_spatially(var=None, value=None, std=None, res='4x5',
fillcontinents=True, show_plot=False,
dpi=320, save2png=True,
verbose=True, debug=False):
"""
Plot up the spatial extent of a input variable value + Std. Dev.
"""
# - Local variables
# Get the core input variables
data_root = utils.get_file_locations('data_root')
filename = 'Oi_prj_feature_variables_{}.nc'.format(res)
ds = xr.open_dataset(data_root + filename)
# make sure the dataset has units
ds = add_units2ds(ds)
# Use appropriate plotting settings for resolution
if res == '0.125x0.125':
centre = True
else:
centre = False
# Get data
arr = ds[var].mean(dim='time').values
# colour in values above and below threshold (works)
arr[arr >= value] = 1
arr[arr >= value-std] = 0.5
arr[(arr != 1) & (arr != 0.5)] = 0.01
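    # NOTE: this in-place re-flagging assumes (value - std) > 1, so that
    # gridboxes already set to 1 are not caught again by the second test
    # (true for the variables used here, e.g. WOA_TEMP_K values of ~300)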
# Get units from dataset
units = ds[var].units
# Plot up
title_str = "'{}' ({}) threshold Value ({}) + \n Standard deviation ({})"
title = title_str.format(var, units, value, std)
if var == 'WOA_TEMP_K':
title += ' (in degC={}, std={})'.format(value-273.15, std)
# Plot using AC_tools
AC.plot_spatial_figure(arr,
# extend=extend,
# fixcb=fixcb, nticks=nticks, \
res=res, show=False, title=title, \
fillcontinents=fillcontinents, centre=centre, units=units,
# f_size=f_size,
no_cb=False)
# Use a tight layout
plt.tight_layout()
# Now save or show
if show_plot:
plt.show()
savetitle = 'Oi_prj_threshold_std_4_var_{}_{}'.format(var, res)
if save2png:
plt.savefig(savetitle+'.png', dpi=dpi)
plt.close()
# ---------------------------------------------------------------------------
# -------------- Reproduction of Chance et al (2014) figures ----------------
# ---------------------------------------------------------------------------
def plot_up_iodide_vs_latitude(show_plot=True):
"""
Reproduce Fig. 3 in Chance et al (2014)
Notes
----
- figure captions:
Variation of sea-surface iodide concentration with latitude for entire
data set (open diamonds) and open ocean data only (filled diamonds).
For clarity, one exceptionally high coastal iodide value (700 nM, 58.25N)
has been omitted.
"""
# - Get data
df = get_core_Chance2014_obs()
# Select data of interest
# ( later add a color selection based on coastal values here? )
vars = ['Iodide', 'Latitude']
print(df)
# and select coastal/open ocean
df_coastal = df[df['Coastal'] == True][vars]
df_open_ocean = df[~(df['Coastal'] == True)][vars]
# - Now plot Obs.
# plot coastal
ax = df_coastal.plot(kind='scatter', x='Latitude', y='Iodide', marker='D',
color='blue', alpha=0.1,
# markerfacecolor="None", **kwds )
)
# plot open ocean
ax = df_open_ocean.plot(kind='scatter', x='Latitude', y='Iodide',
marker='D', color='blue', alpha=0.5, ax=ax,
# markerfacecolor="None", **kwds )
)
# Update aesthetics of plot
plt.ylabel('[Iodide], nM')
plt.xlabel('Latitude, $^{o}$N')
plt.ylim(-5, 500)
plt.xlim(-80, 80)
# save or show?
if show_plot:
plt.show()
plt.close()
def plot_up_ln_iodide_vs_Nitrate(show_plot=True):
"""
    Reproduce Fig. 11 in Chance et al (2014)
Original caption:
Ln[iodide] concentration plotted against observed ( ) and
climatological ( ) nitrate concentration obtained from the World
Ocean Atlas as described in the text for all data (A) and nitrate
concentrations below 2 mM (B) and above 2 mM (C). Dashed lines in B
and C show the relationships between iodide and nitrate adapted from
Campos et al.41 by Ganzeveld et al.27
"""
# - location of data to plot
df = obs.get_processed_df_obs_mod()
# take log of iodide
df['Iodide'] = np.log(df['Iodide'].values)
# - Plot up all nitrate concentrations
df.plot(kind='scatter', x='Nitrate', y='Iodide', marker='D',
color='k') # ,
plt.ylabel('LN[Iodide], nM')
plt.xlabel('LN[Nitrate], mM')
if show_plot:
plt.show()
plt.close()
# - Plot up all nitrate concentrations below 2 mM
df_tmp = df[df['Nitrate'] < 2]
df_tmp.plot(kind='scatter', x='Nitrate', y='Iodide', marker='D',
color='k') # ,
plt.ylabel('LN[Iodide], nM')
plt.xlabel('LN[Nitrate], mM')
if show_plot:
plt.show()
plt.close()
# - Plot up all nitrate concentrations above 2 mM
df_tmp = df[df['Nitrate'] > 2]
df_tmp.plot(kind='scatter', x='Nitrate', y='Iodide', marker='D',
color='k'),
plt.ylabel('LN[Iodide], nM')
plt.xlabel('LN[Nitrate], mM')
if show_plot:
plt.show()
plt.close()
def plot_up_ln_iodide_vs_SST(show_plot=True):
"""
    Reproduce Fig. 8 in Chance et al (2014)
Original caption:
Ln[iodide] concentration plotted against observed sea surface
temperature ( ) and climatological sea surface temperature ( ) values
obtained from the World Ocean Atlas as described in the text.
"""
# - location of data to plot
folder = utils.get_file_locations('data_root')
f = 'Iodine_obs_WOA.csv'
df = pd.read_csv(folder+f, encoding='utf-8')
# take log of iodide
df['Iodide'] = np.log(df['Iodide'].values)
# - Plot up all nitrate concentrations
df.plot(kind='scatter', x='Temperature', y='Iodide', marker='D',
color='k')
plt.ylabel('LN[Iodide], nM')
plt.xlabel('Sea surface temperature (SST), $^{o}$C')
if show_plot:
plt.show()
plt.close()
def plot_up_ln_iodide_vs_salinity(show_plot=True):
"""
    Reproduce Fig. 8 in Chance et al (2014)
Original caption:
Ln[iodide] concentration plotted against observed salinity ( , ) and
climatological salinity ( ) values obtained from the World Ocean Atlas as
described in the text for: (A) all data; (B) samples with salinity greater
than 30, shown in shaded area in (A). Note samples with salinity less than
30 have been excluded from further analysis and are not shown in Fig. 8–11.
"""
# - location of data to plot
folder = utils.get_file_locations('data_root')
f = 'Iodine_obs_WOA.csv'
df = pd.read_csv(folder+f, encoding='utf-8')
# Just select non-coastal data
# df = df[ ~(df['Coastal']==True) ]
# take log of iodide
df['Iodide'] = np.log(df['Iodide'].values)
# - Plot up all nitrate concentrations
df.plot(kind='scatter', x='Salinity', y='Iodide', marker='D', color='k')
plt.ylabel('LN[Iodide], nM')
plt.xlabel('Salinity')
plt.xlim(-2, AC.myround(max(df['Salinity']), 10, round_up=True))
if show_plot:
plt.show()
plt.close()
# - Plot up all nitrate concentrations
df_tmp = df[df['Salinity'] < 30]
df_tmp.plot(kind='scatter', x='Salinity',
y='Iodide', marker='D', color='k')
plt.ylabel('LN[Iodide], nM')
plt.xlabel('Salinity')
plt.xlim(-2, AC.myround(max(df['Salinity']), 10, round_up=True))
if show_plot:
plt.show()
plt.close()
# - Plot up all nitrate concentrations
df_tmp = df[df['Salinity'] > 30]
df_tmp.plot(kind='scatter', x='Salinity',
y='Iodide', marker='D', color='k')
plt.ylabel('LN[Iodide], nM')
plt.xlabel('Salinity')
plt.xlim(29, AC.myround(max(df['Salinity']), 10, round_up=True))
if show_plot:
plt.show()
plt.close()
def plot_pair_grid(df=None, vars_list=None):
    """
    Make a basic pair plot to test the data
    """
    import seaborn as sns
    import matplotlib.pyplot as plt
    from itertools import cycle
    # make a kde plot
    def make_kde(*args, **kwargs):
        sns.kdeplot(*args, cmap=next(make_kde.cmap_cycle), **kwargs)
    # define colormap to cycle
    make_kde.cmap_cycle = cycle(('Blues_r', 'Greens_r', 'Reds_r', 'Purples_r'))
    # Plot a pair plot (histograms on the diagonal, KDEs off the diagonal)
    pg = sns.PairGrid(df, vars=vars_list)
    pg.map_diag(plt.hist)
    pg.map_offdiag(make_kde)
    return pg
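# A minimal usage sketch for the above (hypothetical column names, assuming
# a DataFrame like that returned by obs.get_processed_df_obs_mod()):
# plot_pair_grid(df=my_df, vars_list=['Iodide', 'WOA_TEMP', 'WOA_Salinity'])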
# ---------------------------------------------------------------------------
# ---------------- New plotting of iodine obs/external data -----------------
# ---------------------------------------------------------------------------
def explore_extracted_data_in_Oi_prj_Arctic_Antarctic_PERTURBED(dsA=None,
                                                                res='0.125x0.125',
                                                                dpi=320):
    """
    Analyse the gridded data for the Arctic and Antarctic (perturbed
    ancillaries variant; kept under a distinct name so the un-perturbed
    function of the same stem defined below does not shadow it)
    """
import matplotlib
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
import seaborn as sns
sns.set()
# - local variables
# Get input variables
if isinstance(dsA, type(None)):
filename = 'Oi_prj_predicted_iodide_{}.nc'.format(res)
# folder = '/shared/earth_home/ts551/labbook/Python_progs/'
folder = '/shared/earth_home/ts551/data/iodide/'
filename = 'Oi_prj_feature_variables_{}.nc'.format(res)
dsA = xr.open_dataset(folder + filename)
# ds = xr.open_dataset( filename )
# variables to consider
vars2analyse = list(dsA.data_vars)
# Add LWI to array - NOTE: 1 = water in Nature run LWI files !
# ( The above comment is not correct! why is this written here? )
folderLWI = utils.get_file_locations(
'AC_tools')+'/data/LM/TEMP_NASA_Nature_run/'
filenameLWI = 'ctm.nc'
LWI = xr.open_dataset(folderLWI+filenameLWI)
# updates dates (to be Jan=>Dec)
new_dates = [datetime.datetime(1970, i, 1) for i in LWI['time.month']]
LWI.time.values = new_dates
# Sort by new dates
LWI = LWI.loc[{'time': sorted(LWI.coords['time'].values)}]
# LWI = AC.get_LWI_map(res=res)[...,0]
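    # copy an existing DataArray so its coords/dims are inherited, then
    # overwrite the values with the land/water mask (LWI == 0 is water here)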
dsA['IS_WATER'] = dsA['WOA_TEMP'].copy()
dsA['IS_WATER'].values = (LWI['LWI'] == 0)
# add is land
dsA['IS_LAND'] = dsA['IS_WATER'].copy()
dsA['IS_LAND'].values = (LWI['LWI'] == 1)
# get surface area
s_area = AC.calc_surface_area_in_grid(res=res) # m2 land map
dsA['AREA'] = dsA['WOA_TEMP'].mean(dim='time')
dsA['AREA'].values = s_area.T
# - Select data of interest by variable for locations
# setup dicts to store the extracted values
df65N, df65S, dfALL = {}, {}, {}
# - setup booleans for the data
    # now loop and extract variables
vars2use = [
'WOA_Nitrate',
# 'WOA_Salinity', 'WOA_Phosphate', 'WOA_TEMP_K', 'Depth_GEBCO',
]
# setup PDF
savetitle = 'Oi_prj_explore_Arctic_Antarctic_ancillaries_space_PERTURBED'
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# Loop by dataset (region) and plots
for var_ in vars2use:
# select the boolean for if water
IS_WATER = dsA['IS_WATER'].values
if IS_WATER.shape != dsA[var_].shape:
# special case for depth
# get value for >= 65
ds_tmp = dsA.sel(lat=(dsA['lat'] >= 65))
arr = np.ma.array(12*[ds_tmp[var_].values])
arr = arr[ds_tmp['IS_WATER'].values]
# add to saved arrays
df65N[var_] = arr
del ds_tmp
            # get value for <= -65
ds_tmp = dsA.sel(lat=(dsA['lat'] <= -65))
arr = np.ma.array(12*[ds_tmp[var_].values])
arr = arr[ds_tmp['IS_WATER'].values]
# add to saved arrays
df65S[var_] = arr
del ds_tmp
# get value for all
ds_tmp = dsA.copy()
arr = np.ma.array(12*[ds_tmp[var_].values])
arr = arr[ds_tmp['IS_WATER'].values]
# add to saved arrays
dfALL[var_] = arr
del ds_tmp
else:
# get value for >= 65
ds_tmp = dsA.sel(lat=(dsA['lat'] >= 65))
arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values]
# add to saved arrays
df65N[var_] = arr
del ds_tmp
            # get value for <= -65
ds_tmp = dsA.sel(lat=(dsA['lat'] <= -65))
arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values]
# add to saved arrays
df65S[var_] = arr
del ds_tmp
            # get value for all
ds_tmp = dsA.copy()
arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values]
# add to saved arrays
dfALL[var_] = arr
del ds_tmp
# setup a dictionary of regions to plot from
dfs = {
'>=65N': pd.DataFrame(df65N), '>=65S': pd.DataFrame(df65S),
'Global': pd.DataFrame(dfALL),
}
    # - plot up the PDF distribution of each of the variables.
    datasets = sorted(dfs.keys())
    for var2use in vars2use:
print(var2use)
# set a single axis to use.
fig, ax = plt.subplots()
for dataset in datasets:
# select the DataFrame
df = dfs[dataset][var2use]
# Get sample size
N_ = df.shape[0]
# do a dist plot
label = '{} (N={})'.format(dataset, N_)
sns.distplot(df, ax=ax, label=label)
# Make sure the values are correctly scaled
ax.autoscale()
        # TODO: also plot up the perturbed values here once the list of
        # perturbations ('perturb2use') has been defined
# Beautify
title_str = "PDF of ancillary input for '{}'"
fig.suptitle(title_str.format(var2use))
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# -Save entire pdf
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
def explore_extracted_data_in_Oi_prj_explore_Arctic_Antarctic_obs(dsA=None,
res='0.125x0.125',
dpi=320):
"""
Analyse the input data for the Arctic and Antarctic
"""
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
import seaborn as sns
sns.set()
# - local variables
# get input variables
if isinstance(dsA, type(None)):
filename = 'Oi_prj_predicted_iodide_{}.nc'.format(res)
# folder = '/shared/earth_home/ts551/labbook/Python_progs/'
folder = '/shared/earth_home/ts551/data/iodide/'
filename = 'Oi_prj_feature_variables_{}.nc'.format(res)
dsA = xr.open_dataset(folder + filename)
# ds = xr.open_dataset( filename )
# variables to consider
vars2analyse = list(dsA.data_vars)
# add LWI to array - NOTE: 1 = water in Nature run LWI files !
# ( The above comment is not correct! why is this written here? )
folderLWI = utils.get_file_locations(
'AC_tools')+'/data/LM/TEMP_NASA_Nature_run/'
filenameLWI = 'ctm.nc'
LWI = xr.open_dataset(folderLWI+filenameLWI)
# updates dates (to be Jan=>Dec)
new_dates = [datetime.datetime(1970, i, 1) for i in LWI['time.month']]
LWI.time.values = new_dates
# Sort by new dates
LWI = LWI.loc[{'time': sorted(LWI.coords['time'].values)}]
# LWI = AC.get_LWI_map(res=res)[...,0]
dsA['IS_WATER'] = dsA['WOA_TEMP'].copy()
dsA['IS_WATER'].values = (LWI['LWI'] == 0)
# add is land
dsA['IS_LAND'] = dsA['IS_WATER'].copy()
dsA['IS_LAND'].values = (LWI['LWI'] == 1)
# get surface area
s_area = AC.calc_surface_area_in_grid(res=res) # m2 land map
dsA['AREA'] = dsA['WOA_TEMP'].mean(dim='time')
dsA['AREA'].values = s_area.T
# - Select data of interest by variable for locations
# setup dicts to store the extracted values
df65N, df65S, dfALL = {}, {}, {}
# - setup booleans for the data
    # now loop and extract variables
vars2use = [
'WOA_Nitrate', 'WOA_Salinity', 'WOA_Phosphate', 'WOA_TEMP_K', 'Depth_GEBCO',
]
for var_ in vars2use:
# select the boolean for if water
IS_WATER = dsA['IS_WATER'].values
if IS_WATER.shape != dsA[var_].shape:
# special case for depth
# get value for >= 65
ds_tmp = dsA.sel(lat=(dsA['lat'] >= 65))
arr = np.ma.array(12*[ds_tmp[var_].values])
arr = arr[ds_tmp['IS_WATER'].values]
# add to saved arrays
df65N[var_] = arr
del ds_tmp
            # get value for <= -65
ds_tmp = dsA.sel(lat=(dsA['lat'] <= -65))
arr = np.ma.array(12*[ds_tmp[var_].values])
arr = arr[ds_tmp['IS_WATER'].values]
# add to saved arrays
df65S[var_] = arr
del ds_tmp
# get value for all
ds_tmp = dsA.copy()
arr = np.ma.array(12*[ds_tmp[var_].values])
arr = arr[ds_tmp['IS_WATER'].values]
# add to saved arrays
dfALL[var_] = arr
del ds_tmp
else:
# get value for >= 65
ds_tmp = dsA.sel(lat=(dsA['lat'] >= 65))
arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values]
# add to saved arrays
df65N[var_] = arr
del ds_tmp
            # get value for <= -65
ds_tmp = dsA.sel(lat=(dsA['lat'] <= -65))
arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values]
# add to saved arrays
df65S[var_] = arr
del ds_tmp
            # get value for all
ds_tmp = dsA.copy()
arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values]
# add to saved arrays
dfALL[var_] = arr
del ds_tmp
# setup a dictionary of regions to plot from
dfs = {
'>=65N': pd.DataFrame(df65N), '>=65S': pd.DataFrame(df65S),
'Global': pd.DataFrame(dfALL),
}
# - Loop regions and plot PDFs of variables of interest
# vars2use = dfs[ dfs.keys()[0] ].columns
# set PDF
savetitle = 'Oi_prj_explore_Arctic_Antarctic_ancillaries_space'
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# Loop by dataset (region) and plots
datasets = sorted(dfs.keys())
for dataset in datasets:
# select the DataFrame
df = dfs[dataset][vars2use]
# Get sample size
N_ = df.shape[0]
# do a pair plot
g = sns.pairplot(df)
# Add a title
plt.suptitle("Pairplot for '{}' (N={})".format(dataset, N_))
# adjust plots
g.fig.subplots_adjust(top=0.925, left=0.085)
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Plot up the PDF distribution of each of the variables.
for var2use in vars2use:
print(var2use)
# set a single axis to use.
fig, ax = plt.subplots()
for dataset in datasets:
# select the DataFrame
df = dfs[dataset][var2use]
# Get sample size
N_ = df.shape[0]
# do a dist plot
label = '{} (N={})'.format(dataset, N_)
sns.distplot(df, ax=ax, label=label)
# Make sure the values are correctly scaled
ax.autoscale()
# Beautify
title_str = "PDF of ancillary input for '{}'"
fig.suptitle(title_str.format(var2use))
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Plot up the number of oceanic data points by lat for each lat
# Plot up number of samples for South pole
ds = dsA.sel(lat=(dsA['lat'] <= -65))
var_ = 'WOA_Salinity'
N = {}
for lat in ds['lat'].values:
ds_tmp = ds.sel(lat=lat)
N[lat] = ds_tmp[var_].values[ds_tmp['IS_WATER'].values].shape[-1]
N = pd.Series(N)
N.plot()
plt.ylabel('number of gridboxes in predictor array')
    plt.xlabel(r'Latitude $^{\circ}$N')
plt.title('Number of gridboxes for Antarctic (<= -65N)')
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# Plot up number of samples for North pole
ds = dsA.sel(lat=(dsA['lat'] >= 65))
var_ = 'WOA_Salinity'
N = {}
for lat in ds['lat'].values:
ds_tmp = ds.sel(lat=lat)
N[lat] = ds_tmp[var_].values[ds_tmp['IS_WATER'].values].shape[-1]
N = pd.Series(N)
N.plot()
plt.ylabel('number of gridboxes in predictor array')
    plt.xlabel(r'Latitude $^{\circ}$N')
    plt.title('Number of gridboxes for Arctic (>= 65N)')
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# Save entire pdf
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
def explore_observational_data_in_Arctic_parameter_space(RFR_dict=None,
plt_up_locs4var_conds=False,
testset='Test set (strat. 20%)',
dpi=320):
"""
    Analyse the input observational data for the Arctic and Antarctic
"""
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
import seaborn as sns
sns.set()
# - local variables
df = RFR_dict['df']
# Set splits in data to look at
dfs = {}
# All data
dfs['All data'] = df.copy()
# Get all the data above 65 N
dfs['>=65N'] = df.loc[df['Latitude'] >= 65, :]
# Get all the data above 65 N and in the testset
bool_ = dfs['>=65N'][testset] == False
dfs['>=65N (training)'] = dfs['>=65N'].loc[bool_, :]
# Get all the data below 65 S
dfs['<=65S'] = df.loc[df['Latitude'] <= -65, :]
# Get all the data above 65 N and in the testset
bool_ = dfs['<=65S'][testset] == False
dfs['<=65S (training)'] = dfs['<=65S'].loc[bool_, :]
# - variables to explore?
vars2use = [
'WOA_Nitrate', 'WOA_Salinity', 'WOA_Phosphate', 'WOA_TEMP_K', 'Depth_GEBCO',
]
# - Loop regions and plot pairplots of variables of interest
# set PDF
savetitle = 'Oi_prj_explore_Arctic_Antarctic_obs_space'
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# Loop by dataset (region) and plots
datasets = sorted(dfs.keys())
for dataset in datasets:
# select the DataFrame
df = dfs[dataset]
# Get sample size
N_ = df.shape[0]
# do a pair plot
g = sns.pairplot(df[vars2use])
# Add a title
plt.suptitle("Pairplot for '{}' (N={})".format(dataset, N_))
# adjust plots
g.fig.subplots_adjust(top=0.925, left=0.085)
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Loop regions and plot PDFs of variables of interest
# Loop by dataset (region) and plots
import seaborn as sns
sns.reset_orig()
datasets = sorted(dfs.keys())
for dataset in datasets:
fig, ax = plt.subplots()
# select the DataFrame
dfA = dfs[dataset]
# Set title
title = "Locations for '{}'".format(dataset)
p_size = 50
alpha = 1
        # Plot up non-coastal locations
df = dfA.loc[dfA['Coastal'] == False, :]
color = 'blue'
label = 'Non-coastal (N={})'.format(int(df.shape[0]))
m = AC.plot_lons_lats_spatial_on_map(title=title, f_size=15,
lons=df['Longitude'].values,
lats=df['Latitude'].values,
label=label, fig=fig, ax=ax, color=color,
return_axis=True)
        # Plot up coastal locations
df = dfA.loc[dfA['Coastal'] == True, :]
color = 'green'
label = 'Coastal (N={})'.format(int(df.shape[0]))
lons = df['Longitude'].values
lats = df['Latitude'].values
m.scatter(lons, lats, edgecolors=color, c=color, marker='o',
s=p_size, alpha=alpha, label=label)
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Loop regions and plot PDFs of variables of interest
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
import seaborn as sns
sns.set()
df = RFR_dict['df']
dfs = {}
# All data
dfs['All data'] = df.copy()
# Get all the data above 65 N
dfs['>=65N'] = df.loc[df['Latitude'] >= 65, :]
# Get all the data below 65 S
dfs['<=65S'] = df.loc[df['Latitude'] <= -65, :]
# - variables to explore?
vars2use = [
'WOA_Nitrate', 'WOA_Salinity', 'WOA_Phosphate', 'WOA_TEMP_K', 'Depth_GEBCO',
]
# plot up the PDF distribution of each of the variables.
datasets = sorted(dfs.keys())
for var2use in vars2use:
print(var2use)
# set a single axis to use.
fig, ax = plt.subplots()
for dataset in datasets:
# select the DataFrame
df = dfs[dataset][var2use]
# Get sample size
N_ = df.shape[0]
# do a dist plot
label = '{} (N={})'.format(dataset, N_)
sns.distplot(df, ax=ax, label=label)
# Make sure the values are correctly scaled
ax.autoscale()
# Beautify
title_str = "PDF of ancillary input for '{}'"
fig.suptitle(title_str.format(var2use))
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Loop regions and plot PDFs of variables of interest
if plt_up_locs4var_conds:
df = RFR_dict['df']
dfs = {}
# Nitrate greater of equal to
var_ = 'Nitrate >=15'
dfs[var_] = df.loc[df['WOA_Nitrate'] >= 15, :]
# Nitrate greater of equal to
var_ = 'Nitrate <=15'
dfs[var_] = df.loc[df['WOA_Nitrate'] <= 15, :]
# Nitrate greater of equal to
var_ = 'Nitrate >=10'
dfs[var_] = df.loc[df['WOA_Nitrate'] >= 10, :]
# Nitrate greater of equal to
var_ = 'Nitrate <=10'
dfs[var_] = df.loc[df['WOA_Nitrate'] <= 10, :]
# Nitrate greater of equal to
var_ = 'Nitrate <=9'
dfs[var_] = df.loc[df['WOA_Nitrate'] <= 9, :]
# Nitrate greater of equal to
var_ = 'Nitrate <=8'
dfs[var_] = df.loc[df['WOA_Nitrate'] <= 8, :]
# Nitrate greater of equal to
var_ = 'Nitrate <=7'
dfs[var_] = df.loc[df['WOA_Nitrate'] <= 7, :]
# Nitrate greater of equal to
var_ = 'Nitrate <=6'
dfs[var_] = df.loc[df['WOA_Nitrate'] <= 6, :]
# Nitrate greater of equal to
var_ = 'Nitrate <=5'
dfs[var_] = df.loc[df['WOA_Nitrate'] <= 5, :]
# Nitrate greater of equal to
var_ = 'Nitrate <=4'
dfs[var_] = df.loc[df['WOA_Nitrate'] <= 4, :]
# Nitrate greater of equal to
var_ = 'Nitrate <=3'
dfs[var_] = df.loc[df['WOA_Nitrate'] <= 3, :]
# Nitrate greater of equal to
var_ = 'Nitrate <=2'
dfs[var_] = df.loc[df['WOA_Nitrate'] <= 2, :]
# Nitrate greater of equal to
var_ = 'Nitrate <=1'
dfs[var_] = df.loc[df['WOA_Nitrate'] <= 1, :]
# Loop by dataset (nitrate values) and plots
import seaborn as sns
sns.reset_orig()
datasets = sorted(dfs.keys())
for dataset in datasets:
fig, ax = plt.subplots()
# select the DataFrame
dfA = dfs[dataset]
# Set title
title = "Locations for '{}'".format(dataset)
p_size = 50
alpha = 1
            # Plot up non-coastal locations
df = dfA.loc[dfA['Coastal'] == False, :]
color = 'blue'
label = 'Non-coastal (N={})'.format(int(df.shape[0]))
m = AC.plot_lons_lats_spatial_on_map(title=title, f_size=15,
lons=df['Longitude'].values,
lats=df['Latitude'].values,
label=label, fig=fig, ax=ax, color=color,
return_axis=True)
            # Plot up coastal locations
df = dfA.loc[dfA['Coastal'] == True, :]
color = 'green'
label = 'Coastal (N={})'.format(int(df.shape[0]))
lons = df['Longitude'].values
lats = df['Latitude'].values
m.scatter(lons, lats, edgecolors=color, c=color, marker='o',
s=p_size, alpha=alpha, label=label)
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Save entire pdf
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
def Driver2analyse_new_data_vs_existing_data():
"""
Driver to plot up all options for old vs. new analysis plots
"""
regions = 'all', 'coastal', 'noncoastal'
for limit_to_400nM in True, False:
for region in regions:
analyse_new_data_vs_existing_data(region=region,
limit_to_400nM=limit_to_400nM)
def analyse_new_data_vs_existing_data(limit_to_400nM=True, region='all',
                                      dpi=320):
"""
    Build a set of analysis plots exploring the difference between new and
    existing datasets
"""
# - Get obs. data
# Get data (inc. additions) and meta data
df_meta = obs.get_iodide_obs_metadata()
pro_df = obs.get_processed_df_obs_mod()
# - Setup plotting
# misc. shared variables
axlabel = '[I$^{-}_{aq}$] (nM)'
    # setup PDF
savetitle = 'Oi_prj_new_vs_existing_datasets'
if limit_to_400nM:
# Exclude v. high values (N=7 - in final dataset)
pro_df = pro_df.loc[pro_df['Iodide'] < 400.]
savetitle += '_limited_to_400nM'
if region == 'all':
savetitle += '_all'
elif region == 'coastal':
pro_df = pro_df.loc[pro_df['Coastal'] == 1, :]
savetitle += '_{}'.format(region)
elif region == 'noncoastal':
pro_df = pro_df.loc[pro_df['Coastal'] == 0, :]
savetitle += '_{}'.format(region)
else:
sys.exit()
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# colours to use?
import seaborn as sns
sns.set(color_codes=True)
current_palette = sns.color_palette("colorblind")
# - Plot up new data ( ~timeseries? )
New_datasets = df_meta.loc[df_meta['In Chance2014?'] == 'N'].Data_Key
var2plot = 'Iodide'
for dataset in New_datasets:
# Select new dataset
tmp_df = pro_df.loc[pro_df['Data_Key'] == dataset]
Cruise = tmp_df['Cruise'].values[0]
# if dates present in DataFrame, update axis
dates4cruise = pd.to_datetime(tmp_df['Date'].values)
if len(set(dates4cruise)) == tmp_df.shape[0]:
tmp_df.index = dates4cruise
xlabel = 'Date'
else:
xlabel = 'Obs #'
tmp_df[var2plot].plot()
ax = plt.gca()
plt.xlabel(xlabel)
plt.ylabel(axlabel)
title_str = "New {} data from '{}' ({})"
plt.title(title_str.format(var2plot.lower(), Cruise, dataset))
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Plot up new data ( PDF of iodide )
var2plot = 'Iodide'
for dataset in New_datasets:
# Select new dataset
tmp_df = pro_df.loc[pro_df['Data_Key'] == dataset]
Cruise = tmp_df['Cruise'].values[0]
# - Plot up PDF plots for the dataset
# plot whole dataset
obs_arr = pro_df[var2plot].values
ax = sns.distplot(obs_arr, axlabel=axlabel,
color='k', label='Whole dataset')
# plot just new data
ax = sns.distplot(tmp_df[var2plot], axlabel=axlabel, label=Cruise,
color='red', ax=ax)
# force y axis extend to be correct
ax.autoscale()
# Beautify
title = "PDF of '{}' {} data ({}) at obs. locations"
plt.title(title.format(dataset, var2plot, axlabel))
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Plot up new data ( PDF of salinity )
var2plot = u'WOA_Salinity'
for dataset in New_datasets:
# Select new dataset
tmp_df = pro_df.loc[pro_df['Data_Key'] == dataset]
Cruise = tmp_df['Cruise'].values[0]
# - Plot up PDF plots for the dataset
# plot whole dataset
obs_arr = pro_df[var2plot].values
ax = sns.distplot(obs_arr, axlabel=axlabel,
color='k', label='Whole dataset')
# plot just new data
ax = sns.distplot(tmp_df[var2plot], axlabel=axlabel, label=Cruise,
color='red', ax=ax)
# force y axis extend to be correct
ax.autoscale()
# Beautify
title = "PDF of '{}' {} data ({}) at obs. locations"
plt.title(title.format(dataset, var2plot, axlabel))
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Plot up new data ( PDF of temperature )
var2plot = 'WOA_TEMP'
for dataset in New_datasets:
# Select new dataset
tmp_df = pro_df.loc[pro_df['Data_Key'] == dataset]
Cruise = tmp_df['Cruise'].values[0]
# - Plot up PDF plots for the dataset
# plot whole dataset
obs_arr = pro_df[var2plot].values
ax = sns.distplot(obs_arr, axlabel=axlabel,
color='k', label='Whole dataset')
# plot just new data
ax = sns.distplot(tmp_df[var2plot], axlabel=axlabel, label=Cruise,
color='red', ax=ax)
# force y axis extend to be correct
ax.autoscale()
# Beautify
title = "PDF of '{}' {} data ({}) at obs. locations"
plt.title(title.format(dataset, var2plot, axlabel))
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Plot up new data ( PDF of depth )
var2plot = u'Depth_GEBCO'
for dataset in New_datasets:
# Select new dataset
tmp_df = pro_df.loc[pro_df['Data_Key'] == dataset]
Cruise = tmp_df['Cruise'].values[0]
# - Plot up PDF plots for the dataset
# plot whole dataset
obs_arr = pro_df[var2plot].values
ax = sns.distplot(obs_arr, axlabel=axlabel,
color='k', label='Whole dataset')
# plot just new data
ax = sns.distplot(tmp_df[var2plot], axlabel=axlabel, label=Cruise,
color='red', ax=ax)
# force y axis extend to be correct
ax.autoscale()
# Beautify
title = "PDF of '{}' {} data ({}) at obs. locations"
plt.title(title.format(dataset, var2plot, axlabel))
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# -- Save entire pdf
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
def get_diagnostic_plots_analysis4observations(inc_all_extract_vars=False,
include_hexbin_plots=False,
model_name='TEMP+DEPTH+SAL',
show_plot=False, dpi=320):
"""
Produce a PDF of comparisons of observations in dataset inventory
"""
# - Setup plotting
# misc. shared variables
axlabel = '[I$^{-}_{aq}$] (nM)'
    # setup PDF
savetitle = 'Oi_prj_obs_plots'
if inc_all_extract_vars:
savetitle += '_all_extract_vars'
include_hexbin_plots = True
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# colours to use?
import seaborn as sns
# - Get obs. data
# Get data (inc. additions) and meta data
df_meta = obs.get_iodide_obs_metadata()
pro_df = obs.get_processed_df_obs_mod()
LOCAL_model_name = 'RFR({})'.format(model_name)
pro_df[LOCAL_model_name] = get_model_predictions4obs_point(pro_df,
model_name=model_name)
# Exclude v. high values (N=4 - in intial dataset)
# Exclude v. high values (N=7 - in final dataset)
pro_df = pro_df.loc[pro_df['Iodide'] < 400.]
# Add coastal flag to data
coastal_flag = 'coastal_flagged'
pro_df = get_coastal_flag(df=pro_df, coastal_flag=coastal_flag)
non_coastal_df = pro_df.loc[pro_df['coastal_flagged'] == 0]
dfs = {'Open-Ocean': non_coastal_df, 'All': pro_df}
# TODO ... add test dataset in here
# Get the point data for params...
point_ars_dict = {}
for key_ in dfs.keys():
point_ars_dict[key_] = {
'Obs.': dfs[key_]['Iodide'].values,
'MacDonald et al (2014)': dfs[key_]['MacDonald2014_iodide'].values,
'Chance et al (2014)': dfs[key_][u'Chance2014_STTxx2_I'].values,
'Chance et al (2014) - Mutivariate': dfs[key_][
u'Chance2014_Multivariate'
].values,
LOCAL_model_name: dfs[key_][LOCAL_model_name],
}
point_ars_dict = point_ars_dict['Open-Ocean']
parm_name_dict = {
'MacDonald et al (2014)': 'MacDonald2014_iodide',
'Chance et al (2014)': u'Chance2014_STTxx2_I',
'Chance et al (2014) - Mutivariate': u'Chance2014_Multivariate',
LOCAL_model_name: LOCAL_model_name,
}
point_data_names = sorted(point_ars_dict.keys())
point_data_names.pop(point_data_names.index('Obs.'))
param_names = point_data_names
# setup color dictionary
current_palette = sns.color_palette("colorblind")
colour_dict = dict(zip(param_names, current_palette[:len(param_names)]))
colour_dict['Obs.'] = 'K'
# --- Plot up locations of old and new data
import seaborn as sns
sns.reset_orig()
plot_up_data_locations_OLD_and_new(save_plot=False, show_plot=False)
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# --- Plot up all params against coastal data
import seaborn as sns
sns.set(color_codes=True)
sns.set_context("paper")
xlabel = 'Obs.'
# just non-coastal
for param_name in sorted(parm_name_dict.keys()):
Y = non_coastal_df[parm_name_dict[param_name]].values
X = non_coastal_df['Iodide'].values
title = 'Regression plot of Open-ocean [I$^{-}_{aq}$] (nM) \n'
title = title + '{} vs {} parameterisation'.format(xlabel, param_name)
ax = sns.regplot(x=X, y=Y)
# get_hexbin_plot(x=X, y=Y, xlabel=None, ylabel=point_name, log=False,
# title=None, add_ODR_trendline2plot=True)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(param_name)
# Adjust X and Y range
max_val = max(max(X), max(Y))
smidgen = max_val * 0.05
plt.xlim(0-smidgen, max_val+smidgen)
plt.ylim(0-smidgen, max_val+smidgen)
# Add 1:1
one2one = np.arange(0, max_val*2)
plt.plot(one2one, one2one, color='k', linestyle='--', alpha=0.75,
label='1:1')
plt.legend()
if show_plot:
plt.show()
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# --- Plot up all params against all data
import seaborn as sns
sns.set(color_codes=True)
sns.set_context("paper")
xlabel = 'Obs.'
X = point_ars_dict[xlabel]
for param_name in point_data_names:
Y = point_ars_dict[param_name]
title = 'Regression plot of all [I$^{-}_{aq}$] (nM) \n'
title = title + '{} vs {} parameterisation'.format(xlabel, param_name)
ax = sns.regplot(x=X, y=Y)
# get_hexbin_plot(x=X, y=Y, xlabel=None, ylabel=point_name, log=False,
# title=None, add_ODR_trendline2plot=True)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(param_name)
# Adjust X and Y range
max_val = max(max(X), max(Y))
smidgen = max_val * 0.05
plt.xlim(0-smidgen, max_val+smidgen)
plt.ylim(0-smidgen, max_val+smidgen)
# Add 1:1
one2one = np.arange(0, max_val*2)
plt.plot(one2one, one2one, color='k', linestyle='--', alpha=0.75,
label='1:1')
plt.legend()
if show_plot:
plt.show()
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# ---- Plot up new data
New_datasets = df_meta.loc[df_meta['In Chance2014?'] == 'N'].Data_Key
var2plot = 'Iodide'
for dataset in New_datasets:
tmp_df = pro_df.loc[pro_df['Data_Key'] == dataset]
Cruise = tmp_df['Cruise'].values[0]
# if dates present in DataFrame, update axis
dates4cruise = pd.to_datetime(tmp_df['Date'].values)
if len(set(dates4cruise)) == tmp_df.shape[0]:
tmp_df.index = dates4cruise
xlabel = 'Date'
else:
xlabel = 'Obs #'
tmp_df[var2plot].plot()
ax = plt.gca()
# ax.axhline(30, color='red', label='Chance et al 2014 coastal divide')
plt.xlabel(xlabel)
plt.ylabel(axlabel)
title_str = "New {} data from '{}' ({})"
plt.title(title_str.format(var2plot.lower(), Cruise, dataset))
# plt.legend()
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# Plot up Salinity
# var2plot = 'WOA_Salinity'
# for dataset in New_datasets:
# tmp_df = pro_df.loc[ pro_df['Data_Key'] == dataset ]
# tmp_df[var2plot].plot()
# ax= plt.gca()
# ax.axhline(30, color='red', label='Chance et al 2014 coastal divide')
# plt.xlabel( 'Obs #')
# plt.ylabel( 'PSU' )
# plt.title( '{} during cruise from {}'.format( var2plot, dataset ) )
# plt.legend()
# AC.plot2pdfmulti( pdff, savetitle, dpi=dpi )
# plt.close()
    # ---- Plot up key comparisons for coastal and non-coastal data
for key_ in sorted(dfs.keys()):
# --- Ln(Iodide) vs. T
ylabel = 'ln(Iodide)'
Y = dfs[key_][ylabel].values
xlabel = 'WOA_TEMP'
X = dfs[key_][xlabel].values
# Plot up
ax = sns.regplot(x=X, y=Y)
# Beautify
title = '{} vs {} ({} data)'.format(ylabel, xlabel, key_)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if show_plot:
plt.show()
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# --- Ln(Iodide) vs. 1/T
ylabel = 'ln(Iodide)'
Y = dfs[key_][ylabel].values
xlabel = 'WOA_TEMP_K'
X = 1 / dfs[key_][xlabel].values
# Plot up
ax = sns.regplot(x=X, y=Y)
# Beautify
title = '{} vs {} ({} data)'.format(ylabel, '1/'+xlabel, key_)
plt.title(title)
        plt.xlabel('1/'+xlabel)
plt.ylabel(ylabel)
if show_plot:
plt.show()
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
        # --- Ln(Iodide) vs. Salinity
ylabel = 'ln(Iodide)'
Y = dfs[key_][ylabel].values
xlabel = 'WOA_Salinity'
X = dfs[key_][xlabel].values
# Plot up
ax = sns.regplot(x=X, y=Y)
# Beautify
title = '{} vs {} ({} data)'.format(ylabel, xlabel, key_)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if show_plot:
plt.show()
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# ---
if inc_all_extract_vars:
for key_ in sorted(dfs.keys()):
            # List extracted variables
            extracted_vars = [
                u'WOA_TEMP', u'WOA_Nitrate', u'WOA_Salinity',
                u'WOA_Dissolved_O2', u'WOA_Phosphate', u'WOA_Silicate',
                u'Depth_GEBCO', u'SeaWIFs_ChlrA',
                u'WOA_MLDpt', u'WOA_MLDpt_max', u'WOA_MLDpt_sum',
                u'WOA_MLDpd', u'WOA_MLDpd_max', u'WOA_MLDpd_sum',
                u'WOA_MLDvd', u'WOA_MLDvd_max', u'WOA_MLDvd_sum',
                u'DOC', u'DOCaccum', u'Prod', u'SWrad'
            ]
# Loop extraced variables and plot
for var_ in extracted_vars:
ylabel = var_
xlabel = 'Iodide'
tmp_df = dfs[key_][[xlabel, ylabel]]
# Kludge to remove '--' from MLD columns
for col in tmp_df.columns:
bool_ = [i == '--' for i in tmp_df[col].values]
tmp_df.loc[bool_, :] = np.NaN
if tmp_df[col].dtype == 'O':
tmp_df[col] = pd.to_numeric(tmp_df[col].values,
errors='coerce')
print(var_, tmp_df.min(), tmp_df.max())
# X = dfs[key_][xlabel].values
                # Plot up
                ax = sns.regplot(x=xlabel, y=ylabel, data=tmp_df)
# Beautify
title = '{} vs {} ({} data)'.format(ylabel, xlabel, key_)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# --- Plot up Just observations and predicted values from models as PDF
import seaborn as sns
sns.set(color_codes=True)
sns.set_context("paper")
# plot 1st model...
point_name = 'Obs.'
arr = point_ars_dict[point_name]
ax = sns.distplot(arr, axlabel=axlabel, label=point_name,
color=colour_dict[point_name])
# Add MacDonald, Chance...
for point_name in point_data_names:
arr = point_ars_dict[point_name]
ax = sns.distplot(arr, axlabel=axlabel, label=point_name,
color=colour_dict[point_name])
# force y axis extend to be correct
ax.autoscale()
# Beautify
plt.title('PDF of predicted iodide ({}) at obs. points'.format(axlabel))
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# --- Plot up Just observations and predicted values from models as CDF
import seaborn as sns
sns.set(color_codes=True)
sns.set_context("paper")
# plot 1st model...
point_name = 'Obs.'
arr = point_ars_dict[point_name]
ax = sns.distplot(arr, axlabel=axlabel, label=point_name,
color=colour_dict[point_name],
hist_kws=dict(cumulative=True), kde_kws=dict(cumulative=True))
# Add MacDonald, Chance...
for point_name in point_data_names:
arr = point_ars_dict[point_name]
ax = sns.distplot(arr, axlabel=axlabel, label=point_name,
color=colour_dict[point_name],
hist_kws=dict(cumulative=True), kde_kws=dict(cumulative=True))
# force y axis extend to be correct
ax.autoscale()
# Beautify
plt.title('CDF of predicted iodide ({}) at obs. points'.format(axlabel))
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# --- Plot up parameterisations as regression
# import seaborn as sns; sns.set(color_codes=True)
# sns.set_context("paper")
# xlabel = 'Obs.'
# X = point_ars_dict[xlabel]
# for point_name in point_data_names:
# title = 'Regression plot of [I$^{-}_{aq}$] (nM) '
# title = title + '{} vs {} parameterisation'.format(xlabel, point_name )
# Y = point_ars_dict[point_name]
# ax = sns.regplot(x=X, y=Y )
# # get_hexbin_plot(x=X, y=Y, xlabel=None, ylabel=point_name, log=False,
# # title=None, add_ODR_trendline2plot=True)
# plt.title(title)
# plt.xlabel(xlabel)
# plt.ylabel(point_name)
# # Save to PDF and close plot
# AC.plot2pdfmulti( pdff, savetitle, dpi=dpi )
# plt.close()
# --- Plot up parameterisations as hexbin plot
if include_hexbin_plots:
xlabel = 'Obs.'
X = point_ars_dict[xlabel]
for point_name in point_data_names:
title = 'Hexbin of [I$^{-}_{aq}$] (nM) \n'
title = title + '{} vs {} parameterisation'.format(xlabel,
point_name)
Y = point_ars_dict[point_name]
get_hexbin_plot(x=X, y=Y, xlabel=None, ylabel=point_name,
log=False, title=title, add_ODR_trendline2plot=True)
# plt.show()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# -- Save entire pdf
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
def plot_PDF_iodide_obs_mod(bins=10):
"""
plot up PDF of predicted values vs. observations
"""
import matplotlib.pyplot as plt
import seaborn as sns
# Location of data to plot
folder = utils.get_file_locations('data_root')
f = 'Iodine_obs_WOA.csv'
df = pd.read_csv(folder+f, encoding='utf-8')
# Just select non-coastal data
print(df.shape)
df = df[~(df['Coastal'] == True)]
# df = df[ ~(df['Coastal']==True) ]
# Salinity greater than 30
# df = df[ (df['Salinity'] > 30 ) ]
print(df.shape)
# Plot up data
# Macdonaly et al 2014 values
ax = sns.distplot(df['MacDonald2014_iodide'],
label='MacDonald2014_iodide', bins=bins)
# Chance et al 2014 values
ax = sns.distplot(df['Chance2014_STTxx2_I'],
label='Chance2014_STTxx2_I', bins=bins)
# Iodide obs.
ax = sns.distplot(df['Iodide'], label='Iodide, nM', bins=bins)
# Update aesthetics and show plot?
plt.xlim(-50, 400)
plt.legend(loc='upper right')
plt.show()
def plt_predicted_iodide_vs_obs_Q1_Q3(dpi=320, show_plot=False,
limit_to_400nM=False, inc_iodide=False):
"""
Plot predicted iodide on a latitudinal basis
NOTES
- This is the observation-location equivalent of the plot produced to show
predicted values for all global locations
(Oi_prj_global_predicted_vals_vs_lat)
"""
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
sns.set_context("paper")
# Get data
folder = utils.get_file_locations('data_root')
f = 'Iodine_obs_WOA.csv'
df = pd.read_csv(folder+f, encoding='utf-8')
# Local variables
# sub select variables of interest.
params2plot = [
'Chance2014_STTxx2_I', 'MacDonald2014_iodide',
]
# Set names to overwrite variables with
rename_titles = {u'Chance2014_STTxx2_I': 'Chance et al. (2014)',
u'MacDonald2014_iodide': 'MacDonald et al. (2014)',
'RFR(Ensemble)': 'RFR(Ensemble)',
'Iodide': 'Obs.',
# u'Chance2014_Multivariate': 'Chance et al. (2014) (Multi)',
}
# filename to save values
filename = 'Oi_prj_global_predicted_vals_vs_lat_only_obs_locs'
# include iodide observations too?
if inc_iodide:
params2plot += ['Iodide']
filename += '_inc_iodide'
CB_color_cycle = AC.get_CB_color_cycle()
color_d = dict(zip(params2plot, CB_color_cycle))
#
if limit_to_400nM:
df = df.loc[df['Iodide'] < 400, :]
filename += '_limited_400nM'
# - Process data
# Add binned mean
# bins = np.arange(-70, 70, 10 )
bins = np.arange(-80, 90, 10)
# groups = df.groupby( np.digitize(df[u'Latitude'], bins) )
groups = df.groupby(pd.cut(df['Latitude'], bins))
# Take means of groups
# groups_avg = groups.mean()
groups_des = groups.describe().unstack()
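# NOTE: groups_des holds per-latitude-band summary statistics from pandas'
# describe(); the '25%', '50%' and '75%' columns used below are the
# quartiles and median for each 10 degree bin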
# - setup plotting
fig, ax = plt.subplots(dpi=dpi)
# - Plot up
X = groups_des['Latitude']['mean'].values # groups_des.index
# X =bins
print(groups_des)
# plot groups
for var_ in params2plot:
# Get quartiles
Q1 = groups_des[var_]['25%'].values
Q3 = groups_des[var_]['75%'].values
# Add median
ax.plot(X, groups_des[var_]['50%'].values,
color=color_d[var_], label=rename_titles[var_])
# add shading for Q1/Q3
ax.fill_between(X, Q1, Q3, alpha=0.2, color=color_d[var_])
# - Plot observations
# Highlight coastal obs
tmp_df = df.loc[df['Coastal'] == True, :]
X = tmp_df['Latitude'].values
Y = tmp_df['Iodide'].values
plt.scatter(X, Y, color='k', marker='D', facecolor='none', s=3,
label='Coastal obs.')
# non-coastal obs
tmp_df = df.loc[df['Coastal'] == False, :]
X = tmp_df['Latitude'].values
Y = tmp_df['Iodide'].values
plt.scatter(X, Y, color='k', marker='D', facecolor='k', s=3,
label='Non-coastal obs.')
# - Beautify
# Add legend
plt.legend()
# Limit plotted y axis extent
plt.ylim(-20, 420)
plt.ylabel('[I$^{-}_{aq}$] (nM)')
plt.xlabel('Latitude ($^{\\rm o}$N)')
plt.savefig(filename, dpi=dpi)
if show_plot:
plt.show()
plt.close()
def plot_up_data_locations_OLD_and_new(save_plot=True, show_plot=False,
extension='eps', dpi=720):
"""
Plot up old and new data on map
"""
import seaborn as sns
sns.reset_orig()
# - Setup plot
figsize = (11, 5)
fig, ax = plt.subplots(figsize=figsize, dpi=dpi)
p_size = 25
alpha = 0.5
window = True
axis_titles = False
# - Get all observational data
df, md_df = obs.get_iodide_obs()
# Separate into new and old data
ChanceStr = 'In Chance2014?'
df[ChanceStr] = None
for ds in list(set(md_df['Data_Key'])):
bool = df['Data_Key'] == ds
IsChance = md_df.loc[md_df['Data_Key'] == ds, ChanceStr].values[0]
df.loc[bool, ChanceStr] = IsChance
new_metadata_df = md_df.loc[
md_df['In Chance2014?'] == 'N'
]
new_Data_Keys = new_metadata_df['Data_Key'].values
bool = df['Data_Key'].isin(new_Data_Keys)
# old data
df1 = df.loc[~bool]
# new data
df2 = df.loc[bool]
# --- add existing data
# Get existing data... (Chance et al 2014 )
# folder = utils.get_file_locations('data_root')
# f = 'Iodine_obs_WOA.csv'
# df1 = pd.read_csv(folderf, encoding='utf-8' )
# Select lons and lats
lats1 = df1['Latitude'].values
lons1 = df1['Longitude'].values
# Plot up and return basemap axis
label = 'Chance et al. (2014) (N={})'.format(
df1['Iodide'].dropna().shape[0])
m = AC.plot_lons_lats_spatial_on_map(lons=lons1, lats=lats1,
fig=fig, ax=ax, color='blue', label=label,
alpha=alpha,
window=window, axis_titles=axis_titles,
return_axis=True, p_size=p_size)
# - Add in new data following Chance2014?
# this is ~ 5 samples from the Atlantic (and some from Indian ocean?)
# ... get this at a later date...
# - Add in SOE-9 data
# f = 'Iodine_climatology_ISOE9.xlsx'
# df2 = pd.read_excel(folder'/Liselotte_data/'+f, skiprows=1 )
# Data from SOE-9
lats2 = df2['Latitude'].values
lons2 = df2['Longitude'].values
color = 'red'
label = 'Additional data (N={})'
label = label.format(df2['Iodide'].dropna().shape[0])
m.scatter(lons2, lats2, edgecolors=color, c=color, marker='o',
s=p_size, alpha=alpha, label=label)
# - Save out / show
leg = plt.legend(fancybox=True, loc='upper right')
leg.get_frame().set_alpha(0.95)
if save_plot:
savename = 'Oi_prj_Obs_locations.{}'.format(extension)
plt.savefig(savename, bbox_inches='tight', dpi=dpi)
if show_plot:
plt.show()
def plot_up_data_locations_OLD_and_new_CARTOPY(save_plot=True, show_plot=False,
extension='eps', dpi=720):
"""
Plot up old and new data on map
"""
import seaborn as sns
sns.reset_orig()
# - Setup plot
# figsize = (11, 5)
figsize = (11*2, 5*2)
fig = plt.figure(figsize=figsize, dpi=dpi)
# fig, ax = plt.subplots(figsize=figsize, dpi=dpi)
fig, ax = None, None
p_size = 15
alpha = 0.5
window = True
axis_titles = False
# - Get all observational data
df, md_df = obs.get_iodide_obs()
# Separate into new and old data
ChanceStr = 'In Chance2014?'
df[ChanceStr] = None
for ds in list(set(md_df['Data_Key'])):
bool = df['Data_Key'] == ds
IsChance = md_df.loc[md_df['Data_Key'] == ds, ChanceStr].values[0]
df.loc[bool, ChanceStr] = IsChance
new_metadata_df = md_df.loc[
md_df['In Chance2014?'] == 'N'
]
new_Data_Keys = new_metadata_df['Data_Key'].values
bool = df['Data_Key'].isin(new_Data_Keys)
# old data
df1 = df.loc[~bool]
# new data
df2 = df.loc[bool]
# --- add existing data
# Get existing data... (Chance et al 2014 )
# folder = utils.get_file_locations('data_root')
# f = 'Iodine_obs_WOA.csv'
# df1 = pd.read_csv(folderf, encoding='utf-8' )
# Select lons and lats
lats1 = df1['Latitude'].values
lons1 = df1['Longitude'].values
# Plot up and return basemap axis
label = 'Chance et al. (2014) (N={})'.format(
df1['Iodide'].dropna().shape[0])
ax = plot_lons_lats_spatial_on_map_CARTOPY(lons=lons1, lats=lats1,
fig=fig, ax=ax, color='blue', label=label,
alpha=alpha, dpi=dpi,
# window=window, axis_titles=axis_titles,
# return_axis=True,
# add_detailed_map=True,
add_background_image=False,
add_gridlines=False,
s=p_size)
# - Add in new data following Chance2014?
# this is ~ 5 samples from the Atlantic (and some from Indian ocean?)
# ... get this at a later date...
# - Add in SOE-9 data
# f = 'Iodine_climatology_ISOE9.xlsx'
# df2 = pd.read_excel(folder'/Liselotte_data/'+f, skiprows=1 )
# Data from SOE-9
lats2 = df2['Latitude'].values
lons2 = df2['Longitude'].values
color = 'red'
label = 'Additional data (N={})'
label = label.format(df2['Iodide'].dropna().shape[0])
ax.scatter(lons2, lats2, edgecolors=color, c=color, marker='o',
s=p_size, alpha=alpha, label=label, zorder=1000)
# - Save out / show
leg = plt.legend(fancybox=True, loc='upper right', prop={'size': 6})
leg.get_frame().set_alpha(0.95)
if save_plot:
savename = 'Oi_prj_Obs_locations.{}'.format(extension)
plt.savefig(savename, bbox_inches='tight', dpi=dpi)
if show_plot:
plt.show()
def map_plot_of_locations_of_obs():
"""
Plot up the locations of the observational data as a double check
"""
import matplotlib.pyplot as plt
# - Settings
plot_all_as_one_plot = True
show = True
# - Get data
folder = utils.get_file_locations('data_root')
f = 'Iodine_obs_WOA.csv'
df = pd.read_csv(folder+f, encoding='utf-8')
# only consider non-coastal locations
print(df.shape)
# df = df[ df['Coastal'] == 1.0 ] # select coastal locations
# df = df[ df['Coastal'] == 0.0 ] # select non coastal locations
# only consider locations with salinity > 30
df = df[df['Salinity'] > 30.0]  # select only data with salinity > 30
print(df.shape)
# Get coordinate values
all_lats = df['Latitude'].values
all_lons = df['Longitude'].values
# Get sub lists of unique identifiers for datasets
datasets = list(set(df['Data_Key']))
n_datasets = len(datasets)
# - Setup plot
#
f_size = 10
marker = 'o'
p_size = 75
dpi = 600
c_list = AC.color_list(int(n_datasets*1.25))
print(c_list, len(c_list))
# plot up white background
arr = np.zeros((72, 46))
vmin, vmax = 0, 0
# - just plot up all sites to test
if plot_all_as_one_plot:
# Setup a blank basemap plot
fig = plt.figure(figsize=(12, 6), dpi=dpi,
facecolor='w', edgecolor='k')
ax1 = fig.add_subplot(111)
plt, m = AC.map_plot(arr.T, return_m=True, cmap=plt.cm.binary,
f_size=f_size*2,
fixcb=[
vmin, vmax], ax=ax1, no_cb=True, resolution='c',
ylabel=True, xlabel=True)
# Scatter plot of points.
m.scatter(all_lons, all_lats, edgecolors=c_list[1], c=c_list[1],
marker=marker, s=p_size, alpha=1,)
# Save and show?
plt.savefig('Iodide_dataset_locations.png', dpi=dpi, transparent=True)
if show:
plt.show()
else:
chunksize = 5
chunked_list = AC.chunks(datasets, chunksize)
counter = 0
for n_chunk_, chunk_ in enumerate(chunked_list):
# Setup a blank basemap plot
fig = plt.figure(figsize=(12, 6), dpi=dpi, facecolor='w',
edgecolor='k')
ax1 = fig.add_subplot(111)
plt, m = AC.map_plot(arr.T, return_m=True, cmap=plt.cm.binary,
f_size=f_size*2,
fixcb=[vmin, vmax], ax=ax1,
no_cb=True, resolution='c',
ylabel=True, xlabel=True)
# Loop all datasets
for n_dataset_, dataset_ in enumerate(chunk_):
print(n_chunk_, counter, dataset_, c_list[counter])
#
df_sub = df[df['Data_Key'] == dataset_]
lats = df_sub['Latitude'].values
lons = df_sub['Longitude'].values
# Plot up and save.
color = c_list[n_chunk_::chunksize][n_dataset_]
m.scatter(lons, lats, edgecolors=color, c=color,
marker=marker, s=p_size, alpha=.5, label=dataset_)
# add one to counter
counter += 1
plt.legend()
# save chunk...
plt.savefig('Iodide_datasets_{}.png'.format(n_chunk_), dpi=dpi,
transparent=True)
if show:
plt.show()
def plot_up_parameterisations(df=None, save2pdf=True, show=False):
"""
Plot up parameterisations
"""
import matplotlib.pyplot as plt
import seaborn as sns
# Consider both Chance and MacDonald parameterisations
params = [i for i in df.columns if ('Mac' in i)]
params += [i for i in df.columns if ('Chance' in i)]
# get details of parameterisations
# filename='Chance_2014_Table2_PROCESSED_17_04_19.csv'
filename = 'Chance_2014_Table2_PROCESSED.csv'
folder = utils.get_file_locations('data_root')
param_df = pd.read_csv(folder+filename)
# only consider non-coastal locations?
print(df.shape)
# df = df[ df['Coastal'] == 1.0 ] # select coastal locations
# df = df[ df['Coastal'] == 0.0 ] # select non coastal locations
# only consider locations with salinity > 30
df = df[df['Salinity'] > 30.0]  # select only data with salinity > 30
print(df.shape)
# df = df[ df['Iodide'] < 300 ]
# Setup pdf
if save2pdf:
dpi = 320
savetitle = 'Chance2014_params_vs_recomputed_params'
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# - Loop parameterisations
# for param in params[:2]: # Only loop two if debugging
for param in params:
# Get meta data for parameter
sub_df = param_df[param_df['TMS ID'] == param]
# Setup a new figure
fig = plt.figure()
# Extract Iodide and param data...
# Take logs of data?
iodide_var = 'Iodide'
try:
print(sub_df['ln(iodide)'].values[0])
if sub_df['ln(iodide)'].values[0] == 'yes':
iodide_var = 'ln(Iodide)'
print('Using log values for ', param)
else:
print('Not using log values for ', param)
except:
print('FAILED to try and use log data for ', param)
X = df[iodide_var].values
# And parameter data?
Y = df[param].values
# Remove nans...
tmp_df = pd.DataFrame(np.array([X, Y]).T, columns=['X', 'Y'])
print(tmp_df.shape)
tmp_df = tmp_df.dropna()
print(tmp_df.shape)
X = tmp_df['X'].values
Y = tmp_df['Y'].values
# PLOT UP as X vs. Y scatter...
title = '{} ({})'.format(param, sub_df['Independent variable'].values)
ax = mk_X_Y_scatter_plot_param_vs_iodide(X=X, Y=Y, title=title,
iodide_var=iodide_var)
# Add Chance2014's R^2 to plot...
try:
R2 = str(sub_df['R2'].values[0])
c = str(sub_df['c'].values[0])
m = str(sub_df['m'].values[0])
eqn = 'y={}x+{}'.format(m, c)
print(R2, c, m, eqn)
alt_text = 'Chance et al (2014) R$^2$'+':{} ({})'.format(R2, eqn)
ax.annotate(alt_text, xy=(0.5, 0.90), textcoords='axes fraction',
fontsize=10)
except:
print('FAILED to get Chance et al values for', param)
# plt.text( 0.75, 0.8, alt_text, ha='center', va='center')
# show/save?
if save2pdf:
# Save out figure
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show:
plt.show()
del fig
# save entire pdf
if save2pdf:
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
plt.close("all")
def mk_X_Y_scatter_plot_param_vs_iodide(X=None, Y=None, iodide_var=None,
title=None):
"""
Plot up an X vs. Y plot of a parameterisation of iodide (Y) against observed iodide (X)
"""
import matplotlib.pyplot as plt
import seaborn as sns
# Plot up
plt.scatter(X, Y, marker='+', alpha=0.5)
plt.title(title)
plt.ylabel('Param. [Iodide], nM')
plt.xlabel('Obs. [{}], nM'.format(iodide_var))
# Add a trendline
ax = plt.gca()
AC.Trendline(ax, X=X, Y=Y, color='green')
# Adjust x and y axis limits
round_max_X = AC.myround(max(X), 50, round_up=True)
round_max_Y = AC.myround(max(Y), 50, round_up=True)
if iodide_var == 'ln(Iodide)':
round_max_X = AC.myround(max(X), 5, round_up=True)
round_max_Y = AC.myround(max(Y), 5, round_up=True)
plt.xlim(-(round_max_X/40), round_max_X)
plt.ylim(-(round_max_Y/40), round_max_Y)
# Add an N value to plot
alt_text = '(N={})'.format(len(X))
ax.annotate(alt_text, xy=(0.8, 0.10),
textcoords='axes fraction', fontsize=10)
return ax
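# Example usage (sketch; assumes paired arrays of observed iodide (X) and
# parameterised iodide (Y), e.g. as extracted in plot_up_parameterisations):
# ax = mk_X_Y_scatter_plot_param_vs_iodide(X=X, Y=Y, iodide_var='Iodide',
#                                          title='Chance2014_STTxx2_I')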
def compare_obs_ancillaries_with_extracted_values_WINDOW(dpi=320, df=None):
"""
Plot up a window plot of the observed vs. climatological ancillaries
"""
import seaborn as sns
sns.set(color_codes=True)
current_palette = sns.color_palette("colorblind")
sns.set_style("darkgrid")
sns.set_context("paper", font_scale=0.75)
# Get the observational data
if isinstance(df, type(None)):
df = obs.get_processed_df_obs_mod() # NOTE this df contains values >400nM
# - Map observational variables to their shared extracted variables
all_vars = df.columns.tolist()
# Dictionary
obs_var_dict = {
# Temperature
'WOA_TEMP': 'Temperature',
# Chlorophyll-a
'SeaWIFs_ChlrA': 'Chl-a',
# Nitrate
'WOA_Nitrate': 'Nitrate',
# Salinity
'WOA_Salinity': 'Salinity'
# There is also 'Nitrite' and 'Ammonium'
}
# units dict?
units_dict = {
'SeaWIFs_ChlrA': "mg m$^{-3}$", # Chance et al uses micro g/L
'WOA_Salinity': 'PSU', # https://en.wikipedia.org/wiki/Salinity
'WOA_Nitrate': "$\mu$M",
'WOA_TEMP': '$^{o}$C',
}
# Colors to use
CB_color_cycle = AC.get_CB_color_cycle()
# set the order the dict keys are accessed
vars_sorted = list(sorted(obs_var_dict.keys()))[::-1]
# setup plot
fig = plt.figure(dpi=dpi, figsize=(5, 7.35))
# - 1st plot Salinity ( all and >30 PSU )
# - All salinity data
var2plot = 'WOA_Salinity'
plot_n = 1
color = CB_color_cycle[0]
# Make a new axis
ax = fig.add_subplot(3, 2, plot_n, aspect='equal')
# Get the data
df_tmp = df[[obs_var_dict[var2plot], var2plot]].dropna()
N_ = int(df_tmp[[var2plot]].shape[0])
MSE_ = np.mean((df_tmp[obs_var_dict[var2plot]] - df_tmp[var2plot])**2)
RMSE_ = np.sqrt(MSE_)
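# (RMSE is the root-mean-square difference between obs. and extracted values)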
print(N_, MSE_, RMSE_)
X = df_tmp[obs_var_dict[var2plot]].values
Y = df_tmp[var2plot].values
# Plot up the data as a scatter
ax.scatter(X, Y, edgecolors=color, facecolors='none', s=5)
# Label Y axis
if plot_n in np.arange(1, 6)[::2]:
ax.set_ylabel('Extracted')
# Title the plots
title = 'Salinity (all, {})'.format(units_dict[var2plot])
ax.text(0.5, 1.05, title, horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes)
# Add N value
stats_str = 'N={} \nRMSE={:.3g}'.format(N_, RMSE_)
ax.text(0.05, 0.9, stats_str, horizontalalignment='left',
verticalalignment='center', transform=ax.transAxes)
# Add a 1:1 line
ax_max = df_tmp.max().max()
ax_max = AC.myround(ax_max, 5, round_up=True) * 1.05
ax_min = df_tmp.min().min()
ax_min = ax_min - (ax_max*0.05)
x_121 = np.arange(ax_min, ax_max*1.5)
ax.plot(x_121, x_121, alpha=0.5, color='k', ls='--')
# Add ODR line
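# (ODR allows for uncertainty in both x and y, unlike ordinary least squares)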
xvalues, Y_ODR = AC.get_linear_ODR(x=X, y=Y, xvalues=x_121,
return_model=False, maxit=10000)
ax.plot(xvalues, Y_ODR, color=color, ls='--')
# Force axis extents
ax.set_aspect('equal')
ax.set_xlim(ax_min, ax_max)
ax.set_ylim(ax_min, ax_max)
ax.set_aspect('equal')
# - Salinity >= 30 PSU only
var2plot = 'WOA_Salinity'
plot_n = 2
color = CB_color_cycle[0]
# Make a new axis
ax = fig.add_subplot(3, 2, plot_n, aspect='equal')
# Get the data
df_tmp = df[[obs_var_dict[var2plot], var2plot]].dropna()
# Select only data with salinity >= 30 PSU
df_tmp = df_tmp.loc[df_tmp[obs_var_dict[var2plot]] >= 30, :]
N_ = int(df_tmp[[var2plot]].shape[0])
MSE_ = np.mean((df_tmp[obs_var_dict[var2plot]] - df_tmp[var2plot])**2)
RMSE_ = np.sqrt(MSE_)
print(N_, MSE_, RMSE_)
X = df_tmp[obs_var_dict[var2plot]].values
Y = df_tmp[var2plot].values
# plot up
ax.scatter(X, Y, edgecolors=color, facecolors='none', s=5)
# label Y axis
if plot_n in np.arange(1, 6)[::2]:
ax.set_ylabel('Extracted')
# title the plots
title = 'Salinity ($\geq$ 30, {})'.format(units_dict[var2plot])
ax.text(0.5, 1.05, title, horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes)
# Add N value
stats_str = 'N={} \nRMSE={:.3g}'.format(N_, RMSE_)
ax.text(0.05, 0.9, stats_str, horizontalalignment='left',
verticalalignment='center', transform=ax.transAxes)
# add a 1:1 line
ax_max = df_tmp.max().max()
ax_max = AC.myround(ax_max, 1, round_up=True) * 1.05
ax_min = 29
x_121 = np.arange(ax_min, ax_max*1.5)
ax.plot(x_121, x_121, alpha=0.5, color='k', ls='--')
# add ODR line
xvalues, Y_ODR = AC.get_linear_ODR(x=X, y=Y, xvalues=x_121,
return_model=False, maxit=10000)
ax.plot(xvalues, Y_ODR, color=color, ls='--')
# Force axis extents
ax.set_aspect('equal')
ax.set_xlim(ax_min, ax_max)
ax.set_ylim(ax_min, ax_max)
ax.set_aspect('equal')
# --- Loop and plot
for n_var2plot, var2plot in enumerate(['WOA_TEMP', 'WOA_Nitrate', ]):
plot_n = 2 + 1 + n_var2plot
color = CB_color_cycle[plot_n]
# Make a new axis
ax = fig.add_subplot(3, 2, plot_n, aspect='equal')
# Get the data
df_tmp = df[[obs_var_dict[var2plot], var2plot]].dropna()
N_ = int(df_tmp[[var2plot]].shape[0])
MSE_ = np.mean((df_tmp[obs_var_dict[var2plot]] - df_tmp[var2plot])**2)
RMSE_ = np.sqrt(MSE_)
print(N_, MSE_, RMSE_)
X = df_tmp[obs_var_dict[var2plot]].values
Y = df_tmp[var2plot].values
# plot up
ax.scatter(X, Y, edgecolors=color, facecolors='none', s=5)
# label Y axis
if plot_n in np.arange(1, 6)[::2]:
ax.set_ylabel('Extracted')
# title the plots
title = '{} ({})'.format(obs_var_dict[var2plot], units_dict[var2plot])
ax.text(0.5, 1.05, title, horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes)
# Add N value
stats_str = 'N={} \nRMSE={:.3g}'.format(N_, RMSE_)
ax.text(0.05, 0.9, stats_str, horizontalalignment='left',
verticalalignment='center', transform=ax.transAxes)
# add a 1:1 line
ax_max = df_tmp.max().max()
ax_max = AC.myround(ax_max, 5, round_up=True) * 1.05
ax_min = df_tmp.min().min()
ax_min = ax_min - (ax_max*0.05)
x_121 = np.arange(ax_min, ax_max*1.5)
ax.plot(x_121, x_121, alpha=0.5, color='k', ls='--')
# Add a line for orthogonal distance regression (ODR)
xvalues, Y_ODR = AC.get_linear_ODR(x=X, y=Y, xvalues=x_121,
return_model=False, maxit=10000)
ax.plot(xvalues, Y_ODR, color=color, ls='--')
# Force axis extents
ax.set_aspect('equal')
ax.set_xlim(ax_min, ax_max)
ax.set_ylim(ax_min, ax_max)
ax.set_aspect('equal')
# --- Now plot ChlrA ( all and <=5 mg m-3 )
# - All ChlrA data
var2plot = 'SeaWIFs_ChlrA'
plot_n = 5
color = CB_color_cycle[5]
# Make a new axis
ax = fig.add_subplot(3, 2, plot_n, aspect='equal')
# Get the data
df_tmp = df[[obs_var_dict[var2plot], var2plot]].dropna()
N_ = int(df_tmp[[var2plot]].shape[0])
MSE_ = np.mean((df_tmp[obs_var_dict[var2plot]] - df_tmp[var2plot])**2)
RMSE_ = np.sqrt(MSE_)
print(N_, MSE_, RMSE_)
X = df_tmp[obs_var_dict[var2plot]].values
Y = df_tmp[var2plot].values
# plot up
ax.scatter(X, Y, edgecolors=color, facecolors='none', s=5)
# label Y axis
if plot_n in np.arange(1, 6)[::2]:
ax.set_ylabel('Extracted')
ax.set_xlabel('Observed')
# title the plots
title = 'ChlrA (all, {})'.format(units_dict[var2plot])
ax.text(0.5, 1.05, title, horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes)
# Add N value
stats_str = 'N={} \nRMSE={:.3g}'.format(N_, RMSE_)
ax.text(0.05, 0.9, stats_str, horizontalalignment='left',
verticalalignment='center', transform=ax.transAxes)
# add a 1:1 line
ax_max = df_tmp.max().max()
ax_max = AC.myround(ax_max, 5, round_up=True) * 1.05
ax_min = df_tmp.min().min()
ax_min = ax_min - (ax_max*0.05)
x_121 = np.arange(ax_min, ax_max*1.5)
ax.plot(x_121, x_121, alpha=0.5, color='k', ls='--')
# add ODR line
xvalues, Y_ODR = AC.get_linear_ODR(x=X, y=Y, xvalues=x_121,
return_model=False, maxit=10000)
ax.plot(xvalues, Y_ODR, color=color, ls='--')
# Force axis extents
ax.set_aspect('equal')
ax.set_xlim(ax_min, ax_max)
ax.set_ylim(ax_min, ax_max)
ax.set_aspect('equal')
# - ChlrA <= 5 mg m-3 only
var2plot = 'SeaWIFs_ChlrA'
plot_n = 6
color = CB_color_cycle[5]
# Make a new axis
ax = fig.add_subplot(3, 2, plot_n, aspect='equal')
# Get the data
df_tmp = df[[obs_var_dict[var2plot], var2plot]].dropna()
# Select only data with ChlrA <= 5 mg m-3
df_tmp = df_tmp.loc[df_tmp[obs_var_dict[var2plot]] <= 5, :]
N_ = int(df_tmp[[var2plot]].shape[0])
MSE_ = np.mean((df_tmp[obs_var_dict[var2plot]] - df_tmp[var2plot])**2)
RMSE_ = np.sqrt(MSE_)
print(N_, MSE_, RMSE_)
X = df_tmp[obs_var_dict[var2plot]].values
Y = df_tmp[var2plot].values
# plot up
ax.scatter(X, Y, edgecolors=color, facecolors='none', s=5)
# label Y axis
if plot_n in np.arange(1, 6)[::2]:
ax.set_ylabel('Extracted')
ax.set_xlabel('Observed')
# title the plots
units = units_dict[var2plot]
title = 'ChlrA ($\leq$5 {})'.format(units)
ax.text(0.5, 1.05, title, horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes)
# Add N value
stats_str = 'N={} \nRMSE={:.3g}'.format(N_, RMSE_)
ax.text(0.05, 0.9, stats_str, horizontalalignment='left',
verticalalignment='center', transform=ax.transAxes)
# add a 1:1 line
ax_max = df_tmp.max().max()
ax_max = AC.myround(ax_max, 1, round_up=True) * 1.05
ax_min = df_tmp.min().min()
ax_min = ax_min - (ax_max*0.05)
x_121 = np.arange(ax_min, ax_max*1.5)
ax.plot(x_121, x_121, alpha=0.5, color='k', ls='--')
# add ODR line
xvalues, Y_ODR = AC.get_linear_ODR(x=X, y=Y, xvalues=x_121,
return_model=False, maxit=10000)
ax.plot(xvalues, Y_ODR, color=color, ls='--')
# Force axis extents
ax.set_aspect('equal')
ax.set_xlim(ax_min, ax_max)
ax.set_ylim(ax_min, ax_max)
ax.set_aspect('equal')
# -- adjust figure and save
# Adjust plot
left = 0.075
right = 0.975
wspace = 0.05
hspace = 0.175
top = 0.95
bottom = 0.075
fig.subplots_adjust(left=left, bottom=bottom, right=right, top=top,
wspace=wspace, hspace=hspace)
# Save
filename = 'Oi_prj_Chance2014_Obs_params_vs_NEW_extracted_params_WINDOW'
plt.savefig(filename, dpi=dpi)
def compare_obs_ancillaries_with_extracted_values(df=None, save2pdf=True,
show=False, dpi=320):
"""
Some species in the dataframe have observed as well as climatology values.
For these species, plot up X/Y and latitudinal comparisons
"""
import seaborn as sns
sns.set(color_codes=True)
current_palette = sns.color_palette("colorblind")
sns.set_style("darkgrid")
# Get the observational data
if isinstance(df, type(None)):
df = obs.get_processed_df_obs_mod() # NOTE this df contains values >400nM
# - Map observational variables to their shared extracted variables
all_vars = df.columns.tolist()
# Dictionary
obs_var_dict = {
# Temperature
'WOA_TEMP': 'Temperature',
# Chlorophyll-a
'SeaWIFs_ChlrA': 'Chl-a',
# Nitrate
'WOA_Nitrate': 'Nitrate',
# Salinity
'WOA_Salinity': 'Salinity'
# There is also 'Nitrite' and 'Ammonium'
}
# Dict of units for variables
units_dict = {
'SeaWIFs_ChlrA': "mg m$^{-3}$", # Chance et al uses micro g/L
'WOA_Salinity': 'PSU', # https://en.wikipedia.org/wiki/Salinity
'WOA_Nitrate': "$\mu$M",
'WOA_TEMP': '$^{o}$C',
}
# sort dataframe by latitude
# df = df.sort_values('Latitude', axis=0, ascending=True)
# set the order the dict keys are accessed
vars_sorted = list(sorted(obs_var_dict.keys()))[::-1]
# Setup pdf
if save2pdf:
savetitle = 'Oi_prj_Chance2014_Obs_params_vs_NEW_extracted_params'
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# - Get variables and confirm which datasets are being used for plot
dfs = {}
for key_ in vars_sorted:
print(obs_var_dict[key_], key_)
# drop nans...
index2use = df[[obs_var_dict[key_], key_]].dropna().index
dfs[key_] = df.loc[index2use, :]
# Check which datasets are being used
ptr_str = 'For variable: {} (#={})- using: {} \n'
for key_ in vars_sorted:
datasets = list(set(dfs[key_]['Data_Key']))
dataset_str = ', '.join(datasets)
print(ptr_str.format(key_, len(datasets), dataset_str))
# - Loop variables and plot as a scatter plot...
for key_ in vars_sorted:
print(obs_var_dict[key_], key_)
# new figure
fig = plt.figure()
# drop nans...
df_tmp = df[[obs_var_dict[key_], key_]].dropna()
N_ = int(df_tmp[[key_]].shape[0])
print(N_)
# Plot up
sns.regplot(x=obs_var_dict[key_], y=key_, data=df_tmp)
# Add title
plt.title('X-Y plot of {} (N={})'.format(obs_var_dict[key_], N_))
plt.ylabel('Extracted ({}, {})'.format(key_, units_dict[key_]))
plt.xlabel('Obs. ({}, {})'.format(
obs_var_dict[key_], units_dict[key_]))
# Save out figure &/or show?
if save2pdf:
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show:
plt.show()
plt.close()
# - Loop variables and plot verus lat (with difference)
for key_ in vars_sorted:
print(obs_var_dict[key_], key_)
# New figure
fig = plt.figure()
# Drop nans...
df_tmp = df[[obs_var_dict[key_], key_, 'Latitude']].dropna()
N_ = int(df_tmp[[key_]].shape[0])
print(N_)
# Get data to analyse
obs = df_tmp[obs_var_dict[key_]].values
climate = df_tmp[key_].values
X = df_tmp['Latitude'].values
# Plot up
plt.scatter(X, obs, label=obs_var_dict[key_], color='red',
marker="o")
plt.scatter(X, climate, label=key_, color='blue',
marker="o")
plt.scatter(X, climate-obs, label='diff', color='green',
marker="o")
# Aesthetics of the plot
plt.legend()
plt.xlim(-90, 90)
plt.ylabel('{} ({})'.format(obs_var_dict[key_], units_dict[key_]))
plt.xlabel('Latitude ($^{o}$N)')
plt.title('{} (N={}) vs. latitude'.format(obs_var_dict[key_], N_))
# Save out figure &/or show?
if save2pdf:
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show:
plt.show()
plt.close()
# Save entire pdf
if save2pdf:
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
def plot_up_lat_STT_var(restrict_data_max=True, restrict_min_salinity=True):
"""
Plot up a "pretty" plot of SST vs. latitude, with scatter size and colour set by iodide.
"""
# - Get data as a DataFrame
df = obs.get_processed_df_obs_mod()
if restrict_data_max:
# df = df[ df['Iodide']< 450. ]
df = df[df['Iodide'] < 400.] # Updated to use 400 nM as upper value
if restrict_min_salinity:
df = df[df['WOA_Salinity'] > 30.]
# Add modulus
df["Latitude (Modulus)"] = np.sqrt(df["Latitude"].copy()**2)
# - Local vars
X_varname = "Latitude (Modulus)"
Y_varname = "WOA_TEMP"
S_varname = 'Iodide'
S_label = S_varname
C_varname = S_varname
# - plot
fig, ax = plt.subplots(facecolor='w', edgecolor='w')
df.plot(kind="scatter", x=X_varname, y=Y_varname, alpha=0.4,
s=df[S_varname], label=S_label, figsize=(10, 7),
c=S_varname, cmap=plt.get_cmap("jet"), colorbar=True,
sharex=False, ax=ax, fig=fig)
plt.show()
def plot_up_lat_varI_varII(restrict_data_max=True, restrict_min_salinity=True):
"""
Plot up a "pretty" plot of varI vs. latitude, with scatter size and colour set by varII.
"""
# - Get data as a DataFrame
df = obs.get_processed_df_obs_mod()
if restrict_data_max:
# df = df[ df['Iodide']< 450. ]
df = df[df['Iodide'] < 400.] # Updated to use 400 nM as upper value
if restrict_min_salinity:
df = df[df['WOA_Salinity'] > 30.]
df["Latitude (Modulus)"] = np.sqrt(df["Latitude"].copy()**2)
# - Local variables
# override? (unhashed)
varI = 'Iodide'
varII = "WOA_TEMP"
# name local vars
X_varname = "Latitude (Modulus)"
Y_varname = varI
S_varname = varII
S_label = S_varname
C_varname = S_varname
# - plot up
fig, ax = plt.subplots(facecolor='w', edgecolor='w')
df.plot(kind="scatter", x=X_varname, y=Y_varname, alpha=0.4,
s=df[S_varname], label=S_label, figsize=(10, 7),
c=S_varname, cmap=plt.get_cmap("jet"), colorbar=True,
sharex=False, ax=ax, fig=fig)
plt.ylim(-5, 500)
plt.show()
def plot_chance_param(df=None, X_var='Temperature', Y_var='Iodide',
data_str='(Obs.) data'):
"""
Plot up the Chance et al (2014) param. vs. data in a DataFrame
"""
# Only include finite data points for temp
# (NOTE: this can reduce the dataset to ~1/3 of the obs. data)
df = df[np.isfinite(df[X_var])]
# Add a variable for C**2 fit
Xvar2plot = X_var+'($^{2}$)'
df[Xvar2plot] = df[X_var].loc[:].values**2
# Plot up data and param.
fig, ax = plt.subplots(facecolor='w', edgecolor='w')
# Plot up
df.plot(kind='scatter', x=Xvar2plot, y=Y_var, ax=ax)
# Add a line of best fit reported param.
actual_data = df[Xvar2plot].values
test_data = np.linspace(AC.myround(actual_data.min()),
AC.myround(actual_data.max()), 20)
m = 0.225
c = 19.0
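# i.e. [I-] = 0.225*(SST in C)**2 + 19.0 nM; e.g. an SST of 20 C gives
# 0.225*400 + 19.0 = 109 nM (an illustrative check of the line plotted below)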
plt.plot(test_data, ((test_data*m)+c), color='green', ls='--',
label='Chance et al (2014) param.')
# Limit axis to data
plt.xlim(-50, AC.myround(df[Xvar2plot].values.max(), 1000))
plt.ylim(-20, AC.myround(df[Y_var].values.max(), 50, round_up=True))
# Add title and axis labels
N = actual_data.shape[0]
title = 'Linear param vs. {} (N={})'.format(data_str, N)
plt.title(title)
plt.xlabel(X_var + ' ($^{o}$C$^{2}$)')
plt.ylabel(Y_var + ' (nM)')
plt.legend(loc='upper left')
# And show/save
tmp_str = data_str.replace(" ", '_').replace("(", "_").replace(")", "_")
savetitle = 'Chance_param_vs_{}.png'.format(tmp_str)
plt.savefig(savetitle)
plt.show()
def plot_macdonald_param(df=None, X_var='Temperature', Y_var='Iodide',
data_str='(Obs.) data'):
"""
Plot up MacDonald et al (2014) param vs. data in DataFrame
"""
# Only include finite data points for temp
# (NOTE: this can reduce the dataset to ~1/3 of the obs. data)
df = df[np.isfinite(df[X_var])]
# Add a variable for
Xvar2plot = '1/'+X_var
df[Xvar2plot] = 1. / (df[X_var].loc[:].values+273.15)
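# (i.e. convert the x-axis to 1/T, with T in Kelvin, for the Arrhenius-style fit)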
Y_var2plot = 'ln({})'.format(Y_var)
df[Y_var2plot] = np.log(df[Y_var].values)
# Plot up data and param.
fig, ax = plt.subplots(facecolor='w', edgecolor='w')
df.plot(kind='scatter', x=Xvar2plot, y=Y_var2plot, ax=ax)
# Add a line of best fit reported param.
# (run some numbers through this equation... )
actual_data = df[X_var].values + 273.15
test_data = np.linspace(actual_data.min(), actual_data.max(), 20)
test_data_Y = 1.46E6*(np.exp((-9134./test_data))) * 1E9
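# Illustrative check: at an SST of 20 C (293.15 K) this gives
# 1.46E6 * exp(-9134/293.15) * 1E9 = ~43 nM iodide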
plt.plot(1./test_data, np.log(test_data_Y),
color='green', ls='--', label='MacDonald et al (2014) param.')
# Limit axis to data
plt.xlim(df[Xvar2plot].values.min()-0.000025,
df[Xvar2plot].values.max()+0.000025)
plt.ylim(0, 7)
# Add title and axis labels
N = actual_data.shape[0]
title = 'Arrhenius param vs. {} (N={})'.format(data_str, N)
plt.title(title)
plt.xlabel(Xvar2plot + ' (K$^{-1}$)')
plt.ylabel(Y_var2plot + ' (nM)')
plt.legend(loc='lower left')
# And show/save
tmp_str = data_str.replace(" ", '_').replace("(", "_").replace(")", "_")
savetitle = 'MacDonald_parameterisation_vs_{}.png'.format(tmp_str)
plt.savefig(savetitle)
plt.show()
def plot_current_parameterisations():
"""
Plot up a comparison of Chance et al 2014 and MacDonald et al 2014 params.
"""
# - Get obs and processed data
# get raw obs
raw_df = get_core_Chance2014_obs()
# Only consider iodide values above 30 nM
raw_df = raw_df[raw_df['Iodide'] > 30.]
# - get processed obs.
pro_df = obs.get_processed_df_obs_mod()
restrict_data_max, restrict_min_salinity = True, True
if restrict_data_max:
# pro_df = pro_df[ pro_df['Iodide'] < 450. ] # used for July Oi! mtg.
# restrict to below 400 nM (pers. comm., RJC)
pro_df = pro_df[pro_df['Iodide'] < 400.]
if restrict_min_salinity:
pro_df = pro_df[pro_df['WOA_Salinity'] > 30.]
# - Plots with raw obs.
# Plot up "linear" fit of iodide and temperature. (Chance et al 2014)
# plot up Chance
# plot_chance_param(df=raw_df.copy())
# Plot up "Arrhenius" fit of iodide and temperature. ( MacDonald et al 2014)
plot_macdonald_param(df=raw_df.copy())
# - Plots with extract Vars.
# Plot up "linear" fit of iodide and temperature. (Chance et al 2014)
# plot_chance_param(df=pro_df.copy(), data_str='Extracted data',
# X_var='WOA_TEMP')
# Plot up "Arrhenius" fit of iodide and temperature. ( MacDonald et al 2014)
plot_macdonald_param(df=pro_df.copy(), data_str='Extracted data',
X_var='WOA_TEMP')
# ---------------------------------------------------------------------------
# ---------------- Misc. Support for iodide project ------------------------
# ---------------------------------------------------------------------------
def explore_diferences_for_Skagerak():
"""
Explore how the Skagerrak data differs from the dataset as a whole
"""
# - Get the observations and model output
folder = utils.get_file_locations('data_root')
filename = 'Iodine_obs_WOA_v8_5_1_ENSEMBLE_csv__avg_nSkag_nOutliers.csv'
dfA = pd.read_csv(folder+filename, encoding='utf-8')
# - Local variables
diffvar = 'Salinity diff'
ds_str = 'Truesdale_2003_I'
obs_var_dict = {
# Temperature
'WOA_TEMP': 'Temperature',
# Chlorophyll-a
'SeaWIFs_ChlrA': 'Chl-a',
# Nitrate
'WOA_Nitrate': 'Nitrate',
# Salinity
'WOA_Salinity': 'Salinity'
# There is also 'Nitrite' and 'Ammonium'
}
# - Analysis / updates to DataFrames
dfA[diffvar] = dfA['WOA_Salinity'].values - dfA['Salinity'].values
# - Get just the Skagerak dataset
df = dfA.loc[dfA['Data_Key'] == ds_str]
prt_str = 'The general stats on the Skagerrak dataset ({}) are: '
print(prt_str.format(ds_str))
# general stats on the iodide numbers
stats = df['Iodide'].describe()
for idx in stats.index.tolist():
vals = stats[stats.index == idx].values[0]
print('{:<10}: {:<10}'.format(idx, vals))
# - stats on the in-situ data
print('\n')
prt_str = 'The stats on the Skagerrak ({}) in-situ ancillary obs. are: '
print(prt_str.format(ds_str))
# which in-situ variables are there
vals = df[obs_var_dict.values()].count()
prt_str = "for in-situ variable '{:<15}' there are N={} values"
for idx in vals.index.tolist():
vals2prt = vals[vals.index == idx].values[0]
print(prt_str.format(idx, vals2prt))
def check_numbers4old_chance_and_new_chance():
"""
Do checks on which datasets have changed between versions
"""
# - Get all observational data
NIU, md_df = obs.get_iodide_obs()
folder = '/work/home/ts551/data/iodide/'
filename = 'Iodide_data_above_20m_v8_5_1.csv'
df = pd.read_csv(folder+filename)
df = df[np.isfinite(df['Iodide'])] # remove NaNs
verOrig = 'v8.5.1'
NOrig = df.shape[0]
# Add the is chance flag to the dataset
ChanceStr = 'In Chance2014?'
df[ChanceStr] = None
for ds in list(set(md_df['Data_Key'])):
bool = df['Data_Key'] == ds
IsChance = md_df.loc[md_df['Data_Key'] == ds, ChanceStr].values[0]
df.loc[bool, ChanceStr] = IsChance
# Where are the new iodide data points
newLODds = set(df.loc[df['ErrorFlag'] == 7]['Data_Key'])
prt_str = 'The new datasets from ErrorFlag 7 are in: {}'
print(prt_str.format(' , '.join(newLODds)))
# Versions with a different number of iodide values
filename = 'Iodide_data_above_20m_v8_2.csv'
df2 = pd.read_csv(folder + filename)
df2 = convert_old_Data_Key_names2new(df2) # Use data descriptor names
df2 = df2[np.isfinite(df2['Iodide'])] # remove NaNs
ver = '8.2'
prt_str = 'Version {} of the data - N={} (vs {} N={})'
print(prt_str.format(ver, df2.shape[0], verOrig, NOrig))
# Do analysis by dataset
for ds in list(set(md_df['Data_Key'])):
N0 = df.loc[df['Data_Key'] == ds, :].shape[0]
N1 = df2.loc[df2['Data_Key'] == ds, :].shape[0]
IsChance = list(set(df.loc[df['Data_Key'] == ds, ChanceStr]))[0]
prt_str = "DS: '{}' (Chance2014={}) has changed by {} to {} ({} vs. {})"
if N0 != N1:
print(prt_str.format(ds, IsChance, N0-N1, N0, verOrig, ver))
def get_numbers_for_data_paper():
"""
Get various numbers/analysis for data descriptor paper
"""
# - Get the full iodide sea-surface dataset
filename = 'Iodide_data_above_20m.csv'
folder = utils.get_file_locations('s2s_root')+'/Iodide/inputs/'
df = pd.read_csv(folder + filename, encoding='utf-8')
# Exclude non finite data points.
df = df.loc[np.isfinite(df['Iodide']), :]
# Save the full data set as .csv for use in Data Descriptor paper
cols2use = [
u'Data_Key', u'Data_Key_ID', 'Latitude', u'Longitude',
# u'\xce\xb4Iodide',
'Year',
# u'Month (Orig.)', # This is RAW data, therefore Month is observation one
u'Month',
'Day',
'Iodide', u'δIodide',
'ErrorFlag', 'Method', 'Coastal', u'LocatorFlag',
]
df = df[cols2use]
# Map references to final .csv from metadata
md_df = obs.get_iodide_obs_metadata()
col2use = u'Reference'
Data_keys = set(df['Data_Key'].values)
for Data_key in Data_keys:
# Get ref for dataset from metadata
bool_ = md_df[u'Data_Key'] == Data_key
REF = md_df.loc[bool_, :][col2use].values[0].strip()
# Add to main data array
bool_ = df[u'Data_Key'] == Data_key
df.loc[bool_, col2use] = REF
# Round the iodide values to one decimal place
df['Iodide'] = df['Iodide'].round(1).values
df[u'δIodide'] = df[u'δIodide'].round(1).values
df[u'Longitude'] = df[u'Longitude'].round(6).values
df[u'Latitude'] = df[u'Latitude'].round(6).values
# Now lock in values by setting them to strings.
df[cols2use] = df[cols2use].astype(str)
# save the resultant file out
filename = 'Oi_prj_Iodide_obs_surface4DataDescriptorPaper.csv'
df.to_csv(filename, encoding='utf-8')
# Get number of samples of iodide per dataset
md_df = obs.get_iodide_obs_metadata()
md_df.index = md_df['Data_Key']
s = pd.Series()
Data_Keys = md_df['Data_Key']
for Data_Key in Data_Keys:
df_tmp = df.loc[df['Data_Key'] == Data_Key]
s[Data_Key] = df_tmp.shape[0]
md_df['n'] = s
md_df.index = np.arange(md_df.shape[0])
md_df.to_csv('Oi_prj_metadata_with_n.csv', encoding='utf-8')
# Check sum for assignment?
prt_str = '# Assigned values ({}) should equal original DataFrame size:{}'
print(prt_str.format(md_df['n'].sum(), str(df.shape[0])))
# Get number of samples of iodide per obs. technique
Methods = set(df['Method'])
s_ds = pd.Series()
s_n = pd.Series()
for Method in Methods:
df_tmp = df.loc[df['Method'] == Method]
s_n[Method] = df_tmp.shape[0]
s_ds[Method] = len(set(df_tmp['Data_Key']))
# Combine and save
dfS = pd.DataFrame()
dfS['N'] = s_n
dfS['datasets'] = s_ds
dfS.index.name = 'Method'
# Reset index
index2use = [str(i) for i in sorted(pd.to_numeric(dfS.index))]
dfS = dfS.reindex(index2use)
dfS.to_csv('Oi_prj_num_in_Methods.csv', encoding='utf-8')
# Check sum on assignment of methods
prt_str = '# Assigned methods ({}) should equal original DataFrame size:{}'
print(prt_str.format(dfS['N'].sum(), str(df.shape[0])))
prt_str = '# Assigned datasets ({}) should equal # datasets: {}'
print(prt_str.format(dfS['datasets'].sum(), len(set(df['Data_Key']))))
# Check which methods are assign to each dataset
dfD = pd.DataFrame(index=sorted(set(df['Method'].values)))
S = []
for Data_Key in Data_Keys:
df_tmp = df.loc[df['Data_Key'] == Data_Key]
methods_ = set(df_tmp['Method'].values)
dfD[Data_Key] = pd.Series(dict(zip(methods_, len(methods_)*[True])))
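# (each column is a dataset; True marks every method that appears within it)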
# Do any datasets have more than one method?
print('These datasets have more than one method: ')
print(dfD.sum(axis=0)[dfD.sum(axis=0) > 1])
def mk_PDF_plot_for_Data_descriptor_paper():
"""
Make a PDF plot for the data descriptor paper
"""
import seaborn as sns
sns.set(color_codes=True)
# Get the data
df = obs.get_processed_df_obs_mod() # NOTE this df contains values >400nM
# df = df.loc[df['Iodide'] <400, : ]
# split data into all, Coastal and Non-Coastal
dfs = {}
dfs['All'] = df.copy()
dfs['Coastal'] = df.loc[df['Coastal'] == 1, :]
dfs['Non-coastal'] = df.loc[df['Coastal'] != 1, :]
# if hist=True, use a count instead of density
hist = False
# Loop and plot
axlabel = '[I$^{-}_{aq}$] (nM)'
fig, ax = plt.subplots()
vars2plot = dfs.keys()
for key in vars2plot:
sns.distplot(dfs[key]['Iodide'].values, ax=ax,
axlabel=axlabel, label=key, hist=hist)
# Force y axis extent to be correct
ax.autoscale()
# Add a legend
plt.legend()
# Add a label for the Y axis
plt.ylabel('Density')
# Save plot
dpi = 320
if hist:
savename = 'Oi_prj_Data_descriptor_PDF'
else:
savename = 'Oi_prj_Data_descriptor_PDF_just_Kernal'
plt.savefig(savename+'.png', dpi=dpi)
def mk_pf_files4Iodide_cruise(dfs=None, test_input_files=False,
mk_column_output_files=False, num_tracers=103):
"""
Make planeflight input files for iodide cruises
"""
# Get locations for cruises as
if isinstance(dfs, type(None)):
dfs = get_iodide_cruise_data_from_Anoop_txt_files()
# Test the input files?
if test_input_files:
test_input_files4Iodide_cruise_with_plots(dfs=dfs)
# Make planeflight files for DataFrames of cruises data (outputting columns values)
if mk_column_output_files:
# slist = ['O3', 'IO', 'BrO', 'CH2O']
slist = ['TRA_002', 'TRA_046', 'TRA_092', 'TRA_020', 'GLYX']
met_vars = [
'GMAO_ABSH', 'GMAO_PSFC', 'GMAO_SURF', 'GMAO_TEMP', 'GMAO_UWND', 'GMAO_VWND'
]
slist = slist + met_vars
for key_ in dfs.keys():
print(key_, dfs[key_].shape)
df = dfs[key_].dropna()
print(df.shape)
# add TYPE flag
df['TYPE'] = 'IDC'
# Grid box level centers [hPa]
alts_HPa = AC.gchemgrid('c_hPa_geos5_r')
# Loop and add in column values
dfs_all = []
for n_alt, hPa_ in enumerate(alts_HPa):
print(hPa_, n_alt)
df_ = df.copy()
df_['PRESS'] = hPa_
dfs_all += [df_]
df = pd.concat(dfs_all)
# make sure rows are in date order
df.sort_values(['datetime', 'PRESS'], ascending=True, inplace=True)
# now output files
AC.prt_PlaneFlight_files(df=df, slist=slist)
# Make planeflight files for DataFrames of cruises data
# (outputting surface values)
else:
met_vars = [
'GMAO_ABSH', 'GMAO_PSFC', 'GMAO_SURF', 'GMAO_TEMP', 'GMAO_UWND', 'GMAO_VWND'
]
assert isinstance(num_tracers, int), 'num_tracers must be an integer'
slist = ['TRA_{:0>3}'.format(i) for i in np.arange(1, num_tracers+1)]
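# e.g. num_tracers=103 yields ['TRA_001', 'TRA_002', ..., 'TRA_103']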
species = ['OH', 'HO2', 'GLYX']
slist = slist + species + met_vars
for key_ in dfs.keys():
print(key_)
df = dfs[key_].dropna()
# add TYPE flag
df['TYPE'] = 'IDS'
#
df['PRESS'] = 1013.0
# now output files
AC.prt_PlaneFlight_files(df=df, slist=slist)
def test_input_files4Iodide_cruise_with_plots(dfs=None, show=False):
""""
Plot up maps of iodide cruise routes
"""
# Get locations for cruises as
if isinstance(dfs, type(None)):
dfs = get_iodide_cruise_data_from_Anoop_txt_files()
# - Test input files
# file to save?
savetitle = 'GC_pf_input_iodide_cruises'
dpi = 320
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
vars2test = ['LON', 'LAT']
for key_ in dfs.keys():
df = dfs[key_]
for var_ in vars2test:
# -- Plot X vs Y plot
df_tmp = df[['datetime', var_]]
# calc NaNs
VAR_dropped_N = int(df_tmp.shape[0])
df_tmp = df_tmp.dropna()
VAR_N_data = int(df_tmp.shape[0])
VAR_dropped_N = VAR_dropped_N-VAR_N_data
# plot
df_tmp.plot(x='datetime', y=var_)
#
title = "Timeseries of '{}' for '{}'".format(var_, key_)
title += ' (ALL N={}, exc. {} NaNs)'.format(VAR_N_data,
VAR_dropped_N)
plt.title(title)
# Save / show
file2save_str = 'Iodide_input_file_{}_check_{}.png'.format(
key_, var_)
plt.savefig(file2save_str)
if show:
plt.show()
print(df_tmp[var_].describe())
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
# -- Plot up cruise track as map
del df_tmp
df_tmp = df.dropna()
lons = df_tmp['LON'].values
lats = df_tmp['LAT'].values
title = "Cruise track for '{}'".format(key_)
print('!'*100, 'plotting map for: ', key_)
AC.plot_lons_lats_spatial_on_map(lons=lons, lats=lats, title=title)
plt.ylim(AC.myround(lats.min()-20, 10, ),
AC.myround(lats.max()+20, 10, round_up=True))
plt.xlim(AC.myround(lons.min()-20, 10, ),
AC.myround(lons.max()+20, 10, round_up=True))
if show:
plt.show()
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
# Save entire pdf
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
def get_iodide_cruise_data_from_Anoop_txt_files(verbose=False):
"""
Get observational data and locations from Anoop's txt files
"""
# - Local variables
folder = utils.get_file_locations('data_root')
folder += 'LOCS_Inamdar_Mahajan_cruise_x3/'
cruise_files = {
# 1 8th Southern Ocean Expedition (SOE-8), possibly on the RV Sagar Nidhi
# 'Iodide1': 'cruise1_2014.xlsx',
'SOE-8': 'cruise1_2014.xlsx',
# 2 2nd International Indian Ocean Expedition (IIOE-2),
# possibly one of several cruises in this program
# (IIOE-1 was decades ago). On board RV Sagar Nidhi.
# 'Iodide2': 'cruise2_2015.xlsx',
'IIOE-1': 'cruise2_2015.xlsx',
# 3 9th Southern Ocean Expedition (SOE-9), the cruise on which Liselotte
# Tinel took samples, on board the RV Agulhas.
# 'Iodide3': 'cruise3_2016.xlsx',
'SOE-9': 'cruise3_2016.xlsx',
}
# - Extract data
dfs = {}
for cruise_name in cruise_files.keys():
print('Extracting: ', cruise_name, cruise_files[cruise_name])
# cruise_name = cruise_files.keys()[0]
df = pd.read_excel(folder+cruise_files[cruise_name])
names_dict = {
'Date': 'date', 'UTC': 'date', 'time (UTC)': 'time', 'lat': 'LAT',
'lon': 'LON'
}
if verbose:
print(df.head())
df.rename(columns=names_dict, inplace=True)
if verbose:
print(df.head())
# convert dates to datetime
df['datetime'] = df['date'].astype(str)+' '+df['time'].astype(str)
df['datetime'] = pd.to_datetime(df['datetime'])
df.index = df['datetime'].values
if verbose:
print(df.head())
dfs[cruise_name] = df[['datetime', 'LON', 'LAT']]
return dfs
def TEST_AND_PROCESS_iodide_cruise_output(just_process_surface_data=False):
"""
Process, plot (test values), then save planeflight values to csv
"""
# Local variables
wd = '/scratch/ts551/GC/v10-01_HAL/'
files_dict = {
'SOE-8': wd+'run.ClBr.Iodide2015.SOE-8',
'IIOE-1': wd+'run.ClBr.Iodide2016.IIOE-1',
'SOE-9': wd+'run.ClBr.Iodide2017.SOE-9',
}
# Test surface output
if just_process_surface_data:
extra_str = 'surface'
dfs = {}
for key_ in files_dict.keys():
wd = files_dict[key_]+'/plane_flight_logs_{}/'.format(extra_str)
df = process_planeflight_files(wd=wd)
dfs[key_] = df
get_test_plots_surface_pf_output(df=df,
name='{} ({})'.format(key_, extra_str))
# Save the output as .csv
for key_ in dfs.keys():
savetitle = 'GC_planeflight_compiled_output_for_{}_{}.csv'
savetitle = savetitle.format(key_, extra_str)
savetitle = AC.rm_spaces_and_chars_from_str(savetitle)
dfs[key_].to_csv(savetitle)
# - Process the output files for column values
else:
specs = ['O3', 'BrO', 'IO', 'CH2O']
extra_str = 'column'
dfs = {}
file_str = 'GC_planeflight_compiled_output_for_{}_{}_II.csv'
for key_ in files_dict.keys():
# for key_ in ['IIOE-1']:
print(key_)
pf_wd = files_dict[key_]+'/plane_flight_logs_{}/'.format(extra_str)
df = process_planeflight_files(wd=pf_wd)
# now process to column values
df = process_planeflight_column_files(wd=files_dict[key_], df=df)
dfs[key_] = df
# Save the output as .csv
savetitle = file_str.format(key_, extra_str)
df['datetime'] = df.index
df.to_csv(AC.rm_spaces_and_chars_from_str(savetitle))
# Test plots?
for key_ in files_dict.keys():
savetitle = file_str.format(key_, extra_str)
df = pd.read_csv(AC.rm_spaces_and_chars_from_str(savetitle))
df.index = pd.to_datetime(df['datetime'])
get_test_plots_surface_pf_output(df=df,
name='{} ({})'.format(
key_, extra_str),
specs=specs, units='molec cm$^{-2}$',
scale=1)
def process_planeflight_column_files(wd=None, df=None, res='4x5', debug=False):
"""
Process column of v/v values into single values for total column
"""
# wd=files_dict[key_]; df = dfs[ key_ ]; res='4x5'
timestamps = list(sorted(set(df.index)))
timestamps_with_duplicates = []
RMM_air = AC.constants('RMM_air')
AVG = AC.constants('AVG')
specs = ['O3', 'BrO', 'IO', 'CH2O']
# get lon lat array of time in troposphere
TPS = AC.get_GC_output(wd=wd+'/', vars=['TIME_TPS__TIMETROP'],
trop_limit=True)
# convert this to boolean (<1 == not strat)
TPS[TPS != 1] = 9999.9
TPS[TPS == 1] = False
TPS[TPS == 9999.9] = True
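# NOTE: np.ma.array (used below) masks where mask==True, so grid boxes not
# always in the troposphere (fraction < 1) are excluded as stratospheric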
# And dates
CTM_DATES = AC.get_gc_datetime(wd=wd+'/')
CTM_months = np.array([i.month for i in CTM_DATES])
# EPOCH = datetime.datetime(1970,1,1)
# CTM_EPOCH = np.array([ (i.month-EPOCH).total_seconds() for i in CTM_DATES ])
# Also get grid of surface area ( m^2 ) and convert to cm2
S_AREA = AC.get_surface_area(res=res) * 10000
A_M = AC.get_GC_output(wd, vars=['BXHGHT_S__AD'], trop_limit=True,
dtype=np.float64)
# VOL = AC.get_volume_np( wd=wd, res=res, s_area=S_AREA[...,None])
big_data_l = []
dates = []
# for ts in timestamps[::1000]: # Test processing on first 1000 points
n_timestamps = len(timestamps)
for n_ts, ts in enumerate(timestamps):
print('progress= {:.3f} %'.format((float(n_ts) / n_timestamps)*100.))
tmp_df = df.loc[df.index == ts]
if debug:
print(ts, tmp_df.shape)
# List of pressures (one set = 47 )
PRESS_ = tmp_df['PRESS'].values
# Special case for where there is more than one column set for a timestamp
# assert( len(PRESS) == 47 )
if len(PRESS_) != 47:
timestamps_with_duplicates += [ts]
prt_str = 'WARNING: DOUBLE UP IN TIMESTEP:{} ({}, shape={})'
print(prt_str.format(ts, len(PRESS_), tmp_df.shape))
print('Just using 1st 47 values')
tmp_df = tmp_df[0:47]
dates += [ts]
else:
dates += [ts]
# Now reverse data (as outputted from highest to lowest)
tmp_df = tmp_df.loc[::-1]
# Select the lat and lon of this location
LAT_ = tmp_df['LAT'].values
LON_ = tmp_df['LON'].values
# check there is only one lat and lon
assert len(set(LAT_)) == 1
assert len(set(LON_)) == 1
# - Select 3D vars from ctm.nc file
# get LON, LAT index of box
LON_ind = AC.get_gc_lon(LON_[0], res=res)
LAT_ind = AC.get_gc_lat(LAT_[0], res=res)
# time_ind = AC.find_nearest( CTM_EPOCH, (ts-EPOCH).total_seconds() )
time_ind = AC.find_nearest(CTM_months, ts.month)
# Fraction of time in the troposphere ('TIME_TPS__TIMETROP')
TPS_ = TPS[LON_ind, LAT_ind, :, time_ind]
# Select surface area of grid box
S_AREA_ = S_AREA[LON_ind, LAT_ind, 0]
# Compute column amounts by species
A_M_ = A_M[LON_ind, LAT_ind, :, time_ind]
# Number of molecules per grid box
MOLECS_ = (((A_M_*1E3) / RMM_air) * AVG)
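# (air mass in kg *1E3 -> g; /RMM_air (g mol-1) -> mol; *Avogadro -> molecules)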
# Extract for species
data_l = []
for spec in specs:
# Get species in v/v
data_ = tmp_df[spec].values
# Mask for troposphere
data_ = np.ma.array(data_[:38], mask=TPS_)
# Get number of molecules
data_ = (data_ * MOLECS_).sum()
# Convert to molecs/cm2
data_ = data_ / S_AREA_
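# (S_AREA was converted to cm2 above, so the column is now in molec cm-2)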
# Store data
data_l += [data_]
# Save location
data_l += [LON_[0], LAT_[0]]
# Save data for all specs
big_data_l += [data_l]
# Convert to DataFrame.
df_col = pd.DataFrame(big_data_l)
df_col.index = dates
df_col.columns = specs + ['LON', 'LAT']
print(df_col.shape)
return df_col
def process_planeflight_files(wd=None):
"""
Process planeflight files to pd.DataFrame
"""
import glob
import seaborn as sns
sns.set_context("paper", font_scale=0.75)
# Get planeflight data
files = glob.glob(wd+'plane.log.*')
print(wd, len(files), files[0])
names, POINTS = AC.get_pf_headers(files[0])
dfs = [AC.pf_csv2pandas(file=i, vars=names) for i in files]
df = pd.concat(dfs)
# Rename axis
TRA_XXs = [i for i in df.columns if ('TRA_' in i)]
TRA_dict = dict(
zip(TRA_XXs, [v10_ClBrI_TRA_XX_2_name(i) for i in TRA_XXs]))
df.rename(columns=TRA_dict, inplace=True)
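# NOTE: v10_ClBrI_TRA_XX_2_name is assumed here to map v10 Cl/Br/I-mechanism
# tracer IDs (e.g. 'TRA_002') onto species names for readability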
return df
def get_test_plots_surface_pf_output(wd=None, name='Planeflight',
df=None, specs=None, units=None, scale=1,
show_plot=False):
"""
Test model output at the surface for the Indian Ocean ship cruises
"""
import seaborn as sns
sns.set(color_codes=True)
# Get data
if isinstance(df, type(None)):
df = process_planeflight_files(wd=wd)
# Now add summary plots
dpi = 320
savetitle = 'GC_planeflight_summary_plots_for_{}_V'.format(name)
savetitle = AC.rm_spaces_and_chars_from_str(savetitle)
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi, no_dstr=True)
# Locations outputted for?
title = 'Locations of {} output'.format(name)
fig, ax = plt.subplots()
AC.plot_lons_lats_spatial_on_map(title=title, f_size=15,
lons=df['LON'].values, lats=df['LAT'].values,
fig=fig, ax=ax)
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi, no_dstr=True)
if show_plot:
plt.show()
# Timeseries of key species
if isinstance(specs, type(None)):
key_spec = ['O3', 'NO', 'NO2', 'OH', 'HO2', 'IO', 'BrO']
extras = ['SO4', 'DMS', 'CH2O', ]
species = ['OH', 'HO2', 'GLYX']
specs = key_spec + extras + species
specs += ['LON', 'LAT']
met = ['GMAO_ABSH', 'GMAO_PSFC', 'GMAO_SURF', 'GMAO_TEMP',
'GMAO_UWND', 'GMAO_VWND']
specs += met
print(specs)
for spec in specs:
fig, ax = plt.subplots()
if isinstance(units, type(None)):
units, scale = AC.tra_unit(spec, scale=True)
try:
spec_LaTeX = AC.latex_spec_name(spec)
except:
spec_LaTeX = spec
print(spec, units, spec_LaTeX, scale)
dates = pd.to_datetime(df.index).values
plt.plot(dates, df[spec].values*scale)
plt.ylabel('{} ({})'.format(spec, units))
title_str = "Timeseries of modelled '{}' during {}"
plt.title(title_str.format(spec_LaTeX, name))
plt.xticks(rotation=45)
plt.subplots_adjust(bottom=0.15)
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi, no_dstr=True)
if show_plot:
plt.show()
plt.close()
# Save entire pdf
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi, no_dstr=True)
def mk_data_files4Indian_seasurface_paper(res='0.125x0.125'):
"""
Make data files for the Indian Ocean sea-surface iodide paper
"""
AreasOfInterest = {
'SubT_NA': ('NASW', 'NATR', 'NASE', ),
'SubT_SA': ('SATL',),
'SubT_NP': (u'NPSW', 'NPTG'),
'SubT_SP': ('SPSG',),
'SubT_SI': ('ISSG',),
}
AreasOfInterest_Names = AreasOfInterest.copy()
# Get dictionaries of province numbers and names
num2prov = LonghurstProvinceFileNum2Province(
None, invert=True, rtn_dict=True)
MRnum2prov = MarineRegionsOrg_LonghurstProvinceFileNum2Province(
None, invert=True, rtn_dict=True)
Rnum2prov = RosieLonghurstProvinceFileNum2Province(
None, invert=True, rtn_dict=True)
# Convert regions to the LP numbers
PrtStr = "{} = Requested province: {} - R's #={}, MIT(GitHub) #={}, LH(2010) #={}"
for key_ in AreasOfInterest.keys():
for a_ in AreasOfInterest[key_]:
print(PrtStr.format(
key_, a_, Rnum2prov[a_], num2prov[a_], MRnum2prov[a_]))
nums = [MRnum2prov[i] for i in AreasOfInterest[key_]]
AreasOfInterest[key_] = nums
# - Get data all together
Filename = 'Oi_prj_predicted_iodide_0.125x0.125_No_Skagerrak_WITH_Provinces.nc'
# folder = '/work/home/ts551/data/iodide/'
folder = './'
ds = xr.open_dataset(folder + Filename)
params = ['Chance2014_STTxx2_I',
'MacDonald2014_iodide', 'Ensemble_Monthly_mean']
vars2use = params + ['LonghurstProvince']
ds = ds[vars2use]
# Also add the features of interest
Filename = 'Oi_prj_feature_variables_0.125x0.125_WITH_Provinces.nc'
ds2 = xr.open_dataset(folder + Filename)
vars2add = ['WOA_MLDpt', 'WOA_Nitrate', 'WOA_TEMP', 'WOA_Salinity']
for var in vars2add:
ds[var] = ds2[var]
# Add axis X/Y assignment
attrs = ds['lat'].attrs
attrs["axis"] = 'Y'
ds['lat'].attrs = attrs
attrs = ds['lon'].attrs
attrs["axis"] = 'X'
ds['lon'].attrs = attrs
# - Now extract the data and check the locations being extracted
# Make files with the data of interest.
file_str = 'Oi_OS_Longhurst_provinces_{}_{}_{}.{}'
for key_ in AreasOfInterest.keys():
nums = AreasOfInterest[key_]
ds_tmp = ds.where(np.isin(ds.LonghurstProvince.values, nums))
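# NOTE: Dataset.where keeps values where the condition is True and sets all
# other grid boxes to NaN, so the grid shape is preserved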
# - Plot a diagnostic figure
fig, ax = plt.subplots()
ds_tmp['LonghurstProvince'].mean(dim='time').plot(ax=ax)
# get names and numbers of assigned areas
Names = AreasOfInterest_Names[key_]
nums = [str(i) for i in AreasOfInterest[key_]]
# Add a title
title = "For '{}' ({}), \n plotting #(s)={}"
title = title.format(key_, ', '.join(Names), ', '.join(nums))
plt.title(title)
# Save to png
png_filename = file_str.format(key_, '', res, 'png')
plt.savefig(png_filename, dpi=320)
plt.close()
# - What is the area extent of the data
var2use = 'WOA_Nitrate'
ds_lat = ds_tmp[var2use].dropna(dim='lat', how='all')
min_lat = ds_lat['lat'].min() - 2
max_lat = ds_lat['lat'].max() + 2
ds_lon = ds_tmp[var2use].dropna(dim='lon', how='all')
min_lon = ds_lon['lon'].min() - 2
max_lon = ds_lon['lon'].max() + 2
# - Now save by species
vars2save = [i for i in ds_tmp.data_vars if i != 'LonghurstProvince']
for var_ in vars2save:
print(var_)
da = ds_tmp[var_]
# select the minimum area for the areas
da = da.sel(lat=(da.lat >= min_lat))
da = da.sel(lat=(da.lat < max_lat))
if key_ in ('SubT_NP', 'SubT_SP'):
print('just limiting lat for: {}'.format(key_))
else:
da = da.sel(lon=(da.lon >= min_lon))
da = da.sel(lon=(da.lon < max_lon))
# Save the data to NetCDF.
filename = file_str.format(key_, var_, res, '')
filename = AC.rm_spaces_and_chars_from_str(filename)
da.to_netcdf(filename+'.nc')
# ---------------------------------------------------------------------------
# --------------- Functions for Atmospheric impacts work -------------------
# ---------------------------------------------------------------------------
def Do_analysis_and_mk_plots_for_EGU19_poster():
"""
Driver function for analysis and plotting for EGU poster
"""
# - Get data
# data locations and names as a dictionary
wds = get_run_dict4EGU_runs()
runs = list(sorted(wds.keys()))
# Get emissions
dsDH = GetEmissionsFromHEMCONetCDFsAsDatasets(wds=wds)
# Process the datasets?
# a = [ AC.get_O3_burden( wd=wds[i] ) for i in runs ]
# Get datasets objects from directories and in a dictionary
dsD = {}
for run in runs:
ds = xr.open_dataset(wds[run]+'ctm.nc')
dsD[run] = ds
# - Do analysis
# Get summary emission stats
Check_global_statistics_on_emissions(dsDH=dsDH)
# Look at differences in surface concentration.
extra_str = 'EGU_runs_surface_Iy_stats_'
df = evalulate_burdens_and_surface_conc(run_dict=wds, extra_str=extra_str)
    # Get general statistics about the emissions vs. Macdonald et al 2014
REF1 = 'Macdonald2014'
extra_str = 'EGU_runs_general_stats_vs_{}_'.format(REF1)
df = AC.get_general_stats4run_dict_as_df(run_dict=wds, REF1=REF1,
extra_str=extra_str)
    # Get general statistics about the emissions vs. Chance et al 2014
REF1 = 'Chance2014'
extra_str = 'EGU_runs_general_stats_vs_{}_'.format(REF1)
df = AC.get_general_stats4run_dict_as_df(run_dict=wds, REF1=REF1,
extra_str=extra_str)
    # Get general statistics about the emissions vs. the ML_Iodide run
REF1 = 'ML_Iodide'
extra_str = 'EGU_runs_general_stats_vs_{}_'.format(REF1)
df = AC.get_general_stats4run_dict_as_df(run_dict=wds, REF1=REF1,
extra_str=extra_str)
    # Get general statistics about the emissions vs. the run without HOI/I2 emissions
REF1 = 'No_HOI_I2'
extra_str = 'EGU_runs_general_stats_vs_{}_'.format(REF1)
df = AC.get_general_stats4run_dict_as_df(run_dict=wds, REF1=REF1,
extra_str=extra_str)
# - Get spatial plots
# plot up emissions
plot_up_surface_emissions(dsDH=dsDH)
    # - Do difference plots
# - look at the HOI/I2 surface values and IO.
# species to look at?
specs = ['O3', 'NO2', 'IO', 'HOI', 'I2']
# Chance vs. ML_iodide
AC.plot_up_surface_changes_between2runs(ds_dict=dsD, BASE='Chance2014',
NEW='ML_Iodide', specs=specs,
update_PyGChem_format2COARDS=True)
# Macdonald vs. ML_iodide
AC.plot_up_surface_changes_between2runs(ds_dict=dsD, BASE='Macdonald2014',
NEW='ML_Iodide', specs=specs,
update_PyGChem_format2COARDS=True)
# Macdonald vs. Chance
AC.plot_up_surface_changes_between2runs(ds_dict=dsD, BASE='Macdonald2014',
NEW='Chance2014', specs=specs,
update_PyGChem_format2COARDS=True)
# Macdonald vs. No_HOI_I2
AC.plot_up_surface_changes_between2runs(ds_dict=dsD, BASE='Macdonald2014',
NEW='No_HOI_I2', specs=specs,
update_PyGChem_format2COARDS=True)
# ML_iodide vs. No_HOI_I2
AC.plot_up_surface_changes_between2runs(ds_dict=dsD, BASE='No_HOI_I2',
NEW='ML_Iodide', specs=specs,
update_PyGChem_format2COARDS=True)
# ds_dict=dsD.copy(); BASE='Macdonald2014'; NEW='ML_Iodide'
# - Get production figures.
# surface ozone figure - made in powerpoint for now...
# Plot up emissions for EGU presentation
BASE = 'ML_Iodide'
DIFF1 = 'Chance2014'
DIFF2 = 'Macdonald2014'
plot_up_EGU_fig05_emiss_change(ds_dict=dsD, BASE=BASE, DIFF1=DIFF1, DIFF2=DIFF2,
update_PyGChem_format2COARDS=True)
def plot_up_EGU_fig05_emiss_change(ds_dict=None, levs=[1], specs=[],
BASE='', DIFF1='', DIFF2='', prefix='IJ_AVG_S__',
update_PyGChem_format2COARDS=False):
"""
Plot up the change in emissions for EGU poster
"""
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
# Species to plot
vars2use = [prefix+i for i in specs]
unit = None
PDFfilenameStr = 'Oi_surface_change_{}_vs_{}_lev_{:0>2}'
    # Set datasets to use and just include the variables to plot
    title1 = BASE
    title2 = DIFF1
    title3 = DIFF2
    ds1 = ds_dict[BASE][vars2use].copy()
    ds2 = ds_dict[DIFF1][vars2use].copy()
    ds3 = ds_dict[DIFF2][vars2use].copy()
    # Average over time
    print(ds1, ds2, ds3)
ds1 = ds1.mean(dim='time')
ds2 = ds2.mean(dim='time')
ds3 = ds3.mean(dim='time')
# Remove vestigial coordinates.
# (e.g. the time_0 coord... what is this?)
vars2drop = ['time_0']
dsL = [ds1, ds2, ds3]
for var2drop in vars2drop:
for n, ds in enumerate(dsL):
CoordVars = [i for i in ds.coords]
if var2drop in CoordVars:
ds = ds.drop(var2drop)
dsL[n] = ds
ds1, ds2, ds3 = dsL
# Update dimension names
if update_PyGChem_format2COARDS:
ds1 = Convert_PyGChem_Iris_DataSet2COARDS_NetCDF(ds=ds1)
ds2 = Convert_PyGChem_Iris_DataSet2COARDS_NetCDF(ds=ds2)
ds3 = Convert_PyGChem_Iris_DataSet2COARDS_NetCDF(ds=ds3)
    # Setup plot
    # Plot up map with mask present
    fig = plt.figure(figsize=(10, 6))
    vmin = -100
    vmax = 100
    # Add initial plot
    axn = [1, 1, 1]
    ax = fig.add_subplot(*axn, projection=ccrs.Robinson(), aspect='auto')
    # NOTE: assumption - the original code called .imshow on the axes object
    # itself and used an undefined 'savename'; here we plot the % change in
    # the first requested species between BASE and DIFF1 at the first
    # requested level instead.
    da1 = ds1[vars2use[0]]
    da2 = ds2[vars2use[0]]
    if 'lev' in da1.dims:
        da1 = da1.isel(lev=levs[0])
        da2 = da2.isel(lev=levs[0])
    pcent = (da2 - da1) / da1 * 100
    pcent.plot.imshow(x='lon', y='lat', ax=ax,
                      vmin=vmin, vmax=vmax,
                      transform=ccrs.PlateCarree())
    savename = PDFfilenameStr.format(BASE, DIFF1, levs[0])
    plt.title(savename)
    plt.savefig(savename+'.png')
    plt.close()
def evalulate_burdens_and_surface_conc(run_dict=None, extra_str='', REF1=None,
REF2=None, REF_wd=None, res='4x5', trop_limit=True,
save2csv=True, prefix='GC_', run_names=None,
debug=False):
"""
Check general statistics on the CTM model runs
"""
# Extract names and locations of data
if isinstance(run_dict, type(None)):
run_dict = get_run_dict4EGU_runs()
if isinstance(run_names, type(None)):
run_names = sorted(run_dict.keys())
wds = [run_dict[i] for i in run_names]
# Mass unit scaling
mass_scale = 1E3
mass_unit = 'Tg'
# v/v scaling?
ppbv_unit = 'ppbv'
ppbv_scale = 1E9
pptv_unit = 'pptv'
pptv_scale = 1E12
# Get shared variables from a single model run
if isinstance(REF_wd, type(None)):
REF_wd = wds[0]
# get time in the troposphere diagnostic
t_p = AC.get_GC_output(wd=REF_wd, vars=[u'TIME_TPS__TIMETROP'],
trop_limit=True)
# Temperature
K = AC.get_GC_output(wd=REF_wd, vars=[u'DAO_3D_S__TMPU'], trop_limit=True)
# airmass
a_m = AC.get_air_mass_np(wd=REF_wd, trop_limit=True)
# Surface area?
s_area = AC.get_surface_area(res)[..., 0] # m2 land map
# ----
# - Now build analysis in pd.DataFrame
#
# - Tropospheric burdens?
# Get tropospheric burden for run
varname = 'O3 burden ({})'.format(mass_unit)
ars = [AC.get_O3_burden(i, t_p=t_p).sum() for i in wds]
df = pd.DataFrame(ars, columns=[varname], index=run_names)
# Get NO2 burden
NO2_varname = 'NO2 burden ({})'.format(mass_unit)
ars = [AC.get_trop_burden(spec='NO2', t_p=t_p, wd=i, all_data=False).sum()
for i in wds]
# convert to N equivalent
ars = [i/AC.species_mass('NO2')*AC.species_mass('N') for i in ars]
df[NO2_varname] = ars
# Get NO burden
NO_varname = 'NO burden ({})'.format(mass_unit)
ars = [AC.get_trop_burden(spec='NO', t_p=t_p, wd=i, all_data=False).sum()
for i in wds]
# convert to N equivalent
ars = [i/AC.species_mass('NO')*AC.species_mass('N') for i in ars]
df[NO_varname] = ars
# Combine NO and NO2 to get NOx burden
NOx_varname = 'NOx burden ({})'.format(mass_unit)
df[NOx_varname] = df[NO2_varname] + df[NO_varname]
# Get HOI burden
varname = 'HOI burden ({})'.format(mass_unit)
ars = [AC.get_trop_burden(spec='HOI', t_p=t_p, wd=i, all_data=False).sum()
for i in wds]
# convert to I equivalent
ars = [i/AC.species_mass('HOI')*AC.species_mass('I') for i in ars]
df[varname] = ars
# Get I2 burden
varname = 'I2 burden ({})'.format(mass_unit)
ars = [AC.get_trop_burden(spec='I2', t_p=t_p, wd=i, all_data=False).sum()
for i in wds]
# convert to I equivalent
ars = [i/AC.species_mass('I2')*AC.species_mass('I') for i in ars]
df[varname] = ars
    # Get IO burden
varname = 'IO burden ({})'.format(mass_unit)
ars = [AC.get_trop_burden(spec='IO', t_p=t_p, wd=i, all_data=False).sum()
for i in wds]
# convert to I equivalent
ars = [i/AC.species_mass('IO')*AC.species_mass('I') for i in ars]
df[varname] = ars
# Scale units
for col_ in df.columns:
if 'Tg' in col_:
df.loc[:, col_] = df.loc[:, col_].values/mass_scale
# - Surface concentrations?
# Surface ozone
O3_sur_varname = 'O3 surface ({})'.format(ppbv_unit)
ars = [AC.get_avg_surface_conc_of_X(spec='O3', wd=i, s_area=s_area)
for i in wds]
df[O3_sur_varname] = ars
# Surface NOx
NO_sur_varname = 'NO surface ({})'.format(ppbv_unit)
ars = [AC.get_avg_surface_conc_of_X(spec='NO', wd=i, s_area=s_area)
for i in wds]
df[NO_sur_varname] = ars
NO2_sur_varname = 'NO2 surface ({})'.format(ppbv_unit)
ars = [AC.get_avg_surface_conc_of_X(spec='NO2', wd=i, s_area=s_area)
for i in wds]
df[NO2_sur_varname] = ars
NOx_sur_varname = 'NOx surface ({})'.format(ppbv_unit)
df[NOx_sur_varname] = df[NO2_sur_varname] + df[NO_sur_varname]
# Surface HOI
HOI_sur_varname = 'HOI surface ({})'.format(pptv_unit)
ars = [AC.get_avg_surface_conc_of_X(spec='HOI', wd=i, s_area=s_area)
for i in wds]
df[HOI_sur_varname] = ars
# Surface I2
I2_sur_varname = 'I2 surface ({})'.format(pptv_unit)
ars = [AC.get_avg_surface_conc_of_X(spec='I2', wd=i, s_area=s_area)
for i in wds]
df[I2_sur_varname] = ars
    # Surface IO
    IO_sur_varname = 'IO surface ({})'.format(pptv_unit)
    ars = [AC.get_avg_surface_conc_of_X(spec='IO', wd=i, s_area=s_area)
           for i in wds]
    df[IO_sur_varname] = ars
# - Scale units
for col_ in df.columns:
if 'ppbv' in col_:
df.loc[:, col_] = df.loc[:, col_].values*ppbv_scale
if 'pptv' in col_:
df.loc[:, col_] = df.loc[:, col_].values*pptv_scale
# - Processing and save?
# Calculate % change from base case for each variable
if not isinstance(REF1, type(None)):
for col_ in df.columns:
pcent_var = col_+' (% vs. {})'.format(REF1)
df[pcent_var] = (df[col_]-df[col_][REF1]) / df[col_][REF1] * 100
if not isinstance(REF2, type(None)):
for col_ in df.columns:
pcent_var = col_+' (% vs. {})'.format(REF2)
df[pcent_var] = (df[col_]-df[col_][REF2]) / df[col_][REF2] * 100
# Re-order columns
    df = df.reindex(sorted(df.columns), axis=1)
# Reorder index
    df = df.T.reindex(sorted(df.T.columns), axis=1).T
# Now round the numbers
df = df.round(3)
# Save csv to disk
csv_filename = '{}_summary_statistics{}.csv'.format(prefix, extra_str)
df.to_csv(csv_filename)
# return the DataFrame too
return df
def Check_sensitivity_of_HOI_I2_param2WS():
"""
Check the sensitivity of the Carpenter et al 2013 parameterisation to wind speed
"""
import seaborn as sns
sns.set(color_codes=True)
sns.set_context("paper", font_scale=1.75)
import matplotlib.pyplot as plt
# Core calculation for HOI emission
def calc_HOI_flux_eqn_20(I=None, O3=None, WS=None, ):
""" Eqn 20 from Carpenter et al 2013 """
return O3 * ((4.15E5 * (np.sqrt(I) / WS)) -
(20.6 / WS) - (2.36E4 * np.sqrt(I)))
# Slightly simpler calculation for HOI emission
def calc_HOI_flux_eqn_21(I=None, O3=None, WS=None, ):
""" Eqn 21 from Carpenter et al 2013 """
return O3 * np.sqrt(I) * ((3.56E5/WS) - 2.16E4)
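    # Worked example with the values used below: for I = 100E-9, O3 = 20 and
    # WS = 10, Eqn 21 gives 20 * sqrt(1E-7) * (3.56E5/10 - 2.16E4) ~= 88.5,
    # i.e. ~88.5 nmol m^-2 d^-1 as labelled on the plot's y axis.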
# Plot up values for windspeed
WS_l = np.arange(5, 40, 0.1)
# - plot up
# Eqn 20
Y = [calc_HOI_flux_eqn_20(I=100E-9, O3=20, WS=i) for i in WS_l]
plt.plot(WS_l, Y, label='Eqn 20')
# Eqn 21
Y = [calc_HOI_flux_eqn_21(I=100E-9, O3=20, WS=i) for i in WS_l]
plt.plot(WS_l, Y, label='Eqn 21')
    # Update aesthetics of the plot and show it
    plt.title('HOI flux vs. wind speed')
    plt.ylabel('HOI flux, nmol m$^{-2}$ d$^{-1}$')
    plt.xlabel('Wind speed (m s$^{-1}$)')
plt.legend()
plt.show()
if __name__ == "__main__":
main()
| 38.048607 | 340 | 0.57818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 51,927 | 0.358557 |
f8675dfd4e125d168dde1ba9e29185bd73af107b | 4,331 | py | Python | writer/cashData/csvUtils.py | sifarone/gce_k8s_deployment | f596e17b9d0263ae24c61ebba9925af4719b4306 | [
"MIT"
] | null | null | null | writer/cashData/csvUtils.py | sifarone/gce_k8s_deployment | f596e17b9d0263ae24c61ebba9925af4719b4306 | [
"MIT"
] | null | null | null | writer/cashData/csvUtils.py | sifarone/gce_k8s_deployment | f596e17b9d0263ae24c61ebba9925af4719b4306 | [
"MIT"
] | 1 | 2021-01-24T17:07:37.000Z | 2021-01-24T17:07:37.000Z | import pandas as pd
from . import cashUtils as utils
class ReadStockBhavDataCSV:
def __init__(self, fileName):
self.df = pd.read_csv(fileName)
self.columns = self.df.columns
def getCSVColumnList(self):
return self.columns
def getStockFlatData(self):
'''
        Returns a list of dictionaries like the following, one per row in the csv file
[
{
symbol : ,
date : ,
prevClose : ,
openPrice : ,
highPrice : ,
lowPrice : ,
lastPrice : ,
closePrice : ,
avgPrice : ,
ttlTrdQtnty : ,
turnoverLacs : ,
noOfTrades : ,
delivQty : ,
delivPer :
},
{},
{},
.
.
.
]
'''
# Nested function
def getDictFromRows(r):
rowDict = {
'symbol' : r[self.columns[utils.STOCK_COL_IDX['symbol']]],
'date' : r[self.columns[utils.STOCK_COL_IDX['date']]],
'prevClose' : r[self.columns[utils.STOCK_COL_IDX['prevClose']]],
'openPrice' : r[self.columns[utils.STOCK_COL_IDX['openPrice']]],
'highPrice' : r[self.columns[utils.STOCK_COL_IDX['highPrice']]],
'lowPrice' : r[self.columns[utils.STOCK_COL_IDX['lowPrice']]],
'lastPrice' : r[self.columns[utils.STOCK_COL_IDX['lastPrice']]],
'closePrice' : r[self.columns[utils.STOCK_COL_IDX['closePrice']]],
'avgPrice' : r[self.columns[utils.STOCK_COL_IDX['avgPrice']]],
'ttlTrdQtnty' : r[self.columns[utils.STOCK_COL_IDX['ttlTrdQtnty']]],
'turnoverLacs' : r[self.columns[utils.STOCK_COL_IDX['turnoverLacs']]],
'noOfTrades' : r[self.columns[utils.STOCK_COL_IDX['noOfTrades']]],
'delivQty' : r[self.columns[utils.STOCK_COL_IDX['delivQty']]],
'delivPer' : r[self.columns[utils.STOCK_COL_IDX['delivPer']]]
}
return rowDict
returnList = []
for index, row in self.df.iterrows():
rowDict = getDictFromRows(row)
returnList.append(rowDict)
return returnList
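# Example usage (hypothetical file name; the column mapping comes from the
# sibling cashUtils module's STOCK_COL_IDX):
#   reader = ReadStockBhavDataCSV('bhavdata.csv')
#   rows = reader.getStockFlatData()
#   print(rows[0]['symbol'], rows[0]['closePrice'])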
class ReadArchivedStockBhavDataCSV:
def __init__(self, fileName):
self.df = pd.read_csv(fileName)
self.columns = self.df.columns
def getCSVColumnList(self):
return self.columns
def getStockFlatData(self):
'''
        Returns a list of dictionaries like the following, one per row in the csv file
[
{
symbol : ,
openPrice : ,
highPrice : ,
lowPrice : ,
closePrice : ,
lastPrice : ,
prevClose : ,
date :
},
{},
{},
.
.
.
]
'''
# Nested function
def getDictFromRows(r):
rowDict = {
'symbol' : r[self.columns[utils.STOCK_ARCHIVED_COL_IDX['symbol']]],
'date' : r[self.columns[utils.STOCK_ARCHIVED_COL_IDX['date']]],
'prevClose' : r[self.columns[utils.STOCK_ARCHIVED_COL_IDX['prevClose']]],
'openPrice' : r[self.columns[utils.STOCK_ARCHIVED_COL_IDX['openPrice']]],
'highPrice' : r[self.columns[utils.STOCK_ARCHIVED_COL_IDX['highPrice']]],
'lowPrice' : r[self.columns[utils.STOCK_ARCHIVED_COL_IDX['lowPrice']]],
'lastPrice' : r[self.columns[utils.STOCK_ARCHIVED_COL_IDX['lastPrice']]],
'closePrice' : r[self.columns[utils.STOCK_ARCHIVED_COL_IDX['closePrice']]]
}
return rowDict
returnList = []
for index, row in self.df.iterrows():
rowDict = getDictFromRows(row)
returnList.append(rowDict)
return returnList
| 36.091667 | 93 | 0.484184 | 4,271 | 0.986146 | 0 | 0 | 0 | 0 | 0 | 0 | 1,734 | 0.400369 |
f8683ceaf922240bb0a9b5391ea9deb94effc25d | 253 | py | Python | programming/python_in_high_performance_computing/cyt_modules/cyt_setup.py | carlosevmoura/courses-notes | dc938625dd79267f9a262e7e6939205f63dda885 | [
"MIT"
] | null | null | null | programming/python_in_high_performance_computing/cyt_modules/cyt_setup.py | carlosevmoura/courses-notes | dc938625dd79267f9a262e7e6939205f63dda885 | [
"MIT"
] | null | null | null | programming/python_in_high_performance_computing/cyt_modules/cyt_setup.py | carlosevmoura/courses-notes | dc938625dd79267f9a262e7e6939205f63dda885 | [
"MIT"
] | null | null | null | from distutils.core import Extension, setup
from Cython.Build import cythonize
from Cython.Compiler import Options
Options.docstrings = False
ext = Extension(name="cyt_module", sources=["cyt_module.pyx"])
setup(
ext_modules = cythonize(ext),
)
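# Typical usage for a setup script like this (standard Cython workflow):
#   python cyt_setup.py build_ext --inplace
# after which the extension is importable as 'import cyt_module'.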
| 18.071429 | 62 | 0.766798 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.110672 |
f86cbd077218ced0fe45ca2c5ef698554acc3ecd | 18,995 | py | Python | server_code.py | johnr0/TaleBrush-backend | f7429e10f328087444647d5dc6bf1f3a22ccfcce | [
"BSD-3-Clause"
] | 1 | 2022-02-25T18:36:16.000Z | 2022-02-25T18:36:16.000Z | server_code.py | johnr0/Generative-Input-NLP | 9607cf2db2aa29f10d4b2179e25dc5bfc9b00288 | [
"BSD-3-Clause"
] | null | null | null | server_code.py | johnr0/Generative-Input-NLP | 9607cf2db2aa29f10d4b2179e25dc5bfc9b00288 | [
"BSD-3-Clause"
] | null | null | null | from flask import request, url_for
from flask_api import FlaskAPI, status, exceptions
from flask_cors import CORS, cross_origin
import torch
import json
import numpy as np
import torch
from modeling_gptneo import GPTNeoForCausalLM
from modeling_gpt2 import GPT2LMHeadModel
from transformers import (
GPTNeoConfig,
GPT2Config,
GPT2Tokenizer
)
import transformers
from nltk import sent_tokenize
import nltk
nltk.download('punkt')
### Loading the model
code_desired = "true"
code_undesired = "false"
model_type = 'gpt2'
gen_type = "gedi"
gen_model_name_or_path = "EleutherAI/gpt-neo-2.7B"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
MODEL_CLASSES = {"gptneo": (GPTNeoConfig, GPTNeoForCausalLM, GPT2Tokenizer), "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),}
config_class_n, model_class_n, tokenizer_class_n = MODEL_CLASSES["gptneo"]
config_class_2, model_class_2, tokenizer_class_2 = MODEL_CLASSES["gpt2"]
tokenizer = tokenizer_class_n.from_pretrained('EleutherAI/gpt-neo-2.7B', do_lower_case=False, additional_special_tokens=['[Prompt]'])
model = model_class_n.from_pretrained(gen_model_name_or_path, load_in_half_prec=True)
model = model.to(device)
model = model.float()
model.config.use_cache=True
model.resize_token_embeddings(len(tokenizer))
gedi_model_name_or_path = 'fortune_gedi'
gedi_model = model_class_2.from_pretrained(gedi_model_name_or_path)
gedi_model.to(device)
gedi_model.resize_token_embeddings(len(tokenizer))
gedi_model.resize_token_embeddings(50258)
wte = gedi_model.get_input_embeddings()
wte.weight.requires_grad=False
wte.weight[len(tokenizer)-1, :]= wte.weight[len(tokenizer)-2, :]
gedi_model.set_input_embeddings(wte)
embed_cont = torch.load('./result_embedding_cont')
embed_infill_front = torch.load('./result_embedding_infill_front')
embed_infill_back = torch.load('./result_embedding_infill_back')
embed_recognition = torch.load('./result_embedding_recognition')
recognition_score = torch.load('./recog_score')
model.set_input_embeddings(embed_cont.wte)
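# NOTE: the embedding tables loaded above are swapped in via
# model.set_input_embeddings() to switch the generator between continuation,
# infilling (front/back) and recognition modes (see the functions below);
# the extra '[Prompt]' token added to the tokenizer appears to act as a
# learned soft-prompt control slot.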
# setting arguments for generation
#max generation length
gen_length = 40
#omega from paper, higher disc_weight means more aggressive topic steering
disc_weight = 30
#1 - rho from paper, should be between 0 and 1 higher filter_p means more aggressive topic steering
filter_p = 0.8
#tau from paper, preserves tokens that are classified as correct topic
target_p = 0.8
#hyperparameter that determines class prior, set to uniform by default
class_bias = 0
if gen_length>1024:
length = 1024
else:
length = gen_length
def cut_into_sentences(text, do_cleanup=True):
"""
Cut text into sentences. \n are also regarded as a sentence.
:param do_cleanup: if True, do cleanups.
:param text: input text.
:return: sentences.
"""
all_sentences = []
# print(text)
# sentences_raw = text.split("\n")
text = text.replace("[Prompt] [Prompt] [Prompt] [Prompt] ", "[Prompt] [Prompt] [Prompt] ")
sentences_raw = text.split('[Prompt] [Prompt] [Prompt]')
text = sentences_raw[len(sentences_raw)-1]
text = text.replace("Start:", " ")
text = text.replace("Characters:", " ")
text = text.replace("Story after start:", " ")
sentences_raw = [text.replace("\n", " ")]
result = []
for item in sentences_raw:
sentence_in_item = sent_tokenize(item)
for item2 in sentence_in_item:
all_sentences.append(item2.strip())
if do_cleanup:
for item in all_sentences:
item = item.replace('<|endoftext|>', '')
if len(item) > 2:
result.append(item)
else:
result = all_sentences
return result
def generate_one_sentence(sentence, control, length=50, disc_weight=30, temperature=0.8, gpt3_id=None):
"""
Generate one sentence based on input data.
:param sentence: (string) context (prompt) used.
:param topic: (dict) {topic: weight, topic:weight,...} topic that the sentence need to steer towards.
:param extra_args: (dict) a dictionary that certain key will trigger additional functionality.
disc_weight: Set this value to use a different control strength than default.
get_gen_token_count: Return only how many tokens the generator has generated (for debug only).
:return: sentence generated, or others if extra_args are specified.
"""
secondary_code = control
if sentence == "":
print("Prompt is empty! Using a dummy sentence.")
sentence = "."
# Specify prompt below
prompt = sentence
    # Calculate original input length.
length_of_prompt = len(sentence)
start_len = 0
text_ids = tokenizer.encode(prompt)
length_of_prompt_in_tokens = len(text_ids)
# print('text ids', text_ids)
encoded_prompts = torch.LongTensor(text_ids).unsqueeze(0).to(device)
if type(control) is str:
multi_code = tokenizer.encode(secondary_code)
elif type(control) is dict:
multi_code = {}
for item in secondary_code:
encoded = tokenizer.encode(item)[0] # only take the first one
multi_code[encoded] = secondary_code[item]
else:
raise NotImplementedError("topic data type of %s not supported... Supported: (str,dict)" % type(control))
# If 1, generate sentences towards a specific topic.
attr_class = 1
print(multi_code)
if int(control)!=-1:
if gpt3_id is None:
generated_sequence = model.generate(input_ids=encoded_prompts,
pad_lens=None,
max_length=length + length_of_prompt_in_tokens,
top_k=None,
top_p=None,
repetition_penalty=1.2,
rep_penalty_scale=10,
eos_token_ids=tokenizer.eos_token_id,
pad_token_id=tokenizer.eos_token_id,
bad_token_ids = tokenizer.all_special_ids,
do_sample=True,
temperature = temperature,
penalize_cond=True,
gedi_model=gedi_model,
tokenizer=tokenizer,
disc_weight=disc_weight,
filter_p=filter_p,
target_p=target_p,
class_bias=class_bias,
attr_class=attr_class,
code_0=code_undesired,
code_1=code_desired,
multi_code=multi_code,
)
else:
generated_sequence = model.generate(input_ids=encoded_prompts,
pad_lens=None,
max_length=length + length_of_prompt_in_tokens,
top_k=None,
top_p=None,
repetition_penalty=1.2,
rep_penalty_scale=10,
eos_token_ids=tokenizer.eos_token_id,
pad_token_id=tokenizer.eos_token_id,
bad_token_ids = tokenizer.all_special_ids,
do_sample=True,
temperature = temperature,
penalize_cond=True,
gedi_model=gedi_model,
tokenizer=tokenizer,
disc_weight=disc_weight,
filter_p=filter_p,
target_p=target_p,
class_bias=class_bias,
attr_class=attr_class,
code_0=code_undesired,
code_1=code_desired,
multi_code=multi_code,
gpt3_api_key=gpt3_id,
)
text = tokenizer.decode(generated_sequence.tolist()[0])
else:
if gpt3_id is None:
generated_sequence = model.generate(input_ids=encoded_prompts,
pad_lens=None,
max_length=length + length_of_prompt_in_tokens,
top_k=None,
top_p=None,
repetition_penalty=1.2,
rep_penalty_scale=10,
eos_token_ids=tokenizer.eos_token_id,
pad_token_id=tokenizer.eos_token_id,
bad_token_ids = tokenizer.all_special_ids,
do_sample=True,
temperature = temperature,
penalize_cond=True,
gedi_model=None,
tokenizer=tokenizer,
disc_weight=disc_weight,
class_bias=class_bias,
attr_class=attr_class,
)
text = tokenizer.decode(generated_sequence.tolist()[0])
else:
import openai
openai.api_key = gpt3_id
completion = openai.Completion()
response = completion.create(prompt=prompt,
engine="curie",
max_tokens=length,
temperature=temperature,)
text = response["choices"][0]["text"]
text = cut_into_sentences(text)
if len(text) == 0:
print("Warning! No text generated.")
return ""
all_gen_text = text[0]
return all_gen_text
import numpy as np
def continuing_generation(prompts, generation_controls, characters, temperatures, gpt3_id=None, disc_weight=30):
"""
Explanations on controls
prompts: The prompt to be input. This is a list of sentences.
generation_controls: Generation control in the list. If no control is given, -1 is given.
"""
model.set_input_embeddings(embed_cont)
prompts = list(prompts)
generated = []
character_prepend = '[Prompt][Prompt][Prompt]'
for idx, character in enumerate(characters):
if idx==0:
character_prepend = character_prepend+character
else:
character_prepend = character_prepend+' '+character
if idx != len(characters)-1:
character_prepend = character_prepend + ','
prompt_start_idx = 0
for c_idx, generation_control in enumerate(generation_controls):
temperature = temperatures[c_idx]
while True:
prompt_postpend = '[Prompt][Prompt][Prompt]'
# prompt_postpend = 'Story: '
            for i in range(prompt_start_idx, len(prompts)):
                prompt_postpend = prompt_postpend + prompts[i]
                if i != len(prompts)-1:
                    prompt_postpend = prompt_postpend + ' '
prompt_input = prompt_postpend+character_prepend+ '[Prompt][Prompt][Prompt]'
prompt_encoded = tokenizer.encode(prompt_input)
length_of_prompt_in_tokens = len(prompt_encoded)
if length_of_prompt_in_tokens>2048:
prompt_start_idx = prompt_start_idx + 1
else:
break
print(prompt_input, generation_control)
gen_sent = generate_one_sentence(prompt_input, generation_control, temperature=temperature, gpt3_id=gpt3_id, disc_weight=disc_weight)
prompts.append(gen_sent)
generated.append(gen_sent)
for gen in generated:
print('gen:', gen)
print()
return generated
def infilling_generation(pre_prompts, post_prompts, generation_controls, characters, temperatures, is_front, gpt3_id=None, disc_weight=30):
"""
Explanations on controls
    pre_prompts / post_prompts: the context before / after the gap, each a list of sentences.
generation_controls: Generation control in the list. If no control is given, -1 is given.
"""
pre_prompts = list(pre_prompts)
post_prompts = list(post_prompts)
right = ''
for idx, pp in enumerate(post_prompts):
right = right + pp
if idx!=len(post_prompts)-1:
right = right + ' '
left = ''
for idx, pp in enumerate(pre_prompts):
left = left + pp
if idx!=len(post_prompts)-1:
left = left + ' '
generated = ['']*len(generation_controls)
    # NOTE: the original indexed the control lists with int(gen_counter/2),
    # which only ever filled half of the slots; assuming one generated
    # sentence per control here.
    for gen_counter in range(len(generation_controls)):
        if is_front:
            generation_control = generation_controls[gen_counter]
            temperature = temperatures[gen_counter]
            model.set_input_embeddings(embed_infill_front)
            prompt_input = '[Prompt][Prompt][Prompt]'+right+'[Prompt][Prompt][Prompt]'+left+'[Prompt][Prompt][Prompt][Prompt]'
            gen_sent = generate_one_sentence(prompt_input, generation_control, temperature=temperature, gpt3_id=gpt3_id, disc_weight=disc_weight)
            generated[gen_counter] = gen_sent
            print(gen_sent)
            left = left + ' ' + gen_sent
        else:
            back_idx = len(generated) - 1 - gen_counter
            generation_control = generation_controls[back_idx]
            temperature = temperatures[back_idx]
            model.set_input_embeddings(embed_infill_back)
            prompt_input = '[Prompt][Prompt][Prompt]'+left+'[Prompt][Prompt][Prompt]'+right+'[Prompt][Prompt][Prompt][Prompt]'
            gen_sent = generate_one_sentence(prompt_input, generation_control, temperature=temperature, gpt3_id=gpt3_id, disc_weight=disc_weight)
            generated[back_idx] = gen_sent
            print(gen_sent)
            right = gen_sent+' '+right
for gen in generated:
print('gen', gen)
print()
return generated
def recognize_sentence_fortune(pre_context, character, target_sentence):
rec_input = "[Prompt][Prompt][Prompt]"+pre_context+"[Prompt][Prompt][Prompt]"+character+"[Prompt][Prompt][Prompt]"+target_sentence
with torch.no_grad():
model.set_input_embeddings(embed_recognition)
tokenized_input = tokenizer.encode(rec_input)
tokenized_input = torch.LongTensor(tokenized_input).unsqueeze(0).to(device)
output = model.transformer(tokenized_input)
op= output[0].type(torch.half)
# op=output[0].type(torch.FloatTensor).to(device)
logits = recognition_score(op)
to_return = float(logits[0][len(tokenized_input[0])-1][0])
if to_return > 1:
to_return = 1
elif to_return <0:
to_return = 0
return to_return
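# Example (hypothetical inputs):
#   recognize_sentence_fortune("Once upon a time ...", "Ella",
#                              "Ella finally found the treasure.")
# returns a fortune score clamped to [0, 1]; the HTTP endpoints below scale
# it by 100 before returning it to the client.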
app = FlaskAPI(__name__)
# run_with_ngrok(app)
CORS(app, resources={r"/*": {"origins": "*"}})
app.config['CORS_HEADERS'] = 'Content-Type'
# Below is a temporary function based on sentiment analysis.
# Hence, it needs to be updated later.
@app.route('/labelSentence', methods=['GET', 'POST'])
@cross_origin(origin='http://10.168.233.218:7082',headers=['Content-Type'])
def sentence_analysis():
if request.method == 'POST':
print(request.data)
sentence = request.data['sentence']
pre_context = request.data['pre_context']
character = request.data['character']
# print(images, group_model, l2t, dec)
value = recognize_sentence_fortune(pre_context, character, sentence)
value = value * 100
return {'value': value}
@app.route('/continuingGeneration', methods=['GET', 'POST'])
@cross_origin(origin='http://10.168.233.218:7082',headers=['Content-Type'])
def continuingGeneration():
if request.method == 'POST':
pre_context = json.loads(request.data['pre_context'])
controls = json.loads(request.data['controls'])
characters = json.loads(request.data['characters'])
temperature = json.loads(request.data['temperature'])
print(pre_context)
print(controls)
print(characters)
print(temperature)
# TODO update below
generated = continuing_generation(pre_context, controls, characters, temperature, gpt3_id=None, disc_weight=30)
# generated = ['This is a generated sentence'] * len(controls)
values = []
for gen in generated:
pre_context_concat = ''
# start_id = 0
# start_id = len(pre_context)-2
# if start_id<0:
# start_id=0
# for idx in range(start_id, len(pre_context)):
# pre_context_concat = pre_context_concat + pre_context[idx]
value = recognize_sentence_fortune(pre_context_concat, characters[0], gen)
pre_context.append(gen)
values.append(value*100)
return {'generated': json.dumps(generated), 'values': json.dumps(values)}
@app.route('/infillingGeneration', methods=['GET', 'POST'])
@cross_origin(origin='http://10.168.233.218:7082',headers=['Content-Type'])
def infillingGeneration():
if request.method == 'POST':
pre_context = json.loads(request.data['pre_context'])
post_context = json.loads(request.data['post_context'])
controls = json.loads(request.data['controls'])
characters = json.loads(request.data['characters'])
temperature = json.loads(request.data['temperature'])
is_front = request.data['is_front']
print(pre_context)
print(post_context)
print(controls)
print(characters)
print(temperature)
# TODO update below
generated = infilling_generation(pre_context, post_context, controls, characters, temperature, is_front, gpt3_id=None, disc_weight=30)
        # generated = ['This is a generated sentence'] * len(controls)
        # Score each generated sentence (assumption: mirror the scoring in
        # /continuingGeneration; the original referenced an undefined
        # 'sentences_analysis' helper).
        values = []
        for gen in generated:
            value = recognize_sentence_fortune('', characters[0], gen)
            values.append(value * 100)
return {'generated': json.dumps(generated), 'values': json.dumps(values)}
if __name__=="__main__":
app.run(host='0.0.0.0', port=11080) | 41.025918 | 140 | 0.588944 | 0 | 0 | 0 | 0 | 2,870 | 0.151092 | 0 | 0 | 3,850 | 0.202685 |
f86d0468889ac52f5ce1040fe21e913a6db95f94 | 20,391 | py | Python | pymatflow/abinit/post/bands.py | DeqiTang/pymatflow | bd8776feb40ecef0e6704ee898d9f42ded3b0186 | [
"MIT"
] | 6 | 2020-03-06T16:13:08.000Z | 2022-03-09T07:53:34.000Z | pymatflow/abinit/post/bands.py | DeqiTang/pymatflow | bd8776feb40ecef0e6704ee898d9f42ded3b0186 | [
"MIT"
] | 1 | 2021-10-02T02:23:08.000Z | 2021-11-08T13:29:37.000Z | pymatflow/abinit/post/bands.py | DeqiTang/pymatflow | bd8776feb40ecef0e6704ee898d9f42ded3b0186 | [
"MIT"
] | 1 | 2021-07-10T16:28:14.000Z | 2021-07-10T16:28:14.000Z | """
post_bands:
post_bands extract data from static-o_DS3_EBANDS.agr and it will build
the kpoints length: xcoord_k from the high symmetry line and the corresponding
basis for reciprocal space.
b1 = 1 / a1, b2 = 1 / a2 and b3 = 1 / a3.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
class PostBands:
def __init__(self):
pass
def get_xcoord_k(self, kpath, cell):
"""
Note:
xcoord_k is the x axis of the band structure plot
let's see how it is build from kpoints and the
crystal lattice or reciprocal lattice.
"""
self.kpath = kpath
self.xcoord_k = []
b1 = 1 / np.sqrt(cell[0][0]**2 + cell[0][1]**2 + cell[0][2]**2)
b2 = 1 / np.sqrt(cell[1][0]**2 + cell[1][1]**2 + cell[1][2]**2)
b3 = 1 / np.sqrt(cell[2][0]**2 + cell[2][1]**2 + cell[2][2]**2)
# actually you will find that in vasp b1=1/a1, b2=1/a2, b3=1/a3
V = np.dot(cell[0], np.cross(cell[1], cell[2]))
b1_vec = np.cross(cell[1], cell[2]) * 2 * np.pi / V
b2_vec = np.cross(cell[2], cell[0]) * 2 * np.pi / V
b3_vec = np.cross(cell[0], cell[1]) * 2 * np.pi / V
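        # Reciprocal lattice vectors: b_i = 2*pi * (a_j x a_k) / V, with
        # V = a1 . (a2 x a3); these give the true k-space distances below.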
print("cell a:")
print("%f %f %f\n" % (cell[0][0], cell[0][1], cell[0][2]))
print("%f %f %f\n" % (cell[1][0], cell[1][1], cell[1][2]))
print("%f %f %f\n" % (cell[2][0], cell[2][1], cell[2][2]))
print("cell b:\n")
print("%f %f %f\n" % (b1_vec[0], b1_vec[1], b1_vec[2]))
print("%f %f %f\n" % (b2_vec[0], b2_vec[1], b2_vec[2]))
print("%f %f %f\n" % (b3_vec[0], b3_vec[1], b3_vec[2]))
self.xcoord_k.append(0.0000000)
for i in range(len(self.kpath) - 1):
# the step in the xcoord_k for each segment is different and it is actually
# the distance between the two high symmetry kpoint in unit of reciprocal coordinates
# divided by the conneciting number kpath[i][4]
if self.kpath[i][4] != "|":
#delta_b_1 = b1*(self.kpath[i+1][0] - self.kpath[i][0])
#delta_b_2 = b2*(self.kpath[i+1][1] - self.kpath[i][1])
#delta_b_3 = b3*(self.kpath[i+1][2] - self.kpath[i][2])
#step = np.sqrt(delta_b_1**2+delta_b_2**2+delta_b_3**2) / (self.kpath[i][4])
                # the above way of calculating the step is only applicable when
                # b1, b2, b3 are perpendicular to each other, so it was abandoned.
vec1 = self.kpath[i][0] * np.array(b1_vec) + self.kpath[i][1] * np.array(b2_vec) + self.kpath[i][2] * np.array(b3_vec)
vec2 = self.kpath[i+1][0] * np.array(b1_vec) + self.kpath[i+1][1] * np.array(b2_vec) + self.kpath[i+1][2] * np.array(b3_vec)
distance_in_b = np.linalg.norm(np.array(vec2)-np.array(vec1))
step = distance_in_b / (self.kpath[i][4])
for j in range(self.kpath[i][4]):
self.xcoord_k.append(self.xcoord_k[-1] + step)
else:
self.xcoord_k.append(self.xcoord_k[-1])
# label for plot
self.locs = []
self.labels_for_matplotlib = []
self.labels_for_gnuplot = []
self.locs.append(0.0000000)
nk = 0
print("%d\n" % nk)
for i in range(len(self.kpath)-1):
if self.kpath[i][4] != "|":
nk = nk + self.kpath[i][4]
self.locs.append(self.xcoord_k[nk])
print("%d\n" % nk)
else:
nk = nk + 1
self.labels_for_matplotlib.append(r"$%s$" % self.kpath[0][3].upper() if self.kpath[0][3].upper() != "GAMMA" else r"$\Gamma$")
self.labels_for_gnuplot.append("%s" % self.kpath[0][3].upper() if self.kpath[0][3].upper() != "GAMMA" else "{/symbol G}")
for i in range(1, len(self.kpath)):
if self.kpath[i-1][4] != "|":
self.labels_for_matplotlib.append(r"$%s$" % self.kpath[i][3].upper() if self.kpath[i][3].upper() != "GAMMA" else r"$\Gamma$")
self.labels_for_gnuplot.append("%s" % self.kpath[i][3].upper() if self.kpath[i][3].upper() != "GAMMA" else "{/symbol G}")
else:
self.labels_for_matplotlib[-1] = r"$%s | %s$" % (self.labels_for_matplotlib[-1].split("$")[1], self.kpath[i][3].upper())
self.labels_for_gnuplot[-1] = "%s | %s" % (self.labels_for_gnuplot[-1], self.kpath[i][3].upper())
def get_ebands_agr(self, filepath="static-o_DS3_EBANDS.agr"):
with open(filepath, 'r') as fin:
self.lines = fin.readlines()
# get the band energy
        # in xxx_EBANDS.agr, energies are in units of eV, and the Fermi energy is already shifted to 0
# first check the magnetic_status
for line in self.lines:
if len(line.split()) == 0:
continue
if line.split()[0] == "#" and line.split()[1] == "mband:":
self.mband = int(line.split()[2].split(",")[0])
self.nkpt = int(line.split()[4].split(",")[0])
self.nsppol = int(line.split()[6].split(",")[0])
self.nspinor = int(line.split()[8])
        # get the eigenvalues (in agr, efermi is shifted to 0 already)
self.energies_agr = []
for i in range(len(self.lines)):
if len(self.lines[i].split()) == 0:
continue
if self.lines[i].split()[0] == "@type" and self.lines[i].split()[1].split("\n")[0] == "xy":
band = []
for j in range(self.nkpt):
band.append(float(self.lines[i+j+1].split()[1]))
self.energies_agr.append(band)
def _plot_band_matplotlib(self, bandrange=[0, 1.0]):
"""
:param bandrange:
a list of two values(between 0 and 1) defining the percentage
of bands to plot.
plotrange[0]: left boundary of the nth band to plot
plotrange[1]: right boundary of the nth band to plot
default is plotrange[0] = 0, plotrange[1], in which case
all the band available will be plot.
Be aware that the range if not for energy but for band number
:param imagebase: image file name(not full)
"""
if self.nsppol == 1:
band_min = int(bandrange[0] * self.mband)
band_max = int(bandrange[1] * self.mband)
for i in range(band_min, band_max, 1):
plt.plot(self.xcoord_k, self.energies_agr[i], color='blue', linewidth=1)
plt.xticks(self.locs, self.labels_for_matplotlib)
plt.title("Band Structure")
plt.xlabel("K")
plt.ylabel("Energy(eV)")
plt.grid(b=True, which='major')
plt.savefig("band-structure-spin-unpolarized.png")
if self.nsppol == 2:
# half of self.energies_agr are spin up, and half are spin down
band_min = int(bandrange[0] * self.mband)
band_max = int(bandrange[1] * self.mband)
# spin up
for i in range(band_min, band_max, 1):
plt.plot(self.xcoord_k, self.energies_agr[i])
plt.title("Band Structure(Spin Up)")
plt.xlabel("K")
plt.ylabel("Energy(eV)")
plt.xticks(self.locs, self.labels_for_matplotlib)
plt.grid(b=True, which='major')
plt.savefig("band-structure-spin-polarized-1.png")
plt.close()
# spin down
for i in range(int(band_min+self.mband), int(band_max+self.mband), 1):
plt.plot(self.xcoord_k, self.energies_agr[i])
plt.title("Band Structure(Spin Down)")
plt.xlabel("K")
plt.ylabel("Energy(eV)")
plt.xticks(self.locs, self.labels_for_matplotlib)
plt.grid(b=True, which='major')
plt.savefig("band-structure-spin-polarized-2.png")
plt.close()
# all in one picture
for i in range(band_min, band_max, 1):
plt.plot(self.xcoord_k, self.energies_agr[i], color="blue", linewidth=1)
for i in range(int(band_min+self.mband), int(band_max+self.mband), 1):
plt.plot(self.xcoord_k, self.energies_agr[i], color="red", linewidth=1)
plt.title("Band Structure(Spin Up&Down)")
plt.xlabel("K")
plt.ylabel("Energy(eV)")
plt.xticks(self.locs, self.labels_for_matplotlib)
plt.grid(b=True, which='major')
plt.savefig("band-structure-spin-polarized-all.png")
plt.close()
def _plot_band_gnuplot(self, bandrange=[0, 1.0]):
"""
:param bandrange:
a list of two values(between 0 and 1) defining the percentage
of bands to plot.
plotrange[0]: left boundary of the nth band to plot
plotrange[1]: right boundary of the nth band to plot
default is plotrange[0] = 0, plotrange[1], in which case
all the band available will be plot.
Be aware that the range if not for energy but for band number
:param imagebase: image file name(not full)
"""
if self.nsppol == 1:
band_min = int(bandrange[0] * self.mband)
band_max = int(bandrange[1] * self.mband)
with open("all-bands-spin-unpolarized.data", 'w') as fout:
fout.write("# band structure extracted from xxx_EBANDS.agr\n")
fout.write("# efermi shfited to 0 already\n")
for i in range(self.mband):
for j in range(len(self.xcoord_k)):
fout.write("%f %f\n" % (self.xcoord_k[j], self.energies_agr[i][j]))
fout.write("\n")
with open("specified-bands-spin-unpolarized.data", 'w') as fout:
fout.write("# band structure extracted from ***_EBANDS.agr\n")
fout.write("# efermi shifted to 0 already\n")
for i in range(band_min, band_max, 1):
for j in range(len(self.xcoord_k)):
fout.write("%f %f\n" % (self.xcoord_k[j], self.energies_agr[i][j]))
fout.write("\n")
with open("all-bands-spin-unpolarized.gnuplot", 'w') as fout:
fout.write("set terminal gif\n")
fout.write("set output 'all-bands-spin-unpolarized.gif'\n")
fout.write("unset key\n")
fout.write("set parametric\n")
fout.write("set title 'Band Structure'\n")
fout.write("set xlabel 'K'\n")
fout.write("set ylabel 'Energy(eV)'\n")
fout.write("set xtics(")
for i in range(len(self.labels_for_gnuplot)-1):
fout.write("'%s' %f, " % (self.labels_for_gnuplot[i], self.locs[i]))
fout.write("'%s' %f)\n" % (self.labels_for_gnuplot[-1], self.locs[-1]))
fout.write("set grid xtics ytics\n")
fout.write("set autoscale\n")
fout.write("plot 'all-bands-spin-unpolarized.data' using 1:2 w l\n")
os.system("gnuplot all-bands-spin-unpolarized.gnuplot")
with open("specified-bands-spin-unpolarized.gnuplot", 'w') as fout:
fout.write("set terminal gif\n")
fout.write("set output 'specified-bands-spin-unpolarized.gif'\n")
fout.write("unset key\n")
fout.write("set parametric\n")
fout.write("set title 'Band Structure'\n")
fout.write("set xlabel 'K'\n")
fout.write("set ylabel 'Energy(eV)'\n")
fout.write("set xtics(")
for i in range(len(self.labels_for_gnuplot)-1):
fout.write("'%s' %f, " % (self.labels_for_gnuplot[i], self.locs[i]))
fout.write("'%s' %f)\n" % (self.labels_for_gnuplot[-1], self.locs[-1]))
fout.write("set grid xtics ytics\n")
fout.write("set autoscale\n")
fout.write("plot 'specified-bands-spin-unpolarized.data' using 1:2 w l\n")
os.system("gnuplot specified-bands-spin-unpolarized.gnuplot")
if self.nsppol == 2:
band_min = int(bandrange[0] * self.mband)
band_max = int(bandrange[1] * self.mband)
with open("all-bands-spin-polarized-spin-1.data", 'w') as fout:
fout.write("# band structure extracted from xxx_EBANDS.agr\n")
fout.write("# efermi shfited to 0 already\n")
for i in range(self.mband):
for j in range(len(self.xcoord_k)):
fout.write("%f %f\n" % (self.xcoord_k[j], self.energies_agr[i][j]))
fout.write("\n")
with open("all-bands-spin-polarized-spin-2.data", 'w') as fout:
fout.write("# band structure extracted from xxx_EBANDS.agr\n")
fout.write("# efermi shifted to 0 already\n")
for i in range(self.mband):
for j in range(len(self.xcoord_k)):
fout.write("%f %f\n" % (self.xcoord_k[j], self.energies_agr[self.mband+i][j]))
fout.write("\n")
with open("specified-bands-spin-polarized-spin-1.data", 'w') as fout:
fout.write("# band structure extracted from xxx_EBANDS.agr\n")
fout.write("# efermi shifted to 0 already\n")
for i in range(band_min, band_max, 1):
for j in range(len(self.xcoord_k)):
fout.write("%f %f\n" % (self.xcoord_k[j], self.energies_agr[i][j]))
fout.write("\n")
with open("specified-bands-spin-polarized-spin-2.data", 'w') as fout:
fout.write("# band structure extracted from xxx_EBANDS.agr\n")
fout.write("# efermi shifted to 0 already\n")
for i in range(band_min, band_max, 1):
for j in range(len(self.xcoord_k)):
fout.write("%f %f\n" % (self.xcoord_k[j], self.energies_agr[self.mband+i][j]))
fout.write("\n")
with open("all-bands-spin-polarized-spin-1.gnuplot", 'w') as fout:
fout.write("set terminal gif\n")
fout.write("set output 'all-bands-spin-polarized-spin-1.gif'\n")
fout.write("unset key\n")
fout.write("set parametric\n")
fout.write("set title 'Band Structure'\n")
fout.write("set xlabel 'K'\n")
fout.write("set ylabel 'Energy(eV)'\n")
fout.write("set xtics(")
for i in range(len(self.labels_for_gnuplot)-1):
fout.write("'%s' %f, " % (self.labels_for_gnuplot[i], self.locs[i]))
fout.write("'%s' %f)\n" % (self.labels_for_gnuplot[-1], self.locs[-1]))
fout.write("set grid xtics ytics\n")
fout.write("set autoscale\n")
fout.write("plot 'all-bands-spin-polarized-spin-1.data' using 1:2 w l\n")
os.system("gnuplot all-bands-spin-polarized-spin-1.gnuplot")
with open("all-bands-spin-polarized-spin-2.gnuplot", 'w') as fout:
fout.write("set terminal gif\n")
fout.write("set output 'all-bands-spin-polarized-spin-2.gif'\n")
fout.write("unset key\n")
fout.write("set parametric\n")
fout.write("set title 'Band Structure'\n")
fout.write("set xlabel 'K'\n")
fout.write("set ylabel 'Energy(eV)'\n")
fout.write("set xtics(")
for i in range(len(self.labels_for_gnuplot)-1):
fout.write("'%s' %f, " % (self.labels_for_gnuplot[i], self.locs[i]))
fout.write("'%s' %f)\n" % (self.labels_for_gnuplot[-1], self.locs[-1]))
fout.write("set grid xtics ytics\n")
fout.write("set autoscale\n")
fout.write("plot 'all-bands-spin-polarized-spin-2.data' using 1:2 w l\n")
os.system("gnuplot all-bands-spin-polarized-spin-2.gnuplot")
with open("specified-bands-spin-polarized-spin-1.gnuplot", 'w') as fout:
fout.write("set terminal gif\n")
fout.write("set output 'specified-bands-spin-polarized-spin-1.gif'\n")
fout.write("unset key\n")
fout.write("set parametric\n")
fout.write("set title 'Band Structure'\n")
fout.write("set xlabel 'K'\n")
fout.write("set ylabel 'Energy(eV)'\n")
fout.write("set xtics(")
for i in range(len(self.labels_for_gnuplot)-1):
fout.write("'%s' %f, " % (self.labels_for_gnuplot[i], self.locs[i]))
fout.write("'%s' %f)\n" % (self.labels_for_gnuplot[-1], self.locs[-1]))
fout.write("set grid xtics ytics\n")
fout.write("set autoscale\n")
fout.write("plot 'specified-bands-spin-polarized-spin-1.data' using 1:2 w l\n")
os.system("gnuplot specified-bands-spin-polarized-spin-1.gnuplot")
with open("specified-bands-spin-polarized-spin-2.gnuplot", 'w') as fout:
fout.write("set terminal gif\n")
fout.write("set output 'specified-bands-spin-polarized-spin-2.gif'\n")
fout.write("unset key\n")
fout.write("set parametric\n")
fout.write("set title 'Band Structure'\n")
fout.write("set xlabel 'K'\n")
fout.write("set ylabel 'Energy(eV)'\n")
fout.write("set xtics(")
for i in range(len(self.labels_for_gnuplot)-1):
fout.write("'%s' %f, " % (self.labels_for_gnuplot[i], self.locs[i]))
fout.write("'%s' %f)\n" % (self.labels_for_gnuplot[-1], self.locs[-1]))
fout.write("set grid xtics ytics\n")
fout.write("set autoscale\n")
fout.write("plot 'specified-bands-spin-polarized-spin-2.data' using 1:2 w l\n")
os.system("gnuplot specified-bands-spin-polarized-spin-2.gnuplot")
def plot_band(self, option="matplotlib", bandrange=[0, 1.0]):
"""
:parama option:
gnuplot or matplotlib
:param bandrange:
a list of two values(between 0 and 1) defining the percentage
of bands to plot.
plotrange[0]: left boundary of the nth band to plot
plotrange[1]: right boundary of the nth band to plot
default is plotrange[0] = 0, plotrange[1], in which case
all the band available will be plot.
Be aware that the range if not for energy but for band number
:param imagebase: image file name(not full)
"""
if option == "matplotlib":
self._plot_band_matplotlib(bandrange=bandrange)
elif option == "gnuplot":
self._plot_band_gnuplot(bandrange=bandrange)
#
def export(self, directory="tmp-abinit-static", bandrange=[0, 1], option="matplotlib"):
"""
:parama option:
gnuplot or matplotlib
:param bandrange:
a list of two values(between 0 and 1) defining the percentage
of bands to plot.
plotrange[0]: left boundary of the nth band to plot
plotrange[1]: right boundary of the nth band to plot
default is plotrange[0] = 0, plotrange[1], in which case
all the band available will be plot.
Be aware that the range if not for energy but for band number
"""
os.system("mkdir -p %s/post-processing" % directory)
os.chdir(os.path.join(directory, "post-processing"))
self.plot_band(option=option, bandrange=bandrange)
os.chdir("../../")
| 51.492424 | 142 | 0.529106 | 20,056 | 0.983571 | 0 | 0 | 0 | 0 | 0 | 0 | 7,702 | 0.377716 |
f86d356d798352c0185a9ec2592dc21b131a7ed8 | 337 | py | Python | logconfig.py | Erick-Faster/gerbot-api | 36d723c7e9df525b99fd4eff2da318e9046e7734 | [
"Apache-2.0"
] | null | null | null | logconfig.py | Erick-Faster/gerbot-api | 36d723c7e9df525b99fd4eff2da318e9046e7734 | [
"Apache-2.0"
] | null | null | null | logconfig.py | Erick-Faster/gerbot-api | 36d723c7e9df525b99fd4eff2da318e9046e7734 | [
"Apache-2.0"
] | null | null | null |
import logging
import logging.config
logging.config.fileConfig('./instance/logging.conf')
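# logging.conf is expected in the standard logging.config.fileConfig format,
# i.e. with [loggers], [handlers] and [formatters] sections that should
# declare the 'Cognitive-API' logger used below.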
# create logger
logger = logging.getLogger('Cognitive-API')
# 'application' code
'''
logger.debug('debug message')
logger.info('info message')
logger.warning('warn message')
logger.error('error message')
logger.critical('critical message')
''' | 19.823529 | 52 | 0.756677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 237 | 0.703264 |
f86db685725dd6affbd6d16efda49f2dd028eb93 | 1,735 | py | Python | tests/app/test_app_service.py | 0604hx/buter | 670584e7c39c985192684c9f68f52fc69c57049c | [
"MIT"
] | 2 | 2017-11-21T10:00:47.000Z | 2018-02-02T04:40:09.000Z | tests/app/test_app_service.py | 0604hx/buter | 670584e7c39c985192684c9f68f52fc69c57049c | [
"MIT"
] | 1 | 2018-10-31T06:56:22.000Z | 2018-11-01T00:58:16.000Z | tests/app/test_app_service.py | 0604hx/buter | 670584e7c39c985192684c9f68f52fc69c57049c | [
"MIT"
] | 5 | 2017-12-14T01:07:21.000Z | 2020-04-29T02:21:46.000Z | import json
import unittest
from buter.app.services import load_from_file, detect_app_name
from buter.server import docker
from buter.util.Utils import unzip
from config import getConfig
class AppServiceTest(unittest.TestCase):
def setUp(self):
"""
        Only the server.docker object needs to be initialized here.
:return:
"""
config = getConfig('dev')
docker.setup(config)
def test_load_from_file(self):
load_from_file("G:/tidb.zip")
def test_load_image(self):
docker.loadImage("G:/tidb.tar")
def test_json_read(self):
with open("G:/app.json") as content:
app = json.load(content) # '{"name":"abc"}'
print(app)
docker.createContainer("pingcap/tidb", app['cmd'], app['args'])
def test_detect_app_name(self):
app = json.loads('{"image":"pingcap/tidb", "args":{"name":"tidb01"}}')
self.assertEqual("tidb", detect_app_name(None, app['image']))
self.assertEqual("tidb01", detect_app_name(app['args']))
self.assertEqual("tidb", detect_app_name("tidb"))
def test_unzip(self):
file_path = "G:/test/test.zip"
unzip(file_path, "G:/test")
def test_list_container(self):
containers = docker.listContainer()
print(containers)
for c in containers:
print("container: name={}, id={} ({}), labels={}, stat={}"
.format(c.name, c.id, c.short_id, c.labels, c.status))
print([{"name": c.name, "id": c.short_id, "labels": c.labels, "stat": c.status} for c in containers])
cs = dict((c.name, {"id": c.short_id, "labels": c.labels, "stat": c.status}) for c in containers)
print(cs)
if __name__ == '__main__':
unittest.main()
| 30.982143 | 109 | 0.609222 | 1,515 | 0.863248 | 0 | 0 | 0 | 0 | 0 | 0 | 395 | 0.225071 |
f86f8495a3b204ecbbc51199ca2187879cae3c8e | 397 | py | Python | code/level6.py | ab300819/PythonChallenge | 4bcc91f8b11d0a5ec5720137bef55eec6b1f7581 | [
"Apache-2.0"
] | null | null | null | code/level6.py | ab300819/PythonChallenge | 4bcc91f8b11d0a5ec5720137bef55eec6b1f7581 | [
"Apache-2.0"
] | null | null | null | code/level6.py | ab300819/PythonChallenge | 4bcc91f8b11d0a5ec5720137bef55eec6b1f7581 | [
"Apache-2.0"
] | null | null | null | # -*-coding:utf-8-*-
__author__ = 'Mason'
import re
import zipfile
z = zipfile.ZipFile('channel.zip', mode='r')
number = '90052'
comments = []
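# Follow the chain of numbers through the zip entries: each <number>.txt
# names the next entry, and the per-entry zip comments, concatenated,
# spell out the answer.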
while True:
text = z.read(number + '.txt')
number = re.findall('([0-9]+)', text)
print number
try:
number = number[0]
comments.append(z.getinfo(number + '.txt').comment)
    except (IndexError, KeyError):
break
print ''.join(comments)
| 19.85 | 59 | 0.594458 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.186398 |
f870be2bd112b621b44e0d7642b1d268ee31edf5 | 728 | py | Python | subscriptions/subscription.py | iamsharmaapoorv/availability-checker | 02fc28f495140f74fa38c02a3e4a5111e196151f | [
"MIT"
] | null | null | null | subscriptions/subscription.py | iamsharmaapoorv/availability-checker | 02fc28f495140f74fa38c02a3e4a5111e196151f | [
"MIT"
] | null | null | null | subscriptions/subscription.py | iamsharmaapoorv/availability-checker | 02fc28f495140f74fa38c02a3e4a5111e196151f | [
"MIT"
] | null | null | null | from products.product import Product
from notifications.notification import Notification
from clients.client import Client
class Subscription:
def __init__(self, product: Product, timing: int):
self.notifications = []
self.product = product
self.timing = timing
def add_notification(self, notification: Notification) -> None:
self.notifications.append(notification)
def check_availability(self, client: Client):
return self.product.check_availability(client)
def send_notifications(self, client: Client):
for notification in self.notifications:
notification.notify(self.product.title,
self.product.content, client)
| 33.090909 | 67 | 0.696429 | 602 | 0.826923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f871c0ad8b9204fef05550a10cc4ceb534586079 | 654 | py | Python | joi2008yo/joi2008yo_e.py | Vermee81/practice-coding-contests | 78aada60fa75f208ee0eef337b33b27b1c260d18 | [
"MIT"
] | null | null | null | joi2008yo/joi2008yo_e.py | Vermee81/practice-coding-contests | 78aada60fa75f208ee0eef337b33b27b1c260d18 | [
"MIT"
] | null | null | null | joi2008yo/joi2008yo_e.py | Vermee81/practice-coding-contests | 78aada60fa75f208ee0eef337b33b27b1c260d18 | [
"MIT"
] | null | null | null | # https://atcoder.jp/contests/joi2008yo/tasks/joi2008yo_e
R, C = list(map(int, input().split()))
senbei_pos = []
ans = 0
for _ in range(R):
pos = list(map(int, input().split()))
senbei_pos.append(pos)
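# Brute force: enumerate all 2^R patterns of row flips; once the rows are
# fixed, each column can be flipped independently, so every column
# contributes max(#zeros, #ones) to the total.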
for bit in range(2**R):
total = 0
copied_pos = senbei_pos[:]
    # R is at most 10, so zero-pad to a 10-digit binary string
flip_row_pos = list(format(bit, '010b'))
for j in range(C):
column = [p[j] for p in copied_pos]
one_count = sum([column[k] ^ int(flip_row_pos[10 - R + k])
for k in range(R)])
zero_count = R - one_count
total += max(zero_count, one_count)
ans = max(ans, total)
print(ans)
| 29.727273 | 66 | 0.59633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 135 | 0.193966 |
f8724ce5a5705922dd55fcf91b7512b691dc8ab7 | 2,850 | py | Python | yttgmp3.py | RomaniukVadim/ytmp3_bot | ce3cc3cfa2098257e4ec22c019c8c33d31a73128 | [
"WTFPL"
] | 1 | 2018-03-27T00:08:26.000Z | 2018-03-27T00:08:26.000Z | yttgmp3.py | RomaniukVadim/ytmp3_bot | ce3cc3cfa2098257e4ec22c019c8c33d31a73128 | [
"WTFPL"
] | null | null | null | yttgmp3.py | RomaniukVadim/ytmp3_bot | ce3cc3cfa2098257e4ec22c019c8c33d31a73128 | [
"WTFPL"
] | 1 | 2020-06-04T02:49:20.000Z | 2020-06-04T02:49:20.000Z | #!/usr/env python3
import requests
import os
import glob
import telegram
from time import sleep
token = "token"
bot = telegram.Bot(token=token)
# A YouTube link is sent to the bot; it feeds the link to the shell command
# youtube-dl -x --audio-format mp3 <link> and sends the downloaded mp3 back to the client
class BotHandler:
def __init__(self, token):
self.token = token
self.api_url = "https://api.telegram.org/bot{}/".format(token)
def get_updates(self, offset=None, timeout=30):
method = 'getUpdates'
params = {'timeout': timeout, 'offset': offset}
resp = requests.get(self.api_url + method, params)
result_json = resp.json()['result']
return result_json
def send_audio(self, chat_id, audio):
params = {'chat_id': chat_id, 'audio': audio}
method = 'sendAudio'
resp = requests.post(self.api_url + method, params)
return resp
def get_last_update(self):
get_result = self.get_updates()
if len(get_result) > 0:
last_update = get_result[-1]
        else:
            # No pending updates: fall back to a sentinel value (the original
            # code provoked a deliberate IndexError to reach this point)
            last_update = 'null'
return last_update
def mp3_download(url):
cwd = os.getcwd() + "/"
os.system('youtube-dl -x --audio-format mp3 ' + url)
try:
sleep(15)
mp3_name = glob.glob(cwd + "*.mp3")[0]
return mp3_name
except:
print("Aw, man")
def song_rm():
cwd = os.getcwd() + "/"
try:
os.system('rm ' + cwd + '*.mp3')
except:
print("Aw, man")
mp3_bot = BotHandler(token)
def main():
new_offset = None
while True:
mp3_bot.get_updates(new_offset)
last_update = mp3_bot.get_last_update()
try:
last_update_id = last_update['update_id']
last_chat_text = last_update['message']['text']
last_chat_id = last_update['message']['chat']['id']
except:
last_update_id = 0
last_chat_text = 'null'
last_chat_id = 0
print(last_chat_text)
if 'https://www.youtube.com/' in last_chat_text.lower() or 'https://youtu.be/' in last_chat_text.lower():
bot.send_message(chat_id=last_chat_id, text="Downloading, please wait....")
song_name = mp3_download(last_chat_text)
bot.send_message(chat_id=last_chat_id, text="Uploading, please wait....")
bot.send_audio(chat_id=last_chat_id, audio=open(song_name, 'rb'))
song_rm()
elif '/start' in last_chat_text.lower():
bot.send_message(chat_id=last_chat_id, text="Please send me youtube link.")
new_offset = last_update_id + 1
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
exit()
| 30.978261 | 138 | 0.597895 | 954 | 0.326489 | 0 | 0 | 0 | 0 | 0 | 0 | 622 | 0.212868 |
f873639a13e98ee3a4151d1be3542d91c969ac64 | 530 | py | Python | djangobmf/contrib/team/views.py | dmatthes/django-bmf | 3a97167de7841b13f1ddd23b33ae65e98dc49dfd | [
"BSD-3-Clause"
] | 1 | 2020-05-11T08:00:49.000Z | 2020-05-11T08:00:49.000Z | djangobmf/contrib/team/views.py | dmatthes/django-bmf | 3a97167de7841b13f1ddd23b33ae65e98dc49dfd | [
"BSD-3-Clause"
] | null | null | null | djangobmf/contrib/team/views.py | dmatthes/django-bmf | 3a97167de7841b13f1ddd23b33ae65e98dc49dfd | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
# ex:set fileencoding=utf-8:
from __future__ import unicode_literals
from djangobmf.views import ModuleCreateView
from djangobmf.views import ModuleUpdateView
from djangobmf.views import ModuleDetailView
from .forms import BMFTeamUpdateForm
from .forms import BMFTeamCreateForm
class TeamCreateView(ModuleCreateView):
form_class = BMFTeamCreateForm
class TeamUpdateView(ModuleUpdateView):
form_class = BMFTeamUpdateForm
class TeamDetailView(ModuleDetailView):
form_class = BMFTeamUpdateForm
| 22.083333 | 44 | 0.828302 | 222 | 0.418868 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.084906 |
f873731d39e77de62eb053df48244e290afd54de | 1,038 | py | Python | py/LSS/imaging/veto_masks/lrg/lrg_wise_mask_v1.py | echaussidon/LSS | 205ce48a288acacbd41358e6d0215f4aff355049 | [
"BSD-3-Clause"
] | null | null | null | py/LSS/imaging/veto_masks/lrg/lrg_wise_mask_v1.py | echaussidon/LSS | 205ce48a288acacbd41358e6d0215f4aff355049 | [
"BSD-3-Clause"
] | null | null | null | py/LSS/imaging/veto_masks/lrg/lrg_wise_mask_v1.py | echaussidon/LSS | 205ce48a288acacbd41358e6d0215f4aff355049 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import division, print_function
import sys, os, glob, time, warnings, gc
# import matplotlib.pyplot as plt
import numpy as np
from astropy.table import Table, vstack, hstack
import fitsio
from astropy.io import fits
from scipy.interpolate import interp1d
output_path = '/global/cfs/cdirs/desi/users/rongpu/desi_mask/w1_bright-2mass-lrg_mask_v1.fits'
# WISE mask
w1_mags = [0, 0.5, 1, 1.5, 2, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0]
w1_radii = [600, 600, 550, 500, 475, 425, 400, 400, 390, 392.5, 395, 370, 360, 330, 275, 240, 210, 165, 100, 75, 60]
w1_max_mag = 10.0
f_radius = interp1d(w1_mags, w1_radii, bounds_error=False, fill_value='extrapolate')
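# Example (my addition, not in the original script): the interpolator maps a
# W1 magnitude to a masking radius, with brighter sources masked more widely.
#   f_radius([2.0, 6.25, 9.5])  # -> array([475., 345.,  75.])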
wise_path = '/global/cfs/cdirs/desi/users/rongpu/desi_mask/w1_bright-2mass-13.3-dr9.fits'
wise = Table(fitsio.read(wise_path))
# print(len(wise))
wise['w1ab'] = np.array(wise['W1MPRO']) + 2.699
mask = wise['w1ab']<w1_max_mag
wise['radius'] = 0.
wise['radius'][mask] = f_radius(wise['w1ab'][mask])
wise.write(output_path)
| 33.483871 | 116 | 0.706166 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 274 | 0.263969 |
f87515fbbdca8d3d26053fb65bc3d5ece4d188b8 | 290 | py | Python | cursoDePythonNaPratica/aula18 - telegram.py | wemerson-henrique/kivy | 3cb6061a2d19b01e86c3738206f30c8a853763d4 | ["MIT"] | null | null | null | cursoDePythonNaPratica/aula18 - telegram.py | wemerson-henrique/kivy | 3cb6061a2d19b01e86c3738206f30c8a853763d4 | ["MIT"] | null | null | null | cursoDePythonNaPratica/aula18 - telegram.py | wemerson-henrique/kivy | 3cb6061a2d19b01e86c3738206f30c8a853763d4 | ["MIT"] | null | null | null |
import telepot
import time
# I have not created the Telegram bot yet, so this code does not work yet.
# TODO: create the bot on Telegram and get the API key
bot = telepot.Bot("Your Telegram API key goes here")
def recebendoMsg(msg):
    print(msg['text'])
bot.message_loop(recebendoMsg)
while True:
    time.sleep(10)  # keep the process alive without busy-waiting
 | 20.714286 | 74 | 0.741379 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 159 | 0.544521 |
f875e138fd658884c3bfbd92197a369b04338ea0 | 4,590 | py | Python | cembot/languages/EN.py | niksart/cembot | 99ec3067bde5b8b72053dd18caa18742afba6a5e | ["MIT"] | null | null | null | cembot/languages/EN.py | niksart/cembot | 99ec3067bde5b8b72053dd18caa18742afba6a5e | ["MIT"] | 15 | 2018-08-30T13:56:27.000Z | 2021-07-21T08:58:03.000Z | cembot/languages/EN.py | niksart/cembot | 99ec3067bde5b8b72053dd18caa18742afba6a5e | ["MIT"] | null | null | null |
# Support for english (EN) language
def missing_translation(tr_id):
return "MISSING TRANSLATION FOR STRING ID '" + str(tr_id) + "'"
helper_commands = {
"AUTHORIZE": "Usage:\n/authorize @<username>\n/authorize <user id>",
"DEAUTHORIZE": "Usage:\n/deauthorize @<username>\n/deauthorize <user id>",
"GIVEN": "Usage:\n/given <amount> @<username> <description>",
"SPENT": "Usage:\n/spent <amount> <description>.\nPayees are all the members of the group, including the payer.",
"MYID": "Usage: /myid\nshow your user id, useful if you have no username",
"START": "Show the initial message",
"LAST_GROUP_EXPENSES": "See the last expenses in a group. \n"
"Usage:\n"
"▪️ /last_expenses (show max 5 expenses)\n"
"▪️ /last_expenses <n max expenses to show>",
"LAST_CHARGES": "Use this command in private chat to see the last charges on your cembot account. \n"
"Usage:\n"
"▪️ /last_charges (show max 5 charges)\n"
"▪️ /last_charges <n max charges to show>",
"LAST_LOANS": "Use this command in private chat to see the last loans you did \n"
"Usage:\n"
"▪️ /last_loans (show max 5 loans)\n"
"▪️ /last loans <n max loans to show>"
}
info = {
"start": missing_translation("start"),
"guide": missing_translation("start"),
"introduced_in_group": "Hello everyone!\nI'm cembot, and I'll help you administrating your expenses!\n"
"Each member of this group now should introduce yourself. "
"People added after this message can avoid to introduce themselves.\n"
"Do it with the command /hereIam",
"each_member_introduced": missing_translation("each_member_introduced"),
"person_missing": "1 person is missing.",
"people_missing": " people are missing.",
"transaction_succeed": "Transaction added successfully!",
"authorized_confirm(user)": "User @%s has been authorized.",
"deauthorized_confirm(user)": "The authorization of user @%s has been revoked.",
"your_id_is(id)": "Your Telegram id is %s. You can add in Telegram settings an username and use cembot more easily.",
"balance_with_other_user(user,balance)": "Your balance with the user %s is %s",
"header_balance_credit": "📗 Credits\n",
"header_balance_debit": "📕 Debits\n",
"commands": missing_translation("commands"),
"these_are_the_last_group_expenses": missing_translation("these_are_the_last_group_expenses"),
"these_are_the_last_individual_charges": missing_translation("these_are_the_last_individual_charges"),
"these_are_the_last_group_charges": missing_translation("these_are_the_last_group_charges"),
"no_charges_yet": missing_translation("no_charges_yet"),
"these_are_the_last_individual_loans": missing_translation("these_are_the_last_individual_loans"),
"these_are_the_last_group_loans": missing_translation("these_are_the_last_group_loans")
}
error = {
"command_unavailable_for_private": "For using this command open a private chat with @en_cembot.",
"command_unavailable_for_group": "For using this command add @en_cembot in a group.",
"amount_money_not_valid": "Amount of money not valid.",
"waiting_for_all_users": "Someone did not present themselves yet.\n"
"Present yourself with /hereIam before adding expenses.",
"lack_of_authorization(user)": "The user @%s has not authorized you for charging expenses.",
"user_unregistered(user)": "The user @%s that you want to add as a payee is not registered on our system",
"can't_deauthorize_cause_not_authorized_yet": "You have not already authorized this user. You can't deauthorize it.",
"have_authorized_yet_this_user": "You have already authorized this user.",
"maybe_you_wrote_an_username_instead_id": "This is not a numeric id. If you intended to write an username write it with a @ at the beginning.",
"insert_a_correct_number": "Insert a correct number and retry"
}
# commands
private_commands = {
"start": "START",
"commands": "COMMANDS",
"authorize": "AUTHORIZE",
"revoke": "DEAUTHORIZE",
"given": "GIVEN",
"myid": "MYID",
"balance": "BALANCE",
"last_charges": "LAST_CHARGES",
"last_loans": "LAST_LOANS",
"guide": "GUIDE"
}
group_commands = {
"spent": "SPENT",
"spent@en_cembot": "SPENT", # version with @[language]_cembot
"hereIam": "PRESENTATION",
"hereIam@en_cembot": "PRESENTATION", # version with @[language]_cembot
"last_expenses": "LAST_GROUP_EXPENSES",
"last_expenses@en_cembot": "LAST_GROUP_EXPENSES", # version with @[language]_cembot
}
| 49.891304 | 144 | 0.70305 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,693 | 0.799351 |
f87b501fc4a702c459b5a826cf1537ec0638bb2a | 1,855 | py | Python | core/migrations/0001_initial.py | SanjaLV/Tenis | ea714da10207723c27ff7204b4285ea6a773521b | ["MIT"] | null | null | null | core/migrations/0001_initial.py | SanjaLV/Tenis | ea714da10207723c27ff7204b4285ea6a773521b | ["MIT"] | null | null | null | core/migrations/0001_initial.py | SanjaLV/Tenis | ea714da10207723c27ff7204b4285ea6a773521b | ["MIT"] | null | null | null |
# Generated by Django 2.1.7 on 2019-03-31 10:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Game',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('score1', models.IntegerField(default=0)),
('score2', models.IntegerField(default=0)),
('elo1', models.IntegerField()),
('elo2', models.IntegerField()),
('change', models.IntegerField()),
('date', models.DateTimeField()),
('verified', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Player',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('elo', models.IntegerField()),
('userID', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='game',
name='player1',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='player_one', to='core.Player'),
),
migrations.AddField(
model_name='game',
name='player2',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='player_two', to='core.Player'),
),
]
| 37.1 | 132 | 0.581132 | 1,696 | 0.914286 | 0 | 0 | 0 | 0 | 0 | 0 | 228 | 0.122911 |
f87cfb9c6282ebda75b44ea58b3afec144dcbcf4 | 448 | py | Python | generator.py | iomintz/python-snippets | 982861c173bf4bcd5d908514a9e8b1914a580a5d | ["CC0-1.0"] | 2 | 2020-04-10T07:29:56.000Z | 2020-05-27T03:45:21.000Z | generator.py | iomintz/python-snippets | 982861c173bf4bcd5d908514a9e8b1914a580a5d | ["CC0-1.0"] | null | null | null | generator.py | iomintz/python-snippets | 982861c173bf4bcd5d908514a9e8b1914a580a5d | ["CC0-1.0"] | 2 | 2018-11-24T08:16:59.000Z | 2019-02-24T04:41:30.000Z |
#!/usr/bin/env python3
# encoding: utf-8
# Douglas Crockford's idea for making generators
# basically "why do you need a `yield` keyword when you can just maintain some state"
# in my view, a class would be a better way to do this, and indeed, in python,
# that's how Iterators are defined.
def iter(list):
i = 0
def gen():
nonlocal i
value = list[i]
i += 1
return value
return gen
gen = iter([1,2,3])
for _ in range(3):  # one call per item; a fourth call would raise IndexError
	print(gen())
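# A sketch of the class-based version the comments above allude to (my
# addition, not part of the original snippet): the counter lives on the
# instance instead of in a closure.
class ListGen:
	def __init__(self, items):
		self.items = items
		self.i = 0
	def __next__(self):
		value = self.items[self.i]
		self.i += 1
		return value
# g = ListGen([1, 2, 3])
# next(g)  # 1, just like gen()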
| 22.4 | 85 | 0.683036 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 285 | 0.636161 |
f87d14c4254943a1783a77600bab62106f89c898 | 6,336 | py | Python | pddlstream/ss/algorithms/fast_downward.py | zerongxi/Kitchen2D | 2cbaa6c8ea8fbf5f5c3a5de34cb11efde4121793 | ["MIT"] | 35 | 2018-03-15T14:26:33.000Z | 2022-02-09T15:37:59.000Z | pddlstream/ss/algorithms/fast_downward.py | zerongxi/Kitchen2D | 2cbaa6c8ea8fbf5f5c3a5de34cb11efde4121793 | ["MIT"] | 1 | 2020-11-03T04:49:43.000Z | 2020-11-17T16:42:48.000Z | pddlstream/ss/algorithms/fast_downward.py | zerongxi/Kitchen2D | 2cbaa6c8ea8fbf5f5c3a5de34cb11efde4121793 | ["MIT"] | 12 | 2018-04-28T20:11:21.000Z | 2021-09-18T22:24:46.000Z |
from time import time
from ss.utils import INF
import sys
import os
import shutil
TEMP_DIR = 'temp/'
DOMAIN_INPUT = 'domain.pddl'
PROBLEM_INPUT = 'problem.pddl'
TRANSLATE_OUTPUT = 'output.sas'
SEARCH_OUTPUT = 'sas_plan'
ENV_VAR = 'FD_PATH'
FD_BIN = 'bin'
TRANSLATE_DIR = 'translate/'
SEARCH_COMMAND = 'downward --internal-plan-file %s %s < %s'
SEARCH_OPTIONS = {
'dijkstra': '--heuristic "h=blind(transform=adapt_costs(cost_type=NORMAL))" '
'--search "astar(h,cost_type=NORMAL,max_time=%s,bound=%s)"',
'max-astar': '--heuristic "h=hmax(transform=adapt_costs(cost_type=NORMAL))"'
' --search "astar(h,cost_type=NORMAL,max_time=%s,bound=%s)"',
'ff-astar': '--heuristic "h=ff(transform=adapt_costs(cost_type=NORMAL))" '
'--search "astar(h,cost_type=NORMAL,max_time=%s,bound=%s)"',
'ff-wastar1': '--heuristic "h=ff(transform=adapt_costs(cost_type=NORMAL))" '
'--search "lazy_wastar([h],preferred=[h],reopen_closed=true,boost=100,w=1,'
'preferred_successors_first=true,cost_type=NORMAL,max_time=%s,bound=%s)"',
'ff-wastar3': '--heuristic "h=ff(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "lazy_wastar([h],preferred=[h],reopen_closed=false,boost=100,w=3,'
'preferred_successors_first=true,cost_type=PLUSONE,max_time=%s,bound=%s)"',
'ff-wastar5': '--heuristic "h=ff(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "lazy_wastar([h],preferred=[h],reopen_closed=false,boost=100,w=5,'
'preferred_successors_first=true,cost_type=PLUSONE,max_time=%s,bound=%s)"',
'cea-wastar1': '--heuristic "h=cea(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "lazy_wastar([h],preferred=[h],reopen_closed=false,boost=1000,w=1,'
'preferred_successors_first=true,cost_type=PLUSONE,max_time=%s,bound=%s)"',
'cea-wastar3': '--heuristic "h=cea(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "lazy_wastar([h],preferred=[h],reopen_closed=false,boost=1000,w=3,'
'preferred_successors_first=true,cost_type=PLUSONE,max_time=%s,bound=%s)"',
'cea-wastar5': '--heuristic "h=cea(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "lazy_wastar([h],preferred=[h],reopen_closed=false,boost=1000,w=5,'
'preferred_successors_first=true,cost_type=PLUSONE,max_time=%s,bound=%s)"',
'ff-eager': '--heuristic "hff=ff(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "eager_greedy([hff],max_time=%s,bound=%s)"',
'ff-eager-pref': '--heuristic "hff=ff(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "eager_greedy([hff],preferred=[hff],max_time=%s,bound=%s)"',
'ff-lazy': '--heuristic "hff=ff(transform=adapt_costs(cost_type=PLUSONE))" '
'--search "lazy_greedy([hff],preferred=[hff],max_time=%s,bound=%s)"',
}
def read(filename):
with open(filename, 'r') as f:
return f.read()
def write(filename, string):
with open(filename, 'w') as f:
f.write(string)
def safe_remove(p):
if os.path.exists(p):
os.remove(p)
def ensure_dir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
def safe_rm_file(p):
if os.path.exists(p):
os.remove(p)
def safe_rm_dir(d):
if os.path.exists(d):
shutil.rmtree(d)
def get_fd_root():
if ENV_VAR not in os.environ:
raise RuntimeError('Environment variable %s is not defined.' % ENV_VAR)
return os.environ[ENV_VAR]
def run_translate(verbose, temp_dir, use_negative=False):
t0 = time()
translate_path = os.path.join(get_fd_root(), FD_BIN, TRANSLATE_DIR)
if translate_path not in sys.path:
sys.path.append(translate_path)
if use_negative and ('modified' in get_fd_root()):
translate_flags = ['--negative-axioms']
else:
translate_flags = []
temp_argv = sys.argv[:]
sys.argv = sys.argv[:1] + translate_flags + [DOMAIN_INPUT, PROBLEM_INPUT]
import translate
sys.argv = temp_argv
old_cwd = os.getcwd()
tmp_cwd = os.path.join(old_cwd, temp_dir)
if verbose:
print '\nTranslate command: import translate; translate.main()'
os.chdir(tmp_cwd)
translate.main()
os.chdir(old_cwd)
print 'Translate runtime:', time() - t0
    return
    # Unreachable fallback below: it would run the translator with stdout
    # redirected to /dev/null to silence its output.
    with open(os.devnull, 'w') as devnull:
        old_stdout = sys.stdout
        sys.stdout = devnull
        os.chdir(tmp_cwd)
        try:
            translate.main()
        finally:
            sys.stdout = old_stdout
            os.chdir(old_cwd)
def run_search(planner, max_time, max_cost, verbose, temp_dir):
if max_time == INF:
max_time = 'infinity'
elif isinstance(max_time, float):
max_time = int(max_time)
if max_cost == INF:
max_cost = 'infinity'
elif isinstance(max_cost, float):
max_cost = int(max_cost)
t0 = time()
search = os.path.join(get_fd_root(), FD_BIN, SEARCH_COMMAND)
planner_config = SEARCH_OPTIONS[planner] % (max_time, max_cost)
command = search % (temp_dir + SEARCH_OUTPUT,
planner_config, temp_dir + TRANSLATE_OUTPUT)
if verbose:
print '\nSearch command:', command
p = os.popen(command)
output = p.read()
if verbose:
print output[:-1]
print 'Search runtime:', time() - t0
if not os.path.exists(temp_dir + SEARCH_OUTPUT):
return None
return read(temp_dir + SEARCH_OUTPUT)
def parse_solution(solution):
lines = solution.split('\n')[:-2]
plan = []
for line in lines:
entries = line.strip('( )').split(' ')
plan.append((entries[0], tuple(entries[1:])))
return plan
def remove_paths(temp_dir):
safe_rm_dir(temp_dir)
def fast_downward(domain_pddl, problem_pddl, planner='max-astar',
max_time=INF, max_cost=INF, verbose=False, clean=False, temp_dir=TEMP_DIR):
remove_paths(temp_dir)
ensure_dir(temp_dir)
write(temp_dir + DOMAIN_INPUT, domain_pddl)
write(temp_dir + PROBLEM_INPUT, problem_pddl)
run_translate(verbose, temp_dir)
solution = run_search(planner, max_time, max_cost, verbose, temp_dir)
if clean:
remove_paths(temp_dir)
if solution is None:
return None
return parse_solution(solution)
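# Usage sketch (my addition; assumes the FD_PATH environment variable points
# at a Fast Downward checkout and that PDDL strings are in scope):
#   plan = fast_downward(domain_pddl, problem_pddl, planner='ff-astar',
#                        max_time=60, verbose=True)
#   if plan is not None:
#       for name, args in plan:
#           print name, args  # e.g. ('pick', ('block_a',))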
| 34.064516 | 94 | 0.646938 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,533 | 0.399779 |
f881c0e0b875dfcd895b81b936783f36c735935f | 564 | py | Python | backend/external/docgen/request_token.py | bcgov-c/wally | 264bc5d40f9b5cf293159f1bc0424cfd9ff8aa06 | ["Apache-2.0"] | null | null | null | backend/external/docgen/request_token.py | bcgov-c/wally | 264bc5d40f9b5cf293159f1bc0424cfd9ff8aa06 | ["Apache-2.0"] | null | null | null | backend/external/docgen/request_token.py | bcgov-c/wally | 264bc5d40f9b5cf293159f1bc0424cfd9ff8aa06 | ["Apache-2.0"] | null | null | null |
import requests
from api import config
def get_docgen_token():
params = {
"grant_type": "client_credentials",
"client_id": config.COMMON_DOCGEN_CLIENT_ID,
"client_secret": config.COMMON_DOCGEN_CLIENT_SECRET,
"scope": ""
}
req = requests.post(
config.COMMON_DOCGEN_SSO_ENDPOINT,
data=params,
headers={
"Content-Type": "application/x-www-form-urlencoded",
}
)
req.raise_for_status()
    token = req.json().get('access_token')
return token
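# Usage sketch (my addition): the token is typically sent as a bearer
# credential; DOCGEN_URL and payload below are hypothetical placeholders.
#   headers = {"Authorization": "Bearer " + get_docgen_token()}
#   requests.post(DOCGEN_URL, json=payload, headers=headers)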
| 21.692308 | 64 | 0.615248 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 130 | 0.230496 |
f88205db59ac35f6745b81386eb53c57775a1972 | 3,164 | py | Python | gcode_gen/gcode.py | tulth/gcode_gen | d6e276f2074d4fe66755b2ae06c5b4d85583c563 | ["BSD-3-Clause"] | null | null | null | gcode_gen/gcode.py | tulth/gcode_gen | d6e276f2074d4fe66755b2ae06c5b4d85583c563 | ["BSD-3-Clause"] | null | null | null | gcode_gen/gcode.py | tulth/gcode_gen | d6e276f2074d4fe66755b2ae06c5b4d85583c563 | ["BSD-3-Clause"] | null | null | null |
'''
Library for gcode commands objects that render to strings.
'''
from .number import num2str
from .point import XYZ
class GcodePoint(XYZ):
def __str__(self):
ret_list = []
for label, val in zip(('X', 'Y', 'Z'), self.xyz):
if val is not None:
ret_list.append('{}{}'.format(label, num2str(val)))
return ' '.join(ret_list)
class BaseGcode(object):
def __init__(self, cmd, x=None, y=None, z=None):
super().__init__()
self.cmd = cmd
self.point = GcodePoint(x, y, z)
def __str__(self):
point_str = str(self.point)
if point_str != '':
point_str = ' ' + point_str
return '{}{}'.format(self.cmd, point_str)
class Home(BaseGcode):
'''homing cycle'''
def __init__(self):
super().__init__('$H')
class Comment(BaseGcode):
'''comment'''
def __str__(self):
return '({})'.format(self.cmd)
class UnitsInches(BaseGcode):
'''Set system units to inches'''
def __init__(self):
super().__init__('G20')
class UnitsMillimeters(BaseGcode):
'''Set system units to millimeters'''
def __init__(self):
super().__init__('G21')
class MotionAbsolute(BaseGcode):
'''Set system to use absolute motion'''
def __init__(self):
super().__init__('G90')
class MotionRelative(BaseGcode):
'''Set system to use relative motion'''
def __init__(self):
raise Exception('Not supported!!')
# super().__init__('G91')
class SetSpindleSpeed(BaseGcode):
'''Set spindle rotation speed'''
def __init__(self, spindle_speed):
super().__init__('S {}'.format(spindle_speed))
class SetFeedRate(BaseGcode):
'''set feed rate. CAUTION: feed rate is system units per minute'''
def __init__(self, feedRate):
self.feedRate = feedRate
super().__init__('F {}'.format(num2str(feedRate)))
class ActivateSpindleCW(BaseGcode):
'''Activate spindle (clockwise)'''
def __init__(self, ):
super().__init__('M3')
class StopSpindle(BaseGcode):
'''Stop spindle'''
def __init__(self, ):
super().__init__('M5')
class G0(BaseGcode):
'''linear NONcut motion'''
def __init__(self, x=None, y=None, z=None):
super().__init__('G0', x, y, z)
class G1(BaseGcode):
'''linear CUT motion'''
def __init__(self, x=None, y=None, z=None):
super().__init__('G1', x, y, z)
class BaseArcGcode(BaseGcode):
def __init__(self, cmd, x=None, y=None, z=None, r=None):
assert r is not None
# need at least one rectangular coordinate
assert not all(rect_coord is None for rect_coord in (x, y, z))
self.radius = r
super().__init__(cmd, x, y, z)
def __str__(self):
return "{} R{}".format(super().__str__(), num2str(self.radius))
class G2(BaseArcGcode):
'''clockwise arc CUT motion'''
def __init__(self, x=None, y=None, z=None, r=None):
super().__init__('G2', x, y, z, r)
class G3(BaseArcGcode):
'''clockwise arc CUT motion'''
def __init__(self, x=None, y=None, z=None, r=None):
super().__init__('G3', x, y, z, r)
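# Usage sketch (my addition, assuming num2str renders numbers plainly):
#   program = [UnitsMillimeters(), SetFeedRate(100), G0(x=0, y=0, z=5),
#              G1(z=-1), G2(x=10, y=0, r=5)]
#   print('\n'.join(str(cmd) for cmd in program))
# which would emit one command per line:
#   G21 / F 100 / G0 X0 Y0 Z5 / G1 Z-1 / G2 X10 Y0 R5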
| 24.913386 | 71 | 0.60335 | 2,994 | 0.946271 | 0 | 0 | 0 | 0 | 0 | 0 | 686 | 0.216814 |
f8825ad47b75cf630d4ad3f98bb97cd2847d852d | 619 | py | Python | tAPP/2/P3.py | ArvinZJC/UofG_PGT_PSD_Python | d90e9bb0b53b14c6b1d7e657c3c61e2792e0d9c4 | ["MIT"] | null | null | null | tAPP/2/P3.py | ArvinZJC/UofG_PGT_PSD_Python | d90e9bb0b53b14c6b1d7e657c3c61e2792e0d9c4 | ["MIT"] | null | null | null | tAPP/2/P3.py | ArvinZJC/UofG_PGT_PSD_Python | d90e9bb0b53b14c6b1d7e657c3c61e2792e0d9c4 | ["MIT"] | null | null | null |
'''
Description: Problem 3 (rearrange the code)
Version: 1.0.1.20210116
Author: Arvin Zhao
Date: 2021-01-14 22:51:16
Last Editors: Arvin Zhao
LastEditTime: 2021-01-16 04:11:18
'''
def get_data():
username = input('Enter your username: ')
age = int(input('Enter your age: '))
data_tuple = (username, age)
return data_tuple
def message(username, age):
if age <= 10:
print('Hi', username)
else:
print('Hello', username)
def main():
username, age = get_data()
message(username, age)
if __name__ == '__main__': # It is strongly recommended to add this line.
    main()
 | 20.633333 | 74 | 0.646204 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 287 | 0.463651 |
f8825cac93ae51da9c9e342930c13e66cd5b1a63 | 1,046 | py | Python | tf_trees/demo.py | hazimehh/google-research | 81ff754d88f9ad479448c78d7ab615bef140423d | ["Apache-2.0"] | null | null | null | tf_trees/demo.py | hazimehh/google-research | 81ff754d88f9ad479448c78d7ab615bef140423d | ["Apache-2.0"] | null | null | null | tf_trees/demo.py | hazimehh/google-research | 81ff754d88f9ad479448c78d7ab615bef140423d | ["Apache-2.0"] | null | null | null |
from tensorflow import keras
# Make sure the tf_trees directory is in the search path.
from tf_trees import TEL
# The documentation of TEL can be accessed as follows
print(TEL.__doc__)
# We will fit TEL on the Boston Housing regression dataset.
# First, load the dataset.
from keras.datasets import boston_housing
(x_train, y_train), (x_test, y_test) = boston_housing.load_data()
# Define the tree layer; here we choose 10 trees, each of depth 3.
# Note output_logits_dim is the dimension of the tree output.
# output_logits_dim = 1 in this case, but should be equal to the
# number of classes if used as an output layer in a classification task.
tree_layer = TEL(output_logits_dim=1, trees_num=10, depth=3)
# Construct a sequential model with batch normalization and TEL.
model = keras.Sequential()
model.add(keras.layers.BatchNormalization())
model.add(tree_layer)
# Fit a model with mse loss.
model.compile(loss='mse', optimizer='adam', metrics=['mse'])
result = model.fit(x_train, y_train, epochs=100, validation_data=(x_test, y_test))
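# Report test-set loss/metrics for the fitted model (my addition; uses the
# same keras API as the calls above).
print(model.evaluate(x_test, y_test, verbose=0))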
| 38.740741 | 82 | 0.772467 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 566 | 0.541109 |
f88367f68dcb96f708907ba780b8dfe0c11ecea5 | 725 | py | Python | tests/utils_test.py | MartinThoma/nntoolkit | 1f9eed7b6d6fdacc706060d9cbfefaa9c2d0dbf8 | ["MIT"] | 4 | 2015-01-26T17:56:05.000Z | 2020-04-01T05:52:00.000Z | tests/utils_test.py | MartinThoma/nntoolkit | 1f9eed7b6d6fdacc706060d9cbfefaa9c2d0dbf8 | ["MIT"] | 11 | 2015-01-06T10:34:36.000Z | 2021-03-22T18:29:45.000Z | tests/utils_test.py | MartinThoma/nntoolkit | 1f9eed7b6d6fdacc706060d9cbfefaa9c2d0dbf8 | ["MIT"] | 6 | 2015-01-02T15:02:27.000Z | 2021-05-12T18:09:35.000Z |
#!/usr/bin/env python
# Core Library modules
import argparse
import os
# Third party modules
import pytest
# First party modules
import nntoolkit.utils as utils
def test_is_valid_file():
parser = argparse.ArgumentParser()
# Does exist
path = os.path.realpath(__file__)
assert utils.is_valid_file(parser, path) == path
# Does not exist
with pytest.raises(SystemExit):
utils.is_valid_file(parser, "/etc/nonexistingfile")
def test_is_valid_folder():
parser = argparse.ArgumentParser()
# Does exist
assert utils.is_valid_folder(parser, "/etc") == "/etc"
# Does not exist
with pytest.raises(SystemExit):
        utils.is_valid_folder(parser, "/etc/nonexistingfolder")
| 20.714286 | 62 | 0.704828 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 198 | 0.273103 |
f8837ac94ce790820bfbaf796665ce3cc290523c | 101 | py | Python | ex1.py | luismachado/python_project_euler | 79798ee00c18f4f8cc1b397aa7c92f8175a3ed33 | ["MIT"] | null | null | null | ex1.py | luismachado/python_project_euler | 79798ee00c18f4f8cc1b397aa7c92f8175a3ed33 | ["MIT"] | null | null | null | ex1.py | luismachado/python_project_euler | 79798ee00c18f4f8cc1b397aa7c92f8175a3ed33 | ["MIT"] | null | null | null |
sum = 0
for x in range(1,1000):
if x%3 == 0 or x%5 == 0:
sum += x
print ("Total is:", sum) | 14.428571 | 26 | 0.485149 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.108911 |
f885cb85cd328a59b1d3f0d46e987b871f1a5d6d | 1,977 | py | Python | apiser/10-grpc/src/utils/tools/zemail.py | hyhlinux/demo_vue | cf61d0ba21cce93b04951076c8c23c0fe693bb5b | ["Apache-2.0"] | null | null | null | apiser/10-grpc/src/utils/tools/zemail.py | hyhlinux/demo_vue | cf61d0ba21cce93b04951076c8c23c0fe693bb5b | ["Apache-2.0"] | 2 | 2022-02-10T12:00:22.000Z | 2022-03-02T02:31:40.000Z | apiser/10-grpc/src/utils/tools/zemail.py | hyhlinux/demo_vue | cf61d0ba21cce93b04951076c8c23c0fe693bb5b | ["Apache-2.0"] | null | null | null |
import smtplib
import os
from email.mime.text import MIMEText
from email.utils import formataddr
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.header import Header
try:
from src.config import CONFIG
except ImportError:
class CONFIG:
EMAIL = {
"user": os.getenv('EMAIL_USER', ""),
"passwd": os.getenv("EMAIL_USER_PASSWD", ""),
}
class SendEmail(object):
hosts = "smtp.exmail.qq.com"
host_port = 465
def __init__(self, user_email, user_pwd, tls=False):
print("conf:", user_email, user_pwd)
self.user = user_email
self.user_pass = user_pwd
self.server = smtplib.SMTP_SSL(host=SendEmail.hosts, port=SendEmail.host_port)
self.server.debuglevel = 1
self.server.ehlo()
        if tls:
            # NOTE: starttls() applies to plain SMTP; an SMTP_SSL connection is already encrypted.
            self.server.starttls()
def login(self):
self.server = smtplib.SMTP_SSL(host=SendEmail.hosts, port=SendEmail.host_port)
self.server.ehlo()
self.server.login(self.user, self.user_pass)
def sendEmail(self, to, msg):
try:
msg = self.body(msg=msg)
            self.login()  # log in with the sender account and password
            self.server.sendmail(from_addr=self.user, to_addrs=to, msg=msg)  # sender, recipient(s), and the message to send
            self.logout()  # close the connection
        except Exception as e:  # report the error if anything in the try block failed
print(e)
def body(self, subject=None, msg=None):
msg = MIMEText(msg, 'html', 'utf-8')
        msg['From'] = formataddr(["FromTES", self.user])  # display name and address of the sender
if not subject:
            subject = 'Activation email'
msg['Subject'] = subject
return msg.as_string()
def logout(self):
self.server.quit()
Zemail = SendEmail(CONFIG.EMAIL.get("user"), CONFIG.EMAIL.get("passwd"))
def main():
Zemail.sendEmail("2285020853@qq.com", "")
if __name__ == '__main__':
main()
| 29.954545 | 107 | 0.630754 | 1,672 | 0.773 | 0 | 0 | 0 | 0 | 0 | 0 | 464 | 0.214517 |
f8885de2c1bf956e3ffc0a2b8c32753cd240d5eb | 2,679 | py | Python | can_decoder/Frame.py | justinwald99/can_decoder | abfdd839856745f88b3fc3a58c8bedbdd05d5616 | ["MIT"] | 17 | 2020-08-18T02:34:57.000Z | 2022-03-16T16:26:53.000Z | can_decoder/Frame.py | justinwald99/can_decoder | abfdd839856745f88b3fc3a58c8bedbdd05d5616 | ["MIT"] | 4 | 2020-09-09T04:18:28.000Z | 2022-02-23T10:29:14.000Z | can_decoder/Frame.py | justinwald99/can_decoder | abfdd839856745f88b3fc3a58c8bedbdd05d5616 | ["MIT"] | 3 | 2021-08-18T18:30:43.000Z | 2022-02-21T07:11:09.000Z |
from typing import List, Optional
from can_decoder.Signal import Signal
class Frame(object):
id = 0 # type: int
size = 0 # type: int
signals = None # type: List[Signal]
multiplexer = None # type: Optional[Signal]
def __init__(
self,
frame_id: int,
frame_size: int,
) -> None:
self.id = frame_id
self.size = frame_size
self.signals = []
def _get_tuple(self):
return (
self.id,
self.size
)
def add_signal(self, *args, **kwargs) -> bool:
"""Add a new signal directly to this frame. All arguments are passed on the the Signal constructor.
:param args:
:param kwargs:
:return: True if signal added. False otherwise.
"""
result = False
# If only one argument is supplied, check if it already is a signal.
if len(args) == 1:
signal = args[0]
if isinstance(signal, Signal):
result = True
else:
try:
signal = Signal(*args, **kwargs)
except Exception:
signal = None
result = False
if result:
# Add the signal to the internal storage.
self.signals.append(signal)
# If the signal is a multiplexer, and no other signal is a multiplexer, set this as the root multiplexer.
if self.multiplexer is None and signal.is_multiplexer:
self.multiplexer = signal
elif signal.is_multiplexer:
raise ValueError(
"""
Multiplexed signal added to frame, but frame already contains a root multiplexed signal.
Should the signal have been added to another signal as a multiplexed value?
"""
)
pass
return result
def __str__(self) -> str:
result = f"CAN Frame with ID 0x{self.id:08X} - {self.size} bytes and {len(self.signals)} direct signals"
for signal in self.signals:
signal_str = str(signal)
for line in signal_str.splitlines():
result += f"\n\t{line}"
if self.multiplexer:
# TODO: Count multiplexed signals
pass
return result
def __hash__(self) -> int:
return hash(self._get_tuple())
def __eq__(self, other) -> bool:
if not isinstance(other, Frame):
return NotImplemented
return self._get_tuple() == other._get_tuple()
pass
| 29.43956 | 117 | 0.528182 | 2,603 | 0.971631 | 0 | 0 | 0 | 0 | 0 | 0 | 840 | 0.31355 |
f888a9124299142dae94af378de65454815c28dd | 268 | py | Python | Curso_de_Python_ Curso_em_Video/PythonTeste/operadoresAritmeticosEx007.py | DanilooSilva/Cursos_de_Python | 8f167a4c6e16f01601e23b6f107578aa1454472d | ["MIT"] | null | null | null | Curso_de_Python_ Curso_em_Video/PythonTeste/operadoresAritmeticosEx007.py | DanilooSilva/Cursos_de_Python | 8f167a4c6e16f01601e23b6f107578aa1454472d | ["MIT"] | null | null | null | Curso_de_Python_ Curso_em_Video/PythonTeste/operadoresAritmeticosEx007.py | DanilooSilva/Cursos_de_Python | 8f167a4c6e16f01601e23b6f107578aa1454472d | ["MIT"] | null | null | null |
largura = float(input('Enter the wall width: '))
altura = float(input('Enter the wall height: '))
area = largura * altura
tinta = area / 2  # one liter of paint covers two square meters
print('The wall area is {}'.format(area))
print('Painting it will take {} liters of paint'.format(tinta))
| 29.777778 | 69 | 0.697761 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 137 | 0.503676 |
f888dc9c7ee0e666487347bc03cdcb3278274bed | 174 | py | Python | terraform.py | kyleslater/254-space-log | 7496ff4e134b1a916580d8c0f8a0493e4863e9a2 | ["MIT"] | null | null | null | terraform.py | kyleslater/254-space-log | 7496ff4e134b1a916580d8c0f8a0493e4863e9a2 | ["MIT"] | null | null | null | terraform.py | kyleslater/254-space-log | 7496ff4e134b1a916580d8c0f8a0493e4863e9a2 | ["MIT"] | null | null | null |
#Kyle Slater
import re
def terraformable(content: str) -> int:
    """Count how many entries in the given log text are marked Terraformable."""
    pattern = re.compile(r'"TerraformState":"Terraformable"')
    matches = pattern.findall(content)
    return len(matches)
 | 24.857143 | 61 | 0.752874 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.287356 |
f88a9c72050c19a376ad171a7a2391d21f7e3ac6 | 256 | py | Python | bugzilla_service/bzservice_flask/app/tests/test_flaskr.py | 5GEVE/5G-EVE-PORTAL-BACKEND-tsb | 3fe3140b26d30e7e7ff1a034315183eaed60a599 | ["MIT"] | null | null | null | bugzilla_service/bzservice_flask/app/tests/test_flaskr.py | 5GEVE/5G-EVE-PORTAL-BACKEND-tsb | 3fe3140b26d30e7e7ff1a034315183eaed60a599 | ["MIT"] | 3 | 2021-02-08T20:38:29.000Z | 2021-06-02T00:55:43.000Z | file_storage_service/tests/test_flaskr.py | 5GEVE/5G-EVE-PORTAL-BACKEND-fs | 27d5d10fa39e3007cfee2e48e3b95047abf2c144 | ["MIT"] | null | null | null |
import os
import tempfile
import requests
api_url = "http://127.0.0.1:8989"
def test_no_token():
"""Request home without token"""
response = requests.get(api_url+"/isvalid")
print(response.status_code)
    assert response.status_code == 401
 | 23.272727 | 47 | 0.707031 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.253906 |
f88aa3fcd8cfa698889ea39a72ffe01decd8c2ea | 6,279 | py | Python | translator-v2.py | g-h-0-S-t/translator | 9e55b5b3a7d68b85aa718bc9eef064599b75f914 | ["MIT"] | 1 | 2021-07-22T14:06:08.000Z | 2021-07-22T14:06:08.000Z | translator-v2.py | g-h-0-S-t/translator | 9e55b5b3a7d68b85aa718bc9eef064599b75f914 | ["MIT"] | null | null | null | translator-v2.py | g-h-0-S-t/translator | 9e55b5b3a7d68b85aa718bc9eef064599b75f914 | ["MIT"] | null | null | null |
#!/usr/bin/python3
# MIT License
#
# Copyright (c) 2021 gh0$t
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
############################################################################################################################
# imports
############################################################################################################################
import sys
import urllib.request
from bs4 import BeautifulSoup
from urllib.request import Request
from selenium import webdriver
import os
import time
from stem import Signal
from stem.control import Controller
############################################################################################################################
# Pass URL, extract text, translate
############################################################################################################################
URL = str(sys.argv[1])
GTURL = 'https://translate.google.com/'
# this is important, drives the whole translation process.
# if google updates the translate.google.com page selectors, this HORRIBLE selector needs to be updated
GTXpathSel = '//*[@id="yDmH0d"]/c-wiz/div/div[@class="WFnNle"]/c-wiz/div[@class="OlSOob"]/c-wiz/div[@class="hRFt4b"]/c-wiz/div[@class="ykTHSe"]/div/div[@class="dykxn MeCBDd j33Gae"]/div/div[2]/div/div[@class="Llmcnf"]'
print('\nConnecting to ' + URL + ' ...' + '\nExtracting text...')
req = Request(URL)
html = BeautifulSoup(urllib.request.urlopen(req).read(), 'html.parser')
text = html.find('div', {'id': 'bodyContent'}).get_text()
with open('out/English.txt', 'w', encoding='utf-8') as f:
f.write(text)
print('\nExtracted -> out/English.txt')
print('\nStarting translation job...')
options = webdriver.ChromeOptions()
options.add_argument('--incognito')
options.add_argument('--headless')
driver = webdriver.Chrome(executable_path='driver/chromedriver', options=options)
print('\nConnecting to ' + GTURL + ' ...')
driver.get(GTURL)
time.sleep(1)
try:
# accept Google's cookies
driver.find_elements_by_xpath ('//span[contains(text(), "I agree")]')[0].click()
except:
pass
time.sleep(2)
driver.find_element_by_xpath('//*[@aria-label="Document translation"]').click()
driver.find_element_by_name('file').send_keys(os.path.abspath('out/English.txt'))
langEle = driver.find_elements_by_xpath(GTXpathSel)
i = 0
def init(driver):
try:
# elements are stale, need to refresh the list
langEle = driver.find_elements_by_xpath(GTXpathSel)
lang = langEle[i]
langTxt = lang.get_attribute('innerHTML')
if langTxt != 'English':
# printing this to make you feel less giddy if you end up staring at your terminal at a stretch
print('\nTrying English to ' + langTxt + '...')
driver.find_elements_by_xpath('//button[@aria-label="More target languages"]')[1].click()
time.sleep(2)
# translate.google.com DOM structure SUCKS.
# sorry Google, but that's the truth.
# #$!@ -> i am swearing, that's Google's representation of their 'swearing emote'
try:
driver.find_elements_by_xpath('//div[@data-language-code="' + lang.find_element_by_xpath('..').get_attribute('data-language-code') + '"]')[3].click()
except:
driver.find_elements_by_xpath('//div[@data-language-code="' + lang.find_element_by_xpath('..').get_attribute('data-language-code') + '"]')[1].click()
driver.find_elements_by_xpath ('//span[contains(text(), "Translate")]')[3].click()
time.sleep(1)
translatedBlog = driver.find_element_by_xpath('//pre').text
with open('out/' + langTxt + '.txt', 'w', encoding='utf-8') as f:
f.write(translatedBlog)
print('\n' + str(i + 1) + '/' + str(totLang) + ' -> ' + langTxt + ' -> Done -> out/' + langTxt + '.txt')
driver.back()
else:
print('\nSkipping ' + str(i + 1) + '/' + str(totLang) + ' -> ' + langTxt + '...')
except Exception as e:
# for debugging. use it @ your own risk. i am tired of the terminal screaming @ my face.
# print('\n---------->', e)
# Strategy to bypass Google's spam filter: quit chrome, switch TOR ID, re-try translation job
driver.quit()
with Controller.from_port(port = 9051) as controller:
controller.authenticate()
controller.signal(Signal.NEWNYM)
        # it's overkill to print this. just let it do its job silently.
# print('\n----------> Switching TOR ID & re-trying ' + str(i + 1) + '/' + str(totLang) + '...')
options = webdriver.ChromeOptions()
options.add_argument('--incognito')
options.add_argument('--headless')
driver = webdriver.Chrome(executable_path='driver/chromedriver', options=options)
driver.get(GTURL)
time.sleep(1)
try:
# accept Google's cookies
driver.find_elements_by_xpath ('//span[contains(text(), "I agree")]')[0].click()
except:
pass
time.sleep(2)
driver.find_element_by_xpath('//*[@aria-label="Document translation"]').click()
driver.find_element_by_name('file').send_keys(os.path.abspath('out/English.txt'))
init(driver)
totLang = len(langEle)
print('\nTotal languages = ' + str(totLang) + ' [press CTRL + C once or twice or thrice or any number of times you like to press to quit anytime]')
print('\nTranslating text...')
while i < totLang:
init(driver)
i += 1
print('\nTranslations completed. Check "/out" for the files.')
driver.quit()
exit()
| 34.5 | 218 | 0.645963 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,790 | 0.603599 |
f88ab7cb09ff4cce53f828728ecd959e4a4ca37a | 955 | py | Python | djangoBackend/payment_module/migrations/0005_auto_20210924_0054.py | muhanzi/Django-REST-API | 08b8b2bbd08a74589cca7b5fd4e1d604d9a6d7eb | ["Apache-2.0"] | null | null | null | djangoBackend/payment_module/migrations/0005_auto_20210924_0054.py | muhanzi/Django-REST-API | 08b8b2bbd08a74589cca7b5fd4e1d604d9a6d7eb | ["Apache-2.0"] | null | null | null | djangoBackend/payment_module/migrations/0005_auto_20210924_0054.py | muhanzi/Django-REST-API | 08b8b2bbd08a74589cca7b5fd4e1d604d9a6d7eb | ["Apache-2.0"] | null | null | null |
# Generated by Django 3.1.2 on 2021-09-23 21:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('payment_module', '0004_auto_20210924_0009'),
]
operations = [
migrations.RenameField(
model_name='commission',
old_name='agencyId',
new_name='agency',
),
migrations.RenameField(
model_name='commission',
old_name='paymentId',
new_name='payment',
),
migrations.RenameField(
model_name='invoice',
old_name='employeeId',
new_name='employee',
),
migrations.RenameField(
model_name='invoice',
old_name='employerId',
new_name='employer',
),
migrations.RenameField(
model_name='invoice',
old_name='paymentId',
new_name='payment',
),
]
| 24.487179 | 54 | 0.536126 | 870 | 0.910995 | 0 | 0 | 0 | 0 | 0 | 0 | 241 | 0.252356 |
f88e5bdd49e9b79ee78760de491336a0c465e929 | 935 | py | Python | general/tfHelper.py | jbroot/SHGAN | 9ed83f8356145adcbda219c0d9673e36109b0cb2 | ["MIT"] | null | null | null | general/tfHelper.py | jbroot/SHGAN | 9ed83f8356145adcbda219c0d9673e36109b0cb2 | ["MIT"] | null | null | null | general/tfHelper.py | jbroot/SHGAN | 9ed83f8356145adcbda219c0d9673e36109b0cb2 | ["MIT"] | null | null | null |
import tensorflow as tf
import keras
import numpy as np
def get_bias_major_weights(model):
weights = model.get_weights()
biasMajor = []
for arrI in range(0, len(weights), 2):
inWeights = weights[arrI]
        biasWeights = weights[arrI+1].reshape((1, -1))  # biases as a 1 x n row vector
l = np.concatenate((biasWeights, inWeights), axis=0).T
biasMajor.append(l)
return np.asarray(biasMajor)
def get_max_arg_vals(arr3D):
amaxes = tf.argmax(arr3D, axis=-1)
windowIdx = np.arange(0, amaxes.shape[0])
rowIdx = np.arange(0, amaxes.shape[1])
return arr3D[windowIdx[:, np.newaxis], rowIdx[np.newaxis, :], amaxes]
def get_steps_per_epoch(nSamplesOg, fracOfOg):
    return int(max(nSamplesOg * fracOfOg, 1))
def get_steps_and_epochs(nSamplesOg, fracOfOg, epochsIfFull):
stepsPerEpoch = get_steps_per_epoch(nSamplesOg, fracOfOg)
epochs = int(max(epochsIfFull / fracOfOg, 1))
return stepsPerEpoch, epochs
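# Worked example (my addition): with 100 original samples and fracOfOg=0.25,
# get_steps_and_epochs(100, 0.25, epochsIfFull=5) returns (25, 20) --
# a quarter of the data per epoch, compensated by four times as many epochs.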
| 32.241379 | 73 | 0.698396 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f88f6e13c4185abcf8cceff79dbfda6d0f9a19ba | 486 | py | Python | wsgi/settings.py | zhemao/speakeasy | 793bcca6d30fe31b1579bb8464f1eafacd6eb593 | ["BSD-2-Clause"] | 1 | 2022-02-02T10:40:59.000Z | 2022-02-02T10:40:59.000Z | wsgi/settings.py | zhemao/speakeasy | 793bcca6d30fe31b1579bb8464f1eafacd6eb593 | ["BSD-2-Clause"] | null | null | null | wsgi/settings.py | zhemao/speakeasy | 793bcca6d30fe31b1579bb8464f1eafacd6eb593 | ["BSD-2-Clause"] | null | null | null |
import os
MONGO_HOST = os.getenv('OPENSHIFT_NOSQL_DB_HOST')
MONGO_PORT = os.getenv('OPENSHIFT_NOSQL_DB_PORT')
MONGO_USERNAME = os.getenv('OPENSHIFT_NOSQL_DB_USERNAME')
MONGO_PASSWORD = os.getenv('OPENSHIFT_NOSQL_DB_PASSWORD')
MONGO_DBNAME = 'speakeasy'
PRIV_KEY_FILE = os.getenv('OPENSHIFT_DATA_DIR') + '/server_private.pem'
PUB_KEY_FILE = os.getenv('OPENSHIFT_DATA_DIR') + '/server_public.pem'
PRIV_KEY = open(PRIV_KEY_FILE).read()
PUB_KEY = open(PUB_KEY_FILE).read()
DEBUG = True
| 30.375 | 71 | 0.790123 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 200 | 0.411523 |
f8900e5fac4e08162311478b3ed9cf017f5cb02c | 10,047 | py | Python | perl_io.py | hariguchi/perl_io | 1deb367faa56081b68c4eda99d364f5b533a331e | ["MIT"] | null | null | null | perl_io.py | hariguchi/perl_io | 1deb367faa56081b68c4eda99d364f5b533a331e | ["MIT"] | null | null | null | perl_io.py | hariguchi/perl_io | 1deb367faa56081b68c4eda99d364f5b533a331e | ["MIT"] | null | null | null |
r''' perl_io - Opens a file or pipe in the Perl style
Copyright (c) 2016 Yoichi Hariguchi
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Usage:
from perl_io import PerlIO
Example 1:
pio = PerlIO('/proc/meminfo') # open `/proc/meminfo' for input
Example 2:
pio = PerlIO('> /tmp/foo.txt') # open '/tmp/foo.txt' for output
Example 3:
pio = PerlIO('>> /tmp/foo.txt') # open '/tmp/foo.txt' for appending
Example 4:
pio = PerlIO('| cmd arg1 ...') # we pipe output to the command `cmd'
Example 5:
pio = PerlIO('cmd arg1 ... |') # execute `cmd' that pipes output to us
You can access the Python file object as `pio.fo' after
PerlIO object `pio' was successfully created. `pio.fo' is
set to `None' if PerlIO failed to open a file or pipe.
Example6 : Read the output of `strings /usr/bin/python' from a pipe
with PerlIO('strings /usr/bin/python |') as pio:
for line in pio.fo.xreadlines():
#
# do something...
#
Example7 : Write to a file
with PerlIO('>/tmp/.tmpfile-%d' % (os.getpid())) as pio:
print >> pio.fo, 'This is an example'
pio.fo.write('This is another example')
pio.fo.write('\n')
Note: PerlIO parses the parameter as follows in the case it
indicates to input from or output to a pipe.
1. Strips the first or last `|' (which indicates to open a pipe)
2. If the remaining string includes shell special characters
like `|', `>', `;', etc., PerlIO calls Popen() with
"sh -c 'remaining_string'", which means it can be a security
hazard when the remaining string includes the unsanitized input
from an untrusted source.
3. If the remaining string includes no shell special characters,
PerlIO does not invoke shell when it calls Popen().
How to test:
python -m unittest -v perl_io
'''
import os
import platform
import re
import sys
import syslog
import time
import subprocess
import shlex
import unittest
class PerlIO:
def __init__(self, open_str):
self._fo = None
self._proc = None
open_str = open_str.strip()
if open_str[-1] == '|':
self._rd_open_pipe(open_str[:-1])
elif open_str[0] == '|':
self._wr_open_pipe(open_str[1:])
elif open_str[0] == '>':
if open_str[1] == '>':
self._open_file(open_str[2:], 'a')
else:
self._open_file(open_str[1:], 'w')
elif open_str[0] == '<':
self._open_file(open_str[1:], 'r')
elif open_str[0:2] == '+>' or open_str[0:2] == '+<':
self._open_file(open_str[2:], 'r+')
elif open_str == '-':
self._fo = sys.stdin
elif open_str == '>-':
self._fo = sys.stdout
else:
self._open_file(open_str, 'r')
def __enter__(self):
return self
def __exit__(self, type, val, traceback):
self.close()
def _parse_command(self, cmd):
m = re.search(r'(\||<|>|`|;)', cmd)
if m:
return "sh -c '" + cmd + "'"
return cmd
def _rd_open_pipe(self, cmd):
try:
cmd = self._parse_command(cmd)
self._proc = subprocess.Popen(shlex.split(cmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self._fo = self._proc.stdout
except IOError:
print >> sys.stderr, 'failed to open pipe from %s' % (cmd)
def _wr_open_pipe(self, cmd):
try:
cmd = self._parse_command(cmd)
self._proc = subprocess.Popen(shlex.split(cmd),
stdin=subprocess.PIPE,
stderr=subprocess.PIPE)
self._fo = self._proc.stdin
except IOError:
print >> sys.stderr, 'failed to open pipe to %s' % (cmd)
def _open_file(self, file, mode):
file = file.strip()
try:
self._fo = open(file, mode)
except IOError:
print >> sys.stderr, 'failed to open %s' % (file)
@property
def fo(self):
return self._fo
@property
def err_fo(self):
return self._proc.stderr
    def close(self):
        if self._proc == None:
            if self._fo != None:
                self._fo.close()
        else:
            self._proc.communicate()
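    # Example (my sketch, not from the original docstring): the stderr of a
    # piped command is reachable through err_fo.
    #   pio = PerlIO('ls /nonexistent |')
    #   out, err = pio.fo.read(), pio.err_fo.read()
    #   pio.close()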
class TestPerlIO(unittest.TestCase):
def runTest(self):
file = self.file_test(False)
self.rd_pipe_test(file)
self.rd_pipe_shell_test()
self.wr_pipe_test()
os.remove(file)
#
# 1. Open a file to write using PerlIO
# 2. Open a pipe outputting to us with a complex command line
# PerlIO('strings `which ls` | sort | uniq | ')
# so that shell is invoked with Popen().
# 3. Write all the input to the file created in No. 1
# 4. Check the contents
#
def rd_pipe_shell_test(self):
file = '/tmp/.pio_pipe_rd_test-%d' % (os.getpid())
pio_wr = PerlIO('> %s' % (file))
self.assertNotEqual(pio_wr.fo, None)
ll = []
cmd = 'strings `which ls` | sort | uniq | '
print >> sys.stderr, \
'Read from pipe (multiple commands): %s' % (cmd)
with PerlIO(cmd) as pio:
for line in pio.fo.xreadlines():
line = line.strip()
ll.append(line)
print >> pio_wr.fo, line
pio_wr.close()
pio_rd = PerlIO(file)
self.assertNotEqual(pio_rd.fo, None)
for line in pio_rd.fo.xreadlines():
line = line.strip()
expected = ll.pop(0)
self.assertEqual(line, expected)
os.remove(file)
#
# 1. Open a pipe to write with a complex command line
# PerlIO('| cat > /tmp/.pio_pipe_rt_test-XXXX')
# so that shell is invoked with Popen().
# The output to the pipe is redirected to a file
# 2. Open the file to read using PerlIO
# 3. Check the contents
#
def wr_pipe_test(self):
m = re.search(r'CYGWIN', platform.system())
if m:
#
# test fails on cygwin
#
return
file = '/tmp/.pio_pipe_wr_test-%d' % (os.getpid())
cmd = '| cat > %s' % (file)
print >> sys.stderr, 'Write to pipe: %s' % (cmd)
pio = PerlIO(cmd)
self.assertNotEqual(pio.fo, None)
ll = []
for i in range (0, 100):
line = "%4d %4d %4d %4d %4d" % (i, i, i, i, i)
ll.append(line)
print >> pio.fo, line
pio.close()
pio_rd = PerlIO(file)
self.assertNotEqual(pio_rd.fo, None)
for line in pio_rd.fo.xreadlines():
line = line.rstrip()
expected = ll.pop(0)
self.assertEqual(line, expected)
os.remove(file)
def file_test(self, remove):
#
# pio = PerlIO('>/tmp/.fileTest-pid')
#
file = '/tmp/.fileTest-%d' % os.getpid()
ofile = '> ' + file
print >> sys.stderr, '\n\nWrite to file: %s' % (ofile)
pio = PerlIO(ofile)
if pio.fo == None:
print >> sys.stderr, ' Error: failed to open %s' % file
sys.exit(1)
else:
for i in range (0, 500):
print >> pio.fo, '%4d %4d %4d %4d %4d' % (i, i, i, i, i)
pio.close()
#
# Append test ('>>/tmp/.fileTest-pid')
#
ofile = ' >> ' + file
print >> sys.stderr, 'Append to file: %s' % (ofile)
pio = PerlIO(ofile)
if pio.fo == None:
print >> sys.stderr, ' Error: failed to open %s' % file
sys.exit(1)
else:
for i in range (500, 1000):
print >> pio.fo, '%4d %4d %4d %4d %4d' % (i, i, i, i, i)
pio.close()
#
# Read the file just created and check the contents
#
print >> sys.stderr, 'Read from file: %s' % (file)
pio = PerlIO(file)
i = 0
for line in pio.fo.xreadlines():
line = line.rstrip()
expected = '%4d %4d %4d %4d %4d' % (i, i, i, i, i)
i += 1
self.assertEqual(line, expected)
pio.close()
if remove == True:
os.remove(file)
return file
#
# Read from a pipe with a simple command line
# so that shell is not invoked with Popen().
# Confirm the contents of the file is correct.
# Must be called after file_test().
#
def rd_pipe_test(self, file):
cmd = ' cat %s | ' % (file)
print >> sys.stderr, 'Read from pipe: %s' % (cmd)
i = 0
with PerlIO(cmd) as pio:
for line in pio.fo.xreadlines():
line = line.rstrip()
expected = '%4d %4d %4d %4d %4d' % (i, i, i, i, i)
i += 1
self.assertEqual(line, expected)
| 33.602007 | 78 | 0.54902 | 7,070 | 0.703693 | 0 | 0 | 115 | 0.011446 | 0 | 0 | 4,315 | 0.429481 |
f89039eac3e7b46b0d707c6f7b3927ce103b2914 | 919 | py | Python | app/controllers/config/system/logs.py | grepleria/SnitchDNS | 24f98b01fd5fca9aa2c660d6ee15742f2e44915c | ["MIT"] | 152 | 2020-12-07T13:26:53.000Z | 2022-03-23T02:00:04.000Z | app/controllers/config/system/logs.py | grepleria/SnitchDNS | 24f98b01fd5fca9aa2c660d6ee15742f2e44915c | ["MIT"] | 16 | 2020-12-07T17:04:36.000Z | 2022-03-10T11:12:52.000Z | app/controllers/config/system/logs.py | grepleria/SnitchDNS | 24f98b01fd5fca9aa2c660d6ee15742f2e44915c | ["MIT"] | 36 | 2020-12-09T13:04:40.000Z | 2022-03-12T18:14:36.000Z |
from .. import bp
from flask import request, render_template, flash, redirect, url_for
from flask_login import current_user, login_required
from app.lib.base.provider import Provider
from app.lib.base.decorators import admin_required
@bp.route('/logs/errors', methods=['GET'])
@login_required
@admin_required
def logs_errors():
provider = Provider()
logging = provider.logging()
default_per_page = 20
page = request.args.get('page', 1)
per_page = request.args.get('per_page', default_per_page)
if isinstance(page, str):
page = int(page) if page.isdigit() else 1
if isinstance(per_page, str):
        per_page = int(per_page) if per_page.isdigit() else default_per_page
if page <= 0:
page = 1
if per_page <= 0:
per_page = default_per_page
return render_template(
'config/system/logs/errors.html',
results=logging.view_errors(page, per_page)
)
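# Example (my addition): a request for the second page of 50 error entries
# would be GET /logs/errors?page=2&per_page=50; non-numeric or non-positive
# values fall back to the defaults above.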
| 26.257143 | 68 | 0.688792 | 0 | 0 | 0 | 0 | 682 | 0.742111 | 0 | 0 | 67 | 0.072905 |
f8905d54c870fed2c5b2b1831a4130a25651f566 | 5,539 | py | Python | MR-OCP/mrcap/utils/downsample_atlas.py | justi/m2g | 09e8b889889ee8d8fb08b9b6fcd726fb3d901644 | ["Apache-2.0"] | 12 | 2015-03-11T22:07:17.000Z | 2016-01-29T21:24:29.000Z | MR-OCP/mrcap/utils/downsample_atlas.py | youngmook/m2g | 09e8b889889ee8d8fb08b9b6fcd726fb3d901644 | ["Apache-2.0"] | 213 | 2015-01-30T16:02:57.000Z | 2016-01-29T21:45:02.000Z | MR-OCP/mrcap/utils/downsample_atlas.py | youngmook/m2g | 09e8b889889ee8d8fb08b9b6fcd726fb3d901644 | ["Apache-2.0"] | 5 | 2015-02-04T13:58:12.000Z | 2016-01-29T21:24:46.000Z |
#!/usr/bin/env python
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# create_atlas.py
# Created by Disa Mhembere on 2014-04-10.
# Email: disa@jhu.edu
# Copyright (c) 2014. All rights reserved.
# This simply takes a (182, 218, 182) atlas and creates
# a ~30-non-zero k region atlas by relabelling each
# 3x3x3 region with a new label then masking
# using a base atlas
import argparse
import nibabel as nib
import numpy as np
from math import ceil
from copy import copy
import sys, pdb
from time import time
import os
from packages.utils.setup import get_files
def create(roifn=os.path.join(os.environ["M2G_HOME"],"data","Atlas",
"MNI152_T1_1mm_brain.nii"), start=2):
"""
Downsamples an atlas from a template brain.
Create a new atlas given some scaling factor determined by the start index. Can be useful if looking for parcellation of certain scale for graph generation.
**Positional Arguments**
roifn: [.nii; nifti image] (default = MNI152)
- Nifti roi mask file name
start: [int] (default = 2)
- The x,y,z start position which determines the scaling.
**Returns**
atlas: [.nii; nifti image]
- Atlas labels in MNI space.
"""
start_time = time()
atlmap = None
print "Loading rois as base ..."
if not os.path.exists(roifn):
get_files()
img = nib.load(roifn)
base = img.get_data()
aff = img.get_affine()
fm = img.file_map
true_dim = base.shape
# Labelling new
label_used = False
print "Labeling new ..."
region_num = 1
step = 1+(start*2)
mstart = -start
mend = start+1
# Align new to scale factor
xdim, ydim, zdim = map(ceil, np.array(base.shape)/float(step))
if step == 1:
assert xdim == base.shape[0] and ydim == base.shape[1] and zdim == base.shape[2]
resized_base = np.zeros((xdim*step, ydim*step, zdim*step), dtype=int)
resized_base[:base.shape[0], :base.shape[1], :base.shape[2]] = base
base = resized_base
del resized_base
# Create new matrix
new = np.zeros_like(base, dtype=np.int) # poke my finger in the eye of bjarne
# TODO: Cythonize
for z in xrange(start, base.shape[2]-start, step):
for y in xrange(start, base.shape[1]-start, step):
for x in xrange(start, base.shape[0]-start, step):
if label_used:
region_num += 1 # only increase counter when a label was used
label_used = False
# set other (step*step)-1 around me to same region
for zz in xrange(mstart,mend):
for yy in xrange(mstart,mend):
for xx in xrange(mstart,mend):
if (base[x+xx,y+yy,z+zz]): # Masking
label_used = True
new[x+xx,y+yy,z+zz] = region_num
new = new[:true_dim[0], :true_dim[1], :true_dim[2]] # shrink new to correct size
print "Your atlas has %d regions ..." % len(np.unique(new))
img = nib.Nifti1Image(new, affine=img.get_affine(), header=img.get_header(), file_map=img.file_map)
del new
print "Building atlas took %.3f sec ..." % (time()-start_time)
return img
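# Example (my addition), mirroring what main() does below:
#   img = create(start=2)         # 5x5x5-voxel regions (step = 1 + 2*start)
#   nib.save(img, "atlas_s2.nii")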
def validate(atlas_fn, roifn):
"""
Validate that an atlas you've created is a valid based on the
masking you have
@param atlas_fn: the new atlas you've created
@param roifn: nifti roi file name
"""
base = nib.load(roifn).get_data()
try:
new = nib.load(atlas_fn).get_data()
except:
sys.stderr.write("[Error]: Loading file %s failed!\n" % atlas_fn);
exit(-1)
# This is a mapping from base to new where if we get any conflicting regions we failed to make a valid atlas
old_to_new = {}
  for i in xrange(new.shape[0]):
    for ii in xrange(new.shape[1]):
      for iii in xrange(new.shape[2]):
        if old_to_new.has_key(base[i,ii,iii]):
          if old_to_new[base[i,ii,iii]] != new[i,ii,iii]:
            print "[Error]: Index [{0},{1},{2}] should be {3}, but is {4}".format(
                i, ii, iii, old_to_new[base[i,ii,iii]], new[i,ii,iii])
            exit(911)
        else:
          old_to_new[base[i,ii,iii]] = new[i,ii,iii]
print "Success! Validation complete."
def main():
parser = argparse.ArgumentParser(description="Create a downsampled atlas for a fibergraph")
parser.add_argument("baseatlas", action="store", help="NIFTI roi")
parser.add_argument("-n","--niftifn", action="store", default="atlas.nii", \
help="Nifti output file name if creating else the input file name if validation")
parser.add_argument("-f", "--factor", default=2, action="store", type=int, \
help="Downsample factor/Start index")
parser.add_argument("-v", "--validate", action="store_true", help="Perform validation")
result = parser.parse_args()
if result.validate:
print "Validating %s..." % result.niftifn
validate(result.niftifn, result.baseatlas)
        exit(0)
img = create(result.baseatlas, result.factor)
nib.save(img, result.niftifn)
if __name__ == "__main__":
main()
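# Example invocations (file names are illustrative); create a downsampled
# atlas, then validate it against the base:
#   python create_atlas.py MNI152_T1_1mm_brain.nii -n atlas.nii -f 2
#   python create_atlas.py MNI152_T1_1mm_brain.nii -n atlas.nii -v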
# === tensorforce/agents/random_agent.py (matthewwilfred/tensorforce; Apache-2.0, MIT) ===
# Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Random agent that always returns a random action.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from random import gauss, random, randrange
from tensorforce.agents import Agent
class RandomAgent(Agent):
name = 'RandomAgent'
model = (lambda config: None)
def __init__(self, config):
super(RandomAgent, self).__init__(config)
def reset(self):
self.episode += 1
def act(self, state):
"""
Get random action from action space
:param state: current state (disregarded)
:return: random action
"""
self.timestep += 1
if self.unique_state:
self.current_state = dict(state=state)
else:
self.current_state = state
self.current_action = dict()
for name, action in self.actions_config.items():
            if action.continuous:
                # Sample within the configured bounds when they exist, otherwise
                # from a standard normal; keep the action spec itself intact.
                if 'min_value' in action:
                    value = action.min_value + random() * (action.max_value - action.min_value)
                else:
                    value = gauss(mu=0.0, sigma=1.0)
            else:
                value = randrange(action.num_actions)
            self.current_action[name] = value
if self.unique_action:
return self.current_action['action']
else:
return self.current_action
def observe(self, reward, terminal):
self.current_reward = reward
self.current_terminal = terminal
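# Minimal usage sketch; the config/state objects and their fields are assumed
# from the attributes this class reads, not taken from tensorforce docs:
#   agent = RandomAgent(config)
#   agent.reset()
#   action = agent.act(state)
#   agent.observe(reward=0.0, terminal=False)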
# === selinum_basics.py (elithaxxor/craiglist_scraper; MIT) ===
import re
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver import Chrome
from selenium.webdriver.support.expected_conditions import presence_of_element_located
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
## ORIGINAL CODE ###
# OS = os.name
# # s.environ['PATH'] += '/Users/macbook/Documents/CS/PROJECT/AutoDownloader/TEST_DOWNLOADS/fileexteniontest.torrenttorrent.torrent'
# driver = webdriver.Chrome(r'/Users/macbook/Documents/CS/PROJECT/AutoDownloader/TEST_DOWNLOADS/fileexteniontest.torrenttorrent.torrent/chromedriver')
# driver.get('https://1337x.to/')
## To Load Extensions::
try:
OS = os.name
chrome_options = Options()
chrome_options.add_extension('/Users/macbook/Documents/CS/PROJECT/AutoDownloader/TEST_DOWNLOADS/Selenium_Project/ad_blocker.crx')
driver = webdriver.Chrome(options=chrome_options, executable_path= r'/Users/macbook/Documents/CS/PROJECT/AutoDownloader/TEST_DOWNLOADS/fileexteniontest.torrenttorrent.torrent/chromedriver')
time.sleep(2)
driver.get('https://1337x.to/')
driver.implicitly_wait(25) ### no need to call more than once
print(OS)
print(driver)
#print(driver.text)
except Exception as e:
print('ERROR IN PARSING CHROME EXTENSION', str(e))
try:
search_box = driver.find_element_by_id('autocomplete')
print(search_box.text)
search_box.click()
search_box.send_keys('chopper')
click_search_box = driver.find_element_by_class_name('flaticon-search')
#click_seach_box.click()
#click_search_box.send_keys(Keys.ENTER)
search_box.send_keys(Keys.ENTER)
#driver.find_element_by_xpath("html/xxxxx").send_keys('keys.ENTER')
except Exception as e:
print('Element not found CANNOT FIND SEARCH BOX ', str(e))
try:
search_box01 = driver.find_element_by_id('autocomplete')
print(search_box01.text)
search_box01.click()
search_box01.send_keys(Keys.CONTROL, "a")
search_box01.clear()
search_box01.send_keys('the titanic')
search_box01.send_keys(Keys.ENTER)
except Exception as e:
print('Element not found 2nd search', str(e))
### IMPLIMENT EXPLICIT WAIT
## SINCE THE WEBPAGE MAY TAKE LONG TO LOAD, AND TIME TO PARSE, SET UP AN EXPLICIT WAIT--> THIS WILL WAIT UNTIL THE DEFINED THING IS LOADED
## SET UP LOOP TO ITERATE THROUGH LIST OF ELEMENTS
try:
body = WebDriverWait(driver, 15).until(
EC.presence_of_element_located((By.CLASS_NAME, 'table-list-wrap'))
#EC.presence_of_all_elements_located((by.CLASS, 'table-list table table-responsive table-striped')) ##
)
print(body.text)
print(),print()
print('1111111111')
href_link = body.find_element_by_xpath("/html/body/main/div/div/div/div[2]/div[1]/table/tbody/tr[1]/td[1]")
print(href_link.text)
except Exception as e:
print('Element not found body search', str(e))
try:
click_link = driver.find_element_by_link_text('The Titanic Secret by Clive Cussler, Jack Du Brul EPUB')
print(click_link.text)
click_link.click()
except Exception as e:
print('Element not found click test', str(e))
try:
# magnet = driver.find_element
    # By.CLASS_NAME cannot match a compound (space-separated) class list, so a
    # CSS selector joining the classes with dots is used instead.
    magnet_pull = WebDriverWait(driver, 15).until(
        EC.presence_of_element_located((By.CSS_SELECTOR,
            ".l4702248fa49fbaf25efd33c5904b4b3175b29571.l0e850ee5d16878d261dd01e2486970eda4fb2b0c.l8680f3a1872d2d50e0908459a4bfa4dc04f0e610"))
    )
print('magnetpull info')
print(magnet_pull.text)
magnet_link = driver.find_element_by_xpath("/html/body/main/div/div/div/div[2]/div[1]/ul[1]/li[1]/a")
print(magnet_link.text)
magnet_link.click()
except Exception as e:
print('MAGNET PULL ERROR', str(e))
driver.quit()
###### GOOOD CODE ######
##### TO LOOP THROUGH A LIST WHILE IN IMPLICIT WAIT
# sm_table = body.find_element_by_class_name('"table-list table table-responsive table-striped"')
# # sm_table = body.find_element_by_class_name('coll-1 name')
# #sm_table = body.find_element_by_xpath("/html/body/main/div/div/div/div[2]/div[1]/table/tbody/tr[1]/td[1]")
#
# for cell in sm_table:
# href_link = cell.find_element_by_xpath("/html/body/main/div/div/div/div[2]/div[1]/table/tbody/tr[1]/td[1]")
# print(href_link.text)
#################### EXPLICIT WAIT ###########################
###### USE WHEN DOWNLOAD COMPLETES ######### (23:00)
#### use when you want to wait some time for execution
## explicit wait -- waits until condition is returned true.
## driver, 30 --> how long to wait till true
# ## use body class to find element
# ## nest elements in a tuple
# print(f"my_element")
# WebDriverWait(driver, 30).until(
#     EC.text_to_be_present_in_element(
#         (By.CLASS_NAME, 'progress-label'),  ## element filter: (locator strategy, value) as a tuple
#         'complete'  ## expected text as a string
#     )
# )
# my_element00 = driver.find_element_by_class_name('') ## <--- pass in class value #-> class styling method
# print(my_element00)
#
# #### DROP DOWN CLASSES FOR MAGNET / TORRENT DOWNLOAD ##
# <ul class="lfa750b508ad7d04e3fc96bae2ea94a5d121e6607 lcafae12a818cf41a5873ad374b98e79512c946c6">
# <li><a class="l4702248fa49fbaf25efd33c5904b4b3175b29571 l0e850ee5d16878d261dd01e2486970eda4fb2b0c l8680f3a1872d2d50e0908459a4bfa4dc04f0e610" href="magnet:?xt=urn:btih:F5BC20E9AA709CFC32BE63B2F6BEE56882EB7BD2&dn=The+Titanic+Secret+by+Clive+Cussler%2C+Jack+Du+Brul+EPUB&tr=udp%3A%2F%2Ftracker.coppersurfer.tk%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.leechers-paradise.org%3A6969%2Fannounce&tr=udp%3A%2F%2F9.rarbg.to%3A2710%2Fannounce&tr=udp%3A%2F%2Fexodus.desync.com%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.uw0.xyz%3A6969%2Fannounce&tr=udp%3A%2F%2Fopen.stealth.si%3A80%2Fannounce&tr=udp%3A%2F%2Ftracker.tiny-vps.com%3A6969%2Fannounce&tr=udp%3A%2F%2Fopen.demonii.si%3A1337%2Fannounce&tr=udp%3A%2F%2Ftracker.nibba.trade%3A1337%2Fannounce&tr=udp%3A%2F%2Fopentracker.sktorrent.org%3A6969%2Fannounce&tr=udp%3A%2F%2Fexplodie.org%3A6969%2Fannounce&tr=udp%3A%2F%2Fbt.xxx-tracker.com%3A2710%2Fannounce&tr=udp%3A%2F%2Fzephir.monocul.us%3A6969%2Fannounce&tr=udp%3A%2F%2Famigacity.xyz%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.zer0day.to%3A1337%2Fannounce&tr=udp%3A%2F%2Ftracker.leechers-paradise.org%3A6969%2Fannounce&tr=udp%3A%2F%2Fcoppersurfer.tk%3A6969%2Fannounce" onclick="javascript: count(this);"><span class="icon"><i class="flaticon-ld08a4206c278863eddc1bf813faa024ef55ce0ef"></i></span>Magnet Download</a> </li>
# <li class="dropdown">
# <a data-toggle="dropdown" class="l4702248fa49fbaf25efd33c5904b4b3175b29571 l0e850ee5d16878d261dd01e2486970eda4fb2b0c le41399670fcf7cac9ad72cbf1af20d76a1fa16ad" onclick="javascript: count(this);" href="#"><span class="icon"><i class="flaticon-le9f40194aef2ed76d8d0f7f1be7fe5aad6fce5e6"></i></span>Torrent Download</a>
# <ul class="dropdown-menu" aria-labelledby="dropdownMenu1">
# <li><a class="l4702248fa49fbaf25efd33c5904b4b3175b29571 l0e850ee5d16878d261dd01e2486970eda4fb2b0c l13bf8e2d22d06c362f67b795686b16d022e80098" target="_blank" href="http://itorrents.org/torrent/F5BC20E9AA709CFC32BE63B2F6BEE56882EB7BD2.torrent"><span class="icon"><i class="flaticon-lbebff891414215bfc65d51afbd7677e45be19fad"></i></span>ITORRENTS MIRROR</a> </li>
# <li><a class="l4702248fa49fbaf25efd33c5904b4b3175b29571 l0e850ee5d16878d261dd01e2486970eda4fb2b0c l13bf8e2d22d06c362f67b795686b16d022e80098" target="_blank" href="http://torrage.info/torrent.php?h=F5BC20E9AA709CFC32BE63B2F6BEE56882EB7BD2"><span class="icon"><i class="flaticon-lbebff891414215bfc65d51afbd7677e45be19fad"></i></span>TORRAGE MIRROR</a></li>
# <li><a class="l4702248fa49fbaf25efd33c5904b4b3175b29571 l0e850ee5d16878d261dd01e2486970eda4fb2b0c l13bf8e2d22d06c362f67b795686b16d022e80098" target="_blank" href="http://btcache.me/torrent/F5BC20E9AA709CFC32BE63B2F6BEE56882EB7BD2"><span class="icon"><i class="flaticon-lbebff891414215bfc65d51afbd7677e45be19fad"></i></span>BTCACHE MIRROR</a></li>
# <li><a class="l4702248fa49fbaf25efd33c5904b4b3175b29571 l0e850ee5d16878d261dd01e2486970eda4fb2b0c l8680f3a1872d2d50e0908459a4bfa4dc04f0e610" href="magnet:?xt=urn:btih:F5BC20E9AA709CFC32BE63B2F6BEE56882EB7BD2&dn=The+Titanic+Secret+by+Clive+Cussler%2C+Jack+Du+Brul+EPUB&tr=udp%3A%2F%2Ftracker.coppersurfer.tk%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.leechers-paradise.org%3A6969%2Fannounce&tr=udp%3A%2F%2F9.rarbg.to%3A2710%2Fannounce&tr=udp%3A%2F%2Fexodus.desync.com%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.uw0.xyz%3A6969%2Fannounce&tr=udp%3A%2F%2Fopen.stealth.si%3A80%2Fannounce&tr=udp%3A%2F%2Ftracker.tiny-vps.com%3A6969%2Fannounce&tr=udp%3A%2F%2Fopen.demonii.si%3A1337%2Fannounce&tr=udp%3A%2F%2Ftracker.nibba.trade%3A1337%2Fannounce&tr=udp%3A%2F%2Fopentracker.sktorrent.org%3A6969%2Fannounce&tr=udp%3A%2F%2Fexplodie.org%3A6969%2Fannounce&tr=udp%3A%2F%2Fbt.xxx-tracker.com%3A2710%2Fannounce&tr=udp%3A%2F%2Fzephir.monocul.us%3A6969%2Fannounce&tr=udp%3A%2F%2Famigacity.xyz%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.zer0day.to%3A1337%2Fannounce&tr=udp%3A%2F%2Ftracker.leechers-paradise.org%3A6969%2Fannounce&tr=udp%3A%2F%2Fcoppersurfer.tk%3A6969%2Fannounce"><span class="icon"><i class="flaticon-ld08a4206c278863eddc1bf813faa024ef55ce0ef"></i></span>None Working? Use Magnet</a></li>
#
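# Sketch of pulling the magnet URI out of the anchor dumped above (assumes the
# page is still open, i.e. before driver.quit(); link text from the HTML):
#   magnet_uri = driver.find_element_by_link_text('Magnet Download').get_attribute('href')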
# === training/MNISTFashionMicroservice/src/server/training.py (UMass-Rescue/CombinedTechStack; MIT) ===
import tempfile
import shutil
import requests
import sys
import logging
import json
from src.server.dependency import ModelData
import tensorflow as tf
class StreamToLogger(object):
"""
Fake file-like stream object that redirects writes to a logger instance.
Source: https://stackoverflow.com/a/39215961
"""
def __init__(self, logger, level):
self.logger = logger
self.level = level
self.linebuf = ''
def write(self, buf):
for line in buf.rstrip().splitlines():
self.logger.log(self.level, line.rstrip())
def flush(self):
pass
def train_model(training_id, model_data: ModelData):
"""
Train model(s) based on a given model and hyperparameters
Now supporting two hyperparameters which are
- Optimizer and learning_rate
"""
# SET LOGGER TO PRINT TO STDOUT AND WRITE TO FILE
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[
logging.FileHandler("/log/{}.log".format(training_id)),
logging.StreamHandler(sys.stdout)
]
)
log = logging.getLogger('db_microservice_logger')
sys.stdout = StreamToLogger(log,logging.INFO)
sys.stderr = StreamToLogger(log,logging.ERROR)
# get API KEY from the environment file
API_KEY = os.getenv('API_KEY')
best_acc = -1
best_val_acc = -1
best_loss = -1
best_val_loss = -1
best_model = None
best_config = None
best_optimizer = None
best_loss_fn = None
# print("Save:" + str(model_data.save))
logging.info("Save:" + str(model_data.save))
try:
# print('[Training] Starting to train model ID: ' + training_id)
logging.info('[Training] Starting to train model ID: ' + training_id)
dataset_root = '/app/src/public_dataset'
img_height = 28
img_width = 28
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
dataset_root,
validation_split=model_data.split,
subset="training",
seed=model_data.seed,
image_size=(img_height, img_width),
batch_size=model_data.batch_size
)
validation_ds = tf.keras.preprocessing.image_dataset_from_directory(
dataset_root,
validation_split=model_data.split,
subset="validation",
seed=model_data.seed,
image_size=(img_height, img_width),
batch_size=model_data.batch_size
)
autotune_buf_size = tf.data.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=autotune_buf_size)
validation_ds = validation_ds.cache().prefetch(buffer_size=autotune_buf_size)
optimizer_dict = model_data.optimizer.dict()
config = {}
if "config" in optimizer_dict and optimizer_dict["config"]:
# convert all float config from string to float
convert_data_type(optimizer_dict["config"])
config = optimizer_dict["config"]
    # if learning_rate is not defined, the optimizer's default value is used
learning_rate_list = [None]
if model_data.optimizer.learning_rate:
learning_rate_list = model_data.optimizer.learning_rate
# get loss function object
loss_dict = model_data.loss_function.dict()
if loss_dict["config"] is None:
loss_dict["config"] = {}
else:
convert_data_type(loss_dict["config"])
loss_fn = tf.keras.losses.get(loss_dict)
logging.info(loss_fn)
    # Create every hyperparameter combination (optimizer class x learning rate).
    hyperparameters = [[o, lr] for o in optimizer_dict["class_name"]
                       for lr in learning_rate_list]
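    # For example (illustrative values), class_name ["Adam", "SGD"] with
    # learning_rate [0.01, 0.001] expands to the four pairs
    # [["Adam", 0.01], ["Adam", 0.001], ["SGD", 0.01], ["SGD", 0.001]].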
# loop through all hyperparameters
for hp in hyperparameters:
# load model from json file
model = tf.keras.models.model_from_json(model_data.model_structure)
optimizer_obj = {
"class_name": hp[0],
"config": config
}
# set learning rate if not None
if hp[1]:
optimizer_obj["config"]["learning_rate"] = hp[1]
optimizer = tf.keras.optimizers.get(optimizer_obj)
n_epochs = model_data.n_epochs
# train the model
(acc, val_acc, loss, val_loss, model) = fit(model, loss_fn, optimizer, train_ds, validation_ds, n_epochs)
# CHECK FOR THE BEST MODEL (from validation accuracy)
if val_acc > best_val_acc:
best_acc = acc
best_val_acc = val_acc
best_loss = loss
best_val_loss = val_loss
best_model = model
best_optimizer = optimizer.get_config()
best_loss_fn = loss_fn.get_config()
# END LOOP
logging.info('[Training] Completed training on model ID: ' + training_id)
# If we are saving the model, we must save it to folder, zip that folder,
# and then send the zip file to the server via HTTP requests
if model_data.save:
# print('[Training] Preparing to save Model data on model ID: ' + training_id)
logging.info('[Training] Preparing to save Model data on model ID: ' + training_id)
# Create temp dir and save model to it
tmpdir = tempfile.mkdtemp()
model_save_path = os.path.join(tmpdir, training_id)
# Save model nested 1 more layer down to facilitate unzipping
tf.saved_model.save(best_model, os.path.join(model_save_path, training_id))
shutil.make_archive(model_save_path, 'zip', model_save_path)
print(tmpdir)
files = {'model': open(model_save_path+'.zip', 'rb')}
requests.post(
'http://host.docker.internal:' + str(os.getenv('SERVER_PORT')) + '/training/model',
headers={'api_key': API_KEY},
params={'training_id': training_id},
files=files
)
# print('[Training] Sent SavedModel file data on model ID: ' + training_id)
logging.info('[Training] Sent SavedModel file data on model ID: ' + training_id)
except:
# print('[Training] Critical error on training: ' + training_id)
logging.exception('[Training] Critical error on training: ' + training_id)
result = {
'training_accuracy': best_acc,
'validation_accuracy': best_val_acc,
'training_loss': best_loss,
'validation_loss': best_val_loss,
'optimizer_config': str(best_optimizer),
'loss_config': str(best_loss_fn)
}
logging.info('[Training] results: ' + str(result))
# Send HTTP request to server with the statistics on this training
r = requests.post(
'http://host.docker.internal:' + str(os.getenv('SERVER_PORT')) + '/training/result',
headers={'api_key': API_KEY},
json={
'dataset_name': os.getenv('DATASET_NAME'),
'training_id': training_id,
'results': result
})
r.raise_for_status()
# print("[Training Results] Sent training results to server.")
logging.info("[Training Results] Sent training results to server.")
def fit(model, loss_fn, optimizer, train_ds, validation_ds, n_epochs):
logging.info(loss_fn)
logging.info(optimizer)
model.compile(optimizer=optimizer,
loss=loss_fn,
metrics=['accuracy'])
logging.info('[Training] with optimizer config: ' + str(model.optimizer.get_config()))
logging.info('[Training] with loss function config: ' + str(model.loss.get_config()))
history = model.fit(train_ds, validation_data=validation_ds, epochs=n_epochs)
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
return (acc[-1], val_acc[-1], loss[-1], val_loss[-1], model)
def convert_data_type(input_dict):
for k, v in input_dict.items():
if v == "True":
input_dict[k] = True
elif v == "False":
input_dict[k] = False
elif isfloat(v):
input_dict[k] = float(v)
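# For example, convert_data_type({"amsgrad": "True", "epsilon": "1e-07"})
# mutates the dict in place to {"amsgrad": True, "epsilon": 1e-07}.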
def isfloat(value):
if type(value) == bool:
return False
try:
float(value)
return True
except ValueError:
return False
# === app.py (pythonlittleboy/python_gentleman_crawler; Apache-2.0) ===
from flask import render_template
from flask import request
from model import MovieWebDAO
import json
from ml import Forcast
app = Flask(__name__)
@app.route('/')
def hello_world():
return render_template('index.html')
@app.route('/hello/')
@app.route('/hello/<name>')
def hello(name=None):
return render_template('hello.html', name=name)
@app.route('/recently/')
def recently():
return render_template('list.html', functionPath="recently")
@app.route('/download/')
def download():
return render_template('list.html', functionPath="download")
@app.route('/recommander/')
def recommander():
return render_template('list.html', functionPath="recommander")
@app.route('/search/<keyword>')
def search(keyword=None):
return render_template('list.html', functionPath="search", keyword=keyword)
@app.route('/favor/')
def favor():
return render_template('list.html', functionPath="favor")
@app.route('/api/recently/')
def getRecentlyMovies():
start = request.args.get("start", type=int, default=0)
limit = request.args.get("limit", type=int, default=10)
#print(str(start) + ", " + str(limit))
movies = MovieWebDAO.getRecentlyMovies(start, limit)
total = MovieWebDAO.countRecentlyMovies()
return json.dumps({"movies": movies, "total": total}, ensure_ascii=False)
@app.route('/api/recommander/')
def getRecommanderMovies():
start = request.args.get("start", type=int, default=0)
limit = request.args.get("limit", type=int, default=10)
movies = MovieWebDAO.getForcastMovies(start, limit)
total = MovieWebDAO.countForcastMovies()
return json.dumps({"movies": movies, "total": total}, ensure_ascii=False)
@app.route('/api/download/')
def getDownloadMovies():
start = request.args.get("start", type=int, default=0)
limit = request.args.get("limit", type=int, default=10)
movies = MovieWebDAO.getDownloadMovies(start, limit)
    total = MovieWebDAO.countDownloadMovies()
return json.dumps({"movies": movies, "total": total}, ensure_ascii=False)
@app.route('/api/search/<keyword>')
def getSearchMovies(keyword=None):
start = request.args.get("start", type=int, default=0)
limit = request.args.get("limit", type=int, default=10)
movies = MovieWebDAO.getSearchMovies(start, limit, keyword)
total = MovieWebDAO.countSearchMovies(keyword)
return json.dumps({"movies": movies, "total": total}, ensure_ascii=False)
@app.route('/api/favor/')
def getFavorMovies():
start = request.args.get("start", type=int, default=0)
limit = request.args.get("limit", type=int, default=10)
movies = MovieWebDAO.getFavorMovies(start, limit)
    total = MovieWebDAO.countFavorMovies()
return json.dumps({"movies": movies, "total": total}, ensure_ascii=False)
@app.route('/api/pick/<actor>/<avNumber>')
def pick(actor=None, avNumber=None):
if not actor or not avNumber:
return "must be <actor>/<avNumber>"
MovieWebDAO.downloadMovie(avNumber)
#DiskIndex.copyOneImageToTemp(actor, avNumber)
return "OK"
@app.route('/api/skip/<avNumber>')
def skip(avNumber=None):
MovieWebDAO.skipMovie(avNumber)
return "OK"
if __name__ == '__main__':
print("http://localhost:15001")
    app.run(host='0.0.0.0', debug=True, port=15001)
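# Example requests against the routes above (host/port from app.run; the id
# values are illustrative):
#   curl 'http://localhost:15001/api/recently/?start=0&limit=10'
#   curl 'http://localhost:15001/api/search/titanic?start=0&limit=10'
#   curl 'http://localhost:15001/api/skip/ABC-123'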
# === remove_negative_from_positive_augmented_samples.py (DarkElement75/object-detection-experiments; MIT) ===
import numpy as np
import cv2
def read_new(archive_dir):
with h5py.File(archive_dir, "r", chunks=True, compression="gzip") as hf:
"""
Load our X data the usual way,
using a memmap for our x data because it may be too large to hold in RAM,
and loading Y as normal since this is far less likely
-using a memmap for Y when it is very unnecessary would likely impact performance significantly.
"""
x_shape = list(hf.get("x_shape"))
x_shape[0] = x_shape[0]-27
x_shape = tuple(x_shape)
print x_shape
x = np.memmap("x.dat", dtype="float32", mode="r+", shape=x_shape)
memmap_step = 1000
hf_x = hf.get("x")
for i in range(27, x_shape[0]+27, memmap_step):
x[i-27:i-27+memmap_step] = hf_x[i:i+memmap_step]
print i
y = np.ones((x_shape[0]))
return x, y
def write_new(archive_dir, x, y):
with h5py.File(archive_dir, "w", chunks=True, compression="gzip") as hf:
hf.create_dataset("x", data=x)
hf.create_dataset("x_shape", data=x.shape)
hf.create_dataset("y", data=y)
hf.create_dataset("y_shape", data=y.shape)
"""
Just gets rid of the negatives by only reading the positives, then writing them to replace the existing archive
"""
archive_dir="positive_augmented_samples.h5"
x,y = read_new(archive_dir)
write_new(archive_dir, x, y)
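# Note: np.memmap leaves its backing file 'x.dat' on disk after the run; delete
# it once the rewritten archive has been verified.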
# === Automate_Whatsapp_Sending_Text.py (IshvinaKapoor/Automate-WhatsApp; MIT) ===
"""
Automate WhatsApp - Sending WhatsApp message
@author: DELL Ishvina Kapoor
"""
#importing the necessary modules
import pywhatkit as pkt
import getpass as gp
#displaying a welcome message
print("Automating Whatsapp!")
#capturing the target phone number from the user
phone_num = gp.getpass(prompt = 'Enter the phone number(with country code) : ', stream = None)
#capture the message
message = "Hi IK this side"
#call the method
#the time is in 24 hr format
pkt.sendwhatmsg(phone_num, message, 22, 33)
#will be displayed once whatsapp is automated
print("Delivered to the target user") | 23.925926 | 96 | 0.716718 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 450 | 0.696594 |
# === transonic/analyses/beniget.py (fluiddyn/transonic; BSD-3-Clause) ===
from beniget import Ancestors, DefUseChains as DUC, UseDefChains
from beniget.beniget import Def
__all__ = ["Ancestors", "DefUseChains", "UseDefChains"]
class DefUseChains(DUC):
def visit_List(self, node):
if isinstance(node.ctx, ast.Load):
dnode = self.chains.setdefault(node, Def(node))
for elt in node.elts:
if isinstance(elt, CommentLine):
continue
self.visit(elt).add_user(dnode)
return dnode
# unfortunately, destructured node are marked as Load,
# only the parent List/Tuple is marked as Store
elif isinstance(node.ctx, ast.Store):
return self.visit_Destructured(node)
visit_Tuple = visit_List
# this import has to be after the definition of DefUseChains
from transonic.analyses.extast import CommentLine # noqa: E402
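# Minimal usage sketch (the source string is illustrative; DefUseChains takes
# an optional filename inherited from beniget, used only in messages):
#   tree = ast.parse("x = [1, 2]\nprint(x)")
#   chains = DefUseChains("example.py")
#   chains.visit(tree)
#   # chains.locals[tree] then holds the module-level definitions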
# === view/resources/__init__.py (surfaceanalytics/inelasticscattering; MIT) ===
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 3 16:43:28 2020
@author: nicholls
"""
# === 2020/python/16.py (gcp825/advent_of_code; MIT) ===
def read_file(filepath):
with open(filepath,'r') as f:
a = [x for x in f.read().split('\n\n')]
b = []; d = []
for x in [[x[0],x[1].split(' or ')] for x in [x.split(': ') for x in a[0].split('\n')]]:
for y in x[1]:
z = y.split('-')
b += [[x[0],range(int(z[0]),int(z[1])+1)]]
c = [int(x) for x in [x for x in a[1].split('\n')][1].split(',')]
for x in a[2].split('\n')[1:]:
d += [[int(x) for x in x.split(',')]]
return b,c,d
# Part 1: keep tickets whose every value satisfies some rule; sum the rest as the error rate.
def validate_tix(tix,rules):
valid_tix = []; error_rate = 0
for t in tix:
curr_rate = error_rate
for n in t:
valid = False
for r in rules:
if n in r[1]:
valid = True
break
if not valid: error_rate += n
if curr_rate == error_rate: valid_tix += [t]
return valid_tix, error_rate
# Part 2: find which column each field can be, then repeatedly lock in fields
# with a single remaining candidate column.
def determine_fields(tix,rules):
fields = list(map(list,zip(*tix)))
length = len(rules)
results = {}; p = []
for e,f in enumerate(fields):
i = 0
while i < length:
valid = []
for r in rules[i:i+2]:
for n in f:
if n in r[1]: valid += [n]
if sorted(f) == sorted(valid): p += [(r[0],str(e))]
i += 2
while len(p) > 0:
count = Counter([x[0] for x in p])
matches = [x for x in p if x[0] in [k for k,v in count.items() if v == 1]]
for a,b in matches:
results[a] = int(b)
p = [x for x in p if x[1] != b]
return results
# Multiply together my ticket's values for the 'departure' fields.
def check_ticket(my_ticket,fields):
total = 0
for k,v in fields.items():
if k[0:9] == 'departure':
total = max(total,1) * my_ticket[v]
return total
def main(filepath):
rules, my_ticket, tickets = read_file(filepath)
valid_tickets, pt1 = validate_tix(tickets,rules)
fields = determine_fields(valid_tickets,rules)
pt2 = check_ticket(my_ticket,fields)
return pt1, pt2
print(main('day16.txt'))
# === MODEL3.CNN.py (alhasacademy96/finalyearproject; MIT) ===
# MODEL 3: CNN with built-in tensorflow tokenizer.
# This is the final version of the model (not the base).
# Packages and libraries used for this model.
# ** Install these if not installed already **.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import datetime
from time import time
import re
from sklearn.metrics import accuracy_score, confusion_matrix, precision_score, recall_score, f1_score, roc_curve, \
classification_report
from tensorflow import keras
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras import layers
from keras.models import Sequential
from sklearn.model_selection import train_test_split, cross_validate
import tensorflow as tf
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
# plt.style.use('ggplot')
# Basic data visualisation and analysis ------------------------------------------------------------------------------
# We see that the title column is from news articles, and the text column forms the twitter tweet extracts.
true = pd.read_csv('True.csv')
false = pd.read_csv('Fake.csv')
# We drop the columns we do not need. See chapter 3, model CNN for more details.
true = true.drop('title', axis=1)
true = true.drop('subject', axis=1)
true = true.drop('date', axis=1)
false = false.drop('title', axis=1)
false = false.drop('subject', axis=1)
false = false.drop('date', axis=1)
# We set the labels for each data instance, where factual = 1, otherwise 0.
false['label'] = 0
true['label'] = 1
# We merge the two divided datasets (true and fake) into a singular dataset.
data = pd.concat([true, false], ignore_index=True)
texts = data['text']
labels = data['label']
# We incorporate the publishers feature from title and text instances, and place it into the dataset manually.
# First Creating list of index that do not have publication part. We can use this as a new feature.
unknown_publishers = []
for index, row in enumerate(true.text.values):
try:
record = row.split(" -", maxsplit=1)
# if no text part is present, following will give error
print(record[1])
# if len of piblication part is greater than 260
# following will give error, ensuring no text having "-" in between is counted
assert (len(record[0]) < 260)
except:
unknown_publishers.append(index)
# We print the instances where publication information is absent or different.
print(true.iloc[unknown_publishers].text)
# We want to use the publication information as a new feature.
publisher = []
tmp_text = []
for index, row in enumerate(true.text.values):
if index in unknown_publishers:
# Append unknown publisher:
tmp_text.append(row)
publisher.append("Unknown")
continue
record = row.split(" -", maxsplit=1)
publisher.append(record[0])
tmp_text.append(record[1])
# Replace text column with new text + add a new feature column called publisher/source.
true["publisher"] = publisher
true["text"] = tmp_text
del publisher, tmp_text, record, unknown_publishers
# Validate that the publisher/source column has been added to the dataset.
print(true.head())
# Check for missing values, then drop them for both datasets.
print([index for index, text in enumerate(true.text.values) if str(text).strip() == ''])
true = true.drop(8970, axis=0)
fakeEmptyIndex = [index for index, text in enumerate(false.text.values) if str(text).strip() == '']
print(f"No of empty rows: {len(fakeEmptyIndex)}")
false.iloc[fakeEmptyIndex].tail()
# Note: the publisher extraction and empty-row cleanup above are exploratory and
# do not feed back into `texts`/`labels`, which were captured before that block.
# For CNNs, we have to vectorize the text into 2D integer tensors.
MAX_SEQUENCE_LENGTH = 5000
MAX_NUM_WORDS = 25000
EMBEDDING_DIM = 300
TEST_SPLIT = 0.2
epochs = 1
# We tokenize the text, just like all other models--------------------------------------------------------------------
tokenizer = Tokenizer(num_words=MAX_NUM_WORDS)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
num_words = min(MAX_NUM_WORDS, len(word_index)) + 1
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH, padding='pre', truncating='pre')
# Print the total number of tokens:
print('Found %s tokens.' % len(word_index))
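# For example, pad_sequences([[5, 8, 3]], maxlen=5, padding='pre') yields
# [[0, 0, 5, 8, 3]], and sequences longer than maxlen are cut from the front.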
# We partition our dataset into train/test.
x_train, x_val, y_train, y_val = train_test_split(data, labels.apply(lambda x: 0 if x == 0 else 1),
test_size=TEST_SPLIT)
log_dir = "logs\\model\\"
# A custom callbacks function, which initially included tensorboard.
mycallbacks = [
tf.keras.callbacks.ReduceLROnPlateau(monitor='val_accuracy', patience=2, verbose=1, factor=0.5, min_lr=0.00001),
tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True), # Restoring the best
# ...weights will help keep the optimal weights.
# tf.keras.callbacks.TensorBoard(log_dir="./logs"), # NEWLY ADDED - CHECK.
# tf.keras.callbacks.TensorBoard(log_dir=log_dir.format(time())), # NEWLY ADDED - CHECK.
# tensorboard --logdir logs --> to check tensorboard feedback.
]
# Parameters for our model. We experimented with some combinations and settled on this configuration------------------
model = Sequential(
[
# Word/sequence processing:
layers.Embedding(num_words, EMBEDDING_DIM, input_length=MAX_SEQUENCE_LENGTH, trainable=True),
# The layers:
layers.Conv1D(128, 5, activation='relu'),
layers.GlobalMaxPooling1D(),
# We classify our model here:
layers.Dense(128, activation='relu'),
layers.Dense(1, activation='sigmoid')
])
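# Shape walk-through: the Embedding outputs (batch, 5000, 300); Conv1D with 128
# filters and kernel size 5 ('valid' padding) gives (batch, 4996, 128); and
# GlobalMaxPooling1D collapses the time axis to (batch, 128) for the classifier.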
# We compile our model and run, with the loss function crossentropy, and optimizer rmsprop (we experimented with adam,
# ...but rmsprop produced better results).
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
model.summary()
print("Model weights:")
print(model.weights)
# tensorboard_callback = keras.callbacks.TensorBoard(log_dir="./logs")
history = model.fit(x_train, y_train, batch_size=256, epochs=epochs, validation_data=(x_val, y_val),
callbacks=mycallbacks)
# Plot training/validation accuracy and loss per epoch.
epochs = history.epoch
fig, ax = plt.subplots(1, 2)
train_acc = history.history['accuracy']
train_loss = history.history['loss']
val_acc = history.history['val_accuracy']
val_loss = history.history['val_loss']
fig.set_size_inches(20, 10)
ax[0].plot(epochs, train_acc, 'go-', label='Training Accuracy')
ax[0].plot(epochs, val_acc, 'ro-', label='Testing Accuracy')
ax[0].set_title('Training & Testing Accuracy')
ax[0].legend()
ax[0].set_xlabel("Epochs")
ax[0].set_ylabel("Accuracy")
ax[1].plot(epochs, train_loss, 'go-', label='Training Loss')
ax[1].plot(epochs, val_loss, 'ro-', label='Testing Loss')
ax[1].set_title('Training & Testing Loss')
ax[1].legend()
ax[1].set_xlabel("Epochs")
ax[1].set_ylabel("Loss")
plt.show()
'''
history_dict = history.history
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = history.epoch
plt.figure(figsize=(12, 9))
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss', size=20)
plt.xlabel('Epochs', size=20)
plt.ylabel('Loss', size=20)
plt.legend(prop={'size': 20})
plt.show()
plt.figure(figsize=(12, 9))
plt.plot(epochs, acc, 'g', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy', size=20)
plt.xlabel('Epochs', size=20)
plt.ylabel('Accuracy', size=20)
plt.legend(prop={'size': 20})
plt.ylim((0.5, 1))
plt.show()
'''
# We evaluate our model by predicting a few instances from our test data (the first 5)--------------------------------
print("Evaluation:")
print(model.evaluate(x_val, y_val))
# We predict a few instances (up to 5).
pred = model.predict(x_val)
print(pred[:5])
binary_predictions = []
for i in pred:
if i >= 0.5:
binary_predictions.append(1)
else:
binary_predictions.append(0)
# We print performance metrics:
print('Accuracy on test set:', accuracy_score(binary_predictions, y_val))
print('Precision on test set:', precision_score(binary_predictions, y_val))
print('Recall on test set:', recall_score(binary_predictions, y_val))
print('F1 on test set:', f1_score(binary_predictions, y_val))
# We print the classification report (as an extra):
print(classification_report(y_val, pred.round(), target_names=['Fact', 'Fiction']))
# We print the confusion matrix.
cmm = confusion_matrix(y_val, pred.round())
print(cmm)
print("Ibrahim Alhas")
cmm = pd.DataFrame(cmm, index=['Fake', 'Original'], columns=['Fake', 'Original'])
plt.figure(figsize=(10, 10))
sns.heatmap(cmm, cmap="Blues", linecolor='black', linewidth=1, annot=True, fmt='', xticklabels=['Fake', 'Original'],
yticklabels=['Fake', 'Original'])
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.show()
# End----------------------------------------------------
# === tn/old_scripts/old_md_to_pdf/export_md_to_pdf.py (unfoldingWord-dev/tools; MIT) ===
# -*- coding: utf8 -*-
#
# Copyright (c) 2017 unfoldingWord
# http://creativecommons.org/licenses/MIT/
# See LICENSE file for details.
#
# Contributors:
# Richard Mahn <rich.mahn@unfoldingword.org>
"""
This script generates the HTML tN documents for each book of the Bible
"""
from __future__ import unicode_literals, print_function
import os
import sys
import re
import pprint
import logging
import argparse
import tempfile
import markdown
import shutil
import subprocess
import csv
import codecs
import markdown2
import json
from glob import glob
from bs4 import BeautifulSoup
from usfm_tools.transform import UsfmTransform
from ...general_tools.file_utils import write_file, read_file, load_json_object, unzip, load_yaml_object
from ...general_tools.url_utils import download_file
from ...general_tools.bible_books import BOOK_NUMBERS, BOOK_CHAPTER_VERSES
from ...general_tools.usfm_utils import usfm3_to_usfm2
# Debugging helper: shadow print() so objects are dumped as readable JSON.
_print = print
def print(obj):
    _print(json.dumps(obj, ensure_ascii=False, indent=2).encode('utf-8'))
class TnConverter(object):
def __init__(self, ta_tag=None, tn_tag=None, tw_tag=None, ust_tag=None, ult_tag=None, ugnt_tag=None, working_dir=None,
output_dir=None, lang_code='en', books=None):
"""
:param ta_tag:
:param tn_tag:
:param tw_tag:
:param ust_tag:
:param ult_tag:
:param ugnt_tag:
:param working_dir:
:param output_dir:
:param lang_code:
:param books:
"""
self.ta_tag = ta_tag
self.tn_tag = tn_tag
self.tw_tag = tw_tag
self.ust_tag = ust_tag
self.ult_tag = ult_tag
self.ugnt_tag = ugnt_tag
self.working_dir = working_dir
self.output_dir = output_dir
self.lang_code = lang_code
self.books = books
self.logger = logging.getLogger()
self.logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(levelname)s - %(message)s')
ch.setFormatter(formatter)
self.logger.addHandler(ch)
self.pp = pprint.PrettyPrinter(indent=4)
if not self.working_dir:
self.working_dir = tempfile.mkdtemp(prefix='tn-')
if not self.output_dir:
self.output_dir = self.working_dir
self.logger.debug('TEMP DIR IS {0}'.format(self.working_dir))
self.tn_dir = os.path.join(self.working_dir, '{0}_tn'.format(lang_code))
self.tw_dir = os.path.join(self.working_dir, '{0}_tw'.format(lang_code))
self.ta_dir = os.path.join(self.working_dir, '{0}_ta'.format(lang_code))
self.ust_dir = os.path.join(self.working_dir, '{0}_ust'.format(lang_code))
self.ult_dir = os.path.join(self.working_dir, '{0}_ult'.format(lang_code))
self.ugnt_dir = os.path.join(self.working_dir, 'UGNT'.format(lang_code))
self.versification_dir = os.path.join(self.working_dir, 'versification', 'bible', 'ufw', 'chunks')
self.manifest = None
self.book_id = None
self.book_title = None
self.book_number = None
self.project = None
self.tn_text = ''
self.tw_text = ''
self.ta_text = ''
self.rc_references = {}
self.chapters_and_verses = {}
self.resource_data = {}
self.tn_book_data = {}
self.tw_words_data = {}
self.bad_links = {}
self.usfm_chunks = {}
self.version = None
self.contributors = ''
self.publisher = None
self.issued = None
self.filename_base = None
def run(self):
self.setup_resource_files()
self.manifest = load_yaml_object(os.path.join(self.tn_dir, 'manifest.yaml'))
self.version = self.manifest['dublin_core']['version']
#############self.contributors = '; '.join(self.manifest['dublin_core']['contributor'])
self.publisher = self.manifest['dublin_core']['publisher']
self.issued = self.manifest['dublin_core']['issued']
projects = self.get_book_projects()
for p in projects:
self.project = p
self.book_id = p['identifier'].upper()
self.book_title = p['title'].replace(' translationNotes', '')
self.book_number = BOOK_NUMBERS[self.book_id.lower()]
            # NOTE: temporary filter; every book except the one numbered 65 is skipped
            if int(self.book_number) != 65:
                continue
self.populate_tn_book_data()
self.populate_tw_words_data()
self.populate_chapters_and_verses()
self.populate_usfm_chunks()
self.filename_base = '{0}_tn_{1}-{2}_v{3}'.format(self.lang_code, self.book_number.zfill(2), self.book_id, self.version)
self.rc_references = {}
self.logger.info('Creating tN for {0} ({1}-{2})...'.format(self.book_title, self.book_number, self.book_id))
if not os.path.isfile(os.path.join(self.output_dir, '{0}.hhhhtml'.format(self.filename_base))):
print("Processing HTML...")
self.generate_html()
if not os.path.isfile(os.path.join(self.output_dir, '{0}.pdf'.format(self.filename_base))):
print("Generating PDF...")
self.convert_html2pdf()
if len(self.bad_links.keys()):
_print("BAD LINKS:")
for bad in sorted(self.bad_links.keys()):
for ref in self.bad_links[bad]:
parts = ref[5:].split('/')
_print("Bad reference: `{0}` in {1}'s {2}".format(bad, parts[1], '/'.join(parts[3:])))
def get_book_projects(self):
projects = []
if not self.manifest or 'projects' not in self.manifest or not self.manifest['projects']:
            return projects
for p in self.manifest['projects']:
if not self.books or p['identifier'] in self.books:
if not p['sort']:
p['sort'] = BOOK_NUMBERS[p['identifier']]
projects.append(p)
return sorted(projects, key=lambda k: k['sort'])
def get_resource_url(self, resource, tag):
return 'https://git.door43.org/unfoldingWord/{0}_{1}/archive/{2}.zip'.format(self.lang_code, resource, tag)
def setup_resource_files(self):
        # Download any resource whose directory is not already present.
        if not os.path.isdir(self.tn_dir):
            tn_url = self.get_resource_url('tn', self.tn_tag)
            self.extract_files_from_url(tn_url)
        if not os.path.isdir(self.tw_dir):
            tw_url = self.get_resource_url('tw', self.tw_tag)
            self.extract_files_from_url(tw_url)
        if not os.path.isdir(self.ta_dir):
            ta_url = self.get_resource_url('ta', self.ta_tag)
            self.extract_files_from_url(ta_url)
        if not os.path.isdir(self.ust_dir):
            ust_url = self.get_resource_url('ust', self.ust_tag)
            self.extract_files_from_url(ust_url)
        if not os.path.isdir(self.ult_dir):
            ult_url = self.get_resource_url('ult', self.ult_tag)
            self.extract_files_from_url(ult_url)
        if not os.path.isdir(self.ugnt_dir):
            ugnt_url = 'https://git.door43.org/unfoldingWord/UGNT/archive/{0}.zip'.format(self.ugnt_tag)
            self.extract_files_from_url(ugnt_url)
if not os.path.isfile(os.path.join(self.working_dir, 'icon-tn.png')):
command = 'curl -o {0}/icon-tn.png https://unfoldingword.bible/assets/img/icon-tn.png'.format(self.working_dir)
subprocess.call(command, shell=True)
if not os.path.isdir(os.path.join(self.working_dir, 'versification')):
versification_url = 'https://git.door43.org/Door43-Catalog/versification/archive/master.zip'
self.extract_files_from_url(versification_url)
def extract_files_from_url(self, url):
zip_file = os.path.join(self.working_dir, url.rpartition('/')[2])
try:
self.logger.debug('Downloading {0}...'.format(url))
download_file(url, zip_file)
finally:
self.logger.debug('finished.')
try:
self.logger.debug('Unzipping {0}...'.format(zip_file))
unzip(zip_file, self.working_dir)
finally:
self.logger.debug('finished.')
def populate_usfm_chunks(self):
book_chunks = {}
for resource in ['ult', 'ust']:
save_dir = os.path.join(self.working_dir, 'chunk_data', resource)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_file = os.path.join(save_dir, '{0}.json'.format(self.book_id.lower()))
if os.path.isfile(save_file):
book_chunks[resource] = load_json_object(save_file)
continue
book_chunks[resource] = {}
bible_dir = getattr(self, '{0}_dir'.format(resource))
usfm = read_file(os.path.join(bible_dir, '{0}-{1}.usfm'.format(BOOK_NUMBERS[self.book_id.lower()], self.book_id)), encoding='utf-8')
usfm = usfm3_to_usfm2(usfm)
usfm = re.sub(r'\n*\s*\\s5\s*\n*', r'\n', usfm, flags=re.MULTILINE | re.IGNORECASE)
chapters_usfm = re.compile(r'\n*\s*\\c[\u00A0\s]+').split(usfm)
book_chunks[resource]['header'] = chapters_usfm[0]
for chapter_data in self.chapters_and_verses:
chapter = str(chapter_data['chapter'])
book_chunks[resource][chapter] = {}
book_chunks[resource][chapter]['chunks'] = []
                chapter_usfm = r'\c ' + chapters_usfm[int(chapter)].strip()
verses_usfm = re.compile(r'\n*\s*\\v[\u00A0\s]+').split(chapter_usfm)
for idx, first_verse in enumerate(chapter_data['first_verses']):
if len(chapter_data['first_verses']) > idx+1:
last_verse = chapter_data['first_verses'][idx+1] - 1
else:
last_verse = int(BOOK_CHAPTER_VERSES[self.book_id.lower()][chapter])
chunk_usfm = ''
for verse in range(first_verse, last_verse+1):
chunk_usfm += r'\v '+verses_usfm[verse]+'\n'
data = {
'usfm': chunk_usfm,
'first_verse': first_verse,
'last_verse': last_verse,
}
# print('chunk: {0}-{1}-{2}-{3}-{4}'.format(resource, self.book_id, chapter, first_verse, last_verse))
book_chunks[resource][chapter][str(first_verse)] = data
book_chunks[resource][chapter]['chunks'].append(data)
write_file(save_file, book_chunks[resource])
self.usfm_chunks = book_chunks
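    # Cache shape: usfm_chunks[resource][chapter][str(first_verse)] is a dict
    # with 'usfm', 'first_verse' and 'last_verse' keys, e.g.
    # usfm_chunks['ult']['3']['1']['usfm'] holds the ULT text of chunk 3:1ff.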
def generate_html(self):
tn_html = self.get_tn_html()
ta_html = self.get_ta_html()
tw_html = self.get_tw_html()
html = '\n<br>\n'.join([tn_html, tw_html, ta_html])
html = self.replace_rc_links(html)
html = self.fix_links(html)
html_file = os.path.join(self.output_dir, '{0}.html'.format(self.filename_base))
write_file(html_file, html)
print('Wrote HTML to {0}'.format(html_file))
def pad(self, num):
if self.book_id == 'PSA':
return str(num).zfill(3)
else:
return str(num).zfill(2)
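    # e.g. pad(3) returns '003' for Psalms (three-digit chapters) and '03' otherwise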
@staticmethod
    def isInt(s):
        try:
            int(s)
            return True
        except ValueError:
            return False
def populate_chapters_and_verses(self):
versification_file = os.path.join(self.versification_dir, '{0}.json'.format(self.book_id.lower()))
        self.chapters_and_verses = {}
if os.path.isfile(versification_file):
self.chapters_and_verses = load_json_object(versification_file)
def populate_tn_book_data(self):
book_file = os.path.join(self.tn_dir, 'en_tn_{0}-{1}.tsv'.format(self.book_number, self.book_id))
self.tn_book_data = {}
if not os.path.isfile(book_file):
return
book_data = {}
with open(book_file) as fd:
rd = csv.reader(fd, delimiter=str("\t"), quotechar=str('"'))
header = next(rd)
for row in rd:
data = {}
for idx, field in enumerate(header):
data[field] = row[idx]
chapter = data['Chapter']
verse = data['Verse']
if not chapter in book_data:
book_data[chapter] = {}
if not verse in book_data[chapter]:
book_data[chapter][verse] = []
book_data[chapter][verse].append(data)
self.tn_book_data = book_data
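    # Shape: tn_book_data[chapter][verse] is a list of TSV row dicts, e.g.
    # tn_book_data['1']['2'][0]['OccurrenceNote'] is the first note on 1:2.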
def get_tn_html(self):
tn_html = '<h1><a id="tn-{0}"></a>translationNotes</h1>\n\n'.format(self.book_id)
if 'front' in self.tn_book_data and 'intro' in self.tn_book_data['front']:
intro = markdown.markdown(self.tn_book_data['front']['intro'][0]['OccurrenceNote'].decode('utf8').replace('<br>', '\n'))
title = self.get_first_header(intro)
intro = self.fix_tn_links(intro, 'intro')
intro = self.increase_headers(intro)
intro = self.decrease_headers(intro, 4) # bring headers of 3 or more down 1
id = 'tn-{0}-front-intro'.format(self.book_id)
intro = re.sub(r'<h(\d)>', r'<h\1><a id="{0}"></a>'.format(id), intro, 1, flags=re.IGNORECASE | re.MULTILINE)
intro += '<br><br>\n\n'
tn_html += '\n<br>\n'+intro
# HANDLE RC LINKS AND BACK REFERENCE
rc = 'rc://*/tn/help/{0}/front/intro'.format(self.book_id.lower())
self.resource_data[rc] = {
'rc': rc,
'id': id,
'link': '#'+id,
'title': title
}
self.get_resource_data_from_rc_links(intro, rc)
for chapter_verses in self.chapters_and_verses:
chapter = str(chapter_verses['chapter'])
if 'intro' in self.tn_book_data[chapter]:
intro = markdown.markdown(self.tn_book_data[chapter]['intro'][0]['OccurrenceNote'].replace('<br>',"\n"))
intro = re.sub(r'<h(\d)>([^>]+) 0+([1-9])', r'<h\1>\2 \3', intro, 1, flags=re.MULTILINE | re.IGNORECASE)
title = self.get_first_header(intro)
intro = self.fix_tn_links(intro, chapter)
intro = self.increase_headers(intro)
intro = self.decrease_headers(intro, 5, 2) # bring headers of 5 or more down 2
id = 'tn-{0}-{1}'.format(self.book_id, self.pad(chapter))
intro = re.sub(r'<h(\d+)>', r'<h\1><a id="{0}"></a>'.format(id), intro, 1, flags=re.IGNORECASE | re.MULTILINE)
intro += '<br><br>\n\n'
tn_html += '\n<br>\n'+intro
# HANDLE RC LINKS
rc = 'rc://*/tn/help/{0}/{1}/intro'.format(self.book_id.lower(), self.pad(chapter))
self.resource_data[rc] = {
'rc': rc,
'id': id,
'link': '#'+id,
'title': title
}
self.get_resource_data_from_rc_links(intro, rc)
for idx, first_verse in enumerate(chapter_verses['first_verses']):
col1 = ''
if idx < len(chapter_verses['first_verses'])-1:
last_verse = chapter_verses['first_verses'][idx+1] - 1
else:
last_verse = int(BOOK_CHAPTER_VERSES[self.book_id.lower()][chapter])
if first_verse != last_verse:
title = '{0} {1}:{2}-{3}'.format(self.book_title, chapter, first_verse, last_verse)
else:
title = '{0} {1}:{2}'.format(self.book_title, chapter, first_verse)
anchors = ''
for verse in range(first_verse, last_verse+1):
id = 'tn-{0}-{1}-{2}'.format(self.book_id, self.pad(chapter), self.pad(verse))
anchors += '<a id="{0}"></a>'.format(id)
rc = 'rc://*/tn/help/{0}/{1}/{2}'.format(self.book_id.lower(), self.pad(chapter), self.pad(verse))
self.resource_data[rc] = {
'rc': rc,
'id': id,
'link': '#'+id,
'title': title
}
header = '\n<br>\n<h2>{0}{1}</h2>\n\n'.format(anchors, title)
col1 += '<sup style="color:light-gray">ULT</sup>' + self.get_bible_html('ult', int(chapter), first_verse, last_verse)
col1 += '\n<br><br>\n'
col1 += '<sup style="color:light-gray">UST</sup>' + self.get_bible_html('ust', int(chapter), first_verse, last_verse)
col2 = ''
for verse in range(first_verse, last_verse+1):
if str(verse) in self.tn_book_data[chapter]:
for data in self.tn_book_data[chapter][str(verse)]:
title = data['GLQuote'].decode('utf8')
col2 += '<b>' + title + (' -' if not title.endswith(':') else '') + ' </b>'
col2 += markdown.markdown(data['OccurrenceNote'].decode('utf8').replace('<br>',"\n")).replace('<p>', '').replace('</p>', '')
col2 += '\n<br><br>\n'
if col2 != '':
col2 = self.decrease_headers(col2, 5) # bring headers of 5 or more #'s down 1
col2 = self.fix_tn_links(col2, chapter)
chunk_page = '{0}\n<table style="width:100%">\n<tr>\n<td style="vertical-align:top;width:35%;padding-right:5px">\n\n<p>{1}</p>\n</td>\n<td style="vertical-align:top">\n\n<p>{2}</p>\n</td>\n</tr>\n</table>\n'.format(header, col1, col2)
tn_html += chunk_page
self.get_resource_data_from_rc_links(chunk_page, rc)
return tn_html
def populate_tw_words_data(self):
groups = ['kt', 'names', 'other']
grc_path = 'tools/tn/generate_tn_pdf/grc/translationHelps/translationWords/v0.4'
if not os.path.isdir(grc_path):
_print('{0} not found! Please make sure you ran `node getResources ./` in the generate_tn_pdf dir and that the version in the script is correct'.format(grc_path))
exit(1)
words = {}
for group in groups:
files_path = '{0}/{1}/groups/{2}/*.json'.format(grc_path, group, self.book_id.lower())
files = glob(files_path)
for file in files:
base = os.path.splitext(os.path.basename(file))[0]
rc = 'rc://*/tw/dict/bible/{0}/{1}'.format(group, base)
occurrences = load_json_object(file)
for occurrence in occurrences:
contextId = occurrence['contextId']
chapter = contextId['reference']['chapter']
verse = contextId['reference']['verse']
contextId['rc'] = rc
if chapter not in words:
words[chapter] = {}
if verse not in words[chapter]:
words[chapter][verse] = []
words[chapter][verse].append(contextId)
self.tw_words_data = words
def get_bible_html(self, resource, chapter, first_verse, last_verse):
html = self.get_chunk_html(resource, chapter, first_verse)
html = html.replace('\n', '').replace('<p>', '').replace('</p>', '').strip()
html = re.sub(r'<span class="v-num"', '<br><span class="v-num"', html, flags=re.IGNORECASE | re.MULTILINE)
if resource != 'ult':
return html
words = self.get_all_words_to_match(resource, chapter, first_verse, last_verse)
verses = html.split('<sup>')
for word in words:
parts = word['text'].split(' ... ')
highlights = {}
idx = word['contextId']['reference']['verse']-first_verse+1
for part in parts:
highlights[part] = r'<a href="{0}">{1}</a>'.format(word['contextId']['rc'], part)
            regex = re.compile(r'(?<![></\\_-])\b(%s)\b(?![></\\_-])' % "|".join(map(re.escape, highlights.keys())))  # escape each part so literal punctuation cannot break the alternation
verses[idx] = regex.sub(lambda m: highlights[m.group(0)], verses[idx])
html = '<sup>'.join(verses)
return html
def get_all_words_to_match(self, resource, chapter, first_verse, last_verse):
path = 'tools/tn/generate_tn_pdf/en/bibles/{0}/v1/{1}/{2}.json'.format(resource, self.book_id.lower(), chapter)
words = []
data = load_json_object(path)
chapter = int(chapter)
for verse in range(first_verse, last_verse + 1):
if chapter in self.tw_words_data and verse in self.tw_words_data[chapter]:
contextIds = self.tw_words_data[int(chapter)][int(verse)]
verseObjects = data[str(verse)]['verseObjects']
for contextId in contextIds:
aligned_text = self.get_aligned_text(verseObjects, contextId, False)
if aligned_text:
words.append({'text': aligned_text, 'contextId': contextId})
return words
def find_english_from_combination(self, verseObjects, quote, occurrence):
greekWords = []
wordList = []
for verseObject in verseObjects:
greek = None
if 'content' in verseObject and verseObject['type'] == 'milestone':
greekWords.append(verseObject['content'])
englishWords = []
for child in verseObject['children']:
if child['type'] == 'word':
englishWords.append(child['text'])
english = ' '.join(englishWords)
found = False
for idx, word in enumerate(wordList):
if word['greek'] == verseObject['content'] and word['occurrence'] == verseObject['occurrence']:
wordList[idx]['english'] += ' ... ' + english
found = True
if not found:
wordList.append({'greek': verseObject['content'], 'english': english, 'occurrence': verseObject['occurrence']})
combinations = []
occurrences = {}
for i in range(0, len(wordList)):
greek = wordList[i]['greek']
english = wordList[i]['english']
for j in range(i, len(wordList)):
if i != j:
greek += ' '+wordList[j]['greek']
english += ' '+wordList[j]['english']
if greek not in occurrences:
occurrences[greek] = 0
occurrences[greek] += 1
combinations.append({'greek': greek, 'english': english, 'occurrence': occurrences[greek]})
for combination in combinations:
if combination['greek'] == quote and combination['occurrence'] == occurrence:
return combination['english']
return None
def find_english_from_split(self, verseObjects, quote, occurrence, isMatch=False):
wordsToMatch = quote.split(' ')
separator = ' '
needsEllipsis = False
text = ''
for index, verseObject in enumerate(verseObjects):
lastMatch = False
if verseObject['type'] == 'milestone' or verseObject['type'] == 'word':
if ((('content' in verseObject and verseObject['content'] in wordsToMatch) or ('lemma' in verseObject and verseObject['lemma'] in wordsToMatch)) and verseObject['occurrence'] == occurrence) or isMatch:
lastMatch = True
if needsEllipsis:
separator += '... '
needsEllipsis = False
if text:
text += separator
separator = ' '
if 'text' in verseObject and verseObject['text']:
text += verseObject['text']
if 'children' in verseObject and verseObject['children']:
text += self.find_english_from_split(verseObject['children'], quote, occurrence, True)
elif 'children' in verseObject and verseObject['children']:
childText = self.find_english_from_split(verseObject['children'], quote, occurrence, isMatch)
if childText:
lastMatch = True
if needsEllipsis:
separator += '... '
needsEllipsis = False
text += (separator if text else '') + childText
separator = ' '
elif text:
needsEllipsis = True
            if lastMatch and index + 1 < len(verseObjects) and verseObjects[index + 1]['type'] == "text" and text:
if separator == ' ':
separator = ''
separator += verseObjects[index + 1]['text']
return text
def get_aligned_text(self, verseObjects, contextId, isMatch=False):
        if not verseObjects or not contextId or 'quote' not in contextId or not contextId['quote']:
return ''
text = self.find_english_from_combination(verseObjects, contextId['quote'], contextId['occurrence'])
if text:
return text
text = self.find_english_from_split(verseObjects, contextId['quote'], contextId['occurrence'])
if text:
return text
_print('English not found!')
print(contextId)
def get_tw_html(self):
tw_html = '<h1><a id="tw-{0}"></a>translationWords</h1>\n\n'.format(self.book_id)
sorted_rcs = sorted(self.resource_data.keys(), key=lambda k: self.resource_data[k]['title'].lower())
for rc in sorted_rcs:
if '/tw/' not in rc:
continue
html = markdown.markdown(self.resource_data[rc]['text'])
html = self.increase_headers(html)
id_tag = '<a id="{0}"></a>'.format(self.resource_data[rc]['id'])
html = re.sub(r'<h(\d)>(.*?)</h(\d)>', r'<h\1>{0}\2</h\3>\n{1}'.format(id_tag, self.get_reference_text(rc)), html, 1, flags=re.IGNORECASE | re.MULTILINE)
html += '\n\n'
tw_html += html
return tw_html
def get_ta_html(self):
ta_html = '<h1><a id="{0}-ta-{1}"></a>translationAcademy</h1>\n\n'.format(self.lang_code, self.book_id)
sorted_rcs = sorted(self.resource_data.keys(), key=lambda k: self.resource_data[k]['title'].lower())
for rc in sorted_rcs:
if '/ta/' not in rc:
continue
if self.resource_data[rc]['text']:
html = markdown.markdown(self.resource_data[rc]['text'])
html = self.increase_headers(html)
id_tag = '<a id="{0}"></a>'.format(self.resource_data[rc]['id'])
html = re.sub(r'<h(\d)>(.*?)</h(\d)>', r'<h\1>{0}\2</h\3>{1}\n'.format(id_tag, self.get_reference_text(rc)), html, 1, flags=re.IGNORECASE | re.MULTILINE)
html += "\n\n"
ta_html += html
return ta_html
def get_reference_text(self, rc):
uses = ''
if len(self.rc_references[rc]):
references = []
for reference in self.rc_references[rc]:
if '/tn/' in reference:
parts = reference[5:].split('/')
id = 'tn-{0}-{1}-{2}'.format(self.book_id, parts[4], parts[5])
                    if parts[4] == 'front':
                        text = 'Intro'
                    elif parts[5] == 'intro':
                        text = 'Ch. {0} Notes'.format(parts[4].lstrip('0'))
                    else:
                        text = '{0}:{1}'.format(parts[4].lstrip('0'), parts[5].lstrip('0'))
references.append('<a href="#{0}">{1}</a>'.format(id, text))
if len(references):
uses = '(Linked from: ' + ', '.join(references) + ')'
return uses
def get_resource_data_from_rc_links(self, text, source_rc):
for rc in re.findall(r'rc://[A-Z0-9/_\*-]+', text, flags=re.IGNORECASE | re.MULTILINE):
parts = rc[5:].split('/')
resource = parts[1]
path = '/'.join(parts[3:])
if resource not in ['ta', 'tw']:
continue
if rc not in self.rc_references:
self.rc_references[rc] = []
self.rc_references[rc].append(source_rc)
if rc not in self.resource_data:
title = ''
t = ''
anchor_id = '{0}-{1}'.format(resource, path.replace('/', '-'))
link = '#{0}'.format(anchor_id)
file_path = os.path.join(self.working_dir, '{0}_{1}'.format(self.lang_code, resource),
'{0}.md'.format(path))
if not os.path.isfile(file_path):
file_path = os.path.join(self.working_dir, '{0}_{1}'.format(self.lang_code, resource),
'{0}/01.md'.format(path))
# if not os.path.isfile(file_path):
# if resource == 'tw':
# if path.startswith('bible/other/'):
# path2 = re.sub(r'^bible/other/', r'bible/kt/', path)
# else:
# path2 = re.sub(r'^bible/kt/', r'bible/other/', path)
# anchor_id = '{0}-{1}'.format(resource, path2.replace('/', '-'))
# link = '#{0}'.format(anchor_id)
# file_path = os.path.join(self.working_dir, '{0}_{1}'.format(self.lang_code, resource),
# '{0}.md'.format(path2))
if os.path.isfile(file_path):
t = read_file(file_path)
if resource == 'ta':
title_file = os.path.join(os.path.dirname(file_path), 'title.md')
question_file = os.path.join(os.path.dirname(file_path), 'sub-title.md')
if os.path.isfile(title_file):
title = read_file(title_file)
else:
title = self.get_first_header(t)
if os.path.isfile(question_file):
question = read_file(question_file)
question = 'This page answers the question: *{0}*\n\n'.format(question)
else:
question = ''
t = '# {0}\n\n{1}{2}'.format(title, question, t)
t = self.fix_ta_links(t, path.split('/')[0])
elif resource == 'tw':
title = self.get_first_header(t)
t = re.sub(r'\n*\s*\(See [^\n]*\)\s*\n*', '\n\n', t, flags=re.IGNORECASE | re.MULTILINE)
t = self.fix_tw_links(t, path.split('/')[1])
else:
if rc not in self.bad_links:
self.bad_links[rc] = []
self.bad_links[rc].append(source_rc)
self.resource_data[rc] = {
'rc': rc,
'link': link,
'id': anchor_id,
'title': title,
'text': t,
}
if t:
self.get_resource_data_from_rc_links(t, rc)
@staticmethod
def increase_headers(text, increase_depth=1):
if text:
for num in range(5,0,-1):
text = re.sub(r'<h{0}>\s*(.+?)\s*</h{0}>'.format(num), r'<h{0}>\1</h{0}>'.format(num+increase_depth), text, flags=re.MULTILINE)
return text
@staticmethod
def decrease_headers(text, minimum_header=1, decrease=1):
if text:
for num in range(minimum_header, minimum_header+10):
text = re.sub(r'<h{0}>\s*(.+?)\s*</h{0}>'.format(num), r'<h{0}>\1</h{0}>'.format(num-decrease if (num-decrease) <= 5 else 5), text, flags=re.MULTILINE)
return text
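    # A minimal illustration of the two header helpers above; the inputs are
    # hypothetical strings, not taken from the real resources:
    #   TnConverter.increase_headers('<h2>Title</h2>')       -> '<h3>Title</h3>'
    #   TnConverter.decrease_headers('<h6>Title</h6>', 5, 2) -> '<h4>Title</h4>'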
@staticmethod
def get_first_header(text):
lines = text.split('\n')
if len(lines):
for line in lines:
if re.match(r'<h1>', line):
return re.sub(r'<h1>(.*?)</h1>', r'\1', line)
return lines[0]
return "NO TITLE"
def fix_tn_links(self, text, chapter):
        text = re.sub(r'<a href="\.\./\.\./([^"]+)">([^<]+)</a>', r'\2', text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'href="\.\./([^"]+?)/([^"]+?)(\.md)*"', r'href="#{0}-tn-{1}-\1-\2"'.format(self.lang_code, self.book_id), text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'href="\.\./([^"]+?)(\.md)*"', r'href="#{0}-tn-{1}-\1"'.format(self.lang_code, self.book_id), text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'href="\./([^"]+?)(\.md)*"', r'href="#{0}-tn-{1}-{2}-\1"'.format(self.lang_code, self.book_id, self.pad(chapter)), text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'\n__.*\|.*', r'', text, flags=re.IGNORECASE | re.MULTILINE)
return text
def fix_tw_links(self, text, dictionary):
text = re.sub(r'\]\(\.\./([^/)]+?)(\.md)*\)', r'](rc://{0}/tw/dict/bible/{1}/\1)'.format(self.lang_code, dictionary), text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'\]\(\.\./([^)]+?)(\.md)*\)', r'](rc://{0}/tw/dict/bible/\1)'.format(self.lang_code), text, flags=re.IGNORECASE | re.MULTILINE)
return text
def fix_ta_links(self, text, manual):
text = re.sub(r'\]\(\.\./([^/)]+)/01\.md\)', r'](rc://{0}/ta/man/{1}/\1)'.format(self.lang_code, manual), text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'\]\(\.\./\.\./([^/)]+)/([^/)]+)/01\.md\)', r'](rc://{0}/ta/man/\1/\2)'.format(self.lang_code), text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'\]\(([^# :/)]+)\)', r'](rc://{0}/ta/man/{1}/\1)'.format(self.lang_code, manual), text, flags=re.IGNORECASE | re.MULTILINE)
return text
def replace_rc_links(self, text):
# Change rc://... rc links,
# 1st: [[rc://en/tw/help/bible/kt/word]] => <a href="#tw-kt-word">God's Word</a>
# 2nd: rc://en/tw/help/bible/kt/word => #tw-kt-word (used in links that are already formed)
for rc, info in self.resource_data.iteritems():
parts = rc[5:].split('/')
tail = '/'.join(parts[1:])
pattern = r'\[\[rc://[^/]+/{0}\]\]'.format(re.escape(tail))
replace = r'<a href="{0}">{1}</a>'.format(info['link'], info['title'])
text = re.sub(pattern, replace, text, flags=re.IGNORECASE | re.MULTILINE)
pattern = r'rc://[^/]+/{0}'.format(re.escape(tail))
replace = info['link']
text = re.sub(pattern, replace, text, flags=re.IGNORECASE | re.MULTILINE)
# Remove other scripture reference not in this tN
text = re.sub(r'<a[^>]+rc://[^>]+>([^>]+)</a>', r'\1', text, flags=re.IGNORECASE | re.MULTILINE)
return text
def fix_links(self, text):
# Change [[http.*]] to <a href="http\1">http\1</a>
text = re.sub(r'\[\[http([^\]]+)\]\]', r'<a href="http\1">http\1</a>', text, flags=re.IGNORECASE)
# convert URLs to links if not already
text = re.sub(r'([^">])((http|https|ftp)://[A-Za-z0-9\/\?&_\.:=#-]+[A-Za-z0-9\/\?&_:=#-])', r'\1<a href="\2">\2</a>', text, flags=re.IGNORECASE)
# URLS wth just www at the start, no http
text = re.sub(r'([^\/])(www\.[A-Za-z0-9\/\?&_\.:=#-]+[A-Za-z0-9\/\?&_:=#-])', r'\1<a href="http://\2">\2</a>', text, flags=re.IGNORECASE)
# Removes leading 0s from verse references
text = re.sub(r' 0*(\d+):0*(\d+)(-*)0*(\d*)', r' \1:\2\3\4', text, flags=re.IGNORECASE | re.MULTILINE)
return text
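    # Sketch of the conversions fix_links performs (example URLs are made up):
    #   '[[http://example.com]]' -> '<a href="http://example.com">http://example.com</a>'
    #   'see www.example.com'    -> 'see <a href="http://www.example.com">www.example.com</a>'
    #   'John 01:02'             -> 'John 1:2'   (leading zeros stripped from verse refs)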
def get_chunk_html(self, resource, chapter, verse):
# print("html: {0}-{3}-{1}-{2}".format(resource, chapter, verse, self.book_id))
path = os.path.join(self.working_dir, 'usfm_chunks', 'usfm-{0}-{1}-{2}-{3}-{4}'.
format(self.lang_code, resource, self.book_id, chapter, verse))
filename_base = '{0}-{1}-{2}-{3}'.format(resource, self.book_id, chapter, verse)
html_file = os.path.join(path, '{0}.html'.format(filename_base))
usfm_file = os.path.join(path, '{0}.usfm'.format(filename_base))
if os.path.isfile(html_file):
return read_file(html_file)
if not os.path.exists(path):
os.makedirs(path)
chunk = self.usfm_chunks[resource][str(chapter)][str(verse)]['usfm']
usfm = self.usfm_chunks[resource]['header']
if '\\c' not in chunk:
usfm += '\n\n\\c {0}\n'.format(chapter)
usfm += chunk
write_file(usfm_file, usfm)
UsfmTransform.buildSingleHtml(path, path, filename_base)
html = read_file(os.path.join(path, filename_base+'.html'))
soup = BeautifulSoup(html, 'html.parser')
header = soup.find('h1')
if header:
header.decompose()
chapter = soup.find('h2')
if chapter:
chapter.decompose()
html = ''.join(['%s' % x for x in soup.body.contents])
write_file(html_file, html)
return html
def convert_html2pdf(self):
command = """pandoc \
--pdf-engine="wkhtmltopdf" \
--template="tools/tn/generate_tn_pdf/tex/template.tex" \
--toc \
--toc-depth=2 \
-V documentclass="scrartcl" \
-V classoption="oneside" \
-V geometry='hmargin=2cm' \
-V geometry='vmargin=3cm' \
-V title="{2}" \
-V subtitle="translationNotes" \
-V logo="{6}/icon-tn.png" \
-V date="{3}" \
-V version="{4}" \
-V publisher="{8}" \
-V contributors="{9}" \
-V mainfont="Noto Serif" \
-V sansfont="Noto Sans" \
-V fontsize="13pt" \
-V urlcolor="Bittersweet" \
-V linkcolor="Bittersweet" \
-H "tools/tn/generate_tn_pdf/tex/format.tex" \
-o "{5}/{7}.pdf" \
"{5}/{7}.html"
""".format(BOOK_NUMBERS[self.book_id.lower()], self.book_id, self.book_title, self.issued, self.version, self.output_dir,
self.working_dir, self.filename_base, self.publisher, self.contributors)
_print(command)
subprocess.call(command, shell=True)
def main(ta_tag, tn_tag, tw_tag, ust_tag, ult_tag, ugnt_tag, lang_code, books, working_dir, output_dir):
"""
:param ta_tag:
:param tn_tag:
:param tw_tag:
:param ust_tag:
:param ult_tag:
:param ugnt_tag:
:param lang_code:
:param books:
:param working_dir:
:param output_dir:
:return:
"""
tn_converter = TnConverter(ta_tag, tn_tag, tw_tag, ust_tag, ult_tag, ugnt_tag, working_dir, output_dir,
lang_code, books)
tn_converter.run()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-l', '--lang', dest='lang_code', default='en', required=False, help="Language Code")
parser.add_argument('-b', '--book_id', dest='books', nargs='+', default=None, required=False, help="Bible Book(s)")
parser.add_argument('-w', '--working', dest='working_dir', default=False, required=False, help="Working Directory")
parser.add_argument('-o', '--output', dest='output_dir', default=False, required=False, help="Output Directory")
parser.add_argument('--ta-tag', dest='ta', default='v10', required=False, help="tA Tag")
parser.add_argument('--tn-tag', dest='tn', default='v13', required=False, help="tN Tag")
parser.add_argument('--tw-tag', dest='tw', default='v9', required=False, help="tW Tag")
parser.add_argument('--ust-tag', dest='ust', default='master', required=False, help="UST Tag")
parser.add_argument('--ult-tag', dest='ult', default='master', required=False, help="ULT Tag")
parser.add_argument('--ugnt-tag', dest='ugnt', default='v0.4', required=False, help="UGNT Tag")
args = parser.parse_args(sys.argv[1:])
main(args.ta, args.tn, args.tw, args.ust, args.ult, args.ugnt, args.lang_code, args.books, args.working_dir, args.output_dir)
| 49.46368 | 254 | 0.548303 | 37,886 | 0.927283 | 0 | 0 | 1,067 | 0.026115 | 0 | 0 | 8,752 | 0.214211 |
f8a59fce72ffcde75ac9e9b378c6906ab092d7dd | 2,565 | py | Python | mudi/interp/bootstrap_aucell.py | getzlab/mudi | eda170119708e59920c23a03834af915ecca24ce | [
"MIT"
] | 1 | 2021-11-04T00:08:00.000Z | 2021-11-04T00:08:00.000Z | mudi/interp/bootstrap_aucell.py | getzlab/mudi | eda170119708e59920c23a03834af915ecca24ce | [
"MIT"
] | null | null | null | mudi/interp/bootstrap_aucell.py | getzlab/mudi | eda170119708e59920c23a03834af915ecca24ce | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from tqdm import tqdm
import argparse
from pyscenic.aucell import aucell
from .aucell import create_gene_signatures
from .aucell import assign_bootstrap
def main():
parser = argparse.ArgumentParser(description='AUcell Bootstrapping.')
parser.add_argument(
'-i', '--in_file',
required=True,
help='<Required> Path to input expression matrix.',
type=str
)
parser.add_argument(
'-d', '--de_genes',
required=True,
help='<Required> Differential expression results.',
type=str
)
parser.add_argument('-o', '--out_file',
help='<Required> Output .h5 file to save results.',
required=True,
type=str
)
parser.add_argument('-n', '--niter',
help='Number of iterations.',
required=False,
default=100,
type=int
)
parser.add_argument('-s', '--subset_n',
help='Number of genes to subset.',
required=False,
default=150,
type=int
)
parser.add_argument('-w', '--n_workers',
help='Number of workers.',
required=False,
default=8,
type=int
)
parser.add_argument('-k', '--weight',
        help='Enrichment weight. Default is "t" statistic from differential expression.',
required=False,
default="t",
type=str
)
parser.add_argument('-r', '--random_seed',
help='Random seed for bootstrapping.',
required=False,
default=None
)
args = parser.parse_args()
# Set random seed
if args.random_seed is None:
np.random.seed()
else:
np.random.seed(int(args.random_seed))
# Load
exp_mtx = pd.read_parquet(args.in_file)
print(" * {} cells loaded".format(exp_mtx.shape[0]))
print(" * {} genes detected".format(exp_mtx.shape[1]))
# Load DE Genes
de_df = pd.read_csv(args.de_genes, sep='\t').set_index("gene_name")
store = pd.HDFStore(args.out_file,'a')
for n in tqdm(range(args.niter)):
gene_sigs = create_gene_signatures(de_df, n=args.subset_n, weight_idx=args.weight)
enrich_df = aucell(exp_mtx, gene_sigs, normalize=False, num_workers=args.n_workers)
store["perm{}".format(n)] = enrich_df
store.close()
# Assign bootstrapped
print(" * assigning bootstrap results")
bootstrap_df = assign_bootstrap(args.out_file, n=args.niter, norm=True)
bootstrap_df.to_csv(args.out_file.split(".h5")[0]+".tsv", sep="\t")
if __name__ == "__main__":
main()
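# Example invocation (file names are hypothetical, not shipped with the script):
#   python bootstrap_aucell.py -i expr.parquet -d de_results.tsv -o bootstrap.h5 \
#       -n 100 -s 150 -w 8 -k t -r 42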
| 28.5 | 91 | 0.617934 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 655 | 0.255361 |
f8a65542e1ebb18eabea4f393380c912f8314bfc | 696 | py | Python | network/topo-custom.py | kstough/pox | 152625fcd40fc5ddfce87b7632fd40777507205c | [
"Apache-2.0"
] | null | null | null | network/topo-custom.py | kstough/pox | 152625fcd40fc5ddfce87b7632fd40777507205c | [
"Apache-2.0"
] | null | null | null | network/topo-custom.py | kstough/pox | 152625fcd40fc5ddfce87b7632fd40777507205c | [
"Apache-2.0"
] | null | null | null | """Custom topology example
s7 ---- s8 ---- s9
/ \ / \ / \
h1 h2 h3 h4 h5 h6
"""
from mininet.topo import Topo
print('Loading MyTopo')
class MyTopo(Topo):
"Simple topology example."
def __init__(self):
Topo.__init__(self)
# Add hosts and switches
h1, h2, h3, h4, h5, h6 = (self.addHost('h' + str(i + 1)) for i in range(6))
s7, s8, s9 = (self.addSwitch('s' + str(i + 7)) for i in range(3))
# Add links
self.addLink(h1, s7)
self.addLink(h2, s7)
self.addLink(s7, s8)
self.addLink(h3, s8)
self.addLink(h4, s8)
self.addLink(s8, s9)
self.addLink(h5, s9)
self.addLink(h6, s9)
topos = {'mytopo': (lambda: MyTopo())}
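# Assuming a standard Mininet install, this topology can be loaded by name:
#   sudo mn --custom topo-custom.py --topo mytopo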
| 19.885714 | 79 | 0.570402 | 492 | 0.706897 | 0 | 0 | 0 | 0 | 0 | 0 | 195 | 0.280172 |
f8a77e8060730c4c9bc76d9c5c083f084aed00b7 | 2,383 | py | Python | test_alarms.py | ajaynema/rule-engine | 99cd5d54dd45e1223d0eec2a65bc6d5f0ef3da51 | [
"MIT"
] | null | null | null | test_alarms.py | ajaynema/rule-engine | 99cd5d54dd45e1223d0eec2a65bc6d5f0ef3da51 | [
"MIT"
] | null | null | null | test_alarms.py | ajaynema/rule-engine | 99cd5d54dd45e1223d0eec2a65bc6d5f0ef3da51 | [
"MIT"
] | null | null | null | from rule_condition import Condition
from rule_action import Action
from rule_template import RuleTemplate
from rule_engine import RuleEngine
from rule import Rule
from rule_data import Data
from rule_scope import Scope
from action_handler_send_email import SendEmailHandler
from action_handler_report_alarm import ReportAlarmHandler
def initialize(rule_engine):
    condition = Condition("{{telemetry.messageId}}", "EQ", "{{rule.messageId}}")
action = Action("REPORT_ALARM", {})
scope = Scope()
scope.add("device_type","PITLID")
rule_template = RuleTemplate(scope=scope, condition=condition, action=action)
data = Data()
data.add("messageId",301)
rule = Rule("301-message-rule",rule_template, data)
rule_engine.add_rule(rule)
action = Action("SEND_EMAIL", {})
scope = Scope()
scope.add("device_type","CAPTIS")
rule_template = RuleTemplate(scope=scope, condition=condition, action=action)
data = Data()
data.add("messageId",201)
rule = Rule("201-message-rule",rule_template, data)
rule_engine.add_rule(rule)
rule_engine.add_handler(ReportAlarmHandler())
rule_engine.add_handler(SendEmailHandler())
def test1(rule_engine):
print("===== Start Test case 1======")
telemetry = Data()
telemetry.add("device_type", "PITLID")
telemetry.add("messageId", 201)
rule_engine.process(telemetry)
print("===== End ======\n\n")
def test2(rule_engine):
print("===== Start Test case 2======")
telemetry = Data()
telemetry.add("device_type", "PITLID")
telemetry.add("messageId", 301)
rule_engine.process(telemetry)
print("===== End ======\n\n")
def test3(rule_engine):
print("===== Start test case 3 ======")
telemetry = Data()
telemetry.add("device_type", "CAPTIS")
telemetry.add("messageId", 301)
rule_engine.process(telemetry)
print("===== End ======\n\n")
def test4(rule_engine):
print("===== Start test case 4 ======")
telemetry = Data()
telemetry.add("device_type", "CAPTIS")
telemetry.add("messageId", 201)
rule_engine.process(telemetry)
print("===== End ======\n\n")
def main():
rule_engine = RuleEngine()
initialize(rule_engine)
test1(rule_engine)
test2(rule_engine)
test3(rule_engine)
test4(rule_engine)
if __name__ == "__main__":
    main()
| 29.419753 | 81 | 0.660512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 527 | 0.22115 |
f8a7cc80262619abcc2b85bf1530f105f8f8ce34 | 362 | py | Python | agri/urls.py | Bhavesh0327/Agriblock | 72015e1765214b153771dbc3868eae01fe8898b3 | [
"MIT"
] | 1 | 2020-10-01T08:28:57.000Z | 2020-10-01T08:28:57.000Z | agri/urls.py | Bhavesh0327/Agriblock | 72015e1765214b153771dbc3868eae01fe8898b3 | [
"MIT"
] | 14 | 2020-06-05T20:37:13.000Z | 2022-02-26T22:51:36.000Z | agri/urls.py | Bhavesh0327/Agriblock | 72015e1765214b153771dbc3868eae01fe8898b3 | [
"MIT"
] | 3 | 2020-01-29T04:34:28.000Z | 2020-09-30T21:48:30.000Z | from django.urls import path
from .views import *
rest_urls = list(map(lambda x: path(x[0], x[1], name=x[2]), [
('login/', login, 'login'),
('issue_asset/', issue_asset, 'issue_asset'),
('buy/', buy, 'buy'),
('get_assets/', get_assets, 'get_assets'),
('get_transactions/', get_transactions, 'get_transactions')
]))
urlpatterns = rest_urls
| 25.857143 | 63 | 0.638122 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 0.31768 |
f8a96eee4517afeca4532922b8ea2f6d38dc101a | 4,898 | py | Python | lib/utils_monai.py | octaviomtz/Growing-Neural-Cellular-Automata | a6f91661e35f7bd0d7b90ac4347f4d56c9351d0b | [
"MIT"
] | null | null | null | lib/utils_monai.py | octaviomtz/Growing-Neural-Cellular-Automata | a6f91661e35f7bd0d7b90ac4347f4d56c9351d0b | [
"MIT"
] | null | null | null | lib/utils_monai.py | octaviomtz/Growing-Neural-Cellular-Automata | a6f91661e35f7bd0d7b90ac4347f4d56c9351d0b | [
"MIT"
] | null | null | null | import os
import numpy as np
import monai
import math
import torch
import glob
from skimage.morphology import remove_small_holes, remove_small_objects
from monai.transforms import (
LoadImaged,
AddChanneld,
Orientationd,
Spacingd,
ScaleIntensityRanged,
SpatialPadd,
RandAffined,
RandCropByPosNegLabeld,
RandGaussianNoised,
RandFlipd,
RandFlipd,
RandFlipd,
CastToTyped,
)
def get_xforms_scans_or_synthetic_lesions(mode="scans", keys=("image", "label")):
"""returns a composed transform for scans or synthetic lesions."""
xforms = [
LoadImaged(keys),
AddChanneld(keys),
Orientationd(keys, axcodes="LPS"),
Spacingd(keys, pixdim=(1.25, 1.25, 5.0), mode=("bilinear", "nearest")[: len(keys)]),
]
dtype = (np.int16, np.uint8)
if mode == "synthetic":
xforms.extend([
ScaleIntensityRanged(keys[0], a_min=-1000.0, a_max=500.0, b_min=0.0, b_max=1.0, clip=True),
])
dtype = (np.float32, np.uint8)
xforms.extend([CastToTyped(keys, dtype=dtype)])
return monai.transforms.Compose(xforms)
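# Minimal usage sketch for the factory above (keys shown are the defaults):
#   transforms = get_xforms_scans_or_synthetic_lesions("synthetic", keys=("image", "label"))
#   # "synthetic" additionally clips/rescales CT intensities from [-1000, 500] HU to [0, 1]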
def get_xforms_load(mode="load", keys=("image", "label")):
"""returns a composed transform."""
xforms = [
LoadImaged(keys),
ScaleIntensityRanged(keys[0], a_min=-1000.0, a_max=500.0, b_min=0.0, b_max=1.0, clip=True),
]
    if mode == "load":
        dtype = (np.float32, np.uint8)
        xforms.extend([CastToTyped(keys, dtype=dtype)])  # cast to the output dtypes for the "load" mode
return monai.transforms.Compose(xforms)
def load_COVID19_v2(data_folder, SCAN_NAME):
    images = [f'{data_folder}/{SCAN_NAME}_ct.nii.gz']
    labels = [f'{data_folder}/{SCAN_NAME}_seg.nii.gz']
keys = ("image", "label")
files_scans = [{keys[0]: img, keys[1]: seg} for img, seg in zip(images, labels)]
return images, labels, keys, files_scans
def load_synthetic_lesions(files_scans, keys, batch_size):
transforms_load = get_xforms_scans_or_synthetic_lesions("synthetic", keys)
ds_synthetic = monai.data.CacheDataset(data=files_scans, transform=transforms_load)
loader_synthetic = monai.data.DataLoader(
ds_synthetic,
batch_size=batch_size,
shuffle=False, #should be true for training
num_workers=2,
pin_memory=torch.cuda.is_available(),
)
for idx_mini_batch, mini_batch in enumerate(loader_synthetic):
# if idx_mini_batch==6:break #OMM
BATCH_IDX=0
scan_synthetic = mini_batch['image'][BATCH_IDX][0,...].numpy()
scan_mask = mini_batch['label'][BATCH_IDX][0,...].numpy()
name_prefix = mini_batch['image_meta_dict']['filename_or_obj'][0].split('Train/')[-1].split('.nii')[0]
return name_prefix
def load_scans(files_scans, keys, batch_size, SCAN_NAME, mode="scans"):
transforms_load = get_xforms_scans_or_synthetic_lesions(mode, keys)
ds_scans = monai.data.CacheDataset(data=files_scans, transform=transforms_load)
loader_scans = monai.data.DataLoader(
ds_scans,
batch_size=batch_size,
shuffle=False, #should be true for training
num_workers=2,
pin_memory=torch.cuda.is_available(),
)
for idx_mini_batch, mini_batch in enumerate(loader_scans):
# if idx_mini_batch==1:break #OMM
BATCH_IDX=0
scan = mini_batch['image'][BATCH_IDX][0,...]
scan_mask = mini_batch['label'][BATCH_IDX][0,...]
scan_name = mini_batch['image_meta_dict']['filename_or_obj'][0].split('/')[-1].split('.nii')[0][:-3]
print(f'working on scan= {scan_name}')
assert scan_name == SCAN_NAME, 'cannot load that scan'
scan = scan.numpy() #ONLY READ ONE SCAN (WITH PREVIOUS BREAK)
scan_mask = scan_mask.numpy()
return scan, scan_mask
def load_individual_lesions(folder_source, batch_size):
# folder_source = f'/content/drive/MyDrive/Datasets/covid19/COVID-19-20/individual_lesions/{SCAN_NAME}_ct/'
files_scan = sorted(glob.glob(os.path.join(folder_source,"*.npy")))
files_mask = sorted(glob.glob(os.path.join(folder_source,"*.npz")))
keys = ("image", "label")
files = [{keys[0]: img, keys[1]: seg} for img, seg in zip(files_scan, files_mask)]
print(len(files_scan), len(files_mask), len(files))
transforms_load = get_xforms_load("load", keys)
ds_lesions = monai.data.CacheDataset(data=files, transform=transforms_load)
loader_lesions = monai.data.DataLoader(
ds_lesions,
batch_size=batch_size,
shuffle=False, #should be true for training
num_workers=2,
pin_memory=torch.cuda.is_available(),
)
return loader_lesions
def load_synthetic_texture(path_synthesis_old):
texture_orig = np.load(f'{path_synthesis_old}texture.npy.npz')
texture_orig = texture_orig.f.arr_0
    texture = texture_orig + np.abs(np.min(texture_orig))  # + .07
return texture
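# Illustrative wiring of the loaders above; the paths and scan name are
# hypothetical placeholders, not values shipped with this module:
#   images, labels, keys, files_scans = load_COVID19_v2('/data/Train', 'volume-covid19-A-0014')
#   scan, scan_mask = load_scans(files_scans, keys, 1, 'volume-covid19-A-0014')
#   texture = load_synthetic_texture('/data/synthesis/')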
| 39.5 | 111 | 0.669661 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 842 | 0.171907 |
f8ab0286f449987129eeade795e566330ff36d18 | 867 | py | Python | api/leaderboard/tests/test_views.py | individuo7/wololo-tournaments-api | 5be6284064373e99346d39c78844e454c41c501d | [
"MIT"
] | 2 | 2019-12-09T10:19:36.000Z | 2020-01-11T11:48:41.000Z | api/leaderboard/tests/test_views.py | individuo7/wololo-tournaments-api | 5be6284064373e99346d39c78844e454c41c501d | [
"MIT"
] | null | null | null | api/leaderboard/tests/test_views.py | individuo7/wololo-tournaments-api | 5be6284064373e99346d39c78844e454c41c501d | [
"MIT"
] | null | null | null | import json
import pytest
from unittest import TestCase
from rest_framework.test import APIClient
from ..models import Group, Prediction
@pytest.mark.django_db
class PredictionViewSetTest(TestCase):
def setUp(self):
self.client = APIClient()
def test_prediction_list(self):
response = self.client.get("/api/predictions/")
assert response.status_code == 200
response_json = json.loads(response.content)
assert len(response_json) == Prediction.objects.count()
@pytest.mark.django_db
class GroupViewSetTest(TestCase):
def setUp(self):
self.client = APIClient()
    def test_group_list(self):
response = self.client.get("/api/groups/")
assert response.status_code == 200
response_json = json.loads(response.content)
assert len(response_json) == Group.objects.count()
| 27.967742 | 63 | 0.704729 | 677 | 0.780854 | 0 | 0 | 723 | 0.83391 | 0 | 0 | 33 | 0.038062 |
f8ab70b04aa64ecaf4843be345aba0efec2cfc69 | 414 | py | Python | sapextractor/utils/string_matching/distances.py | aarkue/sap-meta-explorer | 613bf657bbaa72a3781a84664e5de7626516532f | [
"Apache-2.0"
] | 2 | 2021-02-10T08:09:35.000Z | 2021-05-21T06:25:34.000Z | sapextractor/utils/string_matching/distances.py | aarkue/sap-meta-explorer | 613bf657bbaa72a3781a84664e5de7626516532f | [
"Apache-2.0"
] | null | null | null | sapextractor/utils/string_matching/distances.py | aarkue/sap-meta-explorer | 613bf657bbaa72a3781a84664e5de7626516532f | [
"Apache-2.0"
] | 3 | 2021-11-22T13:27:00.000Z | 2022-03-16T22:08:51.000Z | import stringdist
def levenshtein(stru1, stru2):
"""
Measures the Levenshtein distance between two strings
Parameters
---------------
stru1
First string
stru2
Second string
Returns
---------------
levens_dist
Levenshtein distance
"""
return stringdist.levenshtein(stru1, stru2)
def apply(stru1, stru2):
return levenshtein(stru1, stru2)
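if __name__ == "__main__":
    # Quick sanity check (illustrative): the classic Levenshtein example,
    # three single-character edits turn "kitten" into "sitting".
    assert apply("kitten", "sitting") == 3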
| 16.56 | 57 | 0.601449 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 246 | 0.594203 |
f8acaa7460d221225a0bd79d4a5ca48dc091b0af | 2,873 | py | Python | components/aws/sagemaker/delete_simulation_app/src/robomaker_delete_simulation_app_spec.py | Strasser-Pablo/pipelines | a1d513eb412f3ffd44edf82af2fa7edb05c3b952 | [
"Apache-2.0"
] | 2,860 | 2018-05-24T04:55:01.000Z | 2022-03-31T13:49:56.000Z | components/aws/sagemaker/delete_simulation_app/src/robomaker_delete_simulation_app_spec.py | Strasser-Pablo/pipelines | a1d513eb412f3ffd44edf82af2fa7edb05c3b952 | [
"Apache-2.0"
] | 7,331 | 2018-05-16T09:03:26.000Z | 2022-03-31T23:22:04.000Z | components/aws/sagemaker/delete_simulation_app/src/robomaker_delete_simulation_app_spec.py | Strasser-Pablo/pipelines | a1d513eb412f3ffd44edf82af2fa7edb05c3b952 | [
"Apache-2.0"
] | 1,359 | 2018-05-15T11:05:41.000Z | 2022-03-31T09:42:09.000Z | """Specification for the RoboMaker delete simulation application component."""
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import List
from common.sagemaker_component_spec import SageMakerComponentSpec
from common.common_inputs import (
COMMON_INPUTS,
SageMakerComponentCommonInputs,
SageMakerComponentInput as Input,
SageMakerComponentOutput as Output,
SageMakerComponentBaseOutputs,
SageMakerComponentInputValidator as InputValidator,
SageMakerComponentOutputValidator as OutputValidator,
)
@dataclass(frozen=True)
class RoboMakerDeleteSimulationAppInputs(SageMakerComponentCommonInputs):
"""Defines the set of inputs for the delete simulation application component."""
arn: Input
version: Input
@dataclass
class RoboMakerDeleteSimulationAppOutputs(SageMakerComponentBaseOutputs):
"""Defines the set of outputs for the create simulation application component."""
arn: Output
class RoboMakerDeleteSimulationAppSpec(
SageMakerComponentSpec[
RoboMakerDeleteSimulationAppInputs, RoboMakerDeleteSimulationAppOutputs
]
):
INPUTS: RoboMakerDeleteSimulationAppInputs = RoboMakerDeleteSimulationAppInputs(
arn=InputValidator(
input_type=str,
required=True,
description="The Amazon Resource Name (ARN) of the simulation application.",
default="",
),
version=InputValidator(
input_type=str,
required=False,
description="The version of the simulation application.",
default=None,
),
**vars(COMMON_INPUTS),
)
OUTPUTS = RoboMakerDeleteSimulationAppOutputs(
arn=OutputValidator(
description="The Amazon Resource Name (ARN) of the simulation application."
),
)
def __init__(self, arguments: List[str]):
super().__init__(
arguments,
RoboMakerDeleteSimulationAppInputs,
RoboMakerDeleteSimulationAppOutputs,
)
@property
def inputs(self) -> RoboMakerDeleteSimulationAppInputs:
return self._inputs
@property
def outputs(self) -> RoboMakerDeleteSimulationAppOutputs:
return self._outputs
@property
def output_paths(self) -> RoboMakerDeleteSimulationAppOutputs:
return self._output_paths
| 32.280899 | 88 | 0.725374 | 1,763 | 0.613644 | 0 | 0 | 711 | 0.247477 | 0 | 0 | 941 | 0.327532 |
f8aeac4c82055a9ca0856652e23d45a0af0bcf39 | 7,595 | py | Python | ckanext-hdx_theme/ckanext/hdx_theme/util/jql_queries.py | alexandru-m-g/hdx-ckan | 647f1f23f0505fa195601245b758edcaf4d25985 | [
"Apache-2.0"
] | null | null | null | ckanext-hdx_theme/ckanext/hdx_theme/util/jql_queries.py | alexandru-m-g/hdx-ckan | 647f1f23f0505fa195601245b758edcaf4d25985 | [
"Apache-2.0"
] | null | null | null | ckanext-hdx_theme/ckanext/hdx_theme/util/jql_queries.py | alexandru-m-g/hdx-ckan | 647f1f23f0505fa195601245b758edcaf4d25985 | [
"Apache-2.0"
] | null | null | null | DOWNLOADS_PER_DATASET = '''
/* VER 1.2
computes total downloads since 2016-08-01, used to sort datasets by "most downloads" and to populate the "XXX downloads" counter on /search and on each individual dataset page.
gets all download events and counts occurrences of unique combinations of user, resource, dataset, and day, then counts the number of those combinations per dataset. In other words, if a user downloaded all 3 resources on a dataset 2 different times on the same day (6 total downloads), the result of this query would be 3. It answers the question "What is the total number of downloads of any resource on a given dataset, ignoring repeated downloads from the same user the same day?"*/
function main() {{
return Events({{
from_date: '{}',
to_date: '{}',
event_selectors: [{{event: "resource download"}}]
}})
.groupBy(["distinct_id","properties.resource id","properties.dataset id",mixpanel.numeric_bucket('time',mixpanel.daily_time_buckets)],mixpanel.reducer.count())
.groupBy(["key.2"], mixpanel.reducer.count())
.map(function(r){{
return {{
dataset_id: r.key[0],
value: r.value
}};
}});
}}
'''
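# Illustrative usage (dates are placeholders): each constant's two '{}' slots
# take a from/to date before the JQL script is posted to Mixpanel, e.g.
#   query = DOWNLOADS_PER_DATASET.format('2016-08-01', '2016-09-01')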
PAGEVIEWS_PER_DATASET = '''
/* VER 1.0
gets all page view events and counts the occurrence of each unique dataset. It answers the question "How many times has this dataset page been viewed?"*/
/* Note: as of 12-july-2017, this query fails (or at least doesn't return what is expected), because there are no dataset IDs being sent with the page view event.*/
function main() {{
return Events({{
from_date: '{}',
to_date: '{}',
event_selectors: [{{event: "page view"}}]
}})
.groupBy(["properties.dataset id"],mixpanel.reducer.count())
.map(function(r){{
return {{
dataset_id: r.key[0],
value: r.value
}};
}});
}}
'''
DOWNLOADS_PER_DATASET_PER_WEEK = '''
/* VER 1.0
selects all download events, counts unique combinations of day, user, resource, and dataset, then counts the number of those unique combinations by dataset and week. That is to say if a single user downloaded 10 different resources two times each on the same day (20 total downloads) from a single dataset in a given week, the count returned by this query would be 10*/
function main() {{
return Events({{
from_date: '{}',
to_date: '{}',
event_selectors: [{{event: "resource download"}}]
}})
.groupBy(["distinct_id","properties.resource id","properties.dataset id",mixpanel.numeric_bucket('time',mixpanel.daily_time_buckets)],mixpanel.reducer.count())
.groupBy(["key.2",(mixpanel.numeric_bucket('key.3',mixpanel.weekly_time_buckets))],mixpanel.reducer.count())
.sortAsc(function(row){{return row.key[1]}})
.map(function(r){{
return {{
dataset_id: r.key[0],
date: new Date(r.key[1]).toISOString().substring(0,10),
value: r.value
}};
}});
}}
'''
PAGEVIEWS_PER_ORGANIZATION = '''
/* VER 1.0
gets all page view events and counts unique combinations of user and org. This is to say, if a single user looked at 3 different datasets from a single organization and then looked at the organization page as well (4 total page views), the count returned by this query would be 1. It answers the question "How many individuals looked at one or more of an organization's content."*/
function main() {{
return Events({{
from_date: '{}',
to_date: '{}',
event_selectors: [{{event: "page view"}}]
}})
.groupBy(["distinct_id","properties.org id"],mixpanel.reducer.count())
.groupBy([function(row) {{return row.key.slice(1)}}],mixpanel.reducer.count())
.map(function(r){{
return {{
org_id: r.key[0],
value: r.value
}};
}});
}}
'''
DOWNLOADS_PER_ORGANIZATION = '''
/* VER 1.0
gets all download events and counts unique combinations of user and org. This is to say, if a single user downloaded 5 resources 2 times from datasets belonging to a given organization (10 total downloads), the count returned by this query would be 1. It answers the question "How many individuals downloaded one or more resources from an organization's datasets."*/
function main() {{
return Events({{
from_date: '{}',
to_date: '{}',
event_selectors: [{{event: "resource download"}}]
}})
.groupBy(["distinct_id","properties.org id"],mixpanel.reducer.count())
.groupBy([function(row) {{return row.key.slice(1)}}],mixpanel.reducer.count())
.map(function(r){{
return {{
org_id: r.key[0],
value: r.value
}};
}});
}}
'''
PAGEVIEWS_PER_ORGANIZATION_PER_WEEK = '''
/* VER 1.0
gets all page view events and counts unique combinations of week and org. This is to say, if a single user looked at 3 different datasets from a single organization and then looked at the organization page as well (4 total page views) in a given week, the count returned by this query for that week would be 4. It answers the question "How many page views did an organization's content receive in a given week."*/
function main() {{
return Events({{
from_date: '{}',
to_date: '{}',
event_selectors: [{{event: "page view", selector: 'properties["org id"] != ""'}}]
}})
.groupBy(["properties.org id",mixpanel.numeric_bucket('time',mixpanel.weekly_time_buckets)],mixpanel.reducer.count())
.sortAsc(function(row){{return row.key[1]}})
.map(function(r){{
return {{
org_id: r.key[0],
date: new Date(r.key[1]).toISOString().substring(0,10),
value: r.value
}};
}});
}}
'''
DOWNLOADS_PER_ORGANIZATION_PER_WEEK = '''
/* VER 1.0
selects all download events, counts unique combinations of day, user, resource, and org, then counts the number of those unique combinations by org and week. That is to say if a single user downloaded 10 different resources two times each on the same day (20 total downloads) from a given org in a given week, the count returned by this query would be 10*/
function main() {{
return Events({{
from_date: '{}',
to_date: '{}',
event_selectors: [{{event: "resource download"}}]
}})
.groupBy(["distinct_id","properties.resource id","properties.org id",mixpanel.numeric_bucket('time',mixpanel.daily_time_buckets)],mixpanel.reducer.count())
.groupBy(["key.2",(mixpanel.numeric_bucket('key.3',mixpanel.weekly_time_buckets))],mixpanel.reducer.count())
.sortAsc(function(row){{return row.key[1]}})
.map(function(r){{
return {{
org_id: r.key[0],
date: new Date(r.key[1]).toISOString().substring(0,10),
value: r.value
}};
}});
}}
'''
DOWNLOADS_PER_ORGANIZATION_PER_DATASET = '''
/* VER 1.0
unique (by distinct id, resource id, dataset id, org id) downloads by dataset id (24 weeks, used for top downloads on org page)*/
/*selects all download events, counts unique combinations of day, user, resource, dataset, and org, then counts the number of those unique combinations by organization and dataset. That is to say if a single user downloaded 10 different resources two times each (20 total downloads) from a single dataset in a given day (and on no other days), the count returned by this query would be 10*/
function main() {{
return Events({{
from_date: '{}',
to_date: '{}',
event_selectors: [{{event: "resource download"}}]
}})
.groupBy(["distinct_id","properties.resource id",mixpanel.numeric_bucket('time',mixpanel.daily_time_buckets),"properties.dataset id", "properties.org id"],mixpanel.reducer.count())
.groupBy([function(row) {{return row.key.slice(4)}}, function(row) {{return row.key.slice(3)}}],mixpanel.reducer.count())
.map(function(r){{
return {{
org_id: r.key[0],
dataset_id: r.key[1],
value: r.value
}};
}})
.sortDesc('value');
}}
'''
| 42.194444 | 493 | 0.688743 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,325 | 0.96445 |
f8afd1b0a1d62c5e20c07db83d59c2c494f17348 | 13,343 | py | Python | source/rttov_test/profile-datasets-py/div83/077.py | bucricket/projectMAScorrection | 89489026c8e247ec7c364e537798e766331fe569 | [
"BSD-3-Clause"
] | null | null | null | source/rttov_test/profile-datasets-py/div83/077.py | bucricket/projectMAScorrection | 89489026c8e247ec7c364e537798e766331fe569 | [
"BSD-3-Clause"
] | 1 | 2022-03-12T12:19:59.000Z | 2022-03-12T12:19:59.000Z | source/rttov_test/profile-datasets-py/div83/077.py | bucricket/projectMAScorrection | 89489026c8e247ec7c364e537798e766331fe569 | [
"BSD-3-Clause"
] | null | null | null | """
Profile ../profile-datasets-py/div83/077.py
file automatically created by prof_gen.py script
"""
self["ID"] = "../profile-datasets-py/div83/077.py"
self["Q"] = numpy.array([ 3.01408100e+00, 3.40341800e+00, 3.94918400e+00,
4.08209300e+00, 4.65722800e+00, 5.59385900e+00,
5.96882400e+00, 5.96578400e+00, 6.02361400e+00,
6.13266200e+00, 5.61561800e+00, 5.17541300e+00,
4.73120800e+00, 4.38244100e+00, 4.13858300e+00,
3.94732400e+00, 3.82339500e+00, 3.74146600e+00,
3.68389600e+00, 3.64322700e+00, 3.61384700e+00,
3.58783700e+00, 3.57544700e+00, 3.57424700e+00,
3.57814700e+00, 3.57652700e+00, 3.56295700e+00,
3.53513800e+00, 3.51090800e+00, 3.50409800e+00,
3.51977800e+00, 3.54417700e+00, 3.53987700e+00,
3.51452800e+00, 3.48830800e+00, 3.47651800e+00,
3.48119800e+00, 3.49274800e+00, 3.50137800e+00,
3.50850800e+00, 3.52815800e+00, 3.56910700e+00,
3.61097700e+00, 3.71830600e+00, 3.89014500e+00,
3.89370500e+00, 3.85655500e+00, 3.87925500e+00,
3.95365400e+00, 4.00917400e+00, 4.16308300e+00,
4.52899900e+00, 5.18923300e+00, 6.26899100e+00,
7.92153700e+00, 1.00846000e+01, 1.24507400e+01,
1.47046800e+01, 1.67259200e+01, 1.84705600e+01,
1.96999100e+01, 2.08678600e+01, 2.23955000e+01,
2.44190000e+01, 2.71340600e+01, 3.11191300e+01,
3.80605500e+01, 4.93422700e+01, 7.03837500e+01,
1.05079000e+02, 1.47056400e+02, 1.80304500e+02,
2.22368500e+02, 2.73803000e+02, 3.33293900e+02,
4.05331600e+02, 4.94623200e+02, 6.04438400e+02,
7.36045800e+02, 8.86931700e+02, 1.05317000e+03,
1.23561100e+03, 1.43888700e+03, 1.66709600e+03,
1.91848200e+03, 2.17581600e+03, 2.42905500e+03,
2.65031700e+03, 2.83038600e+03, 2.95328200e+03,
2.87015800e+03, 2.97041000e+03, 3.22605900e+03,
3.13244700e+03, 3.04276300e+03, 2.95681100e+03,
2.87439400e+03, 2.79532400e+03, 2.71943500e+03,
2.64656700e+03, 2.57657400e+03])
self["P"] = numpy.array([ 5.00000000e-03, 1.61000000e-02, 3.84000000e-02,
7.69000000e-02, 1.37000000e-01, 2.24400000e-01,
3.45400000e-01, 5.06400000e-01, 7.14000000e-01,
9.75300000e-01, 1.29720000e+00, 1.68720000e+00,
2.15260000e+00, 2.70090000e+00, 3.33980000e+00,
4.07700000e+00, 4.92040000e+00, 5.87760000e+00,
6.95670000e+00, 8.16550000e+00, 9.51190000e+00,
1.10038000e+01, 1.26492000e+01, 1.44559000e+01,
1.64318000e+01, 1.85847000e+01, 2.09224000e+01,
2.34526000e+01, 2.61829000e+01, 2.91210000e+01,
3.22744000e+01, 3.56505000e+01, 3.92566000e+01,
4.31001000e+01, 4.71882000e+01, 5.15278000e+01,
5.61260000e+01, 6.09895000e+01, 6.61253000e+01,
7.15398000e+01, 7.72396000e+01, 8.32310000e+01,
8.95204000e+01, 9.61138000e+01, 1.03017000e+02,
1.10237000e+02, 1.17778000e+02, 1.25646000e+02,
1.33846000e+02, 1.42385000e+02, 1.51266000e+02,
1.60496000e+02, 1.70078000e+02, 1.80018000e+02,
1.90320000e+02, 2.00989000e+02, 2.12028000e+02,
2.23442000e+02, 2.35234000e+02, 2.47408000e+02,
2.59969000e+02, 2.72919000e+02, 2.86262000e+02,
3.00000000e+02, 3.14137000e+02, 3.28675000e+02,
3.43618000e+02, 3.58966000e+02, 3.74724000e+02,
3.90893000e+02, 4.07474000e+02, 4.24470000e+02,
4.41882000e+02, 4.59712000e+02, 4.77961000e+02,
4.96630000e+02, 5.15720000e+02, 5.35232000e+02,
5.55167000e+02, 5.75525000e+02, 5.96306000e+02,
6.17511000e+02, 6.39140000e+02, 6.61192000e+02,
6.83667000e+02, 7.06565000e+02, 7.29886000e+02,
7.53628000e+02, 7.77790000e+02, 8.02371000e+02,
8.27371000e+02, 8.52788000e+02, 8.78620000e+02,
9.04866000e+02, 9.31524000e+02, 9.58591000e+02,
9.86067000e+02, 1.01395000e+03, 1.04223000e+03,
1.07092000e+03, 1.10000000e+03])
self["CO2"] = numpy.array([ 376.9289, 376.9267, 376.9235, 376.9185, 376.9102, 376.8979,
376.8878, 376.8858, 376.8987, 376.9157, 376.9379, 376.967 ,
377.0032, 377.0483, 377.0994, 377.1415, 377.1806, 377.2196,
377.2566, 377.2936, 377.3406, 377.4116, 377.4967, 377.5807,
377.6576, 377.7326, 377.7957, 377.8617, 377.9087, 377.9647,
378.0677, 378.1777, 378.3247, 378.4827, 378.5467, 378.5667,
378.6177, 378.7377, 378.8647, 379.1987, 379.5707, 379.8876,
380.1336, 380.3936, 380.7865, 381.1975, 381.5395, 381.8335,
382.1145, 382.2825, 382.4564, 382.5853, 382.705 , 382.8186,
382.922 , 383.0401, 383.2172, 383.4014, 383.6806, 383.9769,
384.2704, 384.569 , 384.8374, 385.0826, 385.3235, 385.554 ,
385.7733, 385.971 , 386.1468, 386.2794, 386.3942, 386.4873,
386.57 , 386.6411, 386.7101, 386.7782, 386.8426, 386.902 ,
386.948 , 386.9845, 387.009 , 387.0222, 387.0443, 387.0946,
387.2027, 387.3873, 387.5952, 387.7785, 388.0156, 388.3936,
388.8278, 389.5115, 390.0167, 390.5358, 390.9229, 391.1729,
391.2791, 391.3101, 391.3399, 391.3685, 391.3959])
self["CO"] = numpy.array([ 0.4988025 , 0.4837694 , 0.4549212 , 0.4091083 , 0.3466384 ,
0.2724125 , 0.2529705 , 0.3556049 , 0.3436299 , 0.3118041 ,
0.2360657 , 0.1332083 , 0.06529029, 0.04917818, 0.04630671,
0.04344553, 0.04531133, 0.04861692, 0.05090421, 0.05133911,
0.05167021, 0.04959452, 0.04651663, 0.04325405, 0.04006766,
0.03693337, 0.03511297, 0.03345558, 0.03285768, 0.03228319,
0.03236039, 0.03244319, 0.03296888, 0.03355668, 0.03477178,
0.03638897, 0.03844617, 0.04135936, 0.04467554, 0.05192792,
0.06128788, 0.07390404, 0.09132347, 0.1136636 , 0.1258555 ,
0.1400065 , 0.1455584 , 0.1438834 , 0.1412344 , 0.1340595 ,
0.1269835 , 0.1253564 , 0.1252244 , 0.1268982 , 0.131127 ,
0.1360076 , 0.1437272 , 0.1521708 , 0.1615463 , 0.1718538 ,
0.1819464 , 0.192305 , 0.2043344 , 0.2183397 , 0.2317967 ,
0.2427394 , 0.2529074 , 0.2590382 , 0.2647594 , 0.2679568 ,
0.2707162 , 0.2713961 , 0.2720315 , 0.2725024 , 0.2734588 ,
0.2752834 , 0.2772678 , 0.279404 , 0.2813058 , 0.2830447 ,
0.2835061 , 0.2839138 , 0.2842594 , 0.2847725 , 0.2852078 ,
0.2855673 , 0.2859427 , 0.2860628 , 0.2855026 , 0.2845641 ,
0.2836705 , 0.2825133 , 0.28085 , 0.2789235 , 0.2766895 ,
0.2739117 , 0.2711194 , 0.2683139 , 0.265496 , 0.262667 ,
0.2598288 ])
self["T"] = numpy.array([ 197.478, 204.431, 217.518, 232.024, 240.06 , 241.488,
237.615, 229.648, 222.059, 220.002, 221.016, 222.004,
224.079, 226.704, 229.151, 230.726, 232.026, 233.278,
234.389, 235.37 , 236.325, 237.27 , 238.285, 239.125,
239.562, 239.408, 239.074, 238.623, 237.788, 236.618,
235.366, 234.287, 233.649, 232.492, 231.082, 230.178,
230.011, 230.065, 229.721, 228.916, 227.9 , 226.942,
226.202, 225.266, 224.187, 223.613, 222.971, 222.094,
221.208, 220.74 , 220.537, 220.284, 219.887, 219.382,
218.843, 218.328, 217.832, 217.287, 216.618, 215.885,
215.461, 215.505, 215.981, 216.806, 217.881, 219.112,
220.38 , 221.707, 223.156, 224.773, 226.616, 228.678,
230.824, 232.99 , 235.092, 237.21 , 239.397, 241.632,
243.87 , 246.04 , 248.097, 250.061, 251.959, 253.803,
255.582, 257.221, 258.709, 259.968, 261.008, 261.803,
262.785, 263.963, 265.207, 265.207, 265.207, 265.207,
265.207, 265.207, 265.207, 265.207, 265.207])
self["N2O"] = numpy.array([ 0.00386999, 0.00306999, 0.00246999, 0.00239999, 0.00190999,
0.00132999, 0.00145999, 0.00159999, 0.00196999, 0.00296998,
0.00447997, 0.00696996, 0.01015995, 0.01528993, 0.02053991,
0.02663989, 0.03359987, 0.04270984, 0.05238981, 0.06860975,
0.0840797 , 0.1006696 , 0.1174596 , 0.1335495 , 0.1466895 ,
0.1590294 , 0.1708994 , 0.1853993 , 0.2003893 , 0.2148792 ,
0.2275592 , 0.2346592 , 0.2415291 , 0.2481891 , 0.2543991 ,
0.2593091 , 0.2640791 , 0.2687091 , 0.272769 , 0.276369 ,
0.279779 , 0.282829 , 0.285849 , 0.2887289 , 0.2917289 ,
0.2947289 , 0.2977089 , 0.3006588 , 0.3035488 , 0.3063688 ,
0.3090987 , 0.3116986 , 0.3141584 , 0.316448 , 0.3185375 ,
0.3203968 , 0.321996 , 0.3226453 , 0.3232446 , 0.323774 ,
0.3242236 , 0.3245932 , 0.3248727 , 0.3250421 , 0.3251012 ,
0.3250999 , 0.3250976 , 0.325094 , 0.3250871 , 0.3250758 ,
0.3250622 , 0.3250514 , 0.3250377 , 0.325021 , 0.3250016 ,
0.3249782 , 0.3249492 , 0.3249135 , 0.3248707 , 0.3248216 ,
0.3247676 , 0.3247083 , 0.3246422 , 0.324568 , 0.3244863 ,
0.3244026 , 0.3243203 , 0.3242484 , 0.3241898 , 0.3241499 ,
0.3241769 , 0.3241443 , 0.3240612 , 0.3240916 , 0.3241208 ,
0.3241487 , 0.3241755 , 0.3242012 , 0.3242259 , 0.3242496 ,
0.3242723 ])
self["O3"] = numpy.array([ 0.4650166 , 0.3722967 , 0.25801 , 0.3565255 , 0.5657804 ,
0.8310854 , 1.275442 , 1.941668 , 2.751043 , 3.509408 ,
4.226426 , 4.982314 , 5.571684 , 5.950054 , 6.172944 ,
6.354005 , 6.459525 , 6.548995 , 6.644776 , 6.735845 ,
6.790695 , 6.827586 , 6.854715 , 6.862505 , 6.844556 ,
6.799816 , 6.769176 , 6.719646 , 6.545257 , 6.195258 ,
5.70893 , 5.274831 , 4.976922 , 4.668544 , 4.294395 ,
3.893666 , 3.566958 , 3.348398 , 3.139029 , 2.84919 ,
2.451261 , 2.077143 , 1.888043 , 1.731914 , 1.409245 ,
1.303475 , 1.292365 , 1.148696 , 0.9682352 , 0.8581456 ,
0.728699 , 0.5758034 , 0.4443007 , 0.3535498 , 0.2938877 ,
0.2527305 , 0.2229512 , 0.1999561 , 0.178937 , 0.1568281 ,
0.1353713 , 0.1190565 , 0.1065156 , 0.09549277, 0.08532788,
0.07889094, 0.07722016, 0.07431793, 0.06841198, 0.05924887,
0.04912847, 0.04243675, 0.03862361, 0.03721341, 0.03721039,
0.03694232, 0.03630673, 0.03556769, 0.03509875, 0.03521114,
0.03578028, 0.03636931, 0.03676782, 0.03692234, 0.03683649,
0.03651109, 0.03619457, 0.03624089, 0.03710717, 0.03880576,
0.04014684, 0.03936333, 0.03682412, 0.03682758, 0.03683089,
0.03683407, 0.03683711, 0.03684003, 0.03684284, 0.03684553,
0.03684811])
self["CH4"] = numpy.array([ 0.3005231, 0.2351152, 0.1864963, 0.1572414, 0.1760812,
0.1975499, 0.2255547, 0.2531465, 0.2866593, 0.31921 ,
0.3721429, 0.4494437, 0.5428354, 0.6650551, 0.7814038,
0.8957425, 0.9909472, 1.054496 , 1.115956 , 1.182386 ,
1.245675 , 1.300155 , 1.349285 , 1.396355 , 1.424055 ,
1.448405 , 1.471845 , 1.501435 , 1.532305 , 1.537285 ,
1.542625 , 1.548345 , 1.554454 , 1.551955 , 1.549555 ,
1.547285 , 1.545195 , 1.543345 , 1.549515 , 1.556025 ,
1.562874 , 1.570084 , 1.577644 , 1.617574 , 1.657994 ,
1.700273 , 1.730303 , 1.751553 , 1.771163 , 1.779313 ,
1.787803 , 1.793322 , 1.798201 , 1.802289 , 1.805406 ,
1.808492 , 1.811087 , 1.813773 , 1.81631 , 1.818906 ,
1.821484 , 1.824092 , 1.826669 , 1.829225 , 1.83172 ,
1.834133 , 1.83637 , 1.838309 , 1.84005 , 1.841406 ,
1.842689 , 1.843857 , 1.84505 , 1.846294 , 1.847644 ,
1.84909 , 1.850584 , 1.85209 , 1.853525 , 1.854883 ,
1.856113 , 1.857182 , 1.858302 , 1.859615 , 1.861771 ,
1.864195 , 1.866575 , 1.868445 , 1.869743 , 1.871178 ,
1.873019 , 1.875702 , 1.877623 , 1.879713 , 1.881288 ,
1.882308 , 1.882753 , 1.882902 , 1.883045 , 1.883183 ,
1.883315 ])
self["CTP"] = 500.0
self["CFRACTION"] = 0.0
self["IDG"] = 0
self["ISH"] = 0
self["ELEVATION"] = 0.0
self["S2M"]["T"] = 265.207
self["S2M"]["Q"] = 2576.57411645
self["S2M"]["O"] = 0.0368481128494
self["S2M"]["P"] = 876.30151
self["S2M"]["U"] = 0.0
self["S2M"]["V"] = 0.0
self["S2M"]["WFETC"] = 100000.0
self["SKIN"]["SURFTYPE"] = 0
self["SKIN"]["WATERTYPE"] = 1
self["SKIN"]["T"] = 265.207
self["SKIN"]["SALINITY"] = 35.0
self["SKIN"]["FOAM_FRACTION"] = 0.0
self["SKIN"]["FASTEM"] = numpy.array([ 3. , 5. , 15. , 0.1, 0.3])
self["ZENANGLE"] = 0.0
self["AZANGLE"] = 0.0
self["SUNZENANGLE"] = 0.0
self["SUNAZANGLE"] = 0.0
self["LATITUDE"] = 60.824
self["GAS_UNITS"] = 2
self["BE"] = 0.0
self["COSBK"] = 0.0
self["DATE"] = numpy.array([2006, 12, 10])
self["TIME"] = numpy.array([0, 0, 0])
| 57.512931 | 92 | 0.566739 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 462 | 0.034625 |
f8b003880b2b0c817a1e02d7db8475b7ea56eada | 2,624 | py | Python | xos/synchronizers/monitoring_channel/templates/sflow_pub_sub/sflow_sub_records.py | xmaruto/mcord | 3678a3d10c3703c2b73f396c293faebf0c82a4f4 | [
"Apache-2.0"
] | null | null | null | xos/synchronizers/monitoring_channel/templates/sflow_pub_sub/sflow_sub_records.py | xmaruto/mcord | 3678a3d10c3703c2b73f396c293faebf0c82a4f4 | [
"Apache-2.0"
] | null | null | null | xos/synchronizers/monitoring_channel/templates/sflow_pub_sub/sflow_sub_records.py | xmaruto/mcord | 3678a3d10c3703c2b73f396c293faebf0c82a4f4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import fnmatch
import logging
class sflow_sub_record:
def __init__(self,scheme,app_id,app_ip,app_port,subscription_info,sub_info_filter):
logging.debug("* Updating subscription_info ")
self.scheme = scheme
self.app_id = app_id
self.ipaddress = app_ip
self.portno = app_port
self.subscription_info = subscription_info
self.sub_info_filter = sub_info_filter
sflow_sub_database = []
def add_sflow_sub_record(record):
logging.info("* inside %s",add_sflow_sub_record.__name__)
if not sflow_sub_database:
logging.debug("* -----------List is EMpty -------------")
sflow_sub_database.append(record)
logging.debug("* Subscription is sucessful")
return "Subscription is sucessful \n"
for x in sflow_sub_database:
if (record.ipaddress == x.ipaddress) and (record.portno == x.portno) :
logging.warning("* entry already exists\n")
return "entry already exists \n"
sflow_sub_database.append(record)
return "Subscription is sucessful \n"
def delete_sflow_sub_record(ip,port):
logging.info("* inside %s",delete_sflow_sub_record.__name__)
Flag = False
for x in sflow_sub_database:
if (ip == x.ipaddress) and (port == x.portno) :
sflow_sub_database.remove(x)
Flag = True
logging.debug("* Un-Subscription is sucessful")
return "Un-Subscription is sucessful \n"
if not Flag :
err_str = "No subscription exists with target: udp://" + ip + ":" + str(port) + "\n"
logging.error(err_str)
raise Exception (err_str)
def print_sflow_sub_records():
logging.info("* inside %s",print_sflow_sub_records.__name__)
for obj in sflow_sub_database:
logging.debug("* ------------------------------------------------")
logging.debug("* scheme:%s",obj.scheme)
logging.debug("* app_id:%s",obj.app_id)
logging.debug("* portno:%s",obj.portno )
logging.debug("* ipaddress:%s",obj.ipaddress)
logging.debug("* portno:%s",obj.portno)
logging.debug("* subscription_info:%s",obj.subscription_info)
logging.debug("* sub_info_filter:%s",obj.sub_info_filter)
logging.debug("* ------------------------------------------------")
def get_sflow_sub_records(notif_subscription_info):
logging.info("* inside %s",get_sflow_sub_records.__name__)
sub_list=[]
for obj in sflow_sub_database:
if obj.subscription_info == notif_subscription_info:
sub_list.append(obj)
return sub_list
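# Minimal usage sketch (illustrative; the scheme, app id, target address,
# port and subscription name below are made up, not defaults of this module):
#
#   rec = sflow_sub_record("udp", "app1", "10.0.0.5", 6343, "flow", None)
#   add_sflow_sub_record(rec)
#   matching = get_sflow_sub_records("flow")
#   delete_sflow_sub_record("10.0.0.5", 6343)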
| 41 | 91 | 0.62843 | 388 | 0.147866 | 0 | 0 | 0 | 0 | 0 | 0 | 616 | 0.234756 |
f8b2fa45ad6aa0b508fe2d6b2b81fce66e566e4c | 3,148 | py | Python | scripts/gcorr/run_xfaster.py | SPIDER-CMB/xfaster | 1b8e56d775f2c3a8693d1372ae461392c21da7ca | [
"MIT"
] | 1 | 2021-03-25T14:15:44.000Z | 2021-03-25T14:15:44.000Z | scripts/gcorr/run_xfaster.py | annegambrel/xfaster | 03d5a2971d3cc19ae360d78995e3575f3f678d6e | [
"MIT"
] | 7 | 2021-04-20T23:34:38.000Z | 2021-08-24T00:00:53.000Z | scripts/gcorr/run_xfaster.py | SPIDER-CMB/xfaster | 1b8e56d775f2c3a8693d1372ae461392c21da7ca | [
"MIT"
] | 1 | 2021-05-18T16:43:54.000Z | 2021-05-18T16:43:54.000Z | """
A script to run XFaster for gcorr calculation. Called by iterate.py.
"""
import os
import xfaster as xf
import argparse as ap
from configparser import ConfigParser
# Change XFaster options here to suit your purposes
opts = dict(
likelihood=False,
residual_fit=False,
foreground_fit=False,
# change options below for your purposes
tbeb=True,
bin_width=25,
lmin=2,
lmax=500,
)
# Change submit options here to fit your system
submit_opts = dict(nodes=1, ppn=1, mem=6, omp_threads=10, wallt=4)
P = ap.ArgumentParser()
P.add_argument("--gcorr-config", help="The config file for gcorr computation")
P.add_argument("-f", "--first", default=0, type=int, help="First sim index to run")
P.add_argument("-n", "--num", default=1, type=int, help="Number of sims to run")
P.add_argument(
"-o", "--output", default="xfaster_gcal", help="Name of output subdirectory"
)
P.add_argument(
"--no-gcorr",
dest="gcorr",
default=True,
action="store_false",
help="Don't apply a g-gcorrection",
)
P.add_argument(
"--reload-gcorr", default=False, action="store_true", help="Reload the gcorr factor"
)
P.add_argument("--check-point", default="bandpowers", help="XFaster checkpoint")
P.add_argument(
"--no-submit", dest="submit", action="store_false", help="Don't submit, run locally"
)
P.add_argument(
"--omp",
default=None,
type=int,
help="Number of omp threads, if submit. Overwrites value in config file",
)
args = P.parse_args()
# start by loading up gcorr config file and parsing it
assert os.path.exists(args.gcorr_config), "Missing config file {}".format(
args.gcorr_config
)
g_cfg = ConfigParser()
g_cfg.read(args.gcorr_config)
# set all user-specific xfaster opts
for k, v in g_cfg["xfaster_opts"].items():
opts[k] = v
null = g_cfg.getboolean("gcorr_opts", "null")
tags = g_cfg["gcorr_opts"]["map_tags"].split(",")
# null tests should use noise sims. signal shouldn't.
if null:
opts["noise_type"] = g_cfg["xfaster_opts"]["noise_type"]
opts["sim_data_components"] = ["signal", "noise"]
else:
opts["noise_type"] = None
opts["sim_data_components"] = ["signal"]
opts["output_root"] = os.path.join(g_cfg["gcorr_opts"]["output_root"], args.output)
# update opts with command line args
opts["apply_gcorr"] = args.gcorr
opts["reload_gcorr"] = args.reload_gcorr
opts["checkpoint"] = args.check_point
seeds = list(range(args.first, args.first + args.num))
for tag in tags:
opts["sim_data"] = True
opts["output_tag"] = tag
opts["gcorr_file"] = os.path.abspath(
os.path.join(
g_cfg["gcorr_opts"]["output_root"],
"xfaster_gcal",
tag,
"gcorr_{}_total.npz".format(tag),
)
)
opts["data_subset"] = os.path.join(
g_cfg["gcorr_opts"]["data_subset"], "*{}".format(tag)
)
if args.omp is not None:
submit_opts["omp_threads"] = args.omp
if args.submit:
opts.update(**submit_opts)
for s in seeds:
opts["sim_index_default"] = s
if args.submit:
xf.xfaster_submit(**opts)
else:
xf.xfaster_run(**opts)
| 28.618182 | 88 | 0.661055 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,313 | 0.41709 |
f8b309e5e28868df32235aef95ba627c1ca50e48 | 1,888 | py | Python | tests/examples/c_decisions/tests_decisions.py | MSGP117/acc-cosc-1336-spring-2022-MSGP117 | 46fdfa5da8f8eb887d2c79fe205b8a0064d6903d | [
"MIT"
] | null | null | null | tests/examples/c_decisions/tests_decisions.py | MSGP117/acc-cosc-1336-spring-2022-MSGP117 | 46fdfa5da8f8eb887d2c79fe205b8a0064d6903d | [
"MIT"
] | null | null | null | tests/examples/c_decisions/tests_decisions.py | MSGP117/acc-cosc-1336-spring-2022-MSGP117 | 46fdfa5da8f8eb887d2c79fe205b8a0064d6903d | [
"MIT"
] | 1 | 2022-02-12T03:50:32.000Z | 2022-02-12T03:50:32.000Z | import unittest
from src.examples.c_decisions.decisions import (
    get_letter_grade,
    is_letter_consonant,
    logical_op_precedence,
    num_is_not_in_range_or,
    number_is_in_range_and,
    number_is_not_in_range,
    test_config,
)
class Test_Config(unittest.TestCase):
def test_configuration(self):
self.assertEqual(True, test_config())
def test_get_letter_grade(self):
self.assertEqual('A', get_letter_grade(90))
self.assertEqual('B', get_letter_grade(85))
self.assertEqual('C', get_letter_grade(75))
self.assertEqual('D', get_letter_grade(65))
self.assertEqual('F', get_letter_grade(55))
self.assertEqual('Invalid Number', get_letter_grade(-10))
def test_logical_op_precedence(self):
self.assertEqual(True, logical_op_precedence(True, False, True))
self.assertEqual(False, logical_op_precedence(False, False, False))
def test_number_is_in_range(self):
self.assertEqual(True, number_is_in_range_and(20, 100, 50))
self.assertEqual(False, number_is_in_range_and(20, 100, 0))
self.assertEqual(True, number_is_in_range_and(20, 100, 100))
self.assertEqual(False, number_is_in_range_and(20, 100, 101))
def test_number_is_not_in_range(self):
self.assertEqual(True, number_is_not_in_range(20, 100, 101))
self.assertEqual(True, number_is_not_in_range(20, 100, 50))
def test_num_is_not_in_range_or(self):
self.assertEqual(True, num_is_not_in_range_or(20, 100, 101))
self.assertEqual(False, num_is_not_in_range_or(20, 100, 50))
def test_is_letter_consonant(self):
self.assertEqual(False, is_letter_consonant('a'))
self.assertEqual(True, is_letter_consonant('z')) | 46.04878 | 150 | 0.743114 | 1,516 | 0.802966 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.019597 |
f8b46b6ff72b56497017d6f934899df81b96c51a | 32 | py | Python | badge/__init__.py | krisgesling/swag-badge-skill | 7640264880d8ae14f9c49c3ba40c6e388e58dcaf | [
"Apache-2.0"
] | 1 | 2021-01-24T01:42:15.000Z | 2021-01-24T01:42:15.000Z | badge/__init__.py | krisgesling/swag-badge-skill | 7640264880d8ae14f9c49c3ba40c6e388e58dcaf | [
"Apache-2.0"
] | null | null | null | badge/__init__.py | krisgesling/swag-badge-skill | 7640264880d8ae14f9c49c3ba40c6e388e58dcaf | [
"Apache-2.0"
] | null | null | null | from .client import MQTT_Client
| 16 | 31 | 0.84375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f8b5ae0ccaf93b252b0712f888f73a49ece568a6 | 23,824 | py | Python | easy_server/_server_file.py | andy-maier/secureserveraccess | 24f4817b2066401451840b3c7b308e1792eb3e60 | [
"Apache-2.0"
] | 1 | 2021-03-29T22:09:47.000Z | 2021-03-29T22:09:47.000Z | easy_server/_server_file.py | andy-maier/secureserveraccess | 24f4817b2066401451840b3c7b308e1792eb3e60 | [
"Apache-2.0"
] | 49 | 2021-03-29T20:13:28.000Z | 2021-05-01T10:38:19.000Z | easy_server/_server_file.py | andy-maier/secureserveraccess | 24f4817b2066401451840b3c7b308e1792eb3e60 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Support for server files.
"""
from __future__ import absolute_import, print_function
import os
import yaml
import jsonschema
from ._server import Server
from ._vault_file import VaultFile
__all__ = ['ServerFile', 'ServerFileException',
'ServerFileOpenError', 'ServerFileFormatError',
'ServerFileUserDefinedFormatError',
'ServerFileUserDefinedSchemaError',
'ServerFileGroupUserDefinedFormatError',
'ServerFileGroupUserDefinedSchemaError']
# JSON schema describing the structure of the server files
SERVER_FILE_SCHEMA = {
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "JSON schema for easy-server server files",
"definitions": {},
"type": "object",
"required": [
"servers",
],
"additionalProperties": False,
"properties": {
"vault_file": {
"type": "string",
"description":
"Path name of vault file. Relative path names are relative to "
"the directory of the server file",
},
"servers": {
"type": "object",
"description": "The servers in the server file",
"additionalProperties": False,
"patternProperties": {
"^[a-zA-Z0-9_]+$": {
"type": "object",
"description": "Nickname of the server",
"required": [
"description",
],
"additionalProperties": False,
"properties": {
"description": {
"type": "string",
"description": "Short description of the server",
},
"contact_name": {
"type": "string",
"description":
"Name of technical contact for the server",
},
"access_via": {
"type": "string",
"description":
"Short reminder on the "
"network/firewall/proxy/vpn used to access the "
"server",
},
"user_defined": {
"type": "object",
"description":
"User-defined properties of the server. "
"This object can have an arbitrary "
"user-defined structure",
},
},
},
},
},
"server_groups": {
"type": "object",
"description": "The server groups in the server file",
"additionalProperties": False,
"patternProperties": {
"^[a-zA-Z0-9_]+$": {
"type": "object",
"description": "Nickname of the server group",
"required": [
"description",
"members",
],
"additionalProperties": False,
"properties": {
"description": {
"type": "string",
"description":
"Short description of the server group",
},
"members": {
"type": "array",
"description":
"List of members of the server group. "
"Those can be servers or other server groups.",
"items": {
"type": "string",
"description":
"Nickname of server or server group in "
"this file",
},
},
"user_defined": {
"type": "object",
"description":
"User-defined properties of the server group. "
"This object can have an arbitrary "
"user-defined structure",
},
},
},
},
},
"default": {
"type": "string",
"description": "Nickname of default server or server group",
},
},
}
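# Example of a minimal server file that satisfies SERVER_FILE_SCHEMA
# (illustrative; all nicknames and values are made up):
#
#   vault_file: vault.yml
#   servers:
#     srv1:
#       description: Development server
#   server_groups:
#     dev:
#       description: All development servers
#       members:
#         - srv1
#   default: dev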
class ServerFileException(Exception):
"""
Abstract base exception for errors related to server files.
Derived from :exc:`py:Exception`.
"""
pass
class ServerFileOpenError(ServerFileException):
"""
Exception indicating that a server file was not found or cannot
be accessed due to a permission error.
Derived from :exc:`ServerFileException`.
"""
pass
class ServerFileFormatError(ServerFileException):
"""
Exception indicating that an existing server file has some
issue with the format of its file content.
Derived from :exc:`ServerFileException`.
"""
pass
class ServerFileUserDefinedFormatError(ServerFileException):
"""
Exception indicating that the values of the user-defined portion of server
items in a server file do not match the JSON schema defined for them.
Derived from :exc:`ServerFileException`.
"""
pass
class ServerFileUserDefinedSchemaError(ServerFileException):
"""
Exception indicating that the JSON schema for validating the values of the
user-defined portion of server items in a server file is not a valid JSON
schema.
Derived from :exc:`ServerFileException`.
"""
pass
class ServerFileGroupUserDefinedFormatError(ServerFileException):
"""
Exception indicating that the values of the user-defined portion of group
items in a server file do not match the JSON schema defined for them.
Derived from :exc:`ServerFileException`.
"""
pass
class ServerFileGroupUserDefinedSchemaError(ServerFileException):
"""
Exception indicating that the JSON schema for validating the values of the
user-defined portion of group items in a server file is not a valid JSON
schema.
Derived from :exc:`ServerFileException`.
"""
pass
class ServerFile(object):
"""
A server file that specifies the openly accessible portion of the servers
and optionally references a vault file that specifies the secret portion
of the servers.
An object of this class is tied to a single server file.
The server file is loaded when this object is initialized. If
the server file specifies a vault file, the vault file is also
loaded at that point.
Optionally, the user-defined portions of the server and group items in
the server file, and the server items in the vault file can be validated
against user-provided JSON schema.
For a description of the file formats, see sections
:ref:`Server files` and :ref:`Vault files`.
"""
def __init__(
self, filepath, password=None, use_keyring=True, use_prompting=True,
verbose=False, user_defined_schema=None,
group_user_defined_schema=None, vault_server_schema=None):
"""
Parameters:
filepath (:term:`unicode string`):
Path name of the server file. Relative path names are
relative to the current directory.
password (:term:`unicode string`):
Password for the vault file. `None` indicates that no password has
been provided.
use_keyring (bool):
Enable the use of the keyring service for retrieving and storing the
password of the vault file.
use_prompting (bool):
Enable the use of password prompting for getting the password of
the vault file.
verbose (bool):
Print additional messages. Note that the password prompt (if needed)
is displayed regardless of verbose mode.
user_defined_schema (:term:`JSON schema`):
JSON schema for validating the values of the user-defined portion
of server items when loading the server file.
`None` means no schema validation takes place for these items.
group_user_defined_schema (:term:`JSON schema`):
JSON schema for validating the values of the user-defined portion
of group items when loading the server file.
`None` means no schema validation takes place for these items.
vault_server_schema (:term:`JSON schema`):
JSON schema for validating the values of the server items when
loading the vault file.
`None` means no schema validation takes place for these items.
Raises:
ServerFileOpenError: Error opening server file
ServerFileFormatError: Invalid server file format
ServerFileUserDefinedFormatError: Invalid format of user-defined
portion of server items in the server file
ServerFileUserDefinedSchemaError: Invalid JSON schema for validating
user-defined portion of server items in the server file
ServerFileGroupUserDefinedFormatError: Invalid format of user-defined
portion of group items in the server file
ServerFileGroupUserDefinedSchemaError: Invalid JSON schema for
validating user-defined portion of group items in the server file
VaultFileOpenError: Error with opening the vault file
VaultFileDecryptError: Error with decrypting the vault file
VaultFileFormatError: Invalid vault file format
VaultFileServerFormatError: Invalid format of server items in the
vault file
VaultFileServerSchemaError: Invalid JSON schema for validating server
items in the vault file
"""
self._filepath = os.path.abspath(filepath)
self._user_defined_schema = user_defined_schema
self._group_user_defined_schema = group_user_defined_schema
self._vault_server_schema = vault_server_schema
self._data = _load_server_file(
filepath, user_defined_schema, group_user_defined_schema)
self._vault_file = self._data['vault_file']
if self._vault_file:
if not os.path.isabs(self._vault_file):
self._vault_file = os.path.join(
os.path.dirname(self._filepath), self._vault_file)
self._vault = VaultFile(
self._vault_file, password=password, use_keyring=use_keyring,
use_prompting=use_prompting, verbose=verbose,
server_schema=vault_server_schema)
else:
self._vault = None
# The following attributes are for faster access
self._servers = self._data['servers']
self._server_groups = self._data['server_groups']
self._default = self._data['default']
@property
def filepath(self):
"""
:term:`unicode string`: Absolute path name of the server file.
"""
return self._filepath
@property
def vault_file(self):
"""
:term:`unicode string`: Absolute path name of the vault file specified
in the server file, or `None` if no vault file was specified.
Vault files specified with a relative path name are relative to the
directory of the server file.
"""
return self._vault_file
@property
def user_defined_schema(self):
"""
:term:`JSON schema`: JSON schema for validating the values of the
user-defined portion of server items in the server file, or `None`.
"""
return self._user_defined_schema
@property
def group_user_defined_schema(self):
"""
:term:`JSON schema`: JSON schema for validating the values of the
user-defined portion of group items in the server file, or `None`.
"""
return self._group_user_defined_schema
@property
def vault_server_schema(self):
"""
:term:`JSON schema`: JSON schema for validating the values of the
server items in the vault file, or `None`.
"""
return self._vault_server_schema
def is_vault_file_encrypted(self):
"""
Test whether the vault file is in the encrypted state.
If the server file does not specify a vault file, `None` is returned.
Returns:
bool: Boolean indicating whether the vault file is in the encrypted
state, or `None` if no vault file was specified.
"""
if self._vault is None:
return None
return self._vault.is_encrypted()
def get_server(self, nickname):
"""
Get server for a given server nickname.
Parameters:
nickname (:term:`unicode string`): Server nickname.
Returns:
:class:`~easy_server.Server`:
Server with the specified nickname.
Raises:
:exc:`py:KeyError`: Nickname not found
"""
try:
server_dict = self._servers[nickname]
except KeyError:
new_exc = KeyError(
"Server with nickname {!r} not found in server "
"file {!r}".
format(nickname, self._filepath))
new_exc.__cause__ = None
raise new_exc # KeyError
if self._vault:
try:
secrets_dict = self._vault.get_secrets(nickname)
except KeyError:
secrets_dict = None
else:
secrets_dict = None
return Server(nickname, server_dict, secrets_dict)
def list_servers(self, nickname):
"""
List the servers for a given server or server group nickname.
Parameters:
nickname (:term:`unicode string`): Server or server group nickname.
Returns:
list of :class:`~easy_server.Server`:
List of servers.
Raises:
:exc:`py:KeyError`: Nickname not found
"""
if nickname in self._servers:
return [self.get_server(nickname)]
if nickname in self._server_groups:
sd_list = list() # of Server objects
sd_nick_list = list() # of server nicknames
sg_item = self._server_groups[nickname]
for member_nick in sg_item['members']:
member_sds = self.list_servers(member_nick)
for sd in member_sds:
if sd.nickname not in sd_nick_list:
sd_nick_list.append(sd.nickname)
sd_list.append(sd)
return sd_list
raise KeyError(
"Server or server group with nickname {!r} not found in server "
"definition file {!r}".
format(nickname, self._filepath))
def list_default_servers(self):
"""
List the servers for the default server or group.
An omitted 'default' element in the server file results in
an empty list.
Returns:
list of :class:`~easy_server.Server`:
List of servers.
"""
if self._default is None:
return []
return self.list_servers(self._default)
def list_all_servers(self):
"""
List all servers.
Returns:
list of :class:`~easy_server.Server`:
List of servers.
"""
return [self.get_server(nickname) for nickname in self._servers]
def _load_server_file(
filepath, user_defined_schema=None, group_user_defined_schema=None):
"""
Load the server file, validate its format and default some
optional elements.
Returns:
dict: Python dict representing the file content.
Raises:
ServerFileOpenError: Error opening server file
ServerFileFormatError: Invalid server file content
ServerFileUserDefinedFormatError: Invalid format of user-defined
portion of server items in the server file
ServerFileUserDefinedSchemaError: Invalid JSON schema for validating
user-defined portion of server items in the server file
ServerFileGroupUserDefinedFormatError: Invalid format of user-defined
portion of group items in the server file
ServerFileGroupUserDefinedSchemaError: Invalid JSON schema for
validating user-defined portion of group items in the server file
"""
# Load the server file (YAML)
try:
with open(filepath, 'r') as fp:
data = yaml.safe_load(fp)
except (OSError, IOError) as exc:
new_exc = ServerFileOpenError(
"Cannot open server file: {fn}: {exc}".
format(fn=filepath, exc=exc))
new_exc.__cause__ = None
raise new_exc # ServerFileOpenError
except yaml.YAMLError as exc:
new_exc = ServerFileFormatError(
"Invalid YAML syntax in server file {fn}: {exc}".
format(fn=filepath, exc=exc))
new_exc.__cause__ = None
raise new_exc # ServerFileFormatError
# Schema validation of server file content
try:
jsonschema.validate(data, SERVER_FILE_SCHEMA)
# Raises jsonschema.exceptions.SchemaError if JSON schema is invalid
except jsonschema.exceptions.ValidationError as exc:
if exc.absolute_path:
elem_str = "element '{}'". \
format('.'.join(str(e) for e in exc.absolute_path))
else:
elem_str = 'top-level element'
new_exc = ServerFileFormatError(
"Invalid format in server file {fn}: Validation "
"failed on {elem}: {exc}".
format(fn=filepath, elem=elem_str, exc=exc))
new_exc.__cause__ = None
raise new_exc # ServerFileFormatError
# Establish defaults for optional top-level elements
if 'server_groups' not in data:
data['server_groups'] = {}
if 'default' not in data:
data['default'] = None
if 'vault_file' not in data:
data['vault_file'] = None
# Schema validation of user-defined portion of server items
if user_defined_schema:
for server_nick, server_item in data['servers'].items():
user_defined = server_item.get('user_defined', None)
if user_defined is None:
new_exc = ServerFileUserDefinedFormatError(
"Missing user_defined element for server {srv} "
"in server file {fn}".
format(srv=server_nick, fn=filepath))
new_exc.__cause__ = None
raise new_exc # ServerFileUserDefinedFormatError
try:
jsonschema.validate(user_defined, user_defined_schema)
except jsonschema.exceptions.SchemaError as exc:
new_exc = ServerFileUserDefinedSchemaError(
"Invalid JSON schema for validating user-defined portion "
"of server items in server file: {exc}".
format(exc=exc))
new_exc.__cause__ = None
raise new_exc # ServerFileUserDefinedSchemaError
except jsonschema.exceptions.ValidationError as exc:
if exc.absolute_path:
elem_str = "element '{}'". \
format('.'.join(str(e) for e in exc.absolute_path))
else:
elem_str = "top-level of user-defined item"
new_exc = ServerFileUserDefinedFormatError(
"Invalid format in user-defined portion of item for "
"server {srv} in server file {fn}: "
"Validation failed on {elem}: {exc}".
format(srv=server_nick, fn=filepath, elem=elem_str,
exc=exc))
new_exc.__cause__ = None
raise new_exc # ServerFileUserDefinedFormatError
# Schema validation of user-defined portion of group items
if group_user_defined_schema:
for group_nick, group_item in data['server_groups'].items():
user_defined = group_item.get('user_defined', None)
if user_defined is None:
new_exc = ServerFileGroupUserDefinedFormatError(
"Missing user_defined element for group {grp} "
"in server file {fn}".
format(grp=group_nick, fn=filepath))
new_exc.__cause__ = None
raise new_exc # ServerFileGroupUserDefinedFormatError
try:
jsonschema.validate(user_defined, group_user_defined_schema)
except jsonschema.exceptions.SchemaError as exc:
new_exc = ServerFileGroupUserDefinedSchemaError(
"Invalid JSON schema for validating user-defined portion "
"of group items in server file: {exc}".
format(exc=exc))
new_exc.__cause__ = None
raise new_exc # ServerFileGroupUserDefinedSchemaError
except jsonschema.exceptions.ValidationError as exc:
if exc.absolute_path:
elem_str = "element '{}'". \
format('.'.join(str(e) for e in exc.absolute_path))
else:
elem_str = "top-level of user-defined item"
new_exc = ServerFileGroupUserDefinedFormatError(
"Invalid format in user-defined portion of item for "
"group {grp} in server file {fn}: "
"Validation failed on {elem}: {exc}".
format(grp=group_nick, fn=filepath, elem=elem_str,
exc=exc))
new_exc.__cause__ = None
raise new_exc # ServerFileGroupUserDefinedFormatError
# Check dependencies in the file
server_nicks = list(data['servers'].keys())
group_nicks = list(data['server_groups'].keys())
all_nicks = server_nicks + group_nicks
default_nick = data['default']
if default_nick and default_nick not in all_nicks:
new_exc = ServerFileFormatError(
"Default nickname '{n}' not found in servers or groups in "
"server file {fn}".
format(n=default_nick, fn=filepath))
new_exc.__cause__ = None
raise new_exc # ServerFileFormatError
for group_nick in group_nicks:
sg_item = data['server_groups'][group_nick]
for member_nick in sg_item['members']:
if member_nick not in all_nicks:
new_exc = ServerFileFormatError(
"Nickname '{n}' in server group '{g}' not found in "
"servers or groups in server file {fn}".
format(n=member_nick, g=group_nick, fn=filepath))
new_exc.__cause__ = None
raise new_exc # ServerFileFormatError
return data
| 37.815873 | 80 | 0.580423 | 11,117 | 0.46663 | 0 | 0 | 1,276 | 0.053559 | 0 | 0 | 12,496 | 0.524513 |
f8b628877707fa6132110ae047367879935e3268 | 444 | py | Python | mezzanine_pagedown/defaults.py | eht16/mezzanine-pagedown | e6090f5713097e664e85b279a4a17febb73b00a1 | [
"BSD-2-Clause"
] | 94 | 2015-01-28T15:46:02.000Z | 2020-11-02T12:56:15.000Z | mezzanine_pagedown/defaults.py | eht16/mezzanine-pagedown | e6090f5713097e664e85b279a4a17febb73b00a1 | [
"BSD-2-Clause"
] | 56 | 2015-04-05T03:18:41.000Z | 2021-08-29T00:50:57.000Z | mezzanine_pagedown/defaults.py | eht16/mezzanine-pagedown | e6090f5713097e664e85b279a4a17febb73b00a1 | [
"BSD-2-Clause"
] | 38 | 2015-08-26T08:10:12.000Z | 2021-06-11T19:36:31.000Z | from mezzanine.conf import register_setting
register_setting(
name="PAGEDOWN_SERVER_SIDE_PREVIEW",
description="Render previews on the server using the same "
"converter that generates the actual pages.",
editable=False,
default=False,
)
register_setting(
name="PAGEDOWN_MARKDOWN_EXTENSIONS",
description="A tuple specifying enabled python-markdown extensions.",
editable=False,
default=(),
)
| 24.666667 | 73 | 0.725225 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 207 | 0.466216 |
f8b864241fa615529ec19943c7bf44bcc5c33cfb | 4,274 | py | Python | solutions/2021/day5/day5.py | teije01/adventofcode | 2742985f8437e9784e7ec5430e3846a755b5d386 | [
"MIT"
] | null | null | null | solutions/2021/day5/day5.py | teije01/adventofcode | 2742985f8437e9784e7ec5430e3846a755b5d386 | [
"MIT"
] | null | null | null | solutions/2021/day5/day5.py | teije01/adventofcode | 2742985f8437e9784e7ec5430e3846a755b5d386 | [
"MIT"
] | null | null | null | """
--- Day 5: Hydrothermal Venture ---
You come across a field of hydrothermal vents on the ocean floor! These vents constantly produce
large, opaque clouds, so it would be best to avoid them if possible.
They tend to form in lines; the submarine helpfully produces a list of nearby lines of vents (your
puzzle input) for you to review. For example:
0,9 -> 5,9
8,0 -> 0,8
9,4 -> 3,4
2,2 -> 2,1
7,0 -> 7,4
6,4 -> 2,0
0,9 -> 2,9
3,4 -> 1,4
0,0 -> 8,8
5,5 -> 8,2
Each line of vents is given as a line segment in the format x1,y1 -> x2,y2 where x1,y1 are the
coordinates of one end the line segment and x2,y2 are the coordinates of the other end. These line
segments include the points at both ends. In other words:
An entry like 1,1 -> 1,3 covers points 1,1, 1,2, and 1,3.
An entry like 9,7 -> 7,7 covers points 9,7, 8,7, and 7,7.
For now, only consider horizontal and vertical lines: lines where either x1 = x2 or y1 = y2.
So, the horizontal and vertical lines from the above list would produce the following diagram:
.......1..
..1....1..
..1....1..
.......1..
.112111211
..........
..........
..........
..........
222111....
In this diagram, the top left corner is 0,0 and the bottom right corner is 9,9. Each position is
shown as the number of lines which cover that point or . if no line covers that point. The top-left
pair of 1s, for example, comes from 2,2 -> 2,1; the very bottom row is formed by the overlapping
lines 0,9 -> 5,9 and 0,9 -> 2,9.
To avoid the most dangerous areas, you need to determine the number of points where at least two
lines overlap. In the above example, this is anywhere in the diagram with a 2 or larger - a total
of 5 points.
Consider only horizontal and vertical lines. At how many points do at least two lines overlap?
--- Part Two ---
Unfortunately, considering only horizontal and vertical lines doesn't give you the full picture;
you need to also consider diagonal lines.
Because of the limits of the hydrothermal vent mapping system, the lines in your list will only
ever be horizontal, vertical, or a diagonal line at exactly 45 degrees. In other words:
An entry like 1,1 -> 3,3 covers points 1,1, 2,2, and 3,3.
An entry like 9,7 -> 7,9 covers points 9,7, 8,8, and 7,9.
Considering all lines from the above example would now produce the following diagram:
1.1....11.
.111...2..
..2.1.111.
...1.2.2..
.112313211
...1.2....
..1...1...
.1.....1..
1.......1.
222111....
You still need to determine the number of points where at least two lines overlap. In the above
example, this is still anywhere in the diagram with a 2 or larger - now a total of 12 points.
Consider all of the lines. At how many points do at least two lines overlap?
"""
import numpy as np
class Line:
"""Line representation"""
def __init__(self, x1: int, y1: int, x2: int, y2: int):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
@classmethod
def from_puzzle_input(cls, line: str):
x1y1, x2y2 = line.split(" -> ")
return cls(*map(int, x1y1.split(",")), *map(int, x2y2.split(",")))
@property
def xmin(self):
return min(self.x1, self.x2)
@property
def xmax(self):
return max(self.x1, self.x2)
@property
def ymin(self):
return min(self.y1, self.y2)
@property
def ymax(self):
return max(self.y1, self.y2)
if __name__ == "__main__":
with open("solutions/2021/day5/input.txt", "r") as f:
lines = [Line.from_puzzle_input(line) for line in f.readlines()]
straight_field = np.zeros((1000, 1000), dtype=int)
diagonal_field = straight_field.copy()
for line in lines:
field_index = (slice(line.ymin, line.ymax + 1), slice(line.xmin, line.xmax + 1))
if line.x1 == line.x2 or line.y1 == line.y2:
straight_field[field_index] += 1
else:
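            # A 45-degree segment runs along the main diagonal of its
            # bounding box when x and y increase together ("identity"
            # orientation) and along the anti-diagonal otherwise; reversing
            # the rows of the identity matrix with a [::-1] slice selects
            # the anti-diagonal cells.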
is_identity = (line.x2 - line.x1 > 0) == (line.y2 - line.y1 > 0)
diag_slice = slice(None, None, None if is_identity else -1)
diagonal_field[field_index] += np.diag(np.ones((line.xmax - line.xmin + 1), dtype=int))[diag_slice]
field = straight_field + diagonal_field
print(f"Answer 1: {np.sum(straight_field > 1)}")
print(f"Answer 2: {np.sum(field > 1)}")
| 31.426471 | 111 | 0.653486 | 649 | 0.151848 | 0 | 0 | 434 | 0.101544 | 0 | 0 | 2,869 | 0.671268 |
f8b88aa220e765ebad5849f646d7fa3f22e031df | 1,316 | py | Python | sort_array_by_parity_ii_alt.py | tusharsadhwani/leetcode | a17a8a7587c5654f05fcd13ae7cdf47263ab2ea8 | [
"MIT"
] | 6 | 2021-05-21T01:10:42.000Z | 2021-12-16T16:12:30.000Z | sort_array_by_parity_ii_alt.py | tusharsadhwani/leetcode | a17a8a7587c5654f05fcd13ae7cdf47263ab2ea8 | [
"MIT"
] | null | null | null | sort_array_by_parity_ii_alt.py | tusharsadhwani/leetcode | a17a8a7587c5654f05fcd13ae7cdf47263ab2ea8 | [
"MIT"
] | null | null | null | from typing import Callable
class Solution:
def sortArrayByParityII(self, nums: list[int]) -> list[int]:
# Crucial lesson: 2 pointer approach doesn't necessarily mean
# the pointers should start at opposite ends of the array.
evens, odds = 0, 1
end = len(nums)
while evens < end and odds < end:
if nums[evens] % 2 == 0:
evens += 2
elif nums[odds] % 2 != 0:
odds += 2
else:
nums[evens], nums[odds] = nums[odds], nums[evens]
evens += 2
odds += 2
return nums
tests = [
(
([4, 2, 5, 7],),
[4, 5, 2, 7],
),
(
([2, 3],),
[2, 3],
),
(
([2, 3, 1, 1, 4, 0, 0, 4, 3, 3],),
[2, 3, 4, 1, 4, 3, 0, 1, 0, 3],
),
]
def validator(
sortArrayByParityII: Callable[[list[int]], list[int]],
inputs: tuple[list[int]],
expected: list[int],
) -> None:
nums, = inputs
output = sortArrayByParityII(nums)
sorted_output = sorted(output)
sorted_expected = sorted(expected)
assert sorted_output == sorted_expected, (sorted_output, sorted_expected)
for index, value in enumerate(output):
assert index % 2 == value % 2, (index % 2, value % 2)
| 24.830189 | 77 | 0.50152 | 596 | 0.452888 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.090426 |
f8ba3fd25de458d4df99e4ca579804ce22c8dbdc | 112 | py | Python | database.py | anthonypang99/TakeNote | 7da668d48b72ee825f3fb9f503f8d4d6fe2ff644 | [
"MIT"
] | null | null | null | database.py | anthonypang99/TakeNote | 7da668d48b72ee825f3fb9f503f8d4d6fe2ff644 | [
"MIT"
] | null | null | null | database.py | anthonypang99/TakeNote | 7da668d48b72ee825f3fb9f503f8d4d6fe2ff644 | [
"MIT"
] | null | null | null | from flask_sqlalchemy import SQLAlchemy
# Initialize the Flask-SQLAlchemy extension instance
db = SQLAlchemy()
| 22.4 | 52 | 0.830357 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.464286 |
f8ba6e975ac143461562e6b418e4b0a0aee2b105 | 4,285 | py | Python | alfred/Alfred.alfredpreferences/workflows/user.workflow.99DE3F5C-7CB4-4E0B-9195-7782AADC167B/converter/constants.py | karamfil/saphe | f1c56dcf11613808e07f462d50f20881aef7fbdc | [
"MIT"
] | 2 | 2019-09-17T10:20:20.000Z | 2020-02-10T11:46:33.000Z | alfred/Alfred.alfredpreferences/workflows/user.workflow.99DE3F5C-7CB4-4E0B-9195-7782AADC167B/converter/constants.py | karamfil/saphe | f1c56dcf11613808e07f462d50f20881aef7fbdc | [
"MIT"
] | null | null | null | alfred/Alfred.alfredpreferences/workflows/user.workflow.99DE3F5C-7CB4-4E0B-9195-7782AADC167B/converter/constants.py | karamfil/saphe | f1c56dcf11613808e07f462d50f20881aef7fbdc | [
"MIT"
] | null | null | null | import re
UNITS_XML_FILE = 'poscUnits22.xml'
UNITS_PICKLE_FILE = 'units.pickle'
OUTPUT_DECIMALS = 6
SOURCE_PATTERN = r'^(?P<quantity>.*[\d.]+)\s*(?P<from>[^\d\s]([^\s]*|.+?))'
SOURCE_RE = re.compile(SOURCE_PATTERN + '$', re.IGNORECASE | re.VERBOSE)
FULL_PATTERN = r'(\s+as|\s+to|\s+in|\s*>|\s*=)\s(?P<to>[^\d\s][^\s]*)$'
FULL_RE = re.compile(SOURCE_PATTERN + FULL_PATTERN + '$',
re.IGNORECASE | re.VERBOSE)
ICONS = {
'length': 'scale6.png',
'height': 'scale6.png',
'distance': 'scale6.png',
'area': 'scaling1.png',
'time': 'round27.png',
'thermodynamic temperature': 'thermometer19.png',
'volume': 'measuring3.png',
'mass': 'weight4.png',
'velocity': 'timer18.png',
'level of power intensity': 'treble2.png',
'digital storage': 'binary9.png',
}
DEFAULT_ICON = 'ruler9.png'
ANNOTATION_REPLACEMENTS = {
'litre': ('liter', 'liters', 'l'),
'metre': ('meter', 'm'),
    'm2': ('m^2', 'meter^2',),
'dm': ('decimeter',),
'dm2': ('dm^2', 'decimeter^2',),
'dm3': ('dm^3', 'decimeter^3',),
'cm': ('centimeter',),
'cm2': ('cm^2', 'centimeter^2',),
'cm3': ('cm^3', 'centimeter^3',),
'mm': ('milimeter',),
'mm2': ('mm^2', 'milimeter^2'),
'mm3': ('mm^3', 'milimeter^3'),
'degF': ('f', 'fahrenheit', 'farhenheit', 'farenheit'),
'degC': ('c', 'celsius', 'celcius'),
'byte': ('B', 'bytes',),
'bit': ('b', 'bits',),
'kbyte': ('KB', 'kB', 'kb', 'kilobyte',),
'Mbyte': ('MB', 'megabyte',),
'ozm': ('oz', 'ounce', 'ounces'),
'lbm': ('lb', 'lbs', 'pound', 'pounds'),
'miPh': ('mph',),
'ftPh': ('fps',),
'foot': ("'",),
'square': ('sq',),
'ft2': ('ft^2', 'foot^2'),
'ft3': ('ft^3', 'foot^3'),
'inch': ('inches', '"'),
'inch2': ('inch^2', 'square inch'),
'inch3': ('inch^3', 'cube inch'),
    'flozUS': ('flus', 'floz', 'fl', 'fl oz'),
'flozUK': ('fluk', 'fl oz uk', 'fl uk'),
}
EXPANSIONS = {
'foot': ('feet', 'ft'),
'mili': ('milli',),
'meter': ('metres', 'meter', 'meters'),
'^2': ('sq', 'square'),
'^3': ('cube', 'cubed'),
}
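# Expand every alias list with spelling variants: each alias that contains an
# EXPANSIONS key also gets copies with that key replaced by each expansion
# (e.g. an alias "foot^2" additionally yields "feet^2" and "ft^2").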
for annotation, items in ANNOTATION_REPLACEMENTS.items():
items = set(items)
items.add(annotation)
    for key, expansions in EXPANSIONS.items():
for expansion in expansions:
for item in set(items):
items.add(item.replace(key, expansion))
ANNOTATION_REPLACEMENTS[annotation] = sorted(items)
# Mostly for language specific stuff, defaulting to US for now since I'm not
# easily able to detect the language in a fast way from within alfred
LOCALIZED_UNITS = (
('metre', 'meter'),
('litre', 'liter'),
)
def localize(input_):
for k, v in LOCALIZED_UNITS:
if k in input_:
return input_.replace(k, v)
return input_
RIGHT_TRIMABLE_OPERATORS = '/+*- (.^'
FUNCTION_ALIASES = {
'deg': 'degrees',
'rad': 'radians',
'ln': 'log',
'arccos': 'acos',
'arcsin': 'asin',
'arctan': 'atan',
}
FUNCTION_ALIASES_RE = re.compile(r'\b(%s)\(' % '|'.join(FUNCTION_ALIASES))
def FUNCTION_ALIASES_REPLACEMENT(match):
return FUNCTION_ALIASES[match.group(1)] + '('
FOOT_INCH_RE = re.compile(r'''(\d+)'(\d+)"?''')
FOOT_INCH_REPLACE = r'(\1*12)+\2 inch'
POWER_UNIT_RE = re.compile(r'([a-z])\^([23])\b')
POWER_UNIT_REPLACEMENT = r'\g<1>\g<2>'
PRE_EVAL_REPLACEMENTS = {
'^': '**',
}
# Known safe math functions
MATH_FUNCTIONS = [
# Number theoretic and representation functions
'ceil',
'copysign',
'fabs',
'factorial',
'floor',
'fmod',
'frexp',
'isinf',
'isnan',
'ldexp',
'modf',
'trunc',
# Power and logarithmic functions
'exp',
'expm1',
'log',
'log1p',
'log10',
'pow',
'sqrt',
# Trigonometric functions
'acos',
'asin',
'atan',
'atan2',
'cos',
'hypot',
'sin',
'tan',
# Angular conversion functions
'degrees',
'radians',
# Hyperbolic functions
'acosh',
'asinh',
'atanh',
'cosh',
'sinh',
'tanh',
# Special functions
'erf',
'erfc',
'gamma',
'lgamma',
# Missing functions won't break anything but won't do anything either
'this_function_definitely_does_not_exist',
]
| 22.792553 | 76 | 0.54189 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,141 | 0.49965 |
f8bab3dc683ade4a29b7e25ca1a99e68f49ac849 | 462 | py | Python | 30/00/1.py | pylangstudy/201707 | c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6 | [
"CC0-1.0"
] | null | null | null | 30/00/1.py | pylangstudy/201707 | c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6 | [
"CC0-1.0"
] | 46 | 2017-06-30T22:19:07.000Z | 2017-07-31T22:51:31.000Z | 30/00/1.py | pylangstudy/201707 | c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6 | [
"CC0-1.0"
] | null | null | null | def RetT(): print('True'); return True
def RetF(): print('False'); return False
print('----- or -----')
RetT() or RetF()
RetF() or RetT() # The second operand is evaluated only when the first is false.
print('----- and -----')
RetT() and RetF() # The second operand is evaluated only when the first is true.
RetF() and RetT() # The second operand is evaluated only when the first is true.
print('----- not -----')
print(not True and True)
print(False or not True)
print(not True == True)
#print(True == not True) #SyntaxError: invalid syntax
print(True == (not True))
| 25.666667 | 53 | 0.645022 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 344 | 0.562092 |
f8bc9f66b7afd106a2727f0668012f3210c6ab27 | 1,548 | py | Python | tests/test_click.py | maxmouchet/mtoolbox | 977f3af1e3fe6e6403a26fcca3a30a1285eb28c2 | [
"MIT"
] | null | null | null | tests/test_click.py | maxmouchet/mtoolbox | 977f3af1e3fe6e6403a26fcca3a30a1285eb28c2 | [
"MIT"
] | 2 | 2020-07-19T21:03:34.000Z | 2020-09-11T14:56:34.000Z | tests/test_click.py | maxmouchet/mtoolbox | 977f3af1e3fe6e6403a26fcca3a30a1285eb28c2 | [
"MIT"
] | null | null | null | from enum import Enum
from pathlib import Path
import click
from mbox.click import EnumChoice, ParsedDate, PathParam
class AF(Enum):
IPv4 = 4
IPv6 = 6
def test_enum_choice(runner):
@click.command()
@click.option("--af", type=EnumChoice(AF, int))
def cmd(af):
click.echo(af)
result = runner.invoke(cmd, ["--af", "6"])
assert result.exit_code == 0
assert result.output == "AF.IPv6\n"
result = runner.invoke(cmd, ["--help"])
assert result.exit_code == 0
assert "--af [4|6]" in result.output
def test_path_param(runner):
@click.command()
@click.option("--path", type=PathParam())
def cmd(path):
click.echo(path)
click.echo(isinstance(path, Path))
result = runner.invoke(cmd, ["--path", "directory"])
assert result.exit_code == 0
assert result.output == "directory\nTrue\n"
def test_parsed_date(runner):
@click.command()
@click.option("--date", type=ParsedDate())
def cmd(date):
click.echo(date)
result = runner.invoke(cmd, ["--date", "21 february 2019 at noon"])
assert result.exit_code == 0
assert result.output == "2019-02-21 12:00:00\n"
settings = {"RETURN_AS_TIMEZONE_AWARE": True, "TIMEZONE": "UTC"}
@click.command()
@click.option(
"--date",
type=ParsedDate(settings=settings),
)
def cmd2(date):
click.echo(date.tzinfo)
result = runner.invoke(cmd2, ["--date", "21 february 2019 at noon"])
assert result.exit_code == 0
assert result.output == "UTC\n"
| 24.1875 | 72 | 0.623385 | 41 | 0.026486 | 0 | 0 | 519 | 0.335271 | 0 | 0 | 247 | 0.159561 |