content (stringlengths 7–928k) | avg_line_length (float64 3.5–33.8k) | max_line_length (int64 6–139k) | alphanum_fraction (float64 0.08–0.96) | licenses (sequence) | repository_name (stringlengths 7–104) | path (stringlengths 4–230) | size (int64 7–928k) | lang (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|
"""
Structured information on a coordinate point.
"""
# this file was auto-generated
from datetime import date, datetime
from fairgraph.base_v3 import EmbeddedMetadata, IRI
from fairgraph.fields import Field
class CoordinatePoint(EmbeddedMetadata):
"""
Structured information on a coordinate point.
"""
type = ["https://openminds.ebrains.eu/sands/CoordinatePoint"]
context = {
"schema": "http://schema.org/",
"kg": "https://kg.ebrains.eu/api/instances/",
"vocab": "https://openminds.ebrains.eu/vocab/",
"terms": "https://openminds.ebrains.eu/controlledTerms/",
"core": "https://openminds.ebrains.eu/core/"
}
fields = [
Field("coordinates", "openminds.core.QuantitativeValue", "vocab:coordinates", multiple=True, required=True,
doc="Pair or triplet of numbers defining a location in a given coordinate space."),
Field("coordinate_space", ["openminds.sands.CommonCoordinateSpace", "openminds.sands.CustomCoordinateSpace"], "vocab:coordinateSpace", multiple=False, required=True,
doc="Two or three dimensional geometric setting."),
]
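# --- Usage sketch (added for illustration; not part of the auto-generated file) ---
# EmbeddedMetadata subclasses are normally instantiated with keyword arguments
# named after the fields declared above. The QuantitativeValue import path and
# constructor arguments below are assumptions, not taken from this file.
def _example_coordinate_point(space):
    from fairgraph.openminds.core import QuantitativeValue  # assumed location
    return CoordinatePoint(
        coordinates=[QuantitativeValue(value=1.0), QuantitativeValue(value=2.5)],
        coordinate_space=space,  # a CommonCoordinateSpace or CustomCoordinateSpace instance
    )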
| 34.205882 | 173 | 0.675838 | [
"Apache-2.0"
] | HumanBrainProject/fairgraph | fairgraph/openminds/sands/miscellaneous/coordinate_point.py | 1,163 | Python |
# coding=utf-8
from pyecharts.chart import Chart
def kline_tooltip_formatter(params):
text = (
params[0].seriesName
+ "<br/>"
+ "- open:"
+ params[0].data[1]
+ "<br/>"
+ "- close:"
+ params[0].data[2]
+ "<br/>"
+ "- lowest:"
+ params[0].data[3]
+ "<br/>"
+ "- highest:"
+ params[0].data[4]
)
return text
class Kline(Chart):
"""
<<< Kline (candlestick) chart >>>
Red for rising, blue for falling
"""
def __init__(self, title="", subtitle="", **kwargs):
super(Kline, self).__init__(title, subtitle, **kwargs)
def add(self, *args, **kwargs):
self.__add(*args, **kwargs)
return self
def __add(self, name, x_axis, y_axis, **kwargs):
"""
:param name:
Series name, used for the tooltip display and for legend filtering.
:param x_axis:
Data for the x axis.
:param y_axis:
Data for the y axis. Each row is one "data item" and each column
belongs to one "dimension". A data item is [open, close, lowest, highest]
(i.e. [opening value, closing value, lowest value, highest value]).
:param kwargs:
"""
kwargs.update(type="candlestick", x_axis=x_axis)
if "tooltip_formatter" not in kwargs:
kwargs["tooltip_formatter"] = kline_tooltip_formatter
if "tooltip_trigger" not in kwargs:
kwargs["tooltip_trigger"] = "axis"
chart = self._get_all_options(**kwargs)
xaxis, yaxis = chart["xy_axis"]
self._option.update(xAxis=xaxis, yAxis=yaxis)
self._option.get("xAxis")[0]["scale"] = True
self._option.get("yAxis")[0]["scale"] = True
self._option.get("yAxis")[0]["splitArea"] = {"show": True}
self._option.get("legend")[0].get("data").append(name)
self._option.get("series").append(
{
"type": "candlestick",
"name": name,
"data": y_axis,
"markPoint": chart["mark_point"],
"markLine": chart["mark_line"],
"seriesId": self._option.get("series_id"),
}
)
self._config_components(**kwargs)
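# --- Usage sketch (added for illustration; not part of the original module) ---
# Renders a small candlestick chart with the old pyecharts 0.x API that this
# class targets; the sample prices and the output file name are made up.
if __name__ == "__main__":
    kline = Kline("Kline demo")
    kline.add(
        "day",
        ["2017/7/{}".format(i + 1) for i in range(4)],
        [
            [2320.26, 2320.26, 2287.30, 2362.94],  # [open, close, lowest, highest]
            [2300.00, 2291.30, 2288.26, 2308.38],
            [2295.35, 2346.50, 2295.35, 2346.92],
            [2347.22, 2358.98, 2337.35, 2363.80],
        ],
    )
    kline.render("kline_demo.html")  # writes a standalone HTML file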
| 27.884615 | 67 | 0.486437 | [
"Apache-2.0"
] | Amoswish/graduaction_design_pubgprediction | venv/lib/python3.7/site-packages/pyecharts/charts/kline.py | 2,347 | Python |
from pyexcel_io.sheet import (
SheetReader, SheetWriter, NamedContent
)
from pyexcel_io.book import BookWriter
from pyexcel_io.utils import is_empty_array
from nose.tools import raises
@raises(NotImplementedError)
def test_book_writer():
book = BookWriter()
book.create_sheet("test")
def test_is_empty_array():
a = ["", "", "", ""]
assert is_empty_array(a) is True
b = [1, "", "", ""]
assert is_empty_array(b) is False
class ArrayReader(SheetReader):
@property
def name(self):
SheetReader.name
return self._native_sheet.name
def number_of_columns(self):
SheetReader.number_of_columns(self)
return len(self._native_sheet.payload[0])
def number_of_rows(self):
SheetReader.number_of_rows(self)
return len(self._native_sheet.payload)
def cell_value(self, row, column):
SheetReader.cell_value(self, row, column)
return self._native_sheet.payload[row][column]
class ArrayWriter(SheetWriter):
def set_sheet_name(self, name):
self._native_sheet.name = name
def write_row(self, array):
self._native_sheet.payload.append(array)
class TestSheetReader:
@raises(NotImplementedError)
def test_abstractness(self):
reader = SheetReader("test")
reader.cell_value(1, 2)
@raises(NotImplementedError)
def test_number_of_columns(self):
reader = SheetReader("test")
reader.number_of_columns()
@raises(NotImplementedError)
def test_number_of_rows(self):
reader = SheetReader("test")
reader.number_of_rows()
def test_to_array(self):
name = "test"
class B(SheetReader):
@property
def name(self):
return self._native_sheet
def to_array(self):
pass
b = B(name)
b.to_array()
assert b.name == name
class TestSheetWriter:
@raises(NotImplementedError)
def test_abstractness(self):
writer = SheetWriter("te", "st", "abstract")
writer.write_row([])
def test_inheritance(self):
class D(SheetWriter):
def write_row(self, row):
pass
d = D('t', 'e', 's')
d.write_row([11, 11])
def test_writer(self):
native_sheet = NamedContent("test", [])
content = [
[1, 2],
[3, 4],
[5, 6]
]
writer = ArrayWriter(None, native_sheet, "test")
writer.write_row(content[0])
writer.write_array(content[1:])
assert native_sheet.payload == content
def test_writer2(self):
native_sheet = NamedContent("test", [])
content = [
[1, 2],
[3, 4],
[5, 6]
]
writer = ArrayWriter(None, native_sheet, None)
writer.write_row(content[0])
writer.write_array(content[1:])
assert native_sheet.payload == content
assert native_sheet.name == "pyexcel_sheet1"
| 25.075 | 56 | 0.606846 | [
"BSD-3-Clause"
] | AverkinSergei/pyexcel-io | tests/test_base.py | 3,009 | Python |
class FabricSheetType(ElementType,IDisposable):
""" Represents a fabric sheet type,used in the generation of fabric wires. """
@staticmethod
def CreateDefaultFabricSheetType(ADoc):
"""
CreateDefaultFabricSheetType(ADoc: Document) -> ElementId
Creates a new FabricSheetType object with a default name.
ADoc: The document.
Returns: The newly created type id.
"""
pass
def Dispose(self):
""" Dispose(self: Element,A_0: bool) """
pass
def getBoundingBox(self,*args):
""" getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ """
pass
def GetReinforcementRoundingManager(self):
"""
GetReinforcementRoundingManager(self: FabricSheetType) -> FabricRoundingManager
Returns an object for managing reinforcement rounding override settings.
Returns: The rounding manager.
"""
pass
def GetWireItem(self,wireIndex,direction):
"""
GetWireItem(self: FabricSheetType,wireIndex: int,direction: WireDistributionDirection) -> FabricWireItem
Gets the Wire stored in the FabricSheetType at the associated index.
wireIndex: Item index in the Fabric Sheet
direction: Wire distribution direction of the inquired item
Returns: Fabric wire Item
"""
pass
def IsCustom(self):
"""
IsCustom(self: FabricSheetType) -> bool
Verifies if the type is Custom Fabric Sheet
Returns: True if Layout is set on Custom and if the wireArr is not null
"""
pass
def IsValidMajorLapSplice(self,majorLapSplice):
"""
IsValidMajorLapSplice(self: FabricSheetType,majorLapSplice: float) -> bool
Identifies if the input value is valid to be applied as the major lap splice
value for this FabricSheetType.
"""
pass
def IsValidMinorLapSplice(self,minorLapSplice):
"""
IsValidMinorLapSplice(self: FabricSheetType,minorLapSplice: float) -> bool
Identifies if the input value is valid to be applied as the minor lap splice
value for this FabricSheetType.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: Element,disposing: bool) """
pass
def setElementType(self,*args):
""" setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) """
pass
def SetLayoutAsCustomPattern(self,minorStartOverhang,minorEndOverhang,majorStartOverhang,majorEndOverhang,minorFabricWireItems,majorFabricWireItems):
""" SetLayoutAsCustomPattern(self: FabricSheetType,minorStartOverhang: float,minorEndOverhang: float,majorStartOverhang: float,majorEndOverhang: float,minorFabricWireItems: IList[FabricWireItem],majorFabricWireItems: IList[FabricWireItem]) """
pass
def SetMajorLayoutAsActualSpacing(self,overallWidth,minorStartOverhang,spacing):
"""
SetMajorLayoutAsActualSpacing(self: FabricSheetType,overallWidth: float,minorStartOverhang: float,spacing: float)
Sets the major layout pattern as ActualSpacing,while specifying the needed
parameters for this pattern.
overallWidth: The entire width of the wire sheet in the minor direction.
minorStartOverhang: The distance from the edge of the sheet to the first wire in the minor
direction.
spacing: The distance between the wires in the major direction.
"""
pass
def SetMajorLayoutAsFixedNumber(self,overallWidth,minorStartOverhang,minorEndOverhang,numberOfWires):
"""
SetMajorLayoutAsFixedNumber(self: FabricSheetType,overallWidth: float,minorStartOverhang: float,minorEndOverhang: float,numberOfWires: int)
Sets the major layout pattern as FixedNumber,while specifying the needed
parameters for this pattern.
overallWidth: The entire width of the wire sheet in the minor direction.
minorStartOverhang: The distance from the edge of the sheet to the first wire in the minor
direction.
minorEndOverhang: The distance from the last wire to the edge of the sheet in the minor direction.
numberOfWires: The number of the wires to set in the major direction.
"""
pass
def SetMajorLayoutAsMaximumSpacing(self,overallWidth,minorStartOverhang,minorEndOverhang,spacing):
"""
SetMajorLayoutAsMaximumSpacing(self: FabricSheetType,overallWidth: float,minorStartOverhang: float,minorEndOverhang: float,spacing: float)
Sets the major layout pattern as MaximumSpacing,while specifying the needed
parameters for this pattern.
overallWidth: The entire width of the wire sheet in the minor direction.
minorStartOverhang: The distance from the edge of the sheet to the first wire in the minor
direction.
minorEndOverhang: The distance from the last wire to the edge of the sheet in the minor direction.
spacing: The distance between the wires in the major direction.
"""
pass
def SetMajorLayoutAsNumberWithSpacing(self,overallWidth,minorStartOverhang,numberOfWires,spacing):
"""
SetMajorLayoutAsNumberWithSpacing(self: FabricSheetType,overallWidth: float,minorStartOverhang: float,numberOfWires: int,spacing: float)
Sets the major layout pattern as NumberWithSpacing,while specifying the needed
parameters for this pattern.
overallWidth: The entire width of the wire sheet in the minor direction.
minorStartOverhang: The distance from the edge of the sheet to the first wire in the minor
direction.
numberOfWires: The number of the wires to set in the major direction.
spacing: The distance between the wires in the major direction.
"""
pass
def SetMinorLayoutAsActualSpacing(self,overallLength,majorStartOverhang,spacing):
"""
SetMinorLayoutAsActualSpacing(self: FabricSheetType,overallLength: float,majorStartOverhang: float,spacing: float)
Sets the minor layout pattern as ActualSpacing,while specifying the needed
parameters for this pattern.
overallLength: The entire length of the wire sheet in the major direction.
majorStartOverhang: The distance from the edge of the sheet to the first wire in the major
direction.
spacing: The distance between the wires in the minor direction.
"""
pass
def SetMinorLayoutAsFixedNumber(self,overallLength,majorStartOverhang,majorEndOverhang,numberOfWires):
"""
SetMinorLayoutAsFixedNumber(self: FabricSheetType,overallLength: float,majorStartOverhang: float,majorEndOverhang: float,numberOfWires: int)
Sets the major layout pattern as FixedNumber,while specifying the needed
parameters for this pattern.
overallLength: The entire length of the wire sheet in the major direction.
majorStartOverhang: The distance from the edge of the sheet to the first wire in the major
direction.
majorEndOverhang: The distance from the last wire to the edge of the sheet in the major direction.
numberOfWires: The number of the wires to set in the minor direction.
"""
pass
def SetMinorLayoutAsMaximumSpacing(self,overallLength,majorStartOverhang,majorEndOverhang,spacing):
"""
SetMinorLayoutAsMaximumSpacing(self: FabricSheetType,overallLength: float,majorStartOverhang: float,majorEndOverhang: float,spacing: float)
Sets the major layout pattern as MaximumSpacing,while specifying the needed
parameters for this pattern.
overallLength: The entire length of the wire sheet in the major direction.
majorStartOverhang: The distance from the edge of the sheet to the first wire in the major
direction.
majorEndOverhang: The distance from the last wire to the edge of the sheet in the major direction.
spacing: The distance between the wires in the minor direction.
"""
pass
def SetMinorLayoutAsNumberWithSpacing(self,overallLength,majorStartOverhang,numberOfWires,spacing):
"""
SetMinorLayoutAsNumberWithSpacing(self: FabricSheetType,overallLength: float,majorStartOverhang: float,numberOfWires: int,spacing: float)
Sets the major layout pattern as NumberWithSpacing,while specifying the needed
parameters for this pattern.
overallLength: The entire length of the wire sheet in the major direction.
majorStartOverhang: The distance from the edge of the sheet to the first wire in the major
direction.
numberOfWires: The number of wires in the minor direction.
spacing: The distance between the wires in the minor direction.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
MajorDirectionWireType=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The id of the FabricWireType to be used in the major direction.
Get: MajorDirectionWireType(self: FabricSheetType) -> ElementId
Set: MajorDirectionWireType(self: FabricSheetType)=value
"""
MajorEndOverhang=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The distance from the edge of the sheet to the last wire (measured in the major direction).
Get: MajorEndOverhang(self: FabricSheetType) -> float
"""
MajorLapSpliceLength=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The lap splice length in the major direction.
Get: MajorLapSpliceLength(self: FabricSheetType) -> float
Set: MajorLapSpliceLength(self: FabricSheetType)=value
"""
MajorLayoutPattern=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The layout pattern in the major direction.
Get: MajorLayoutPattern(self: FabricSheetType) -> FabricSheetLayoutPattern
"""
MajorNumberOfWires=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The number of wires used in the major direction (includes the first and last wires).
Get: MajorNumberOfWires(self: FabricSheetType) -> int
"""
MajorReinforcementArea=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The area of fabric divided by the spacing of the wire in the major direction.
Get: MajorReinforcementArea(self: FabricSheetType) -> float
"""
MajorSpacing=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The spacing between the wires in the major direction (not including the overhangs).
Get: MajorSpacing(self: FabricSheetType) -> float
"""
MajorStartOverhang=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The distance from the edge of the sheet to the first wire (measured in the major direction).
Get: MajorStartOverhang(self: FabricSheetType) -> float
"""
Material=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The id of the material assigned to wires.
Get: Material(self: FabricSheetType) -> ElementId
Set: Material(self: FabricSheetType)=value
"""
MinorDirectionWireType=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The id of the FabricWireType to be used in the minor direction.
Get: MinorDirectionWireType(self: FabricSheetType) -> ElementId
Set: MinorDirectionWireType(self: FabricSheetType)=value
"""
MinorEndOverhang=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The distance from the edge of the sheet to the last wire (measured in the minor direction).
Get: MinorEndOverhang(self: FabricSheetType) -> float
"""
MinorLapSpliceLength=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The lap splice length in the minor direction.
Get: MinorLapSpliceLength(self: FabricSheetType) -> float
Set: MinorLapSpliceLength(self: FabricSheetType)=value
"""
MinorLayoutPattern=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The layout pattern in the minor direction.
Get: MinorLayoutPattern(self: FabricSheetType) -> FabricSheetLayoutPattern
"""
MinorNumberOfWires=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The number of wires used in the minor direction (includes the 1st and last wires).
Get: MinorNumberOfWires(self: FabricSheetType) -> int
"""
MinorReinforcementArea=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The area of fabric divided by the spacing of the wire in the minor direction.
Get: MinorReinforcementArea(self: FabricSheetType) -> float
"""
MinorSpacing=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The spacing between the wires in the minor direction (not including the overhangs).
Get: MinorSpacing(self: FabricSheetType) -> float
"""
MinorStartOverhang=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The distance from the edge of the sheet to the first wire (measured in the minor direction).
Get: MinorStartOverhang(self: FabricSheetType) -> float
"""
OverallLength=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The length of the wire sheet (including overhangs) in the major direction.
Get: OverallLength(self: FabricSheetType) -> float
"""
OverallWidth=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The length of the wire sheet (including overhangs) in the minor direction.
Get: OverallWidth(self: FabricSheetType) -> float
"""
SheetMass=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The sheet mass.
Get: SheetMass(self: FabricSheetType) -> float
Set: SheetMass(self: FabricSheetType)=value
"""
SheetMassUnit=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The sheet mass per area unit.
Get: SheetMassUnit(self: FabricSheetType) -> float
"""
| 26.237226 | 246 | 0.728265 | [
"MIT"
] | BCSharp/ironpython-stubs | release/stubs.min/Autodesk/Revit/DB/Structure/__init___parts/FabricSheetType.py | 14,378 | Python |
from __future__ import unicode_literals
from xml.dom import minidom
from django.contrib.syndication import views
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.utils import tzinfo
from django.utils.feedgenerator import rfc2822_date, rfc3339_date
from .models import Entry
class FeedTestCase(TestCase):
fixtures = ['feeddata.json']
def assertChildNodes(self, elem, expected):
actual = set(n.nodeName for n in elem.childNodes)
expected = set(expected)
self.assertEqual(actual, expected)
def assertChildNodeContent(self, elem, expected):
for k, v in expected.items():
self.assertEqual(
elem.getElementsByTagName(k)[0].firstChild.wholeText, v)
def assertCategories(self, elem, expected):
self.assertEqual(set(i.firstChild.wholeText for i in elem.childNodes if i.nodeName == 'category'), set(expected))
######################################
# Feed view
######################################
class SyndicationFeedTest(FeedTestCase):
"""
Tests for the high-level syndication feed framework.
"""
urls = 'syndication.urls'
def test_rss2_feed(self):
"""
Test the structure and content of feeds generated by Rss201rev2Feed.
"""
response = self.client.get('/syndication/rss2/')
doc = minidom.parseString(response.content)
# Making sure there's only 1 `rss` element and that the correct
# RSS version was specified.
feed_elem = doc.getElementsByTagName('rss')
self.assertEqual(len(feed_elem), 1)
feed = feed_elem[0]
self.assertEqual(feed.getAttribute('version'), '2.0')
# Making sure there's only one `channel` element w/in the
# `rss` element.
chan_elem = feed.getElementsByTagName('channel')
self.assertEqual(len(chan_elem), 1)
chan = chan_elem[0]
# Find the last build date
d = Entry.objects.latest('published').published
ltz = tzinfo.LocalTimezone(d)
last_build_date = rfc2822_date(d.replace(tzinfo=ltz))
self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])
self.assertChildNodeContent(chan, {
'title': 'My blog',
'description': 'A more thorough description of my blog.',
'link': 'http://example.com/blog/',
'language': 'en',
'lastBuildDate': last_build_date,
#'atom:link': '',
'ttl': '600',
'copyright': 'Copyright (c) 2007, Sally Smith',
})
self.assertCategories(chan, ['python', 'django'])
# Ensure the content of the channel is correct
self.assertChildNodeContent(chan, {
'title': 'My blog',
'link': 'http://example.com/blog/',
})
# Check feed_url is passed
self.assertEqual(
chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
'http://example.com/syndication/rss2/'
)
# Find the pubdate of the first feed item
d = Entry.objects.get(pk=1).published
ltz = tzinfo.LocalTimezone(d)
pub_date = rfc2822_date(d.replace(tzinfo=ltz))
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), Entry.objects.count())
self.assertChildNodeContent(items[0], {
'title': 'My first entry',
'description': 'Overridden description: My first entry',
'link': 'http://example.com/blog/1/',
'guid': 'http://example.com/blog/1/',
'pubDate': pub_date,
'author': 'test@example.com (Sally Smith)',
})
self.assertCategories(items[0], ['python', 'testing'])
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'category', 'pubDate', 'author'])
# Assert that <guid> does not have any 'isPermaLink' attribute
self.assertIsNone(item.getElementsByTagName(
'guid')[0].attributes.get('isPermaLink'))
def test_rss2_feed_guid_permalink_false(self):
"""
Test if the 'isPermaLink' attribute of <guid> element of an item
in the RSS feed is 'false'.
"""
response = self.client.get(
'/syndication/rss2/guid_ispermalink_false/')
doc = minidom.parseString(response.content)
chan = doc.getElementsByTagName(
'rss')[0].getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
for item in items:
self.assertEqual(
item.getElementsByTagName('guid')[0].attributes.get(
'isPermaLink').value, "false")
def test_rss2_feed_guid_permalink_true(self):
"""
Test if the 'isPermaLink' attribute of <guid> element of an item
in the RSS feed is 'true'.
"""
response = self.client.get(
'/syndication/rss2/guid_ispermalink_true/')
doc = minidom.parseString(response.content)
chan = doc.getElementsByTagName(
'rss')[0].getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
for item in items:
self.assertEqual(
item.getElementsByTagName('guid')[0].attributes.get(
'isPermaLink').value, "true")
def test_rss091_feed(self):
"""
Test the structure and content of feeds generated by RssUserland091Feed.
"""
response = self.client.get('/syndication/rss091/')
doc = minidom.parseString(response.content)
# Making sure there's only 1 `rss` element and that the correct
# RSS version was specified.
feed_elem = doc.getElementsByTagName('rss')
self.assertEqual(len(feed_elem), 1)
feed = feed_elem[0]
self.assertEqual(feed.getAttribute('version'), '0.91')
# Making sure there's only one `channel` element w/in the
# `rss` element.
chan_elem = feed.getElementsByTagName('channel')
self.assertEqual(len(chan_elem), 1)
chan = chan_elem[0]
self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])
# Ensure the content of the channel is correct
self.assertChildNodeContent(chan, {
'title': 'My blog',
'link': 'http://example.com/blog/',
})
self.assertCategories(chan, ['python', 'django'])
# Check feed_url is passed
self.assertEqual(
chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
'http://example.com/syndication/rss091/'
)
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), Entry.objects.count())
self.assertChildNodeContent(items[0], {
'title': 'My first entry',
'description': 'Overridden description: My first entry',
'link': 'http://example.com/blog/1/',
})
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description'])
self.assertCategories(item, [])
def test_atom_feed(self):
"""
Test the structure and content of feeds generated by Atom1Feed.
"""
response = self.client.get('/syndication/atom/')
feed = minidom.parseString(response.content).firstChild
self.assertEqual(feed.nodeName, 'feed')
self.assertEqual(feed.getAttribute('xmlns'), 'http://www.w3.org/2005/Atom')
self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'rights', 'category', 'author'])
for link in feed.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href'), 'http://example.com/syndication/atom/')
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), Entry.objects.count())
for entry in entries:
self.assertChildNodes(entry, [
'title',
'link',
'id',
'summary',
'category',
'updated',
'published',
'rights',
'author',
])
summary = entry.getElementsByTagName('summary')[0]
self.assertEqual(summary.getAttribute('type'), 'html')
def test_atom_feed_published_and_updated_elements(self):
"""
Test that the published and updated elements are not
the same and now adhere to RFC 4287.
"""
response = self.client.get('/syndication/atom/')
feed = minidom.parseString(response.content).firstChild
entries = feed.getElementsByTagName('entry')
published = entries[0].getElementsByTagName('published')[0].firstChild.wholeText
updated = entries[0].getElementsByTagName('updated')[0].firstChild.wholeText
self.assertNotEqual(published, updated)
def test_latest_post_date(self):
"""
Test that both the published and updated dates are
considered when determining the latest post date.
"""
# this feed has a `published` element with the latest date
response = self.client.get('/syndication/atom/')
feed = minidom.parseString(response.content).firstChild
updated = feed.getElementsByTagName('updated')[0].firstChild.wholeText
d = Entry.objects.latest('published').published
ltz = tzinfo.LocalTimezone(d)
latest_published = rfc3339_date(d.replace(tzinfo=ltz))
self.assertEqual(updated, latest_published)
# this feed has an `updated` element with the latest date
response = self.client.get('/syndication/latest/')
feed = minidom.parseString(response.content).firstChild
updated = feed.getElementsByTagName('updated')[0].firstChild.wholeText
d = Entry.objects.exclude(pk=5).latest('updated').updated
ltz = tzinfo.LocalTimezone(d)
latest_updated = rfc3339_date(d.replace(tzinfo=ltz))
self.assertEqual(updated, latest_updated)
def test_custom_feed_generator(self):
response = self.client.get('/syndication/custom/')
feed = minidom.parseString(response.content).firstChild
self.assertEqual(feed.nodeName, 'feed')
self.assertEqual(feed.getAttribute('django'), 'rocks')
self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'spam', 'rights', 'category', 'author'])
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), Entry.objects.count())
for entry in entries:
self.assertEqual(entry.getAttribute('bacon'), 'yum')
self.assertChildNodes(entry, [
'title',
'link',
'id',
'summary',
'ministry',
'rights',
'author',
'updated',
'published',
'category',
])
summary = entry.getElementsByTagName('summary')[0]
self.assertEqual(summary.getAttribute('type'), 'html')
def test_title_escaping(self):
"""
Tests that titles are escaped correctly in RSS feeds.
"""
response = self.client.get('/syndication/rss2/')
doc = minidom.parseString(response.content)
for item in doc.getElementsByTagName('item'):
link = item.getElementsByTagName('link')[0]
if link.firstChild.wholeText == 'http://example.com/blog/4/':
title = item.getElementsByTagName('title')[0]
self.assertEqual(title.firstChild.wholeText, 'A & B < C > D')
def test_naive_datetime_conversion(self):
"""
Test that datetimes are correctly converted to the local time zone.
"""
# Naive date times passed in get converted to the local time zone, so
# check the received zone offset against the local offset.
response = self.client.get('/syndication/naive-dates/')
doc = minidom.parseString(response.content)
updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText
d = Entry.objects.latest('published').published
ltz = tzinfo.LocalTimezone(d)
latest = rfc3339_date(d.replace(tzinfo=ltz))
self.assertEqual(updated, latest)
def test_aware_datetime_conversion(self):
"""
Test that datetimes with timezones don't get trodden on.
"""
response = self.client.get('/syndication/aware-dates/')
doc = minidom.parseString(response.content)
published = doc.getElementsByTagName('published')[0].firstChild.wholeText
self.assertEqual(published[-6:], '+00:42')
def test_feed_last_modified_time(self):
response = self.client.get('/syndication/naive-dates/')
self.assertEqual(response['Last-Modified'], 'Tue, 26 Mar 2013 01:00:00 GMT')
# No last-modified when feed has no item_pubdate
response = self.client.get('/syndication/no_pubdate/')
self.assertFalse(response.has_header('Last-Modified'))
def test_feed_url(self):
"""
Test that the feed_url can be overridden.
"""
response = self.client.get('/syndication/feedurl/')
doc = minidom.parseString(response.content)
for link in doc.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href'), 'http://example.com/customfeedurl/')
def test_secure_urls(self):
"""
Test URLs are prefixed with https:// when feed is requested over HTTPS.
"""
response = self.client.get('/syndication/rss2/', **{
'wsgi.url_scheme': 'https',
})
doc = minidom.parseString(response.content)
chan = doc.getElementsByTagName('channel')[0]
self.assertEqual(
chan.getElementsByTagName('link')[0].firstChild.wholeText[0:5],
'https'
)
atom_link = chan.getElementsByTagName('atom:link')[0]
self.assertEqual(atom_link.getAttribute('href')[0:5], 'https')
for link in doc.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href')[0:5], 'https')
def test_item_link_error(self):
"""
Test that a ImproperlyConfigured is raised if no link could be found
for the item(s).
"""
self.assertRaises(ImproperlyConfigured,
self.client.get,
'/syndication/articles/')
def test_template_feed(self):
"""
Test that the item title and description can be overridden with
templates.
"""
response = self.client.get('/syndication/template/')
doc = minidom.parseString(response.content)
feed = doc.getElementsByTagName('rss')[0]
chan = feed.getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
self.assertChildNodeContent(items[0], {
'title': 'Title in your templates: My first entry',
'description': 'Description in your templates: My first entry',
'link': 'http://example.com/blog/1/',
})
def test_template_context_feed(self):
"""
Test that custom context data can be passed to templates for title
and description.
"""
response = self.client.get('/syndication/template_context/')
doc = minidom.parseString(response.content)
feed = doc.getElementsByTagName('rss')[0]
chan = feed.getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
self.assertChildNodeContent(items[0], {
'title': 'My first entry (foo is bar)',
'description': 'My first entry (foo is bar)',
})
def test_add_domain(self):
"""
Test add_domain() prefixes domains onto the correct URLs.
"""
self.assertEqual(
views.add_domain('example.com', '/foo/?arg=value'),
'http://example.com/foo/?arg=value'
)
self.assertEqual(
views.add_domain('example.com', '/foo/?arg=value', True),
'https://example.com/foo/?arg=value'
)
self.assertEqual(
views.add_domain('example.com', 'http://djangoproject.com/doc/'),
'http://djangoproject.com/doc/'
)
self.assertEqual(
views.add_domain('example.com', 'https://djangoproject.com/doc/'),
'https://djangoproject.com/doc/'
)
self.assertEqual(
views.add_domain('example.com', 'mailto:uhoh@djangoproject.com'),
'mailto:uhoh@djangoproject.com'
)
self.assertEqual(
views.add_domain('example.com', '//example.com/foo/?arg=value'),
'http://example.com/foo/?arg=value'
)
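# --- Illustrative sketch (added): a minimal Feed definition of the kind these
# tests exercise. It is not the actual fixture feed from the test URLconf; the
# Entry field names used below are inferred from the assertions above.
class ExampleEntriesFeed(views.Feed):
    title = "My blog"
    link = "/blog/"
    description = "A more thorough description of my blog."

    def items(self):
        return Entry.objects.order_by('-published')[:5]

    def item_title(self, item):
        return item.title

    def item_description(self, item):
        return "Example description: %s" % item.title

    def item_link(self, item):
        return '/blog/%d/' % item.pk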
| 40.006944 | 151 | 0.602847 | [
"BSD-3-Clause"
] | adambrenecki/django | tests/syndication/tests.py | 17,283 | Python |
# Copyright (c) 2011 The Chromium Embedded Framework Authors. All rights
# reserved. Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file.
from cef_parser import *
def make_function_body_block(cls):
impl = ' // ' + cls.get_name() + ' methods.\n'
funcs = cls.get_virtual_funcs()
for func in funcs:
impl += ' ' + func.get_cpp_proto()
if cls.is_client_side():
impl += ' override;\n'
else:
impl += ' OVERRIDE;\n'
return impl
def make_function_body(header, cls):
impl = make_function_body_block(cls)
cur_cls = cls
while True:
parent_name = cur_cls.get_parent_name()
if is_base_class(parent_name):
break
else:
parent_cls = header.get_class(parent_name)
if parent_cls is None:
raise Exception('Class does not exist: ' + parent_name)
if len(impl) > 0:
impl += '\n'
impl += make_function_body_block(parent_cls)
cur_cls = header.get_class(parent_name)
return impl
def make_ctocpp_header(header, clsname):
cls = header.get_class(clsname)
if cls is None:
raise Exception('Class does not exist: ' + clsname)
clientside = cls.is_client_side()
directory = cls.get_file_directory()
defname = ''
if not directory is None:
defname += directory + '_'
defname += get_capi_name(clsname[3:], False)
defname = defname.upper()
capiname = cls.get_capi_name()
result = get_copyright()
result += '#ifndef CEF_LIBCEF_DLL_CTOCPP_'+defname+'_CTOCPP_H_\n'+ \
'#define CEF_LIBCEF_DLL_CTOCPP_'+defname+'_CTOCPP_H_\n' + \
'#pragma once\n'
if clientside:
result += """
#if !defined(BUILDING_CEF_SHARED)
#error This file can be included DLL-side only
#endif
"""
else:
result += """
#if !defined(WRAPPING_CEF_SHARED)
#error This file can be included wrapper-side only
#endif
"""
# build the function body
func_body = make_function_body(header, cls)
# include standard headers
if func_body.find('std::map') > 0 or func_body.find('std::multimap') > 0:
result += '\n#include <map>'
if func_body.find('std::vector') > 0:
result += '\n#include <vector>'
# include the headers for this class
result += '\n#include "include/'+cls.get_file_name()+'"'+ \
'\n#include "include/capi/'+cls.get_capi_file_name()+'"\n'
# include headers for any forward declared classes that are not in the same file
declares = cls.get_forward_declares()
for declare in declares:
dcls = header.get_class(declare)
if dcls.get_file_name() != cls.get_file_name():
result += '#include "include/'+dcls.get_file_name()+'"\n' \
'#include "include/capi/'+dcls.get_capi_file_name()+'"\n'
base_class_name = header.get_base_class_name(clsname)
base_scoped = True if base_class_name == 'CefBaseScoped' else False
if base_scoped:
template_file = 'ctocpp_scoped.h'
template_class = 'CefCToCppScoped'
else:
template_file = 'ctocpp_ref_counted.h'
template_class = 'CefCToCppRefCounted'
result += '#include "libcef_dll/ctocpp/' + template_file + '"'
result += '\n\n// Wrap a C structure with a C++ class.\n'
if clientside:
result += '// This class may be instantiated and accessed DLL-side only.\n'
else:
result += '// This class may be instantiated and accessed wrapper-side only.\n'
result += 'class '+clsname+'CToCpp\n'+ \
' : public ' + template_class + '<'+clsname+'CToCpp, '+clsname+', '+capiname+'> {\n'+ \
' public:\n'+ \
' '+clsname+'CToCpp();\n\n'
result += func_body
result += '};\n\n'
result += '#endif // CEF_LIBCEF_DLL_CTOCPP_' + defname + '_CTOCPP_H_'
return result
def write_ctocpp_header(header, clsname, dir):
# give the output file the same directory offset as the input file
cls = header.get_class(clsname)
dir = os.path.dirname(os.path.join(dir, cls.get_file_name()))
file = os.path.join(dir, get_capi_name(clsname[3:], False) + '_ctocpp.h')
newcontents = make_ctocpp_header(header, clsname)
return (file, newcontents)
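# --- Usage sketch (added for illustration) ---
# Besides the command-line entry point below, the writer can be driven
# programmatically; the header path and class name are placeholders.
def _example_generate(outdir="libcef_dll/ctocpp"):
    header = obj_header()
    header.add_file("include/cef_browser.h")  # placeholder CEF C++ header
    out_file, contents = write_ctocpp_header(header, "CefBrowser", outdir)
    return out_file, contents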
# test the module
if __name__ == "__main__":
import sys
# verify that the correct number of command-line arguments are provided
if len(sys.argv) < 3:
sys.stderr.write('Usage: ' + sys.argv[0] + ' <infile> <classname>')
sys.exit()
# create the header object
header = obj_header()
header.add_file(sys.argv[1])
# dump the result to stdout
sys.stdout.write(make_ctocpp_header(header, sys.argv[2]))
| 29.444444 | 104 | 0.666149 | [
"BSD-3-Clause"
] | AkihideHasegawa/cef | tools/make_ctocpp_header.py | 4,505 | Python |
# The MIT License (MIT)
#
# Copyright (c) 2015, Nicolas Sebrecht & contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import sys
def testingPath():
return os.path.join(
os.path.abspath(sys.modules['imapfw'].__path__[0]),
'testing')
| 41.419355 | 79 | 0.759346 | [
"MIT"
] | Deepanshu2017/imapfw | imapfw/testing/libcore.py | 1,284 | Python |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 07 14:08:31 2016
@author: Mic
"""
from __future__ import division
from wiselib2.must import *
import numpy as np
import wiselib2.Rayman as rm
Gauss1d = lambda x ,y : None
from scipy import interpolate as interpolate
from matplotlib import pyplot as plt
class PsdFuns:
'''
Ensemble of possible Psd Functions.
Each element is a callable Psd.
Most used are
PsdFuns.PowerLaw(x,a,b)
PsdFuns.Interp(x, xData, yData)
'''
@staticmethod
def Flat(x, *args):
N = len(x)
return np.zeros([1,N]) +1
@staticmethod
def PowerLaw(x,a,b):
return a*x**b
@staticmethod
def Gaussian(x,sigma, x0=0):
return np.exp(-0.5 * (x-x0)**2/sigma**2)
@staticmethod
def Interp(x, xData, yData):
f = interpolate.interp1d(xData, yData)
return f(x)
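# --- Usage sketch (added for illustration) ---
# The static methods above are interchangeable callables Psd(f, *args); the
# sample frequencies and parameters below are arbitrary.
def _psdfuns_example():
    f = np.linspace(0.01, 1.0, 100)            # spatial frequencies
    y_power = PsdFuns.PowerLaw(f, 1e-3, -2.0)  # a * f**b
    y_interp = PsdFuns.Interp(f, f, y_power)   # tabulated data, linear interpolation
    return y_power, y_interp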
def PsdFun2Noise_1d(N,dx, PsdFun, PsdArgs):
'''
Generates a noise pattern based an the Power spectral density returned
by PsdFun
'''
x = np.arange(0,N//2+1, dx)
yHalf = PsdFun(x, *PsdArgs)
y = Psd2NoisePattern_1d(yHalf, Semiaxis = True )
return x,y
#============================================================================
# FUN: PsdArray2Noise_1d_v2
#============================================================================
def PsdArray2Noise_1d_v2(f_in, Psd_in, L_mm,N):
'''
Returns meters
'''
from scipy import interpolate
log=np.log
fft = np.fft.fft
fftshift = np.fft.fftshift
ff = f_in
yy = Psd_in
L = L_mm
N = int(N)
N2 = int(N//2)
L =300 # (mm)
L_um = L*1e3
L_nm = L*1e6
fMin = 1/L_um
## old line:
##fSpline = (np.array(range(N2))+1)/L_um # um^-1
fSpline = np.arange(N2)/N2 * (max(ff) - min(ff)) + min(ff)
fun = interpolate.splrep(log(ff), log(yy), s=2)
yPsd_log = interpolate.splev(log(fSpline), fun)
ySpline = np.exp(yPsd_log)
yPsd = ySpline
# remove (flatten) values below the measured frequency range
yPsd[fSpline<ff[0]] = 200
n = len(yPsd)
plt.plot(fSpline, yPsd,'-')
plt.plot(ff, yy,'x')
plt.legend(['ySpline','Data'])
ax = plt.axes()
#ax.set_yscale('log')
#ax.set_xscale('log')
#% check the RMS by integrating yPsd
import scipy.integrate as integrate
RMS = np.sqrt(integrate.trapz(yPsd, fSpline/1000))
#% Manfredda-style approach
#yPsdNorm = np.sqrt(yPsd/L_um/1000)
#yPsdNorm_reverse = yPsdNorm[::-1]
yPsd_reverse = yPsd[::-1]
ell= 1/(fSpline[1] - fSpline[0])
if N%2 == 0:
yPsd2 = np.hstack((yPsd_reverse ,0,yPsd[0:-1]))
else:
yPsd2 = np.hstack((yPsd_reverse ,0,yPsd))
##yPsd2Norm = np.sqrt(yPsd2/ell/1000/2)
yPsd2Norm = np.sqrt(yPsd2/ell/1000)
n_ = len(yPsd2)
print('len(yPsd2) = %0.2d' % len(yPsd2Norm))
phi = 2*np.pi * np.random.rand(n_)
r = np.exp(1j*phi)
yPsd2Norm_ = fftshift(yPsd2Norm)
#yPsd2Norm_[len(yPsd2Norm_)//2] = 0
yRaf = np.fft.fft(r*yPsd2Norm_)
yRaf = np.real(yRaf)
print('Rms = %0.2e nm' % np.std(yRaf))
plt.plot(yPsd2Norm_)
print('max yPsd_ = %d nm' % max(yPsd2))
print('max yPsd2Norm = %0.4f nm' % max(yPsd2Norm))
print('Rms yRaf2 = %0.2e nm' % np.std(yRaf))
return yRaf * 1e-9
#============================================================================
# FUN: Psd2Noise
#============================================================================
def PsdArray2Noise_1d(PsdArray, N, Semiaxis = True, Real = True):
'''
Generates a noise pattern whose Power Spectral density is given by Psd.
Parameters
---------------------
Psd : 1d array
Contains the numeric Psd (treated as evenly spaced array)
Semiaxis :
0 : does nothing
1 : halves the Psd, then mirrors the halved part onto the negative (left)
frequencies, producing an output as long as Psd
2 : mirrors the whole Psd onto the negative (left) frequencies as well, producing an output
twice as long as Psd
Real : boolean
If True, the real part of the output is returned (default)
Returns:
---------------------
An array of the same length as Psd
'''
if Semiaxis == True:
yHalf = PsdArray
PsdArrayNew = np.hstack((yHalf[-1:0:-1], yHalf))
idelta = len(PsdArrayNew) - N
if idelta == 1:# longer
PsdArrayNew = PsdArrayNew[0:-1] # equal
elif idelta == 0:
pass
else:
print('Error! len(PsdArrayNew) - len(PsdArray) = %0d' % idelta)
y = np.fft.fftshift(PsdArrayNew)
r = 2*np.pi * np.random.rand(len(PsdArrayNew))
f = np.fft.ifft(y * np.exp(1j*r))
if Real:
return np.real(f)
else:
return f
Psd2Noise_1d = PsdArray2Noise_1d
#============================================================================
# FUN: NoNoise_1d
#============================================================================
def NoNoise_1d(N, *args):
return np.zeros([1,N])
#============================================================================
# FUN: GaussianNoise_1d
#============================================================================
def GaussianNoise_1d(N,dx, Sigma):
'''
PSD(f) = np.exp(-0.5*f**2/Sigma**2)
'''
x = np.linspace(-N//2 * dx, (N//2 - 1) * dx, N)
y = np.exp(-0.5*x**2/Sigma**2)
return Psd2NoisePattern_1d(y)
#============================================================================
# FUN: PowerLawNoise_1d
#============================================================================
def PowerLawNoise_1d(N, dx, a, b):
'''
PSD(x) = a*x^b
'''
x = np.arange(0,N//2+1, dx)
yHalf = a * x**b
# y = np.hstack((yHalf[-1:0:-1], 0, yHalf[1:-1]))
return Psd2NoisePattern_1d(y, Semiaxis = True)
#============================================================================
# FUN: CustomNoise_1d
#============================================================================
def CustomNoise_1d(N, dx, xPsd, yPsd):
xPsd_, yPsd_ = rm.FastResample1d(xPsd, yPsd,N)
return Psd2NoisePattern_1d(yPsd_, Semiaxis = True)
#============================================================================
# CLASS: NoiseGenerator
#============================================================================
class PsdGenerator:
NoNoise = staticmethod(NoNoise_1d)
Gauss = staticmethod(GaussianNoise_1d)
PowerLaw = staticmethod(PowerLawNoise_1d)
NumericArray = staticmethod(CustomNoise_1d)
#============================================================================
# FUN: FitPowerLaw
#============================================================================
def FitPowerLaw(x,y):
'''
Fits the input data in the form
y = a*x^b
returns a,b
'''
import scipy.optimize as optimize
fFit = lambda p, x: p[0] * x ** p[1]
fErr = lambda p, x, y: (y - fFit(p, x))
p0 = [max(y), -1.0]
out = optimize.leastsq(fErr, p0, args=(x, y), full_output=1)
pOut = out[0]
b = pOut[1]
a = pOut[0]
# indexErr = np.np.sqrt( covar[0][0] )
# ampErr = np.np.sqrt( covar[1][1] ) * amp
return a,b
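# --- Usage sketch (added for illustration) ---
# FitPowerLaw recovers (a, b) from y = a*x^b; synthetic, noise-free data.
def _fitpowerlaw_example():
    x = np.linspace(1.0, 10.0, 50)
    y = 3.0 * x ** -1.5
    a, b = FitPowerLaw(x, y)  # expect a ~= 3.0 and b ~= -1.5
    return a, b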
#==============================================================================
# CLASS: RoughnessMaker
#==============================================================================
class RoughnessMaker(object):
class Options():
FIT_NUMERIC_DATA_WITH_POWER_LAW = True
AUTO_ZERO_MEAN_FOR_NUMERIC_DATA = True
AUTO_FILL_NUMERIC_DATA_WITH_ZERO = True
AUTO_RESET_CUTOFF_ON_PSDTYPE_CHANGE = True
def __init__(self):
self.PsdType = PsdFuns.PowerLaw
self.PsdParams = np.array([1,1])
self._IsNumericPsdInFreq = None
self.CutoffLowHigh = [None, None]
self.ProfileScaling = 1
return None
@property
def PsdType(self):
return self._PsdType
@PsdType.setter
def PsdType(self, Val):
'''
Note: each time the property value is set, self.CutoffLowHigh is
reset, as specified by the options
'''
self. _PsdType = Val
if self.Options.AUTO_RESET_CUTOFF_ON_PSDTYPE_CHANGE == True:
self.PsdCutoffLowHigh = [None, None]
#======================================================================
# FUN: PsdEval
#======================================================================
def PsdEval(self, N, df, CutoffLowHigh = [None, None]):
'''
Evals the PSD in the range [0 - N*df]
It is good practice to have PSD[0] = 0, so that the noise pattern is
zero-mean.
Parameters:
----------------------
N : int
#of samples
df : float
spacing of spatial frequencies (df=1/TotalLength)
CutoffLowHigh : [LowCutoff, HighCutoff]
if >0, then Psd(f<Cutoff) is set to 0.
if None, then LowCutoff = min()
Returns : fAll, yPsdAll
----------------------
fAll : 1d array
contains the spatial frequencies
yPsd : 1d array
contains the Psd
'''
'''
The Psd is evaluated only within LowCutoff and HighCutoff.
If the PsdType is PsdFuns.Interp, then LowCutoff and HighCutoff are
automatically set to the min and max values of the experimental data
'''
StrMessage = ''
def GetInRange(fAll, LowCutoff, HighCutoff):
_tmpa = fAll >= LowCutoff
_tmpb = fAll <= HighCutoff
fMid_Pos = np.all([_tmpa, _tmpb],0)
fMid = fAll[fMid_Pos]
return fMid_Pos, fMid
LowCutoff, HighCutoff = CutoffLowHigh
fMin = 0
fMax = (N-1)*df
fAll = np.linspace(0, fMax, N)
yPsdAll = fAll* 0 # init
LowCutoff = 0 if LowCutoff is None else LowCutoff
HighCutoff = N*df if HighCutoff is None else HighCutoff
# Numeric PSD
# Note: by default returned yPsd is always 0 outside the input data range
if self.PsdType == PsdFuns.Interp:
# Use Auto-Fit + PowerLaw
if self.Options.FIT_NUMERIC_DATA_WITH_POWER_LAW == True:
xFreq,y = self.NumericPsdGetXY()
p = FitPowerLaw(1/xFreq,y)
_PsdParams = p[0], -p[1]
LowCutoff = np.amin(self._PsdNumericX)
HighCutoff = np.amax(self._PsdNumericX)
fMid_Pos, fMid = GetInRange(fAll, LowCutoff, HighCutoff)
yPsd = PsdFuns.PowerLaw(fMid, *_PsdParams )
# Use Interpolation
else:
# check Cutoff
LowVal = np.amin(self._PsdNumericX)
HighVal = np.amax(self._PsdNumericX)
LowCutoff = LowVal if LowCutoff <= LowVal else LowCutoff
HighCutoff = HighVal if HighCutoff >= HighVal else HighCutoff
# Get the list of good frequency values (fMid) and their positions
# (fMid_Pos)
fMid_Pos, fMid = GetInRange(fAll, LowCutoff, HighCutoff)
##yPsd = self.PsdType(fMid, *self.PsdParams)
## does not work, replaced by hand below
yPsd = PsdFuns.Interp(fMid, self._PsdNumericX, self._PsdNumericY)
# Analytical Psd
else:
fMid_Pos, fMid = GetInRange(fAll, LowCutoff, HighCutoff)
yPsd = self.PsdType(fMid, *self.PsdParams)
# copying array subset
yPsdAll[fMid_Pos] = yPsd
return fAll, yPsdAll
#======================================================================
# FUN: _FitNumericPsdWithPowerLaw
#======================================================================
# no longer used
def _FitNumericPsdWithPowerLaw(self):
x,y = self.NumericPsdGetXY()
if self._IsNumericPsdInFreq == True:
p = FitPowerLaw(1/x,y)
self.PsdParams = p[0], -p[1]
else:
p = FitPowerLaw(x,y)
self.PsdParams = p[0], p[1]
#======================================================================
# FUN: MakeProfile
#======================================================================
def MakeProfile(self, L,N):
'''
Evaluates the psd according to .PsdType, .PsdParams and .Options directives
Returns an evenly-spaced array.
If PsdType = NumericArray, linear interpolation is performed.
:PARAM: L: total length of the profile
:PARAM: N: # of samples
returns:
1d arr
'''
if self.PsdType == PsdFuns.Interp:
# call the ad hoc code path
L_mm = L*1e3
yRoughness = PsdArray2Noise_1d_v2(self._PsdNumericX, self._PsdNumericY, L_mm, N)
else:
print('Irreversible error. The code was not completed to handle this instance')
return yRoughness * self.ProfileScaling
# f, yPsd = self.PsdEval(N//2 + 1,df)
# Special case
# if self.Options.FIT_NUMERIC_DATA_WITH_POWER_LAW == True:
# self.PsdParams = list(FitPowerLaw(*self.NumericPsdGetXY()))
# yPsd = PsdFuns.PowerLaw(x, *self.PsdParams)
# else: # general calse
# yPsd = self.PsdType(x, *self.PsdParams)
# yRoughness = Psd2Noise_1d(yPsd, N, Semiaxis = True)
# x = np.linspace(0, N*dx,N)
# # Special case
# if self.Options.FIT_NUMERIC_DATA_WITH_POWER_LAW == True:
# self.PsdParams = list(FitPowerLaw(*self.NumericPsdGetXY()))
# y = PowerLawNoise_1d(N, dx, *self.PsdParams)
# else: # general calse
# y = self.PsdType(N,dx, *self.PsdParams)
# return y
Generate = MakeProfile
#======================================================================
# FUN: NumericPsdSetXY
#======================================================================
def NumericPsdSetXY(self,x,y):
self._PsdNumericX = x
self._PsdNumericY = y
#======================================================================
# FUN: NumericPsdGetXY
#======================================================================
def NumericPsdGetXY(self):
try:
return self._PsdNumericX, self._PsdNumericY
except:
print('Error in RoughnessMaker.NumericPsdGetXY. Maybe the data file was not properly loaded')
#======================================================================
# FUN: NumericPsdLoadXY
#======================================================================
def NumericPsdLoadXY(self, FilePath, xScaling = 1, yScaling = 1 , xIsSpatialFreq = True):
''' @TODO: specify the supported file formats and types
Parameters
----------------------------
xIsSpatialFreq : bool
true If the first column (Read_x_values) contains spatial
frequencies. False if it contains lenghts. Default = True
xScaling, yScaling: floats
Read_x_values => Read_x_values * xScaling
Read_y_values => Read_y_values * yScaling
Properly setting the x and y scaling values can sometimes be confusing (although it is just a matter of high-school considerations). For this purpose the RoughnessMaker.ProfileScaling property can also be used: ProfileScaling is the scale factor applied to the output of MakeProfile() only.
remarks
--------
pippo
'''
try:
self._IsNumericPsdInFreq = xIsSpatialFreq
s = np.loadtxt(FilePath)
x = s[:,0]
y = s[:,1]
x = x * xScaling
y = y * yScaling
# inversion of x-axis if not spatial frequencies
if xIsSpatialFreq == False:
f = 1/x
else:
f = x
# array sorting
i = np.argsort(f)
f = f[i]
y = y[i]
# I set the Cutoff value of the class according to available data
self.PsdCutoffLowHigh = [np.amin(f), np.amax(f)]
# I set class operating variables
self.PsdType = PsdFuns.Interp
self.PsdParams = [f,y]
# Auto-set
# fill 0-value (DC Component)
# if self.Options.AUTO_FILL_NUMERIC_DATA_WITH_ZERO == True:
# if np.amin(x >0):
# x = np.insert(x,0,0)
# y = np.insert(y,0,0) # 0 in psd => 0-mean value in the noise pattern
# sync other class values
self.NumericPsdSetXY(f, y)
except:
pass
def Generate(self, N = None, dx = None, CutoffLowHigh = [None, None]):
'''
Parameters
N: # of output samples
dx: step of the x axis
Note: generates an evenly spaced array
'''
L = dx * N
df = 1/L
fPsd, yPsd = self.PsdEval(N//2 +1 , df = df,
CutoffLowHigh = CutoffLowHigh )
h = Psd2Noise_1d(yPsd, N, Semiaxis = True)
return h
#======================================================================
# FUN: NumericPsdCheck
#======================================================================
def NumericPsdCheck(self, N, L):
df = 1/L
# Stored data
ff,yy = self.NumericPsdGetXY()
# Evaluated data
fPsd, yPsd = self.PsdEval(N, df)
plt.plot(fPsd, np.log10(yPsd),'x')
plt.plot(ff, np.log10(yy),'.r')
plt.legend(['Evaluated data', 'Stored data'])
plt.suptitle('Usage of stored data (PSD)')
fMax = df*(N//2)
fMin = df
StrMsg = ''
_max = np.max(ff)
_min = np.min(ff)
print('fMax query = %0.1e m^-1' % fMax )
print('fMax data= %0.1e m^-1 = %0.2e um^-1' % (_max, (_max * 1e6) ))
print('fMin query= %0.1e m^-1' % fMin )
print('fMin data= %0.1e m^-1 = %0.2e um^-1' % (_min, (_min * 1e6) ))
return StrMsg
| 28.234347 | 310 | 0.563138 | [
"MIT"
] | WISE-Project/wiselib2 | wiselib2/Noise.py | 15,783 | Python |
from collections import defaultdict
def list_to_map(Xs, ys):
labels_map = defaultdict(list)
for x, y in list(zip(Xs, ys)):
labels_map[y].append(x)
return labels_map
| 23.25 | 35 | 0.682796 | [
"Apache-2.0"
] | kareemjano/image-toolbox | dataset_utils/general_utils.py | 186 | Python |
#!/Users/yaroten/Library/Mobile Documents/com~apple~CloudDocs/git/crawling_scraping/crawling_scraping/bin/python3
# $Id: rst2odt_prepstyles.py 5839 2009-01-07 19:09:28Z dkuhlman $
# Author: Dave Kuhlman <dkuhlman@rexx.com>
# Copyright: This module has been placed in the public domain.
"""
Fix a word-processor-generated styles.odt for odtwriter use: Drop page size
specifications from styles.xml in STYLE_FILE.odt.
"""
#
# Author: Michael Schutte <michi@uiae.at>
from lxml import etree
import sys
import zipfile
from tempfile import mkstemp
import shutil
import os
NAMESPACES = {
"style": "urn:oasis:names:tc:opendocument:xmlns:style:1.0",
"fo": "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0"
}
def prepstyle(filename):
zin = zipfile.ZipFile(filename)
styles = zin.read("styles.xml")
root = etree.fromstring(styles)
for el in root.xpath("//style:page-layout-properties",
namespaces=NAMESPACES):
for attr in el.attrib:
if attr.startswith("{%s}" % NAMESPACES["fo"]):
del el.attrib[attr]
tempname = mkstemp()
zout = zipfile.ZipFile(os.fdopen(tempname[0], "wb"), "w",
zipfile.ZIP_DEFLATED)
for item in zin.infolist():
if item.filename == "styles.xml":
zout.writestr(item, etree.tostring(root))
else:
zout.writestr(item, zin.read(item.filename))
zout.close()
zin.close()
shutil.move(tempname[1], filename)
def main():
args = sys.argv[1:]
if len(args) != 1:
print(__doc__, file=sys.stderr)
print("Usage: %s STYLE_FILE.odt\n" % sys.argv[0], file=sys.stderr)
sys.exit(1)
filename = args[0]
prepstyle(filename)
if __name__ == '__main__':
main()
# vim:tw=78:sw=4:sts=4:et:
| 26.367647 | 113 | 0.650307 | [
"MIT"
] | litteletips/crawling_scraping | crawling_scraping/bin/rst2odt_prepstyles.py | 1,793 | Python |
import pytest
from engine.constants import G
from pytest import param as p
from .orbit_derived_parameters import OrbitalPeriod
@pytest.mark.parametrize(
("primary_mass", "secondary_mass", "semimajor_axis", "expected"),
[p(10e10, 100, 10, 76.9102, id="arbitrary period")],
)
def test_orbital_period(primary_mass, secondary_mass, semimajor_axis, expected):
assert OrbitalPeriod(
primary_mass, secondary_mass, semimajor_axis, G
).evalf() == pytest.approx(expected, 1e-3)
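# --- Added note (not part of the original test module) ---
# The expected value follows from Kepler's third law,
# T = 2*pi*sqrt(a**3 / (G*(m1 + m2))),
# assuming the package uses the standard G ~ 6.674e-11 m^3 kg^-1 s^-2.
def _kepler_hand_check(primary_mass=10e10, secondary_mass=100, semimajor_axis=10):
    """Hand computation of the parametrized case above (illustrative only)."""
    import math
    assumed_G = 6.674e-11  # assumption about the value of engine.constants.G
    return 2 * math.pi * math.sqrt(
        semimajor_axis ** 3 / (assumed_G * (primary_mass + secondary_mass))
    )  # ~= 76.91, matching the expected value in the parametrization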
| 30.9375 | 80 | 0.747475 | [
"MIT"
] | RomainEndelin/keplerian_orbits | python/engine/functions/orbit_derived_parameters_test.py | 495 | Python |
# -*- coding: utf-8 -*-
__author__ = """Adam Geitgey"""
__email__ = 'ageitgey@gmail.com'
__version__ = '0.1.0'
from .api import load_image_file, face_locations, face_landmarks, face_encodings, compare_faces, face_distance
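# --- Usage sketch (added for illustration; not part of the package source) ---
# Typical flow with the functions re-exported above; "group.jpg" is a
# hypothetical image path.
def _example_usage(path="group.jpg"):
    image = load_image_file(path)             # RGB image as a numpy array
    boxes = face_locations(image)             # [(top, right, bottom, left), ...]
    encodings = face_encodings(image, boxes)  # one 128-d encoding per detected face
    if len(encodings) >= 2:
        match = compare_faces([encodings[0]], encodings[1])  # e.g. [True]
        dist = face_distance([encodings[0]], encodings[1])   # numpy array of distances
        return match, dist
    return boxes, encodings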
| 28 | 110 | 0.741071 | [
"MIT"
] | EmmanuelOwusu/Image_Processing | face_recognition/face_recognition/__init__.py | 224 | Python |
import json
import pytest
from great_expectations.core import ExpectationConfiguration, ExpectationSuite
from .test_expectation_suite import baseline_suite, exp1, exp2, exp3, exp4
@pytest.fixture
def empty_suite():
return ExpectationSuite(
expectation_suite_name="warning",
expectations=[],
meta={"notes": "This is an expectation suite."},
)
@pytest.fixture
def exp5():
return ExpectationConfiguration(
expectation_type="expect_column_values_to_not_be_null",
kwargs={"column": "a",},
meta={},
)
def test_append_expectation(empty_suite, exp1, exp2):
assert len(empty_suite.expectations) == 0
empty_suite.append_expectation(exp1)
assert len(empty_suite.expectations) == 1
# Adding the same expectation again *does* add duplicates.
empty_suite.append_expectation(exp1)
assert len(empty_suite.expectations) == 2
empty_suite.append_expectation(exp2)
assert len(empty_suite.expectations) == 3
# Turn this on once we're ready to enforce strict typing.
# with pytest.raises(TypeError):
# empty_suite.append_expectation("not an expectation")
# Turn this on once we're ready to enforce strict typing.
# with pytest.raises(TypeError):
# empty_suite.append_expectation(exp1.to_json_dict())
def test_find_expectation_indexes(baseline_suite, exp5):
# Passing no parameters "finds" all Expectations
assert baseline_suite.find_expectation_indexes() == [0, 1]
# Match on single columns
assert baseline_suite.find_expectation_indexes(column="a") == [0]
assert baseline_suite.find_expectation_indexes(column="b") == [1]
# Non-existent column returns no matches
assert baseline_suite.find_expectation_indexes(column="z") == []
# It can return multiple expectation_type matches
assert baseline_suite.find_expectation_indexes(
expectation_type="expect_column_values_to_be_in_set"
) == [0, 1]
# It can return multiple column matches
baseline_suite.append_expectation(exp5)
assert baseline_suite.find_expectation_indexes(column="a") == [0, 2]
# It can match a single expectation_type
assert baseline_suite.find_expectation_indexes(
expectation_type="expect_column_values_to_not_be_null"
) == [2]
# expectation_kwargs can match full kwargs
assert baseline_suite.find_expectation_indexes(
expectation_kwargs={
"column": "b",
"value_set": [-1, -2, -3],
"result_format": "BASIC",
}
) == [1]
# expectation_kwargs can match partial kwargs
assert baseline_suite.find_expectation_indexes(
expectation_kwargs={"column": "a"}
) == [0, 2]
# expectation_type and expectation_kwargs work in conjunction
assert baseline_suite.find_expectation_indexes(
expectation_type="expect_column_values_to_not_be_null",
expectation_kwargs={"column": "a"},
) == [2]
# column and expectation_kwargs work in conjunction
assert baseline_suite.find_expectation_indexes(
column="a", expectation_kwargs={"result_format": "BASIC"}
) == [0]
# column and expectation_type work in conjunction
assert baseline_suite.find_expectation_indexes(
column="a", expectation_type="expect_column_values_to_not_be_null",
) == [2]
assert (
baseline_suite.find_expectation_indexes(
column="a", expectation_type="expect_column_values_to_be_between",
)
== []
)
assert (
baseline_suite.find_expectation_indexes(
column="zzz", expectation_type="expect_column_values_to_be_between",
)
== []
)
with pytest.raises(ValueError):
assert (
baseline_suite.find_expectation_indexes(
column="a", expectation_kwargs={"column": "b"}
)
== []
)
def test_find_expectation_indexes_on_empty_suite(empty_suite):
assert (
empty_suite.find_expectation_indexes(
expectation_type="expect_column_values_to_not_be_null"
)
== []
)
assert empty_suite.find_expectation_indexes(column="x") == []
assert empty_suite.find_expectation_indexes(expectation_kwargs={}) == []
def test_find_expectations(baseline_suite, exp1, exp2):
# Note: most of the logic in this method is based on
# find_expectation_indexes and _copy_and_clean_up_expectations_from_indexes
# These tests do not thoroughly cover that logic.
# Instead, they focus on the behavior of the discard_* methods
assert (
baseline_suite.find_expectations(
column="a", expectation_type="expect_column_values_to_be_between",
)
== []
)
result = baseline_suite.find_expectations(
column="a", expectation_type="expect_column_values_to_be_in_set",
)
assert len(result) == 1
assert result[0] == ExpectationConfiguration(
expectation_type="expect_column_values_to_be_in_set",
kwargs={
"column": "a",
"value_set": [1, 2, 3],
# "result_format": "BASIC"
},
meta={"notes": "This is an expectation."},
)
exp_with_all_the_params = ExpectationConfiguration(
expectation_type="expect_column_values_to_not_be_null",
kwargs={
"column": "a",
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": True,
},
meta={},
)
baseline_suite.append_expectation(exp_with_all_the_params)
assert baseline_suite.find_expectations(
column="a", expectation_type="expect_column_values_to_not_be_null",
)[0] == ExpectationConfiguration(
expectation_type="expect_column_values_to_not_be_null",
kwargs={"column": "a",},
meta={},
)
assert (
baseline_suite.find_expectations(
column="a",
expectation_type="expect_column_values_to_not_be_null",
discard_result_format_kwargs=False,
discard_include_config_kwargs=False,
discard_catch_exceptions_kwargs=False,
)[0]
== exp_with_all_the_params
)
assert baseline_suite.find_expectations(
column="a",
expectation_type="expect_column_values_to_not_be_null",
discard_result_format_kwargs=False,
discard_catch_exceptions_kwargs=False,
)[0] == ExpectationConfiguration(
expectation_type="expect_column_values_to_not_be_null",
kwargs={"column": "a", "result_format": "BASIC", "catch_exceptions": True,},
meta={},
)
def test_remove_expectation(baseline_suite):
# ValueError: Multiple expectations matched arguments. No expectations removed.
with pytest.raises(ValueError):
baseline_suite.remove_expectation()
# ValueError: No matching expectation found.
with pytest.raises(ValueError):
baseline_suite.remove_expectation(column="does_not_exist")
# ValueError: Multiple expectations matched arguments. No expectations removed.
with pytest.raises(ValueError):
baseline_suite.remove_expectation(
expectation_type="expect_column_values_to_be_in_set"
)
assert len(baseline_suite.expectations) == 2
    assert baseline_suite.remove_expectation(column="a") is None
assert len(baseline_suite.expectations) == 1
baseline_suite.remove_expectation(
expectation_type="expect_column_values_to_be_in_set"
)
assert len(baseline_suite.expectations) == 0
# ValueError: No matching expectation found.
with pytest.raises(ValueError):
baseline_suite.remove_expectation(
expectation_type="expect_column_values_to_be_in_set"
)
| 32.224066 | 84 | 0.683492 | [
"Apache-2.0"
] | lfpll/great_expectations | tests/core/test_expectation_suite_crud_methods.py | 7,766 | Python |
from collections import defaultdict
from typing import DefaultDict
from .. import utils
from .. import data
'''
A collection of functions to index faculty data.
No function in this module reads data from the data files; they only apply
logic to data passed in. This helps keep the program modular, by separating
the data sources from the data indexing.
'''
'''
Maps faculty to the sections they teach.
This function works by taking several arguments:
- faculty, from [FacultyReader.get_faculty]
- section_teachers, from [SectionReader.get_section_faculty_ids]
These are kept as parameters instead of calling those functions here,
in order to keep the data and logic layers separate.
'''
def get_faculty_sections(faculty,section_teachers):
result = defaultdict(set)
missing_emails = set()
for key, value in section_teachers.items():
section_id = key
faculty_id = value
#Teaches a class but doesn't have basic faculty data
if faculty_id not in faculty:
missing_emails.add(faculty_id)
continue
result[faculty[faculty_id]].add(section_id)
if missing_emails:
utils.logger.warning(f"Missing emails for {missing_emails}")
return result
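# Illustrative sketch (comment only; the IDs and values below are made-up
# assumptions used purely for illustration). Given reader output such as
#
#   faculty          = {"F123": <User for F123>}   # from FacultyReader.get_faculty
#   section_teachers = {"MATH101-01": "F123"}      # from SectionReader.get_section_faculty_ids
#
# get_faculty_sections(faculty, section_teachers) returns
#
#   {<User for F123>: {"MATH101-01"}}
#
# and logs a warning listing any faculty_id that teaches a section but has no
# entry in `faculty`.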
'''
Returns complete [User] objects.
This function returns [User] objects with more properties than before.
See [User.addSchedule] for which properties are added.
This function works by taking several arguments:
- faculty_sections from [get_faculty_sections]
- section_periods from [student_reader.get_periods]
These are kept as parameters instead of calling those functions here,
in order to keep the data and logic layers separate.
'''
def get_faculty_with_schedule(faculty_sections, section_periods):
# The schedule for each teacher
schedules = {}
	# Section IDs which are taught but never meet.
	missing_periods = set()
	# Faculty missing a homeroom.
#
# This will be logged at the debug level.
missing_homerooms = set()
# Loop over teacher sections and get their periods.
for key, value in faculty_sections.items():
periods = []
for section_id in value:
if section_id in section_periods:
				# Accumulate periods from every section this teacher teaches
				periods.extend(section_periods[section_id])
elif section_id.startswith("UADV"):
key.homeroom = section_id
key.homeroom_location = "Unavailable"
else:
missing_periods.add(section_id)
		# Still couldn't find any homeroom
if key.homeroom is None:
missing_homerooms.add(key)
key.homeroom = "SENIOR_HOMEROOM"
key.homeroom_location = "Unavailable"
schedules[key] = periods
	# Log any data gaps found while building the schedules
	if missing_periods:
		utils.logger.debug(f"Missing periods for sections {missing_periods}")
	if missing_homerooms:
		utils.logger.debug(f"Missing homerooms for {missing_homerooms}")
# Compiles a list of periods into a full schedule
result = []
for key, value in schedules.items():
schedule = data.DayDefaultDict()
for period in value:
schedule[period.day][period.period-1] = period
schedule.populate(utils.constants.day_names)
key.schedule = schedule
result.append(key)
return result | 27.916667 | 76 | 0.733002 | [
"MIT"
] | BraydenKO/RamLife | firebase/firestore-py/lib/faculty/logic.py | 3,015 | Python |
import sys
import struct
from math import sqrt
def cross(a, b):
return [
a[1] * b[2] - a[2] * b[1],
a[2] * b[0] - a[0] * b[2],
a[0] * b[1] - a[1] * b[0]
]
def dot(a, b):
return a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
def normalized(a):
s = 1 / sqrt(dot(a, a))
return [ a[0] * s, a[1] * s, a[2] * s ]
def mul(m, a):
return [
dot(m[0], a),
dot(m[1], a),
dot(m[2], a)
]
def opp(a):
return [-a[0], -a[1], -a[2]]
def lookFrom(p):
z = p
x = normalized(cross([0,0,1], z))
y = normalized(cross(z, x))
invp = opp(mul([x, y, z], p))
return [
[x[0], x[1], x[2], invp[0]],
[y[0], y[1], y[2], invp[1]],
[z[0], z[1], z[2], invp[2]],
[0, 0, 0, 1],
]
def write_view_matrix(inputFilename, outputFilepath):
with open(outputFilepath, 'wb') as outFile:
for i, line in enumerate(open(inputFilename, 'r')):
coords = [float(x) for x in line.split()]
if len(coords) != 3:
print("Unable to parse line: %s " % line)
exit(1)
mat = lookFrom(coords)
print(mat)
column_major_data = tuple(mat[i][j] for j in range(4) for i in range(4))
outFile.write(struct.pack("f"*16, *column_major_data))
if __name__ == "__main__":
inputFilename = sys.argv[1] if len(sys.argv) > 1 else "octahedron.xyz"
outputFilepath = sys.argv[2] if len(sys.argv) > 2 else "octahedron_camera.bin"
write_view_matrix(inputFilename, outputFilepath)
| 26.87931 | 84 | 0.502245 | [
"MIT"
] | eliemichel/GrainViewer | share/scripts/augen_octahedron2camera.py | 1,559 | Python |
"""
* GTDynamics Copyright 2021, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* See LICENSE for the license information
*
* @file test_print.py
* @brief Test printing with DynamicsSymbol.
* @author Gerry Chen
"""
import unittest
from io import StringIO
from unittest.mock import patch
import gtdynamics as gtd
import gtsam
class TestPrint(unittest.TestCase):
"""Test printing of keys."""
def test_values(self):
"""Checks that printing Values uses the GTDKeyFormatter instead of gtsam's default"""
v = gtd.Values()
gtd.InsertJointAngle(v, 0, 1, 2)
self.assertTrue('q(0)1' in v.__repr__())
def test_nonlinear_factor_graph(self):
"""Checks that printing NonlinearFactorGraph uses the GTDKeyFormatter"""
fg = gtd.NonlinearFactorGraph()
fg.push_back(
gtd.MinTorqueFactor(
gtd.TorqueKey(0, 0).key(),
gtsam.noiseModel.Unit.Create(1)))
self.assertTrue('T(0)0' in fg.__repr__())
def test_key_formatter(self):
"""Tests print method with various key formatters"""
torqueKey = gtd.TorqueKey(0, 0).key()
factor = gtd.MinTorqueFactor(torqueKey,
gtsam.noiseModel.Unit.Create(1))
with patch('sys.stdout', new=StringIO()) as fake_out:
factor.print('factor: ', gtd.GTDKeyFormatter)
self.assertTrue('factor: min torque factor' in fake_out.getvalue())
self.assertTrue('keys = { T(0)0 }' in fake_out.getvalue())
def myKeyFormatter(key):
return 'this is my key formatter {}'.format(key)
with patch('sys.stdout', new=StringIO()) as fake_out:
factor.print('factor: ', myKeyFormatter)
self.assertTrue('factor: min torque factor' in fake_out.getvalue())
self.assertTrue('keys = {{ this is my key formatter {} }}'.format(
torqueKey) in fake_out.getvalue())
if __name__ == "__main__":
unittest.main()
| 34.694915 | 93 | 0.633122 | [
"BSD-2-Clause"
] | borglab/GTDynamics | python/tests/test_print.py | 2,047 | Python |
from flask_migrate import Migrate
from os import environ
from sys import exit
from config import config_dict
from app import create_app, db
get_config_mode = environ.get('GENTELELLA_CONFIG_MODE', 'Debug')
try:
config_mode = config_dict[get_config_mode.capitalize()]
except KeyError:
exit('Error: Invalid GENTELELLA_CONFIG_MODE environment variable entry.')
app = create_app(config_mode)
Migrate(app, db)
| 24.470588 | 77 | 0.798077 | [
"MIT"
] | Allenhorst/hh_test | gentelella.py | 416 | Python |
from typing import Optional, Union, Tuple, Mapping, List
from torch import Tensor
from torch_geometric.data.storage import recursive_apply
from torch_geometric.typing import Adj
from torch_sparse import SparseTensor
from tsl.ops.connectivity import convert_torch_connectivity
from tsl.typing import DataArray, SparseTensArray, ScipySparseMatrix
from . import utils
class DataParsingMixin:
def _parse_data(self, obj: DataArray) -> Tensor:
assert obj is not None
obj = utils.copy_to_tensor(obj)
obj = utils.to_steps_nodes_channels(obj)
obj = utils.cast_tensor(obj, self.precision)
return obj
def _parse_mask(self, mask: Optional[DataArray]) -> Optional[Tensor]:
if mask is None:
return None
mask = utils.copy_to_tensor(mask)
mask = utils.to_steps_nodes_channels(mask)
self._check_same_dim(mask.size(0), 'n_steps', 'mask')
self._check_same_dim(mask.size(1), 'n_nodes', 'mask')
if mask.size(-1) > 1:
self._check_same_dim(mask.size(-1), 'n_channels', 'mask')
mask = utils.cast_tensor(mask)
return mask
def _parse_exogenous(self, obj: DataArray, name: str,
node_level: bool) -> Tensor:
obj = utils.copy_to_tensor(obj)
if node_level:
obj = utils.to_steps_nodes_channels(obj)
self._check_same_dim(obj.shape[1], 'n_nodes', name)
else:
obj = utils.to_steps_channels(obj)
self._check_same_dim(obj.shape[0], 'n_steps', name)
obj = utils.cast_tensor(obj, self.precision)
return obj
def _parse_attribute(self, obj: DataArray, name: str,
node_level: bool) -> Tensor:
obj = utils.copy_to_tensor(obj)
if node_level:
obj = utils.to_nodes_channels(obj)
self._check_same_dim(obj.shape[0], 'n_nodes', name)
obj = utils.cast_tensor(obj, self.precision)
return obj
def _parse_adj(self, connectivity: Union[SparseTensArray, Tuple[DataArray]],
target_layout: Optional[str] = None
) -> Tuple[Optional[Adj], Optional[Tensor]]:
# format in [sparse, edge_index, None], where None means keep as input
if connectivity is None:
return None, None
# Convert to torch
# from np.ndarray, pd.DataFrame or torch.Tensor
if isinstance(connectivity, DataArray.__args__):
connectivity = utils.copy_to_tensor(connectivity)
elif isinstance(connectivity, (list, tuple)):
connectivity = recursive_apply(connectivity, utils.copy_to_tensor)
# from scipy sparse matrix
elif isinstance(connectivity, ScipySparseMatrix):
connectivity = SparseTensor.from_scipy(connectivity)
elif not isinstance(connectivity, SparseTensor):
raise TypeError("`connectivity` must be a dense matrix or in "
"COO format (i.e., an `edge_index`).")
if target_layout is not None:
connectivity = convert_torch_connectivity(connectivity,
target_layout,
num_nodes=self.n_nodes)
if isinstance(connectivity, (list, tuple)):
edge_index, edge_weight = connectivity
if edge_weight is not None:
edge_weight = utils.cast_tensor(edge_weight, self.precision)
else:
edge_index, edge_weight = connectivity, None
self._check_same_dim(edge_index.size(0), 'n_nodes', 'connectivity')
return edge_index, edge_weight
def _check_same_dim(self, dim: int, attr: str, name: str):
dim_data = getattr(self, attr)
if dim != dim_data:
raise ValueError("Cannot assign {0} with {1}={2}: data has {1}={3}"
.format(name, attr, dim, dim_data))
def _check_name(self, name: str):
if name.startswith('edge_'):
raise ValueError(f"Cannot set attribute with name '{name}' in this "
f"way, consider adding edge attributes as "
f"{self.name}.{name} = value.")
# name cannot be an attribute of self, nor a key in get
invalid_names = set(dir(self)).union(self.keys)
if name in invalid_names:
raise ValueError(f"Cannot set attribute with name '{name}', there "
f"is already an attribute named '{name}' in the "
"dataset.")
def _value_to_kwargs(self, value: Union[DataArray, List, Tuple, Mapping],
keys: Optional[Union[List, Tuple]] = None):
if isinstance(value, DataArray.__args__):
return dict(value=value)
if isinstance(value, (list, tuple)):
return dict(zip(keys, value))
elif isinstance(value, Mapping):
return value
else:
raise TypeError('Invalid type for value "{}"'.format(type(value)))
def _exog_value_to_kwargs(self,
value: Union[DataArray, List, Tuple, Mapping]):
keys = ['value', 'node_level', 'add_to_input_map', 'synch_mode',
'preprocess']
return self._value_to_kwargs(value, keys)
def _attr_value_to_kwargs(self,
value: Union[DataArray, List, Tuple, Mapping]):
keys = ['value', 'node_level', 'add_to_batch']
return self._value_to_kwargs(value, keys)
| 43.224806 | 80 | 0.603659 | [
"MIT"
] | TorchSpatiotemporal/tsl | tsl/data/mixin.py | 5,576 | Python |
"""
Write a function that takes in an array of integers and returns a sorted version of that array. Use the QuickSort algorithm to sort the array.
"""
def quick_sort(array):
if len(array) <= 1:
return array
_rec_helper(array, 0, len(array) - 1)
return array
def _rec_helper(array, start, end):
# base case
if start >= end:
return
pivot = start
left = pivot + 1
right = end
while left <= right:
if array[left] > array[pivot] and array[right] < array[pivot]:
_swap(array, left, right)
if array[pivot] >= array[left]:
left += 1
if array[pivot] <= array[right]:
right -= 1
_swap(array, pivot, right)
if right - start > end - right:
_rec_helper(array, start, right - 1)
_rec_helper(array, right + 1, end)
else:
_rec_helper(array, right + 1, end)
_rec_helper(array, start, right - 1)
def _swap(array, left, right):
array[left], array[right] = array[right], array[left]
#test
array = [3, 4, 7, 1, 1, 2, 5, 1, 3, 8, 4]
assert quick_sort(array) == sorted(array)
print('OK')
| 25.772727 | 142 | 0.589947 | [
"MIT"
] | Surbeivol/daily-coding-problems | solutions/quick_sort.py | 1,134 | Python |
# -*- coding: utf-8 -*-
"""Example for a list question type.
Run example by typing `python -m examples.list` in your console."""
from pprint import pprint
import questionary
from examples import custom_style_dope
from questionary import Separator, Choice, prompt
def ask_pystyle(**kwargs):
# create the question object
question = questionary.select(
'What do you want to do?',
qmark='😃',
choices=[
'Order a pizza',
'Make a reservation',
Separator(),
'Ask for opening hours',
Choice('Contact support', disabled='Unavailable at this time'),
'Talk to the receptionist'],
style=custom_style_dope,
**kwargs)
# prompt the user for an answer
return question.ask()
def ask_dictstyle(**kwargs):
questions = [
{
'type': 'select',
'name': 'theme',
'message': 'What do you want to do?',
'choices': [
'Order a pizza',
'Make a reservation',
Separator(),
'Ask for opening hours',
{
'name': 'Contact support',
'disabled': 'Unavailable at this time'
},
'Talk to the receptionist'
]
}
]
return prompt(questions, style=custom_style_dope, **kwargs)
if __name__ == '__main__':
pprint(ask_pystyle())
| 26.071429 | 75 | 0.534247 | [
"MIT"
] | fossabot/questionary | examples/select.py | 1,463 | Python |
# -*- coding: utf-8 -*-
from zappa_boilerplate.database import db_session
from flask_wtf import Form
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired, Email, EqualTo, Length
from .models import User
class RegisterForm(Form):
username = StringField('Username',
validators=[DataRequired(), Length(min=3, max=25)])
email = StringField('Email',
validators=[DataRequired(), Email(), Length(min=6, max=40)])
password = PasswordField('Password',
validators=[DataRequired(), Length(min=6, max=40)])
confirm = PasswordField('Verify password',
[DataRequired(), EqualTo('password', message='Passwords must match')])
def __init__(self, *args, **kwargs):
super(RegisterForm, self).__init__(*args, **kwargs)
self.user = None
def validate(self):
initial_validation = super(RegisterForm, self).validate()
if not initial_validation:
return False
user = db_session.query(User).filter_by(username=self.username.data).first()
if user:
self.username.errors.append("Username already registered")
return False
        user = db_session.query(User).filter_by(email=self.email.data).first()
if user:
self.email.errors.append("Email already registered")
return False
return True | 40.083333 | 98 | 0.630631 | [
"MIT"
] | 402900550b/dtnewman2 | zappa_boilerplate/user/forms.py | 1,443 | Python |
# An old version of OpenAI Gym's multi_discrete.py. (Was getting affected by Gym updates)
# (https://github.com/openai/gym/blob/1fb81d4e3fb780ccf77fec731287ba07da35eb84/gym/spaces/multi_discrete.py)
import numpy as np
import gym
class MultiDiscrete(gym.Space):
"""
- The multi-discrete action space consists of a series of discrete action spaces with different parameters
- It can be adapted to both a Discrete action space or a continuous (Box) action space
- It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space
- It is parametrized by passing an array of arrays containing [min, max] for each discrete action space
where the discrete action space can take any integers from `min` to `max` (both inclusive)
Note: A value of 0 always need to represent the NOOP action.
e.g. Nintendo Game Controller
- Can be conceptualized as 3 discrete action spaces:
1) Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4
2) Button A: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
3) Button B: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
- Can be initialized as
MultiDiscrete([ [0,4], [0,1], [0,1] ])
"""
def __init__(self, array_of_param_array):
self.low = np.array([x[0] for x in array_of_param_array])
self.high = np.array([x[1] for x in array_of_param_array])
self.num_discrete_space = self.low.shape[0]
def sample(self):
""" Returns a array with one sample from each discrete action space """
# For each row: round(random .* (max - min) + min, 0)
np_random = np.random.RandomState()
random_array = np_random.rand(self.num_discrete_space)
return [int(x) for x in np.floor(np.multiply((self.high - self.low + 1.), random_array) + self.low)]
def contains(self, x):
return len(x) == self.num_discrete_space and (np.array(x) >= self.low).all() and (np.array(x) <= self.high).all()
@property
def shape(self):
return self.num_discrete_space
def __repr__(self):
return "MultiDiscrete" + str(self.num_discrete_space)
def __eq__(self, other):
return np.array_equal(self.low, other.low) and np.array_equal(self.high, other.high) | 53.522727 | 122 | 0.675159 | [
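# Illustrative usage sketch (added for clarity; not part of the original gym
# file). The space below mirrors the Nintendo-controller example from the class
# docstring; the chosen ranges are assumptions used only for demonstration.
if __name__ == "__main__":
    controller_space = MultiDiscrete([[0, 4], [0, 1], [0, 1]])
    action = controller_space.sample()         # e.g. [3, 0, 1]
    assert controller_space.contains(action)   # sampled actions stay in range
    print("sampled action:", action, "num sub-spaces:", controller_space.shape)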
"MIT"
] | 51N84D/multiagent-particle-envs | multiagent/multi_discrete.py | 2,355 | Python |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class VmallPipeline(object):
def process_item(self, item, spider):
return item
| 23.833333 | 65 | 0.70979 | [
"MIT"
] | gikoluo/vmall | vmall/pipelines.py | 286 | Python |
"""
app.py - Flask-based server.
@author Thomas J. Daley, J.D.
@version: 0.0.1
Copyright (c) 2019 by Thomas J. Daley, J.D.
"""
import argparse
import random
from flask import Flask, render_template, request, flash, redirect, url_for, session, jsonify
from wtforms import Form, StringField, TextAreaField, PasswordField, validators
from functools import wraps
from views.decorators import is_admin_user, is_logged_in, is_case_set
from webservice import WebService
from util.database import Database
from views.admin.admin_routes import admin_routes
from views.cases.case_routes import case_routes
from views.discovery.discovery_routes import discovery_routes
from views.drivers.driver_routes import driver_routes
from views.info.info_routes import info_routes
from views.login.login import login
from views.objections.objection_routes import objection_routes
from views.real_property.real_property_routes import rp_routes
from views.responses.response_routes import response_routes
from views.vehicles.vehicle_routes import vehicle_routes
WEBSERVICE = None
DATABASE = Database()
DATABASE.connect()
app = Flask(__name__)
app.register_blueprint(admin_routes)
app.register_blueprint(case_routes)
app.register_blueprint(discovery_routes)
app.register_blueprint(driver_routes)
app.register_blueprint(info_routes)
app.register_blueprint(login)
app.register_blueprint(objection_routes)
app.register_blueprint(rp_routes)
app.register_blueprint(response_routes)
app.register_blueprint(vehicle_routes)
# Helper to create Public Data credentials from session variables
def pd_credentials(mysession) -> dict:
return {
"username": session["pd_username"],
"password": session["pd_password"]
}
@app.route('/', methods=['GET'])
def index():
return render_template('home.html')
@app.route('/attorney/find/<string:bar_number>', methods=['POST'])
@is_logged_in
def find_attorney(bar_number: str):
attorney = DATABASE.attorney(bar_number)
if attorney:
attorney['success'] = True
return jsonify(attorney)
return jsonify(
{
'success': False,
'message': "Unable to find attorney having Bar Number {}"
.format(bar_number)
}
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Webservice for DiscoveryBot")
parser.add_argument(
"--debug",
help="Run server in debug mode",
action='store_true'
)
parser.add_argument(
"--port",
help="TCP port to listen on",
type=int,
default=5001
)
parser.add_argument(
"--zillowid",
"-z",
help="Zillow API credential from https://www.zillow.com/howto/api/APIOverview.htm" # NOQA
)
args = parser.parse_args()
WEBSERVICE = WebService(args.zillowid)
app.secret_key = "SDFIIUWER*HGjdf8*"
app.run(debug=args.debug, port=args.port)
| 28.769231 | 98 | 0.734626 | [
"MIT"
] | tjdaley/publicdataws | app/app.py | 2,992 | Python |
from . import auth
from . import groups
from . import hub
from . import proxy
from . import services
from . import users
from .base import *
default_handlers = []
for mod in (auth, hub, proxy, users, groups, services):
default_handlers.extend(mod.default_handlers)
| 22.5 | 55 | 0.744444 | [
"MIT"
] | KarmaScripter/PiggyPy | Lib/site-packages/jupyterhub/apihandlers/__init__.py | 270 | Python |
"""
A NumPy sub-namespace that conforms to the Python array API standard.
This submodule accompanies NEP 47, which proposes its inclusion in NumPy. It
is still considered experimental, and will issue a warning when imported.
This is a proof-of-concept namespace that wraps the corresponding NumPy
functions to give a conforming implementation of the Python array API standard
(https://data-apis.github.io/array-api/latest/). The standard is currently in
an RFC phase and comments on it are both welcome and encouraged. Comments
should be made either at https://github.com/data-apis/array-api or at
https://github.com/data-apis/consortium-feedback/discussions.
NumPy already follows the proposed spec for the most part, so this module
serves mostly as a thin wrapper around it. However, NumPy also implements a
lot of behavior that is not included in the spec, so this serves as a
restricted subset of the API. Only those functions that are part of the spec
are included in this namespace, and all functions are given with the exact
signature given in the spec, including the use of position-only arguments, and
omitting any extra keyword arguments implemented by NumPy but not part of the
spec. The behavior of some functions is also modified from the NumPy behavior
to conform to the standard. Note that the underlying array object itself is
wrapped in a wrapper Array() class, but is otherwise unchanged. This submodule
is implemented in pure Python with no C extensions.
The array API spec is designed as a "minimal API subset" and explicitly allows
libraries to include behaviors not specified by it. But users of this module
that intend to write portable code should be aware that only those behaviors
that are listed in the spec are guaranteed to be implemented across libraries.
Consequently, the NumPy implementation was chosen to be both conforming and
minimal, so that users can use this implementation of the array API namespace
and be sure that behaviors that it defines will be available in conforming
namespaces from other libraries.
A few notes about the current state of this submodule:
- There is a test suite that tests modules against the array API standard at
https://github.com/data-apis/array-api-tests. The test suite is still a work
in progress, but the existing tests pass on this module, with a few
exceptions:
- DLPack support (see https://github.com/data-apis/array-api/pull/106) is
not included here, as it requires a full implementation in NumPy proper
first.
The test suite is not yet complete, and even the tests that exist are not
guaranteed to give a comprehensive coverage of the spec. Therefore, when
reviewing and using this submodule, you should refer to the standard
documents themselves. There are some tests in numpy.array_api.tests, but
they primarily focus on things that are not tested by the official array API
test suite.
- There is a custom array object, numpy.array_api.Array, which is returned by
all functions in this module. All functions in the array API namespace
implicitly assume that they will only receive this object as input. The only
way to create instances of this object is to use one of the array creation
functions. It does not have a public constructor on the object itself. The
object is a small wrapper class around numpy.ndarray. The main purpose of it
is to restrict the namespace of the array object to only those dtypes and
only those methods that are required by the spec, as well as to limit/change
certain behavior that differs in the spec. In particular:
- The array API namespace does not have scalar objects, only 0-D arrays.
Operations on Array that would create a scalar in NumPy create a 0-D
array.
- Indexing: Only a subset of indices supported by NumPy are required by the
spec. The Array object restricts indexing to only allow those types of
indices that are required by the spec. See the docstring of the
numpy.array_api.Array._validate_indices helper function for more
information.
- Type promotion: Some type promotion rules are different in the spec. In
particular, the spec does not have any value-based casting. The spec also
does not require cross-kind casting, like integer -> floating-point. Only
those promotions that are explicitly required by the array API
specification are allowed in this module. See NEP 47 for more info.
- Functions do not automatically call asarray() on their input, and will not
work if the input type is not Array. The exception is array creation
functions, and Python operators on the Array object, which accept Python
scalars of the same type as the array dtype.
- All functions include type annotations, corresponding to those given in the
spec (see _typing.py for definitions of some custom types). These do not
currently fully pass mypy due to some limitations in mypy.
- Dtype objects are just the NumPy dtype objects, e.g., float64 =
np.dtype('float64'). The spec does not require any behavior on these dtype
objects other than that they be accessible by name and be comparable by
equality, but it was considered too much extra complexity to create custom
objects to represent dtypes.
- All places where the implementations in this submodule are known to deviate
from their corresponding functions in NumPy are marked with "# Note:"
comments.
Still TODO in this module are:
- DLPack support for numpy.ndarray is still in progress. See
https://github.com/numpy/numpy/pull/19083.
- The copy=False keyword argument to asarray() is not yet implemented. This
requires support in numpy.asarray() first.
- Some functions are not yet fully tested in the array API test suite, and may
require updates that are not yet known until the tests are written.
- The spec is still in an RFC phase and may still have minor updates, which
will need to be reflected here.
- The linear algebra extension in the spec will be added in a future pull
request.
- Complex number support in array API spec is planned but not yet finalized,
as are the fft extension and certain linear algebra functions such as eig
that require complex dtypes.
"""
import warnings
warnings.warn(
"The numpy.array_api submodule is still experimental. See NEP 47.", stacklevel=2
)
__all__ = []
from ._constants import e, inf, nan, pi
__all__ += ["e", "inf", "nan", "pi"]
from ._creation_functions import (
asarray,
arange,
empty,
empty_like,
eye,
from_dlpack,
full,
full_like,
linspace,
meshgrid,
ones,
ones_like,
zeros,
zeros_like,
)
__all__ += [
"asarray",
"arange",
"empty",
"empty_like",
"eye",
"from_dlpack",
"full",
"full_like",
"linspace",
"meshgrid",
"ones",
"ones_like",
"zeros",
"zeros_like",
]
from ._data_type_functions import (
broadcast_arrays,
broadcast_to,
can_cast,
finfo,
iinfo,
result_type,
)
__all__ += [
"broadcast_arrays",
"broadcast_to",
"can_cast",
"finfo",
"iinfo",
"result_type",
]
from ._dtypes import (
int8,
int16,
int32,
int64,
uint8,
uint16,
uint32,
uint64,
float32,
float64,
bool,
)
__all__ += [
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float32",
"float64",
"bool",
]
from ._elementwise_functions import (
abs,
acos,
acosh,
add,
asin,
asinh,
atan,
atan2,
atanh,
bitwise_and,
bitwise_left_shift,
bitwise_invert,
bitwise_or,
bitwise_right_shift,
bitwise_xor,
ceil,
cos,
cosh,
divide,
equal,
exp,
expm1,
floor,
floor_divide,
greater,
greater_equal,
isfinite,
isinf,
isnan,
less,
less_equal,
log,
log1p,
log2,
log10,
logaddexp,
logical_and,
logical_not,
logical_or,
logical_xor,
multiply,
negative,
not_equal,
positive,
pow,
remainder,
round,
sign,
sin,
sinh,
square,
sqrt,
subtract,
tan,
tanh,
trunc,
)
__all__ += [
"abs",
"acos",
"acosh",
"add",
"asin",
"asinh",
"atan",
"atan2",
"atanh",
"bitwise_and",
"bitwise_left_shift",
"bitwise_invert",
"bitwise_or",
"bitwise_right_shift",
"bitwise_xor",
"ceil",
"cos",
"cosh",
"divide",
"equal",
"exp",
"expm1",
"floor",
"floor_divide",
"greater",
"greater_equal",
"isfinite",
"isinf",
"isnan",
"less",
"less_equal",
"log",
"log1p",
"log2",
"log10",
"logaddexp",
"logical_and",
"logical_not",
"logical_or",
"logical_xor",
"multiply",
"negative",
"not_equal",
"positive",
"pow",
"remainder",
"round",
"sign",
"sin",
"sinh",
"square",
"sqrt",
"subtract",
"tan",
"tanh",
"trunc",
]
# einsum is not yet implemented in the array API spec.
# from ._linear_algebra_functions import einsum
# __all__ += ['einsum']
from ._linear_algebra_functions import matmul, tensordot, transpose, vecdot
__all__ += ["matmul", "tensordot", "transpose", "vecdot"]
from ._manipulation_functions import (
concat,
expand_dims,
flip,
reshape,
roll,
squeeze,
stack,
)
__all__ += ["concat", "expand_dims", "flip", "reshape", "roll", "squeeze", "stack"]
from ._searching_functions import argmax, argmin, nonzero, where
__all__ += ["argmax", "argmin", "nonzero", "where"]
from ._set_functions import unique
__all__ += ["unique"]
from ._sorting_functions import argsort, sort
__all__ += ["argsort", "sort"]
from ._statistical_functions import max, mean, min, prod, std, sum, var
__all__ += ["max", "mean", "min", "prod", "std", "sum", "var"]
from ._utility_functions import all, any
__all__ += ["all", "any"]
| 26.889488 | 84 | 0.697775 | [
"BSD-3-Clause"
] | ArpitaChatterjee/numpy | numpy/array_api/__init__.py | 9,976 | Python |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import uuidutils
from oslo_utils import versionutils
from nova import availability_zones
from nova import context as nova_context
from nova.db import api as db
from nova import exception
from nova.notifications.objects import base as notification
from nova.notifications.objects import service as service_notification
from nova import objects
from nova.objects import base
from nova.objects import fields
LOG = logging.getLogger(__name__)
# NOTE(danms): This is the global service version counter
SERVICE_VERSION = 35
# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
# time we bump the version, we will put an entry here to record the change,
# along with any pertinent data. For things that we can programmatically
# detect that need a bump, we put something in _collect_things() below to
# assemble a dict of things we can check. For example, we pretty much always
# want to consider the compute RPC API version a thing that requires a service
# bump so that we can drive version pins from it. We could include other
# service RPC versions at some point, minimum object versions, etc.
#
# The TestServiceVersion test will fail if the calculated set of
# things differs from the value in the last item of the list below,
# indicating that a version bump is needed.
#
# Also note that there are other reasons we may want to bump this,
# which will not be caught by the test. An example of this would be
# triggering (or disabling) an online data migration once all services
# in the cluster are at the same level.
#
# If a version bump is required for something mechanical, just document
# that generic thing here (like compute RPC version bumps). No need to
# replicate the details from compute/rpcapi.py here. However, for more
# complex service interactions, extra detail should be provided
SERVICE_VERSION_HISTORY = (
# Version 0: Pre-history
{'compute_rpc': '4.0'},
# Version 1: Introduction of SERVICE_VERSION
{'compute_rpc': '4.4'},
# Version 2: Compute RPC version 4.5
{'compute_rpc': '4.5'},
# Version 3: Compute RPC version 4.6
{'compute_rpc': '4.6'},
# Version 4: Add PciDevice.parent_addr (data migration needed)
{'compute_rpc': '4.6'},
# Version 5: Compute RPC version 4.7
{'compute_rpc': '4.7'},
# Version 6: Compute RPC version 4.8
{'compute_rpc': '4.8'},
# Version 7: Compute RPC version 4.9
{'compute_rpc': '4.9'},
# Version 8: Compute RPC version 4.10
{'compute_rpc': '4.10'},
# Version 9: Compute RPC version 4.11
{'compute_rpc': '4.11'},
# Version 10: Compute node conversion to Inventories
{'compute_rpc': '4.11'},
# Version 11: Compute RPC version 4.12
{'compute_rpc': '4.12'},
# Version 12: The network APIs and compute manager support a NetworkRequest
# object where the network_id value is 'auto' or 'none'. BuildRequest
# objects are populated by nova-api during instance boot.
{'compute_rpc': '4.12'},
# Version 13: Compute RPC version 4.13
{'compute_rpc': '4.13'},
# Version 14: The compute manager supports setting device tags.
{'compute_rpc': '4.13'},
# Version 15: Indicate that nova-conductor will stop a boot if BuildRequest
# is deleted before RPC to nova-compute.
{'compute_rpc': '4.13'},
# Version 16: Indicate that nova-compute will refuse to start if it doesn't
# have a placement section configured.
{'compute_rpc': '4.13'},
# Version 17: Add 'reserve_volume' to the boot from volume flow and
# remove 'check_attach'. The service version bump is needed to fall back to
# the old check in the API as the old computes fail if the volume is moved
# to 'attaching' state by reserve.
{'compute_rpc': '4.13'},
# Version 18: Compute RPC version 4.14
{'compute_rpc': '4.14'},
# Version 19: Compute RPC version 4.15
{'compute_rpc': '4.15'},
# Version 20: Compute RPC version 4.16
{'compute_rpc': '4.16'},
# Version 21: Compute RPC version 4.17
{'compute_rpc': '4.17'},
# Version 22: A marker for the behaviour change of auto-healing code on the
# compute host regarding allocations against an instance
{'compute_rpc': '4.17'},
# Version 23: Compute hosts allow pre-creation of the migration object
# for cold migration.
{'compute_rpc': '4.18'},
# Version 24: Add support for Cinder v3 attach/detach API.
{'compute_rpc': '4.18'},
# Version 25: Compute hosts allow migration-based allocations
# for live migration.
{'compute_rpc': '4.18'},
# Version 26: Adds a 'host_list' parameter to build_and_run_instance()
{'compute_rpc': '4.19'},
# Version 27: Compute RPC version 4.20; adds multiattach argument to
# reserve_block_device_name().
{'compute_rpc': '4.20'},
# Version 28: Adds a 'host_list' parameter to prep_resize()
{'compute_rpc': '4.21'},
# Version 29: Compute RPC version 4.22
{'compute_rpc': '4.22'},
# Version 30: Compute RPC version 5.0
{'compute_rpc': '5.0'},
# Version 31: The compute manager checks if 'trusted_certs' are supported
{'compute_rpc': '5.0'},
# Version 32: Add 'file_backed_memory' support. The service version bump is
# needed to allow the destination of a live migration to reject the
# migration if 'file_backed_memory' is enabled and the source does not
# support 'file_backed_memory'
{'compute_rpc': '5.0'},
# Version 33: Add support for check on the server group with
# 'max_server_per_host' rules
{'compute_rpc': '5.0'},
# Version 34: Adds support to abort queued/preparing live migrations.
{'compute_rpc': '5.0'},
# Version 35: Indicates that nova-compute supports live migration with
# ports bound early on the destination host using VIFMigrateData.
{'compute_rpc': '5.0'},
)
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class Service(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Added compute_node nested object
# Version 1.2: String attributes updated to support unicode
# Version 1.3: ComputeNode version 1.5
# Version 1.4: Added use_slave to get_by_compute_host
# Version 1.5: ComputeNode version 1.6
# Version 1.6: ComputeNode version 1.7
# Version 1.7: ComputeNode version 1.8
# Version 1.8: ComputeNode version 1.9
# Version 1.9: ComputeNode version 1.10
# Version 1.10: Changes behaviour of loading compute_node
# Version 1.11: Added get_by_host_and_binary
# Version 1.12: ComputeNode version 1.11
# Version 1.13: Added last_seen_up
# Version 1.14: Added forced_down
# Version 1.15: ComputeNode version 1.12
# Version 1.16: Added version
# Version 1.17: ComputeNode version 1.13
# Version 1.18: ComputeNode version 1.14
# Version 1.19: Added get_minimum_version()
# Version 1.20: Added get_minimum_version_multi()
# Version 1.21: Added uuid
# Version 1.22: Added get_by_uuid()
VERSION = '1.22'
fields = {
'id': fields.IntegerField(read_only=True),
'uuid': fields.UUIDField(),
'host': fields.StringField(nullable=True),
'binary': fields.StringField(nullable=True),
'topic': fields.StringField(nullable=True),
'report_count': fields.IntegerField(),
'disabled': fields.BooleanField(),
'disabled_reason': fields.StringField(nullable=True),
'availability_zone': fields.StringField(nullable=True),
'compute_node': fields.ObjectField('ComputeNode'),
'last_seen_up': fields.DateTimeField(nullable=True),
'forced_down': fields.BooleanField(),
'version': fields.IntegerField(),
}
_MIN_VERSION_CACHE = {}
_SERVICE_VERSION_CACHING = False
def __init__(self, *args, **kwargs):
# NOTE(danms): We're going against the rules here and overriding
# init. The reason is that we want to *ensure* that we're always
# setting the current service version on our objects, overriding
# whatever else might be set in the database, or otherwise (which
# is the normal reason not to override init).
#
# We also need to do this here so that it's set on the client side
# all the time, such that create() and save() operations will
# include the current service version.
if 'version' in kwargs:
raise exception.ObjectActionError(
action='init',
reason='Version field is immutable')
super(Service, self).__init__(*args, **kwargs)
self.version = SERVICE_VERSION
def obj_make_compatible_from_manifest(self, primitive, target_version,
version_manifest):
super(Service, self).obj_make_compatible_from_manifest(
primitive, target_version, version_manifest)
_target_version = versionutils.convert_version_to_tuple(target_version)
if _target_version < (1, 21) and 'uuid' in primitive:
del primitive['uuid']
if _target_version < (1, 16) and 'version' in primitive:
del primitive['version']
if _target_version < (1, 14) and 'forced_down' in primitive:
del primitive['forced_down']
if _target_version < (1, 13) and 'last_seen_up' in primitive:
del primitive['last_seen_up']
if _target_version < (1, 10):
# service.compute_node was not lazy-loaded, we need to provide it
# when called
self._do_compute_node(self._context, primitive,
version_manifest)
def _do_compute_node(self, context, primitive, version_manifest):
try:
target_version = version_manifest['ComputeNode']
# NOTE(sbauza): Ironic deployments can have multiple
# nodes for the same service, but for keeping same behaviour,
# returning only the first elem of the list
compute = objects.ComputeNodeList.get_all_by_host(
context, primitive['host'])[0]
except Exception:
return
primitive['compute_node'] = compute.obj_to_primitive(
target_version=target_version,
version_manifest=version_manifest)
@staticmethod
def _from_db_object(context, service, db_service):
allow_missing = ('availability_zone',)
for key in service.fields:
if key in allow_missing and key not in db_service:
continue
if key == 'compute_node':
# NOTE(sbauza); We want to only lazy-load compute_node
continue
elif key == 'version':
# NOTE(danms): Special handling of the version field, since
# it is read_only and set in our init.
setattr(service, base.get_attrname(key), db_service[key])
elif key == 'uuid' and not db_service.get(key):
# Leave uuid off the object if undefined in the database
# so that it will be generated below.
continue
else:
service[key] = db_service[key]
service._context = context
service.obj_reset_changes()
# TODO(dpeschman): Drop this once all services have uuids in database
if 'uuid' not in service:
service.uuid = uuidutils.generate_uuid()
LOG.debug('Generated UUID %(uuid)s for service %(id)i',
dict(uuid=service.uuid, id=service.id))
service.save()
return service
def obj_load_attr(self, attrname):
if not self._context:
raise exception.OrphanedObjectError(method='obj_load_attr',
objtype=self.obj_name())
LOG.debug("Lazy-loading '%(attr)s' on %(name)s id %(id)s",
{'attr': attrname,
'name': self.obj_name(),
'id': self.id,
})
if attrname != 'compute_node':
raise exception.ObjectActionError(
action='obj_load_attr',
reason='attribute %s not lazy-loadable' % attrname)
if self.binary == 'nova-compute':
# Only n-cpu services have attached compute_node(s)
compute_nodes = objects.ComputeNodeList.get_all_by_host(
self._context, self.host)
else:
# NOTE(sbauza); Previous behaviour was raising a ServiceNotFound,
# we keep it for backwards compatibility
raise exception.ServiceNotFound(service_id=self.id)
# NOTE(sbauza): Ironic deployments can have multiple nodes
# for the same service, but for keeping same behaviour, returning only
# the first elem of the list
self.compute_node = compute_nodes[0]
@base.remotable_classmethod
def get_by_id(cls, context, service_id):
db_service = db.service_get(context, service_id)
return cls._from_db_object(context, cls(), db_service)
@base.remotable_classmethod
def get_by_uuid(cls, context, service_uuid):
db_service = db.service_get_by_uuid(context, service_uuid)
return cls._from_db_object(context, cls(), db_service)
@base.remotable_classmethod
def get_by_host_and_topic(cls, context, host, topic):
db_service = db.service_get_by_host_and_topic(context, host, topic)
return cls._from_db_object(context, cls(), db_service)
@base.remotable_classmethod
def get_by_host_and_binary(cls, context, host, binary):
try:
db_service = db.service_get_by_host_and_binary(context,
host, binary)
except exception.HostBinaryNotFound:
return
return cls._from_db_object(context, cls(), db_service)
@staticmethod
@db.select_db_reader_mode
def _db_service_get_by_compute_host(context, host, use_slave=False):
return db.service_get_by_compute_host(context, host)
@base.remotable_classmethod
def get_by_compute_host(cls, context, host, use_slave=False):
db_service = cls._db_service_get_by_compute_host(context, host,
use_slave=use_slave)
return cls._from_db_object(context, cls(), db_service)
# NOTE(ndipanov): This is deprecated and should be removed on the next
# major version bump
@base.remotable_classmethod
def get_by_args(cls, context, host, binary):
db_service = db.service_get_by_host_and_binary(context, host, binary)
return cls._from_db_object(context, cls(), db_service)
def _check_minimum_version(self):
"""Enforce that we are not older that the minimum version.
This is a loose check to avoid creating or updating our service
record if we would do so with a version that is older that the current
minimum of all services. This could happen if we were started with
older code by accident, either due to a rollback or an old and
un-updated node suddenly coming back onto the network.
There is technically a race here between the check and the update,
but since the minimum version should always roll forward and never
backwards, we don't need to worry about doing it atomically. Further,
the consequence for getting this wrong is minor, in that we'll just
fail to send messages that other services understand.
"""
if not self.obj_attr_is_set('version'):
return
if not self.obj_attr_is_set('binary'):
return
minver = self.get_minimum_version(self._context, self.binary)
if minver > self.version:
raise exception.ServiceTooOld(thisver=self.version,
minver=minver)
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
self._check_minimum_version()
updates = self.obj_get_changes()
if 'uuid' not in updates:
updates['uuid'] = uuidutils.generate_uuid()
self.uuid = updates['uuid']
db_service = db.service_create(self._context, updates)
self._from_db_object(self._context, self, db_service)
self._send_notification(fields.NotificationAction.CREATE)
@base.remotable
def save(self):
updates = self.obj_get_changes()
updates.pop('id', None)
self._check_minimum_version()
db_service = db.service_update(self._context, self.id, updates)
self._from_db_object(self._context, self, db_service)
self._send_status_update_notification(updates)
def _send_status_update_notification(self, updates):
# Note(gibi): We do not trigger notification on version as that field
# is always dirty, which would cause that nova sends notification on
# every other field change. See the comment in save() too.
if set(updates.keys()).intersection(
{'disabled', 'disabled_reason', 'forced_down'}):
self._send_notification(fields.NotificationAction.UPDATE)
def _send_notification(self, action):
payload = service_notification.ServiceStatusPayload(self)
service_notification.ServiceStatusNotification(
publisher=notification.NotificationPublisher.from_service_obj(
self),
event_type=notification.EventType(
object='service',
action=action),
priority=fields.NotificationPriority.INFO,
payload=payload).emit(self._context)
@base.remotable
def destroy(self):
db.service_destroy(self._context, self.id)
self._send_notification(fields.NotificationAction.DELETE)
@classmethod
def enable_min_version_cache(cls):
cls.clear_min_version_cache()
cls._SERVICE_VERSION_CACHING = True
@classmethod
def clear_min_version_cache(cls):
cls._MIN_VERSION_CACHE = {}
@staticmethod
@db.select_db_reader_mode
def _db_service_get_minimum_version(context, binaries, use_slave=False):
return db.service_get_minimum_version(context, binaries)
@base.remotable_classmethod
def get_minimum_version_multi(cls, context, binaries, use_slave=False):
if not all(binary.startswith('nova-') for binary in binaries):
LOG.warning('get_minimum_version called with likely-incorrect '
'binaries `%s\'', ','.join(binaries))
raise exception.ObjectActionError(action='get_minimum_version',
reason='Invalid binary prefix')
if (not cls._SERVICE_VERSION_CACHING or
any(binary not in cls._MIN_VERSION_CACHE
for binary in binaries)):
min_versions = cls._db_service_get_minimum_version(
context, binaries, use_slave=use_slave)
if min_versions:
min_versions = {binary: version or 0
for binary, version in
min_versions.items()}
cls._MIN_VERSION_CACHE.update(min_versions)
else:
min_versions = {binary: cls._MIN_VERSION_CACHE[binary]
for binary in binaries}
if min_versions:
version = min(min_versions.values())
else:
version = 0
# NOTE(danms): Since our return value is not controlled by object
# schema, be explicit here.
version = int(version)
return version
@base.remotable_classmethod
def get_minimum_version(cls, context, binary, use_slave=False):
return cls.get_minimum_version_multi(context, [binary],
use_slave=use_slave)
def get_minimum_version_all_cells(context, binaries, require_all=False):
"""Get the minimum service version, checking all cells.
This attempts to calculate the minimum service version for a set
of binaries across all the cells in the system. If require_all
is False, then any cells that fail to report a version will be
ignored (assuming they won't be candidates for scheduling and thus
excluding them from the minimum version calculation is reasonable).
If require_all is True, then a failing cell will cause this to raise
exception.CellTimeout, as would be appropriate for gating some
data migration until everything is new enough.
Note that services that do not report a positive version are excluded
from this, as it crosses all cells which will naturally not have all
services.
"""
if not all(binary.startswith('nova-') for binary in binaries):
LOG.warning('get_minimum_version_all_cells called with '
'likely-incorrect binaries `%s\'', ','.join(binaries))
raise exception.ObjectActionError(
action='get_minimum_version_all_cells',
reason='Invalid binary prefix')
# NOTE(danms): Instead of using Service.get_minimum_version_multi(), we
# replicate the call directly to the underlying DB method here because
# we want to defeat the caching and we need to filter non-present
# services differently from the single-cell method.
results = nova_context.scatter_gather_all_cells(
context,
Service._db_service_get_minimum_version,
binaries)
min_version = None
for cell_uuid, result in results.items():
if result is nova_context.did_not_respond_sentinel:
LOG.warning('Cell %s did not respond when getting minimum '
'service version', cell_uuid)
if require_all:
raise exception.CellTimeout()
elif result is nova_context.raised_exception_sentinel:
LOG.warning('Failed to get minimum service version for cell %s',
cell_uuid)
if require_all:
# NOTE(danms): Okay, this isn't necessarily a timeout, but
# it's functionally the same from the caller's perspective
# and we logged the fact that it was actually a failure
# for the forensic investigator during the scatter/gather
# routine.
raise exception.CellTimeout()
else:
# NOTE(danms): Don't consider a zero or None result as the minimum
# since we're crossing cells and will likely not have all the
# services being probed.
relevant_versions = [version for version in result.values()
if version]
if relevant_versions:
min_version_cell = min(relevant_versions)
min_version = (min(min_version, min_version_cell)
if min_version else min_version_cell)
# NOTE(danms): If we got no matches at all (such as at first startup)
# then report that as zero to be consistent with the other such
# methods.
return min_version or 0
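# Illustrative usage sketch (comment only; not part of the upstream module).
# The context variable and the version threshold below are assumptions used
# purely to illustrate the docstring above:
#
#   minver = get_minimum_version_all_cells(ctxt, ['nova-compute'])
#   if minver < SOME_REQUIRED_VERSION:
#       ...  # defer an online data migration until every cell is new enough
#
# With require_all=True, a cell that fails to report raises
# exception.CellTimeout instead of being silently ignored.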
@base.NovaObjectRegistry.register
class ServiceList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# Service <= version 1.2
# Version 1.1 Service version 1.3
# Version 1.2: Service version 1.4
# Version 1.3: Service version 1.5
# Version 1.4: Service version 1.6
# Version 1.5: Service version 1.7
# Version 1.6: Service version 1.8
# Version 1.7: Service version 1.9
# Version 1.8: Service version 1.10
# Version 1.9: Added get_by_binary() and Service version 1.11
# Version 1.10: Service version 1.12
# Version 1.11: Service version 1.13
# Version 1.12: Service version 1.14
# Version 1.13: Service version 1.15
# Version 1.14: Service version 1.16
# Version 1.15: Service version 1.17
# Version 1.16: Service version 1.18
# Version 1.17: Service version 1.19
# Version 1.18: Added include_disabled parameter to get_by_binary()
# Version 1.19: Added get_all_computes_by_hv_type()
VERSION = '1.19'
fields = {
'objects': fields.ListOfObjectsField('Service'),
}
@base.remotable_classmethod
def get_by_topic(cls, context, topic):
db_services = db.service_get_all_by_topic(context, topic)
return base.obj_make_list(context, cls(context), objects.Service,
db_services)
# NOTE(paul-carlton2): In v2.0 of the object the include_disabled flag
# will be removed so both enabled and disabled hosts are returned
@base.remotable_classmethod
def get_by_binary(cls, context, binary, include_disabled=False):
db_services = db.service_get_all_by_binary(
context, binary, include_disabled=include_disabled)
return base.obj_make_list(context, cls(context), objects.Service,
db_services)
@base.remotable_classmethod
def get_by_host(cls, context, host):
db_services = db.service_get_all_by_host(context, host)
return base.obj_make_list(context, cls(context), objects.Service,
db_services)
@base.remotable_classmethod
def get_all(cls, context, disabled=None, set_zones=False):
db_services = db.service_get_all(context, disabled=disabled)
if set_zones:
db_services = availability_zones.set_availability_zones(
context, db_services)
return base.obj_make_list(context, cls(context), objects.Service,
db_services)
@base.remotable_classmethod
def get_all_computes_by_hv_type(cls, context, hv_type):
db_services = db.service_get_all_computes_by_hv_type(
context, hv_type, include_disabled=False)
return base.obj_make_list(context, cls(context), objects.Service,
db_services)
| 43.90671 | 79 | 0.65654 | [
"Apache-2.0"
] | bopopescu/TestNova | nova/objects/service.py | 26,827 | Python |
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the album art fetchers."""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import os
import shutil
import responses
from mock import patch
from test import _common
from test._common import unittest
from beetsplug import fetchart
from beets.autotag import AlbumInfo, AlbumMatch
from beets import library
from beets import importer
from beets import config
from beets import logging
from beets import util
from beets.util.artresizer import ArtResizer, WEBPROXY
logger = logging.getLogger('beets.test_art')
class UseThePlugin(_common.TestCase):
def setUp(self):
super(UseThePlugin, self).setUp()
self.plugin = fetchart.FetchArtPlugin()
class FetchImageTest(UseThePlugin):
@responses.activate
def run(self, *args, **kwargs):
super(FetchImageTest, self).run(*args, **kwargs)
def mock_response(self, content_type):
responses.add(responses.GET, 'http://example.com',
content_type=content_type)
def test_invalid_type_returns_none(self):
self.mock_response('image/watercolour')
artpath = self.plugin._fetch_image('http://example.com')
self.assertEqual(artpath, None)
def test_jpeg_type_returns_path(self):
self.mock_response('image/jpeg')
artpath = self.plugin._fetch_image('http://example.com')
self.assertNotEqual(artpath, None)
class FSArtTest(UseThePlugin):
def setUp(self):
super(FSArtTest, self).setUp()
self.dpath = os.path.join(self.temp_dir, 'arttest')
os.mkdir(self.dpath)
self.source = fetchart.FileSystem(logger, self.plugin.config)
def test_finds_jpg_in_directory(self):
_common.touch(os.path.join(self.dpath, 'a.jpg'))
fn = self.source.get(self.dpath, ('art',), False)
self.assertEqual(fn, os.path.join(self.dpath, 'a.jpg'))
def test_appropriately_named_file_takes_precedence(self):
_common.touch(os.path.join(self.dpath, 'a.jpg'))
_common.touch(os.path.join(self.dpath, 'art.jpg'))
fn = self.source.get(self.dpath, ('art',), False)
self.assertEqual(fn, os.path.join(self.dpath, 'art.jpg'))
def test_non_image_file_not_identified(self):
_common.touch(os.path.join(self.dpath, 'a.txt'))
fn = self.source.get(self.dpath, ('art',), False)
self.assertEqual(fn, None)
def test_cautious_skips_fallback(self):
_common.touch(os.path.join(self.dpath, 'a.jpg'))
fn = self.source.get(self.dpath, ('art',), True)
self.assertEqual(fn, None)
def test_empty_dir(self):
fn = self.source.get(self.dpath, ('art',), True)
self.assertEqual(fn, None)
def test_precedence_amongst_correct_files(self):
_common.touch(os.path.join(self.dpath, 'back.jpg'))
_common.touch(os.path.join(self.dpath, 'front.jpg'))
_common.touch(os.path.join(self.dpath, 'front-cover.jpg'))
fn = self.source.get(self.dpath, ('cover', 'front', 'back'), False)
self.assertEqual(fn, os.path.join(self.dpath, 'front-cover.jpg'))
class CombinedTest(UseThePlugin):
ASIN = 'xxxx'
MBID = 'releaseid'
AMAZON_URL = 'http://images.amazon.com/images/P/{0}.01.LZZZZZZZ.jpg' \
.format(ASIN)
AAO_URL = 'http://www.albumart.org/index_detail.php?asin={0}' \
.format(ASIN)
CAA_URL = 'http://coverartarchive.org/release/{0}/front' \
.format(MBID)
def setUp(self):
super(CombinedTest, self).setUp()
self.dpath = os.path.join(self.temp_dir, 'arttest')
os.mkdir(self.dpath)
@responses.activate
def run(self, *args, **kwargs):
super(CombinedTest, self).run(*args, **kwargs)
def mock_response(self, url, content_type='image/jpeg'):
responses.add(responses.GET, url, content_type=content_type)
def test_main_interface_returns_amazon_art(self):
self.mock_response(self.AMAZON_URL)
album = _common.Bag(asin=self.ASIN)
artpath = self.plugin.art_for_album(album, None)
self.assertNotEqual(artpath, None)
def test_main_interface_returns_none_for_missing_asin_and_path(self):
album = _common.Bag()
artpath = self.plugin.art_for_album(album, None)
self.assertEqual(artpath, None)
def test_main_interface_gives_precedence_to_fs_art(self):
_common.touch(os.path.join(self.dpath, 'art.jpg'))
self.mock_response(self.AMAZON_URL)
album = _common.Bag(asin=self.ASIN)
artpath = self.plugin.art_for_album(album, [self.dpath])
self.assertEqual(artpath, os.path.join(self.dpath, 'art.jpg'))
def test_main_interface_falls_back_to_amazon(self):
self.mock_response(self.AMAZON_URL)
album = _common.Bag(asin=self.ASIN)
artpath = self.plugin.art_for_album(album, [self.dpath])
self.assertNotEqual(artpath, None)
self.assertFalse(artpath.startswith(self.dpath))
def test_main_interface_tries_amazon_before_aao(self):
self.mock_response(self.AMAZON_URL)
album = _common.Bag(asin=self.ASIN)
self.plugin.art_for_album(album, [self.dpath])
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, self.AMAZON_URL)
def test_main_interface_falls_back_to_aao(self):
self.mock_response(self.AMAZON_URL, content_type='text/html')
album = _common.Bag(asin=self.ASIN)
self.plugin.art_for_album(album, [self.dpath])
self.assertEqual(responses.calls[-1].request.url, self.AAO_URL)
def test_main_interface_uses_caa_when_mbid_available(self):
self.mock_response(self.CAA_URL)
album = _common.Bag(mb_albumid=self.MBID, asin=self.ASIN)
artpath = self.plugin.art_for_album(album, None)
self.assertNotEqual(artpath, None)
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, self.CAA_URL)
def test_local_only_does_not_access_network(self):
album = _common.Bag(mb_albumid=self.MBID, asin=self.ASIN)
artpath = self.plugin.art_for_album(album, [self.dpath],
local_only=True)
self.assertEqual(artpath, None)
self.assertEqual(len(responses.calls), 0)
def test_local_only_gets_fs_image(self):
_common.touch(os.path.join(self.dpath, 'art.jpg'))
album = _common.Bag(mb_albumid=self.MBID, asin=self.ASIN)
artpath = self.plugin.art_for_album(album, [self.dpath],
local_only=True)
self.assertEqual(artpath, os.path.join(self.dpath, 'art.jpg'))
self.assertEqual(len(responses.calls), 0)
class AAOTest(UseThePlugin):
ASIN = 'xxxx'
AAO_URL = 'http://www.albumart.org/index_detail.php?asin={0}'.format(ASIN)
def setUp(self):
super(AAOTest, self).setUp()
self.source = fetchart.AlbumArtOrg(logger, self.plugin.config)
@responses.activate
def run(self, *args, **kwargs):
super(AAOTest, self).run(*args, **kwargs)
def mock_response(self, url, body):
responses.add(responses.GET, url, body=body, content_type='text/html',
match_querystring=True)
def test_aao_scraper_finds_image(self):
body = b"""
<br />
<a href=\"TARGET_URL\" title=\"View larger image\"
class=\"thickbox\" style=\"color: #7E9DA2; text-decoration:none;\">
<img src=\"http://www.albumart.org/images/zoom-icon.jpg\"
alt=\"View larger image\" width=\"17\" height=\"15\" border=\"0\"/></a>
"""
self.mock_response(self.AAO_URL, body)
album = _common.Bag(asin=self.ASIN)
res = self.source.get(album)
self.assertEqual(list(res)[0], 'TARGET_URL')
def test_aao_scraper_returns_no_result_when_no_image_present(self):
self.mock_response(self.AAO_URL, b'blah blah')
album = _common.Bag(asin=self.ASIN)
res = self.source.get(album)
self.assertEqual(list(res), [])
class GoogleImageTest(UseThePlugin):
def setUp(self):
super(GoogleImageTest, self).setUp()
self.source = fetchart.GoogleImages(logger, self.plugin.config)
@responses.activate
def run(self, *args, **kwargs):
super(GoogleImageTest, self).run(*args, **kwargs)
def mock_response(self, url, json):
responses.add(responses.GET, url, body=json,
content_type='application/json')
def test_google_art_finds_image(self):
album = _common.Bag(albumartist="some artist", album="some album")
json = b'{"items": [{"link": "url_to_the_image"}]}'
self.mock_response(fetchart.GoogleImages.URL, json)
result_url = self.source.get(album)
self.assertEqual(list(result_url)[0], 'url_to_the_image')
def test_google_art_returns_no_result_when_error_received(self):
album = _common.Bag(albumartist="some artist", album="some album")
json = b'{"error": {"errors": [{"reason": "some reason"}]}}'
self.mock_response(fetchart.GoogleImages.URL, json)
result_url = self.source.get(album)
self.assertEqual(list(result_url), [])
def test_google_art_returns_no_result_with_malformed_response(self):
album = _common.Bag(albumartist="some artist", album="some album")
json = b"""bla blup"""
self.mock_response(fetchart.GoogleImages.URL, json)
result_url = self.source.get(album)
self.assertEqual(list(result_url), [])
@_common.slow_test()
class ArtImporterTest(UseThePlugin):
def setUp(self):
super(ArtImporterTest, self).setUp()
# Mock the album art fetcher to always return our test file.
self.art_file = os.path.join(self.temp_dir, 'tmpcover.jpg')
_common.touch(self.art_file)
self.old_afa = self.plugin.art_for_album
self.afa_response = self.art_file
def art_for_album(i, p, local_only=False):
return self.afa_response
self.plugin.art_for_album = art_for_album
# Test library.
self.libpath = os.path.join(self.temp_dir, 'tmplib.blb')
self.libdir = os.path.join(self.temp_dir, 'tmplib')
os.mkdir(self.libdir)
os.mkdir(os.path.join(self.libdir, 'album'))
itempath = os.path.join(self.libdir, 'album', 'test.mp3')
shutil.copyfile(os.path.join(_common.RSRC, 'full.mp3'), itempath)
self.lib = library.Library(self.libpath)
self.i = _common.item()
self.i.path = itempath
self.album = self.lib.add_album([self.i])
self.lib._connection().commit()
# The import configuration.
self.session = _common.import_session(self.lib)
# Import task for the coroutine.
self.task = importer.ImportTask(None, None, [self.i])
self.task.is_album = True
self.task.album = self.album
info = AlbumInfo(
album='some album',
album_id='albumid',
artist='some artist',
artist_id='artistid',
tracks=[],
)
self.task.set_choice(AlbumMatch(0, info, {}, set(), set()))
def tearDown(self):
self.lib._connection().close()
super(ArtImporterTest, self).tearDown()
self.plugin.art_for_album = self.old_afa
def _fetch_art(self, should_exist):
"""Execute the fetch_art coroutine for the task and return the
album's resulting artpath. ``should_exist`` specifies whether to
        assert that the art path was set (to the correct value) or that
the path was not set.
"""
# Execute the two relevant parts of the importer.
self.plugin.fetch_art(self.session, self.task)
self.plugin.assign_art(self.session, self.task)
artpath = self.lib.albums()[0].artpath
if should_exist:
self.assertEqual(
artpath,
os.path.join(os.path.dirname(self.i.path), 'cover.jpg')
)
self.assertExists(artpath)
else:
self.assertEqual(artpath, None)
return artpath
def test_fetch_art(self):
assert not self.lib.albums()[0].artpath
self._fetch_art(True)
def test_art_not_found(self):
self.afa_response = None
self._fetch_art(False)
def test_no_art_for_singleton(self):
self.task.is_album = False
self._fetch_art(False)
def test_leave_original_file_in_place(self):
self._fetch_art(True)
self.assertExists(self.art_file)
def test_delete_original_file(self):
config['import']['delete'] = True
self._fetch_art(True)
self.assertNotExists(self.art_file)
def test_move_original_file(self):
config['import']['move'] = True
self._fetch_art(True)
self.assertNotExists(self.art_file)
def test_do_not_delete_original_if_already_in_place(self):
artdest = os.path.join(os.path.dirname(self.i.path), 'cover.jpg')
shutil.copyfile(self.art_file, artdest)
self.afa_response = artdest
self._fetch_art(True)
def test_fetch_art_if_imported_file_deleted(self):
# See #1126. Test the following scenario:
# - Album art imported, `album.artpath` set.
# - Imported album art file subsequently deleted (by user or other
# program).
# `fetchart` should import album art again instead of printing the
# message "<album> has album art".
self._fetch_art(True)
util.remove(self.album.artpath)
self.plugin.batch_fetch_art(self.lib, self.lib.albums(), force=False)
self.assertExists(self.album.artpath)
class ArtForAlbumTest(UseThePlugin):
""" Tests that fetchart.art_for_album respects the size
configuration (e.g., minwidth, enforce_ratio)
"""
IMG_225x225 = os.path.join(_common.RSRC, 'abbey.jpg')
IMG_348x348 = os.path.join(_common.RSRC, 'abbey-different.jpg')
IMG_500x490 = os.path.join(_common.RSRC, 'abbey-similar.jpg')
def setUp(self):
super(ArtForAlbumTest, self).setUp()
self.old_fs_source_get = self.plugin.fs_source.get
self.old_fetch_img = self.plugin._fetch_image
self.old_source_urls = self.plugin._source_urls
def fs_source_get(*_):
return self.image_file
def source_urls(_):
return ['']
def fetch_img(_):
return self.image_file
self.plugin.fs_source.get = fs_source_get
self.plugin._source_urls = source_urls
self.plugin._fetch_image = fetch_img
def tearDown(self):
self.plugin.fs_source.get = self.old_fs_source_get
self.plugin._source_urls = self.old_source_urls
self.plugin._fetch_image = self.old_fetch_img
super(ArtForAlbumTest, self).tearDown()
def _assertImageIsValidArt(self, image_file, should_exist):
self.assertExists(image_file)
self.image_file = image_file
local_artpath = self.plugin.art_for_album(None, [''], True)
remote_artpath = self.plugin.art_for_album(None, [], False)
self.assertEqual(local_artpath, remote_artpath)
if should_exist:
self.assertEqual(local_artpath, self.image_file)
self.assertExists(local_artpath)
return local_artpath
else:
self.assertIsNone(local_artpath)
def _assertImageResized(self, image_file, should_resize):
self.image_file = image_file
with patch.object(ArtResizer.shared, 'resize') as mock_resize:
self.plugin.art_for_album(None, [''], True)
self.assertEqual(mock_resize.called, should_resize)
def _require_backend(self):
"""Skip the test if the art resizer doesn't have ImageMagick or
PIL (so comparisons and measurements are unavailable).
"""
if ArtResizer.shared.method[0] == WEBPROXY:
self.skipTest("ArtResizer has no local imaging backend available")
def test_respect_minwidth(self):
self._require_backend()
self.plugin.minwidth = 300
self._assertImageIsValidArt(self.IMG_225x225, False)
self._assertImageIsValidArt(self.IMG_348x348, True)
def test_respect_enforce_ratio_yes(self):
self._require_backend()
self.plugin.enforce_ratio = True
self._assertImageIsValidArt(self.IMG_500x490, False)
self._assertImageIsValidArt(self.IMG_225x225, True)
def test_respect_enforce_ratio_no(self):
self.plugin.enforce_ratio = False
self._assertImageIsValidArt(self.IMG_500x490, True)
def test_resize_if_necessary(self):
self._require_backend()
self.plugin.maxwidth = 300
self._assertImageResized(self.IMG_225x225, False)
self._assertImageResized(self.IMG_348x348, True)
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == b'__main__':
unittest.main(defaultTest='suite')
| 37.574153 | 79 | 0.66287 | [
"MIT"
] | parapente/beets | test/test_art.py | 17,735 | Python |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
import json
import logging
import math
import os
import tempfile
import unittest
from builtins import range
from typing import List
import sys
# patches unittest.TestCase to be python3 compatible
import future.tests.base # pylint: disable=unused-import
import hamcrest as hc
import avro
import avro.datafile
from avro.datafile import DataFileWriter
from avro.io import DatumWriter
from fastavro.schema import parse_schema
from fastavro import writer
# pylint: disable=wrong-import-order, wrong-import-position, ungrouped-imports
try:
from avro.schema import Parse # avro-python3 library for python3
except ImportError:
from avro.schema import parse as Parse # avro library for python2
# pylint: enable=wrong-import-order, wrong-import-position, ungrouped-imports
import apache_beam as beam
from apache_beam import Create
from apache_beam.io import avroio
from apache_beam.io import filebasedsource
from apache_beam.io import iobase
from apache_beam.io import source_test_utils
from apache_beam.io.avroio import _create_avro_sink # For testing
from apache_beam.io.avroio import _create_avro_source # For testing
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms.display import DisplayData
from apache_beam.transforms.display_test import DisplayDataItemMatcher
# Import snappy optionally; some tests will be skipped when import fails.
try:
import snappy # pylint: disable=import-error
except ImportError:
snappy = None # pylint: disable=invalid-name
logging.warning('python-snappy is not installed; some tests will be skipped.')
RECORDS = [{
'name': 'Thomas', 'favorite_number': 1, 'favorite_color': 'blue'
}, {
'name': 'Henry', 'favorite_number': 3, 'favorite_color': 'green'
}, {
'name': 'Toby', 'favorite_number': 7, 'favorite_color': 'brown'
}, {
'name': 'Gordon', 'favorite_number': 4, 'favorite_color': 'blue'
}, {
'name': 'Emily', 'favorite_number': -1, 'favorite_color': 'Red'
}, {
'name': 'Percy', 'favorite_number': 6, 'favorite_color': 'Green'
}]
class AvroBase(object):
_temp_files = [] # type: List[str]
def __init__(self, methodName='runTest'):
super(AvroBase, self).__init__(methodName)
self.RECORDS = RECORDS
self.SCHEMA_STRING = '''
{"namespace": "example.avro",
"type": "record",
"name": "User",
"fields": [
{"name": "name", "type": "string"},
{"name": "favorite_number", "type": ["int", "null"]},
{"name": "favorite_color", "type": ["string", "null"]}
]
}
'''
@classmethod
def setUpClass(cls):
# Method has been renamed in Python 3
if sys.version_info[0] < 3:
cls.assertCountEqual = cls.assertItemsEqual
def setUp(self):
    # Reduce the size of thread pools. Without this, test execution may fail in
    # environments with a limited amount of resources.
filebasedsource.MAX_NUM_THREADS_FOR_SIZE_ESTIMATION = 2
def tearDown(self):
for path in self._temp_files:
if os.path.exists(path):
os.remove(path)
self._temp_files = []
def _write_data(self, directory, prefix, codec, count, sync_interval):
raise NotImplementedError
def _write_pattern(self, num_files):
assert num_files > 0
temp_dir = tempfile.mkdtemp()
file_name = None
for _ in range(num_files):
file_name = self._write_data(directory=temp_dir, prefix='mytemp')
assert file_name
file_name_prefix = file_name[:file_name.rfind(os.path.sep)]
return file_name_prefix + os.path.sep + 'mytemp*'
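  # The returned value is a glob pattern over the temp directory created
  # above, e.g. something like '/tmp/tmpab12cd/mytemp*' (the directory name
  # varies per run); it matches every file written by this helper.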
def _run_avro_test(
self, pattern, desired_bundle_size, perform_splitting, expected_result):
source = _create_avro_source(pattern, use_fastavro=self.use_fastavro)
if perform_splitting:
assert desired_bundle_size
splits = [
split
for split in source.split(desired_bundle_size=desired_bundle_size)
]
if len(splits) < 2:
raise ValueError(
'Test is trivial. Please adjust it so that at least '
'two splits get generated')
sources_info = [(split.source, split.start_position, split.stop_position)
for split in splits]
source_test_utils.assert_sources_equal_reference_source(
(source, None, None), sources_info)
else:
read_records = source_test_utils.read_from_source(source, None, None)
self.assertCountEqual(expected_result, read_records)
def test_read_without_splitting(self):
file_name = self._write_data()
expected_result = self.RECORDS
self._run_avro_test(file_name, None, False, expected_result)
def test_read_with_splitting(self):
file_name = self._write_data()
expected_result = self.RECORDS
self._run_avro_test(file_name, 100, True, expected_result)
def test_source_display_data(self):
file_name = 'some_avro_source'
source = \
_create_avro_source(
file_name,
validate=False,
use_fastavro=self.use_fastavro
)
dd = DisplayData.create_from(source)
# No extra avro parameters for AvroSource.
expected_items = [
DisplayDataItemMatcher('compression', 'auto'),
DisplayDataItemMatcher('file_pattern', file_name)
]
hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_read_display_data(self):
file_name = 'some_avro_source'
read = \
avroio.ReadFromAvro(
file_name,
validate=False,
use_fastavro=self.use_fastavro)
dd = DisplayData.create_from(read)
# No extra avro parameters for AvroSource.
expected_items = [
DisplayDataItemMatcher('compression', 'auto'),
DisplayDataItemMatcher('file_pattern', file_name)
]
hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_sink_display_data(self):
file_name = 'some_avro_sink'
sink = _create_avro_sink(
file_name,
self.SCHEMA,
'null',
'.end',
0,
None,
'application/x-avro',
use_fastavro=self.use_fastavro)
dd = DisplayData.create_from(sink)
expected_items = [
DisplayDataItemMatcher('schema', str(self.SCHEMA)),
DisplayDataItemMatcher(
'file_pattern',
'some_avro_sink-%(shard_num)05d-of-%(num_shards)05d.end'),
DisplayDataItemMatcher('codec', 'null'),
DisplayDataItemMatcher('compression', 'uncompressed')
]
hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_write_display_data(self):
file_name = 'some_avro_sink'
write = avroio.WriteToAvro(
file_name, self.SCHEMA, use_fastavro=self.use_fastavro)
dd = DisplayData.create_from(write)
expected_items = [
DisplayDataItemMatcher('schema', str(self.SCHEMA)),
DisplayDataItemMatcher(
'file_pattern',
'some_avro_sink-%(shard_num)05d-of-%(num_shards)05d'),
DisplayDataItemMatcher('codec', 'deflate'),
DisplayDataItemMatcher('compression', 'uncompressed')
]
hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_read_reentrant_without_splitting(self):
file_name = self._write_data()
source = _create_avro_source(file_name, use_fastavro=self.use_fastavro)
source_test_utils.assert_reentrant_reads_succeed((source, None, None))
  def test_read_reentrant_with_splitting(self):
file_name = self._write_data()
source = _create_avro_source(file_name, use_fastavro=self.use_fastavro)
splits = [split for split in source.split(desired_bundle_size=100000)]
assert len(splits) == 1
source_test_utils.assert_reentrant_reads_succeed(
(splits[0].source, splits[0].start_position, splits[0].stop_position))
def test_read_without_splitting_multiple_blocks(self):
file_name = self._write_data(count=12000)
expected_result = self.RECORDS * 2000
self._run_avro_test(file_name, None, False, expected_result)
def test_read_with_splitting_multiple_blocks(self):
file_name = self._write_data(count=12000)
expected_result = self.RECORDS * 2000
self._run_avro_test(file_name, 10000, True, expected_result)
def test_split_points(self):
num_records = 12000
sync_interval = 16000
file_name = self._write_data(count=num_records, sync_interval=sync_interval)
source = _create_avro_source(file_name, use_fastavro=self.use_fastavro)
splits = [split for split in source.split(desired_bundle_size=float('inf'))]
assert len(splits) == 1
range_tracker = splits[0].source.get_range_tracker(
splits[0].start_position, splits[0].stop_position)
split_points_report = []
for _ in splits[0].source.read(range_tracker):
split_points_report.append(range_tracker.split_points())
    # There will be a total of num_blocks in the generated test file,
    # proportional to the number of records in the file divided by the
    # synchronization interval used by Avro during the write. Each block has
    # more than 10 records.
num_blocks = int(math.ceil(14.5 * num_records / sync_interval))
assert num_blocks > 1
# When reading records of the first block, range_tracker.split_points()
# should return (0, iobase.RangeTracker.SPLIT_POINTS_UNKNOWN)
self.assertEqual(
split_points_report[:10],
[(0, iobase.RangeTracker.SPLIT_POINTS_UNKNOWN)] * 10)
# When reading records of last block, range_tracker.split_points() should
# return (num_blocks - 1, 1)
self.assertEqual(split_points_report[-10:], [(num_blocks - 1, 1)] * 10)
def test_read_without_splitting_compressed_deflate(self):
file_name = self._write_data(codec='deflate')
expected_result = self.RECORDS
self._run_avro_test(file_name, None, False, expected_result)
def test_read_with_splitting_compressed_deflate(self):
file_name = self._write_data(codec='deflate')
expected_result = self.RECORDS
self._run_avro_test(file_name, 100, True, expected_result)
@unittest.skipIf(snappy is None, 'python-snappy not installed.')
def test_read_without_splitting_compressed_snappy(self):
file_name = self._write_data(codec='snappy')
expected_result = self.RECORDS
self._run_avro_test(file_name, None, False, expected_result)
@unittest.skipIf(snappy is None, 'python-snappy not installed.')
def test_read_with_splitting_compressed_snappy(self):
file_name = self._write_data(codec='snappy')
expected_result = self.RECORDS
self._run_avro_test(file_name, 100, True, expected_result)
def test_read_without_splitting_pattern(self):
pattern = self._write_pattern(3)
expected_result = self.RECORDS * 3
self._run_avro_test(pattern, None, False, expected_result)
def test_read_with_splitting_pattern(self):
pattern = self._write_pattern(3)
expected_result = self.RECORDS * 3
self._run_avro_test(pattern, 100, True, expected_result)
def test_dynamic_work_rebalancing_exhaustive(self):
def compare_split_points(file_name):
source = _create_avro_source(file_name, use_fastavro=self.use_fastavro)
splits = [
split for split in source.split(desired_bundle_size=float('inf'))
]
assert len(splits) == 1
source_test_utils.assert_split_at_fraction_exhaustive(splits[0].source)
    # Adjusting block size so that we can perform an exhaustive dynamic
# work rebalancing test that completes within an acceptable amount of time.
file_name = self._write_data(count=5, sync_interval=2)
compare_split_points(file_name)
def test_corrupted_file(self):
file_name = self._write_data()
with open(file_name, 'rb') as f:
data = f.read()
# Corrupt the last character of the file which is also the last character of
# the last sync_marker.
# https://avro.apache.org/docs/current/spec.html#Object+Container+Files
corrupted_data = bytearray(data)
corrupted_data[-1] = (corrupted_data[-1] + 1) % 256
with tempfile.NamedTemporaryFile(delete=False,
prefix=tempfile.template) as f:
f.write(corrupted_data)
corrupted_file_name = f.name
source = _create_avro_source(
corrupted_file_name, use_fastavro=self.use_fastavro)
with self.assertRaisesRegex(ValueError, r'expected sync marker'):
source_test_utils.read_from_source(source, None, None)
def test_read_from_avro(self):
path = self._write_data()
with TestPipeline() as p:
assert_that(
p | avroio.ReadFromAvro(path, use_fastavro=self.use_fastavro),
equal_to(self.RECORDS))
def test_read_all_from_avro_single_file(self):
path = self._write_data()
with TestPipeline() as p:
assert_that(
p \
| Create([path]) \
| avroio.ReadAllFromAvro(use_fastavro=self.use_fastavro),
equal_to(self.RECORDS))
def test_read_all_from_avro_many_single_files(self):
path1 = self._write_data()
path2 = self._write_data()
path3 = self._write_data()
with TestPipeline() as p:
assert_that(
p \
| Create([path1, path2, path3]) \
| avroio.ReadAllFromAvro(use_fastavro=self.use_fastavro),
equal_to(self.RECORDS * 3))
def test_read_all_from_avro_file_pattern(self):
file_pattern = self._write_pattern(5)
with TestPipeline() as p:
assert_that(
p \
| Create([file_pattern]) \
| avroio.ReadAllFromAvro(use_fastavro=self.use_fastavro),
equal_to(self.RECORDS * 5))
def test_read_all_from_avro_many_file_patterns(self):
file_pattern1 = self._write_pattern(5)
file_pattern2 = self._write_pattern(2)
file_pattern3 = self._write_pattern(3)
with TestPipeline() as p:
assert_that(
p \
| Create([file_pattern1, file_pattern2, file_pattern3]) \
| avroio.ReadAllFromAvro(use_fastavro=self.use_fastavro),
equal_to(self.RECORDS * 10))
def test_sink_transform(self):
with tempfile.NamedTemporaryFile() as dst:
path = dst.name
with TestPipeline() as p:
# pylint: disable=expression-not-assigned
p \
| beam.Create(self.RECORDS) \
| avroio.WriteToAvro(path, self.SCHEMA, use_fastavro=self.use_fastavro)
with TestPipeline() as p:
# json used for stable sortability
readback = \
p \
| avroio.ReadFromAvro(path + '*', use_fastavro=self.use_fastavro) \
| beam.Map(json.dumps)
assert_that(readback, equal_to([json.dumps(r) for r in self.RECORDS]))
@unittest.skipIf(snappy is None, 'python-snappy not installed.')
def test_sink_transform_snappy(self):
with tempfile.NamedTemporaryFile() as dst:
path = dst.name
with TestPipeline() as p:
# pylint: disable=expression-not-assigned
p \
| beam.Create(self.RECORDS) \
| avroio.WriteToAvro(
path,
self.SCHEMA,
codec='snappy',
use_fastavro=self.use_fastavro)
with TestPipeline() as p:
# json used for stable sortability
readback = \
p \
| avroio.ReadFromAvro(path + '*', use_fastavro=self.use_fastavro) \
| beam.Map(json.dumps)
assert_that(readback, equal_to([json.dumps(r) for r in self.RECORDS]))
@unittest.skipIf(
sys.version_info[0] == 3 and os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
'This test still needs to be fixed on Python 3. '
'TODO: BEAM-6522.')
class TestAvro(AvroBase, unittest.TestCase):
def __init__(self, methodName='runTest'):
super(TestAvro, self).__init__(methodName)
self.use_fastavro = False
self.SCHEMA = Parse(self.SCHEMA_STRING)
def _write_data(
self,
directory=None,
prefix=tempfile.template,
codec='null',
count=len(RECORDS),
sync_interval=avro.datafile.SYNC_INTERVAL):
old_sync_interval = avro.datafile.SYNC_INTERVAL
try:
avro.datafile.SYNC_INTERVAL = sync_interval
with tempfile.NamedTemporaryFile(delete=False,
dir=directory,
prefix=prefix) as f:
writer = DataFileWriter(f, DatumWriter(), self.SCHEMA, codec=codec)
len_records = len(self.RECORDS)
for i in range(count):
writer.append(self.RECORDS[i % len_records])
writer.close()
self._temp_files.append(f.name)
return f.name
finally:
avro.datafile.SYNC_INTERVAL = old_sync_interval
class TestFastAvro(AvroBase, unittest.TestCase):
def __init__(self, methodName='runTest'):
super(TestFastAvro, self).__init__(methodName)
self.use_fastavro = True
self.SCHEMA = parse_schema(json.loads(self.SCHEMA_STRING))
def _write_data(
self,
directory=None,
prefix=tempfile.template,
codec='null',
count=len(RECORDS),
**kwargs):
all_records = self.RECORDS * \
(count // len(self.RECORDS)) + self.RECORDS[:(count % len(self.RECORDS))]
with tempfile.NamedTemporaryFile(delete=False,
dir=directory,
prefix=prefix,
mode='w+b') as f:
writer(f, self.SCHEMA, all_records, codec=codec, **kwargs)
self._temp_files.append(f.name)
return f.name
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| 36.247544 | 80 | 0.692466 | [
"Apache-2.0"
] | AhnLab-OSS/beam | sdks/python/apache_beam/io/avroio_test.py | 18,450 | Python |
import os
from hisim import hisim_main
from hisim.simulationparameters import SimulationParameters
import shutil
import random
from hisim import log
from hisim.utils import PostProcessingOptions
import matplotlib.pyplot as plt
from hisim import utils
@utils.measure_execution_time
def test_basic_household():
# if os.path.isdir("../hisim/inputs/cache"):
# shutil.rmtree("../hisim/inputs/cache")
path = "../examples/basic_household.py"
func = "basic_household_explicit"
mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
hisim_main.main(path, func,mysimpar )
log.information(os.getcwd())
@utils.measure_execution_time
def test_basic_household_with_default_connections():
# if os.path.isdir("../hisim/inputs/cache"):
# shutil.rmtree("../hisim/inputs/cache")
path = "../examples/basic_household.py"
func = "basic_household_with_default_connections"
mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
hisim_main.main(path, func,mysimpar )
log.information(os.getcwd())
@utils.measure_execution_time
def test_basic_household_with_all_resultfiles():
# if os.path.isdir("../hisim/inputs/cache"):
# shutil.rmtree("../hisim/inputs/cache")
path = "../examples/basic_household.py"
func = "basic_household_explicit"
mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
for option in PostProcessingOptions:
mysimpar.post_processing_options.append(option)
hisim_main.main(path, func,mysimpar )
log.information(os.getcwd())
#
# def test_basic_household_with_all_resultfiles_full_year():
# if os.path.isdir("../hisim/inputs/cache"):
# shutil.rmtree("../hisim/inputs/cache")
# path = "../examples/basic_household.py"
# func = "basic_household_explicit"
# mysimpar = SimulationParameters.full_year(year=2019, seconds_per_timestep=60)
# for option in PostProcessingOptions:
# mysimpar.post_processing_options.append(option)
# log.information(option)
# hisim_main.main(path, func,mysimpar)
# log.information(os.getcwd())
# def test_basic_household_boiler():
# path = "../examples/basic_household_boiler.py"
# func = "basic_household_boiler_explicit"
# mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
# hisim_main.main(path, func, mysimpar)
# def test_basic_household_districtheating():
# path = "../examples/basic_household_Districtheating.py"
# func = "basic_household_Districtheating_explicit"
# mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
# hisim_main.main(path, func, mysimpar)
# def test_basic_household_oilheater():
# path = "../examples/basic_household_Oilheater.py"
# func = "basic_household_Oilheater_explicit"
# mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
# hisim_main.main(path, func, mysimpar)
@utils.measure_execution_time
def test_modular_household_configurations( ):
path = "../examples/modular_household.py"
func = "modular_household_explicit"
mysimpar = SimulationParameters.one_day_only( year = 2019, seconds_per_timestep = 60 )
# for pv_included in [ True, False ]:
# for smart_devices_included in [ True, False ]:
# for boiler_included in [ 'electricity', 'hydrogen', None ]:
# for heating_device_included in [ 'heat_pump', 'oil_heater', 'district_heating' ]:
predictive = True
pv_included = random.choice( [ True, False ] )
smart_devices_included = random.choice( [ True, False ] )
boiler_included = random.choice( [ 'electricity', 'hydrogen', None ] )
heating_device_included = random.choice( [ 'heat_pump', 'oil_heater', 'district_heating' ] )
mysimpar.reset_system_config( predictive = predictive,
pv_included = pv_included,
smart_devices_included = smart_devices_included,
boiler_included = boiler_included,
heating_device_included = heating_device_included )
hisim_main.main( path, func, mysimpar )
@utils.measure_execution_time
def test_first_example():
path = "../examples/examples.py"
func = "first_example"
mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
hisim_main.main(path, func, mysimpar)
@utils.measure_execution_time
def test_second_example():
path = "../examples/examples.py"
func = "second_example"
mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
hisim_main.main(path, func, mysimpar)
| 44.575472 | 99 | 0.720212 | [
"MIT"
] | MF-Zerai/HiSim | tests/test_examples.py | 4,725 | Python |
'''
RenameBot
This file is a part of mrvishal2k2 rename repo
Dont kang !!!
© Mrvishal2k2
'''
import pyrogram
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
log = logging.getLogger(__name__)
@Client.on_message(filters.document | filters.video | filters.audio | filters.voice | filters.video_note | filters.animation)
async def rename_filter(c,m):
media = m.document or m.video or m.audio or m.voice or m.video_note or m.animation
    ## photos aren't handled here because all photos are reserved for use as thumbnails
text = ""
button = []
try:
filename = media.file_name
text += f"FileName:\n{filename}\n"
except:
        # some files don't come with a file name
filename = None
text += "Select the desired Option"
button.append([InlineKeyboardButton("Rename as File", callback_data="rename_file")])
# Thanks to albert for mime_type suggestion
if media.mime_type.startswith("video/"):
        ## only video/* mime types get the video upload options
button.append([InlineKeyboardButton("Rename as Video",callback_data="rename_video")])
button.append([InlineKeyboardButton("Convert as File",callback_data="convert_file")])
button.append([InlineKeyboardButton("Convert as Video",callback_data="convert_video")])
button.append([InlineKeyboardButton("Cancel ❌",callback_data="cancel")])
markup = InlineKeyboardMarkup(button)
try:
await c.send_chat_action(m.chat.id, "typing")
await m.reply_text(text,quote=True,reply_markup=markup,parse_mode="markdown",disable_web_page_preview=True)
except Exception as e:
log.info(str(e))
| 38.977778 | 126 | 0.72805 | [
"MIT"
] | KoshikKumar17/TG-RenameBot | root/plugins/main_filter.py | 1,757 | Python |
import digi
import digi.on as on
@on.control
def h0(c):
for k, v in c.items():
v["status"] = v.get("intent",
v.get("status", "undef"))
if __name__ == '__main__':
digi.run()
| 15.785714 | 53 | 0.502262 | [
"Apache-2.0"
] | NetSys/dspace | mocks/colorlamp/driver/handler.py | 221 | Python |
"""
builtin_bracket.py
"""
from __future__ import print_function
from _devbuild.gen.id_kind_asdl import Id
from _devbuild.gen.runtime_asdl import value
from _devbuild.gen.syntax_asdl import (
word, word_e, word_t, word__String, bool_expr,
)
from _devbuild.gen.types_asdl import lex_mode_e
from asdl import runtime
from core import error
from core.pyerror import e_usage, p_die, log
from core import vm
from frontend import match
from osh import sh_expr_eval
from osh import bool_parse
from osh import word_parse
from osh import word_eval
_ = log
from typing import cast, TYPE_CHECKING
if TYPE_CHECKING:
from _devbuild.gen.runtime_asdl import cmd_value__Argv, value__Str
from _devbuild.gen.syntax_asdl import word__String, bool_expr_t
from _devbuild.gen.types_asdl import lex_mode_t
from core.ui import ErrorFormatter
from core import optview
from core import state
class _StringWordEmitter(word_parse.WordEmitter):
"""For test/[, we need a word parser that returns String.
The BoolParser calls word_.BoolId(w), and deals with Kind.BoolUnary,
Kind.BoolBinary, etc. This is instead of Compound/Token (as in the
  [[ case).
"""
def __init__(self, cmd_val):
# type: (cmd_value__Argv) -> None
self.cmd_val = cmd_val
self.i = 0
self.n = len(cmd_val.argv)
def ReadWord(self, unused_lex_mode):
# type: (lex_mode_t) -> word__String
"""Interface for bool_parse.py.
TODO: This should probably be word_t
"""
if self.i == self.n:
# Does it make sense to define Eof_Argv or something?
# TODO: Add a way to show this location. Show 1 char past the right-most
# spid of the last word? But we only have the left-most spid.
w = word.String(Id.Eof_Real, '', runtime.NO_SPID)
return w
#log('ARGV %s i %d', self.argv, self.i)
s = self.cmd_val.argv[self.i]
left_spid = self.cmd_val.arg_spids[self.i]
self.i += 1
# default is an operand word
id_ = match.BracketUnary(s)
if id_ == Id.Undefined_Tok:
id_ = match.BracketBinary(s)
if id_ == Id.Undefined_Tok:
id_ = match.BracketOther(s)
if id_ == Id.Undefined_Tok:
id_ = Id.Word_Compound
# NOTE: We only have the left spid now. It might be useful to add the
# right one.
w = word.String(id_, s, left_spid)
return w
def Read(self):
# type: () -> word__String
"""Interface used for special cases below."""
return self.ReadWord(lex_mode_e.ShCommand)
def Peek(self, offset):
# type: (int) -> str
"""For special cases."""
return self.cmd_val.argv[self.i + offset]
def Rewind(self, offset):
# type: (int) -> None
"""For special cases."""
self.i -= offset
class _WordEvaluator(word_eval.StringWordEvaluator):
def __init__(self):
# type: () -> None
word_eval.StringWordEvaluator.__init__(self)
def EvalWordToString(self, w, eval_flags=0):
# type: (word_t, int) -> value__Str
# do_fnmatch: for the [[ == ]] semantics which we don't have!
# I think I need another type of node
# Maybe it should be BuiltinEqual and BuiltinDEqual? Parse it into a
# different tree.
assert w.tag_() == word_e.String
string_word = cast(word__String, w)
return value.Str(string_word.s)
def _TwoArgs(w_parser):
# type: (_StringWordEmitter) -> bool_expr_t
"""Returns an expression tree to be evaluated."""
w0 = w_parser.Read()
w1 = w_parser.Read()
s0 = w0.s
if s0 == '!':
return bool_expr.LogicalNot(bool_expr.WordTest(w1))
unary_id = Id.Undefined_Tok
# Oil's preferred long flags
if w0.s.startswith('--'):
if s0 == '--dir':
unary_id = Id.BoolUnary_d
elif s0 == '--exists':
unary_id = Id.BoolUnary_e
elif s0 == '--file':
unary_id = Id.BoolUnary_f
elif s0 == '--symlink':
unary_id = Id.BoolUnary_L
if unary_id == Id.Undefined_Tok:
unary_id = match.BracketUnary(w0.s)
if unary_id == Id.Undefined_Tok:
p_die('Expected unary operator, got %r (2 args)', w0.s, word=w0)
return bool_expr.Unary(unary_id, w1)
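# With the long flags above, the following hold (illustrative):
#   test --dir /tmp       behaves like  test -d /tmp
#   test --exists path    behaves like  test -e path
#   test --file foo.txt   behaves like  test -f foo.txt
#   test --symlink link   behaves like  test -L link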
def _ThreeArgs(w_parser):
# type: (_StringWordEmitter) -> bool_expr_t
"""Returns an expression tree to be evaluated."""
w0 = w_parser.Read()
w1 = w_parser.Read()
w2 = w_parser.Read()
# NOTE: Order is important here.
binary_id = match.BracketBinary(w1.s)
if binary_id != Id.Undefined_Tok:
return bool_expr.Binary(binary_id, w0, w2)
if w1.s == '-a':
return bool_expr.LogicalAnd(bool_expr.WordTest(w0), bool_expr.WordTest(w2))
if w1.s == '-o':
return bool_expr.LogicalOr(bool_expr.WordTest(w0), bool_expr.WordTest(w2))
if w0.s == '!':
w_parser.Rewind(2)
child = _TwoArgs(w_parser)
return bool_expr.LogicalNot(child)
if w0.s == '(' and w2.s == ')':
return bool_expr.WordTest(w1)
p_die('Expected binary operator, got %r (3 args)', w1.s, word=w1)
class Test(vm._Builtin):
def __init__(self, need_right_bracket, exec_opts, mem, errfmt):
# type: (bool, optview.Exec, state.Mem, ErrorFormatter) -> None
self.need_right_bracket = need_right_bracket
self.exec_opts = exec_opts
self.mem = mem
self.errfmt = errfmt
def Run(self, cmd_val):
# type: (cmd_value__Argv) -> int
"""The test/[ builtin.
The only difference between test and [ is that [ needs a matching ].
"""
if self.need_right_bracket: # Preprocess right bracket
if self.exec_opts.simple_test_builtin():
e_usage("should be invoked as 'test' (simple_test_builtin)")
strs = cmd_val.argv
if not strs or strs[-1] != ']':
self.errfmt.Print_('missing closing ]', span_id=cmd_val.arg_spids[0])
return 2
# Remove the right bracket
cmd_val.argv.pop()
cmd_val.arg_spids.pop()
w_parser = _StringWordEmitter(cmd_val)
w_parser.Read() # dummy: advance past argv[0]
b_parser = bool_parse.BoolParser(w_parser)
# There is a fundamental ambiguity due to poor language design, in cases like:
# [ -z ]
# [ -z -a ]
# [ -z -a ] ]
#
# See posixtest() in bash's test.c:
# "This is an implementation of a Posix.2 proposal by David Korn."
# It dispatches on expressions of length 0, 1, 2, 3, 4, and N args. We do
# the same here.
#
# Another ambiguity:
# -a is both a unary prefix operator and an infix operator. How to fix this
# ambiguity?
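    # Worked examples for the cases above (illustrative sketch):
    #   [ -z ]       1 arg  -> true   ('-z' is a non-empty string)
    #   [ -z -a ]    2 args -> false  (unary -z applied to the string '-a')
    #   [ -z -a ] ]  3 args -> true   ('-a' acts as the binary AND of two
    #                                  word tests)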
bool_node = None # type: bool_expr_t
n = len(cmd_val.argv) - 1
if self.exec_opts.simple_test_builtin() and n > 3:
e_usage("should only have 3 arguments or fewer (simple_test_builtin)")
try:
if n == 0:
return 1 # [ ] is False
elif n == 1:
w = w_parser.Read()
bool_node = bool_expr.WordTest(w)
elif n == 2:
bool_node = _TwoArgs(w_parser)
elif n == 3:
bool_node = _ThreeArgs(w_parser)
if n == 4:
a0 = w_parser.Peek(0)
if a0 == '!':
w_parser.Read() # skip !
child = _ThreeArgs(w_parser)
bool_node = bool_expr.LogicalNot(child)
elif a0 == '(' and w_parser.Peek(3) == ')':
w_parser.Read() # skip ')'
bool_node = _TwoArgs(w_parser)
else:
pass # fallthrough
if bool_node is None:
bool_node = b_parser.ParseForBuiltin()
except error.Parse as e:
self.errfmt.PrettyPrintError(e, prefix='(test) ')
return 2
# We technically don't need mem because we don't support BASH_REMATCH here.
word_ev = _WordEvaluator()
bool_ev = sh_expr_eval.BoolEvaluator(self.mem, self.exec_opts, None,
self.errfmt)
# We want [ a -eq a ] to always be an error, unlike [[ a -eq a ]]. This is a
# weird case of [[ being less strict.
bool_ev.Init_AlwaysStrict()
bool_ev.word_ev = word_ev
bool_ev.CheckCircularDeps()
try:
b = bool_ev.EvalB(bool_node)
except error._ErrorWithLocation as e:
# We want to catch e_die() and e_strict(). Those are both FatalRuntime
# errors now, but it might not make sense later.
# NOTE: This doesn't seem to happen. We have location info for all
# errors that arise out of [.
#if not e.HasLocation():
# raise
self.errfmt.PrettyPrintError(e, prefix='(test) ')
return 2 # 1 means 'false', and this usage error is like a parse error.
status = 0 if b else 1
return status
| 29.850534 | 82 | 0.651884 | [
"Apache-2.0"
] | Schweinepriester/oil | osh/builtin_bracket.py | 8,388 | Python |
import glob
import shutil
import subprocess
import os
import sys
import argparse
# Read and save metadata from file
def exiftool_metadata(path):
metadata = {}
exifToolPath = 'exifTool.exe'
''' use Exif tool to get the metadata '''
process = subprocess.Popen(
[
exifToolPath,
path
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True
)
''' get the tags in dict '''
for tag in process.stdout:
tag = tag.strip()
key = tag[:tag.find(':')].strip()
value = tag[tag.find(':') + 1:].strip()
metadata[key] = value
return metadata
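# Illustrative entry (actual keys/values depend on the file and on exifTool's
# output): a line such as "MIME Type : image/jpeg" ends up stored as
# metadata['MIME Type'] == 'image/jpeg'.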
class File:
def __init__(self, path):
self.metadata = exiftool_metadata(path)
def _get_file_metadata(self, key, no=''):
if key in self.metadata:
return self.metadata[key]
else:
return no
def copyCore(self, source, dst_dir: str, copy_duplicate=False):
logs = []
# if value of metadata not exists - folder name
no_metadata = 'none'
date = File._get_file_metadata(self, 'Date/Time Original')
if date == '':
date = File._get_file_metadata(self, 'Create Date', no_metadata)
mime_type = File._get_file_metadata(self, 'MIME Type', no_metadata)
dst_dir += f'''/{mime_type[:mime_type.find('/')]}/{date[:4]}/{date[5:7]}'''
filename = File._get_file_metadata(self, 'File Name')
f_name = filename
dst = dst_dir + '/' + filename
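        # Example destination (sketch): for an 'image/jpeg' file with date
        # '2020:07:15 ...', dst becomes <dst_dir>/image/2020/07/<filename>.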
        # A file with the same name exists in dst. If source and dst have the same size, 'copy_duplicate' decides whether to copy anyway.
if os.path.isfile(dst):
i = 0
f_pth = File(dst)
if_same_size: bool = f_pth._get_file_metadata("File Size") == File._get_file_metadata(self, 'File Size')
if (not if_same_size) or copy_duplicate:
while os.path.isfile(dst):
filename = f'''{f_name[:f_name.find('.')]}_D{str(i)}.{File._get_file_metadata(self, 'File Type Extension')}'''
dst = f'''{dst_dir}/{filename}'''
i = i + 1
if if_same_size:
logs.append(f"Warning: file already exists but I must copy all files"
f" [copy_duplicate={copy_duplicate}], so I try do it ...")
else:
logs.append(f"Warning: file already exists but have other size, so I try copy it ...")
else:
logs.append(f"Warning: file already duplicate [copy_exists={copy_duplicate}]."
f"\nCopy aboard: {source} -> {dst}")
return logs
try:
if not os.path.isdir(dst_dir):
os.makedirs(dst_dir)
logs.append(f"New directory created: {dst_dir}")
shutil.copy(source, dst)
logs.append(f'''Copy done: {source} -> {dst}''')
except Exception as e:
logs.append(f'''Copy error [{e}]: {source} -> {dst}''')
return logs
def main():
# Arguments from console
parser = argparse.ArgumentParser()
parser.add_argument('-s', help="Obligatory: source directory path")
parser.add_argument('-d', help="Obligatory: destination folder path")
parser.add_argument('-e', help="Obligatory: copy duplicate files (T/True/F/False)")
args = parser.parse_args(sys.argv[1:])
# Setup variable
source_dir = args.s
dst_dir = args.d
df = {
"T": True,
"TRUE": True,
"F": False,
"FALSE": False
}
try:
copy_duplicate = df.get(args.e.upper(), False)
except AttributeError:
copy_duplicate = False
print(f"app.py: error: unrecognized arguments. Use -h or --help to see options")
exit(1)
    # Running count of printed log lines
l_lpm = 0
# source_dir = 'C:/Users'
# dst_dir = 'C:/Users'
# copy_duplicate = False
for f_inx, source in enumerate(glob.glob(source_dir + '/**/*.*', recursive=True)):
try:
f = File(source)
print("----------")
for log in f.copyCore(source, dst_dir, copy_duplicate):
l_lpm = l_lpm + 1
print(f'''{str(l_lpm)}.{f_inx + 1}) {log}''')
except Exception as e:
print(f'Copy error [{e}]: {source}')
if __name__ == '__main__':
main()
| 32.755556 | 130 | 0.556083 | [
"MIT"
] | skrzypak/Soaf | app.py | 4,422 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ReplicationStatus(Model):
"""This is the replication status of the gallery Image Version.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar aggregated_state: This is the aggregated replication status based on
all the regional replication status flags. Possible values include:
'Unknown', 'InProgress', 'Completed', 'Failed'
:vartype aggregated_state: str or
~azure.mgmt.compute.v2018_06_01.models.AggregatedReplicationState
:ivar summary: This is a summary of replication status for each region.
:vartype summary:
list[~azure.mgmt.compute.v2018_06_01.models.RegionalReplicationStatus]
"""
_validation = {
'aggregated_state': {'readonly': True},
'summary': {'readonly': True},
}
_attribute_map = {
'aggregated_state': {'key': 'aggregatedState', 'type': 'str'},
'summary': {'key': 'summary', 'type': '[RegionalReplicationStatus]'},
}
def __init__(self, **kwargs) -> None:
super(ReplicationStatus, self).__init__(**kwargs)
self.aggregated_state = None
self.summary = None
| 37.044444 | 78 | 0.637672 | [
"MIT"
] | JonathanGailliez/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/models/replication_status_py3.py | 1,667 | Python |
#!/usr/bin/env python
import SimpleHTTPServer
import SocketServer
import sys
import urllib
import logging
from optparse import OptionParser
class ResultsProvider(object):
'''Base class used to fetch data from server for forwarding'''
import requests
import socket
import time
def __init__(self, **kwargs):
'''Constructor with sensible requests defaults'''
self.session = self.requests.Session()
self.wait = kwargs.get('wait', 2.0)
self.session.verify = kwargs.get('verify', False)
self.session.timeout = kwargs.get('timeout', 5)
self.session.stream = kwargs.get('stream', False)
self.session.proxies = kwargs.get('proxies', {})
self.session.headers = kwargs.get('headers', {})
self.session.allow_redirects = kwargs.get('allow_redirects', True)
self.session.cookies = self.requests.utils.cookiejar_from_dict(kwargs.get('cookies', {}))
self.url = kwargs.get('url', None)
def doRequest(self, verb, url, **kwargs):
        '''Makes a web request with timeout support using the requests session'''
while 1:
try:
body = kwargs.pop('body') if kwargs.has_key('body') else None
rargs = {}
for a in ['data', 'json', 'params', 'headers']:
if kwargs.has_key(a):
rargs[a] = kwargs.pop(a)
req = self.requests.Request(verb, url, **rargs) # data, headers, params, json
prepped = req.prepare()
if body:
prepped.body = body
response = self.session.send(prepped, **kwargs) # other params here
break
except (self.socket.error, self.requests.exceptions.RequestException):
logging.exception('Retrying request in %.2f seconds...', self.wait)
self.time.sleep(self.wait)
continue
return response
def nextResult(self):
'''Redefine me to make the request and return the response.text'''
#return self.doRequest(url='http://site/whatever/' + str(calculated_value)).text
raise NotImplementedError
class ResultsProviderImpl(ResultsProvider):
'''Implementation for forwarding arbitrary requests to another server'''
def __init__(self, **kwargs):
super(ResultsProviderImpl, self).__init__(**kwargs)
self.hostname=kwargs.get('hostname')
self.protocol=kwargs.get('protocol', 'http')
self.port=kwargs.get('port')
def nextResult(self, verb, path, **kwargs):
r = self.doRequest(verb, '%s://%s:%s%s' %(self.protocol, self.hostname, self.port, path), **kwargs)
return r
class ThreadedTCPServer(SocketServer.ThreadingTCPServer):
'''Simple Threaded TCP server'''
pass
class ServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
'''Simple http server request handler'''
import datetime
counter=0
skip_headers = ['content-length', 'transfer-encoding', 'content-encoding', 'connection']
def print_debug(self, title, data):
sep = '=' * 40 + '\n'
dt = self.datetime.datetime.now()
dts = dt.strftime('%d/%m/%Y %H:%M:%S')
self.counter+=1
print sep + title + ' - ' + str(self.counter) + ' - ' + dts + '\n' + sep + data + '\n'
def send_response(self, code, message=None):
'''Redefine from original to get rid of extra headers'''
self.log_request(code)
if message is None:
if code in self.responses:
message = self.responses[code][0]
else:
message = ''
if self.request_version != 'HTTP/0.9':
self.wfile.write("%s %d %s\r\n" %
(self.protocol_version, code, message))
# print (self.protocol_version, code, message)
#self.send_header('Server', self.version_string())
#self.send_header('Date', self.date_time_string())
def do(self, verb, data=None):
args = {'headers' : self.headers.dict}
if data:
args['data'] = data
response = self.server.resultsProvider.nextResult(verb, self.path, **args)
if self.server.debug:
self.print_debug('HTTP Request Received', self.raw_requestline + str(self.headers) + '\r\n' + (data if data else ''))
self.send_response(response.status_code, response.reason)
for header in response.headers.iteritems():
if header[0].lower() not in self.skip_headers:
#self.print_debug('Header Sent', ' :'.join([header[0], header[1]]))
self.send_header(header[0], header[1])
self.send_header('Content-Length', int(len(response.content)))
self.send_header('Connection', 'close')
self.wfile.write('\r\n')
self.wfile.write(response.content)
if self.server.debug:
http_version = '.'.join([a for a in str(response.raw.version)])
version_line = 'HTTP/%s %s %s' %(http_version, response.status_code, response.reason)
headers = '\r\n'.join([ '%s : %s' %(a[0],a[1]) for a in response.headers.items()])
self.print_debug('HTTP Response Received', '\r\n'.join([version_line, headers, '\r\n' + response.content]))
#self.print_debug('Length of response', str(int(len(response.content))))
self.wfile.flush()
self.wfile.close()
def do_GET(self):
self.do('GET')
def do_HEAD(self):
self.do('HEAD')
def do_POST(self):
data = self.rfile.read(int(self.headers['Content-Length'])) if \
self.headers.has_key('Content-Length') else ''
self.do('POST', data=data)
def match_url(input):
return ((input.startswith('http://') or input.startswith('https://')) and \
input.endswith('/') and len(input.split('/')[2]) > 4 and len(input.split('/')) == 4)
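# Accepted/rejected examples (illustrative):
#   'http://example.com:8080/'  -> True   (scheme, host[:port], trailing slash)
#   'http://example.com'        -> False  (missing trailing slash)
#   'http://example.com/path/'  -> False  (extra path component)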
if __name__ == '__main__':
parser = OptionParser(usage='%prog -u [url] [options]')
parser.add_option('-d', '--debug', dest='debug', action='store_true', help='show debugging messages')
parser.add_option('-u', '--url', dest='remoteurl', type='string', help='remote base url')
parser.add_option('-p', '--port', dest='port', type='int', default=8000, help='local listen port')
parser.add_option('-a', '--address', dest='address', type='string', default='0.0.0.0', help='local listen address')
parser.add_option('-x', '--proxy', dest='proxy', type='string', help='optional proxy to use in format http://address:port/')
opts, args = parser.parse_args()
if opts.remoteurl == None:
print 'Please provide a remote url using the -u --url option'
sys.exit()
elif not match_url(opts.remoteurl):
print 'Please enter remote url in format protocol://host[:port]/'
sys.exit()
try:
[protocol, _, host_port, _] = opts.remoteurl.split('/')
protocol = protocol.rstrip(':')
hostparts = host_port.split(':')
hostname = hostparts[0]
rport = int(hostparts[1]) if len(hostparts) > 1 else {'http' : 80, 'https' : 443}[protocol]
except:
print 'Please enter remote url in format protocol://host[:port]/'
sys.exit()
if opts.proxy:
if not match_url(opts.proxy) and not opts.proxy.startswith('https'):
print 'Please enter proxy in format http://host:port/'
sys.exit()
if opts.debug:
print 'Using proxy ' + opts.proxy
proxies = {protocol : opts.proxy}
else:
proxies = {}
httpd = ThreadedTCPServer((opts.address, opts.port), ServerHandler)
httpd.debug = opts.debug or False
# add the custom resultsprovider implementation
httpd.resultsProvider = ResultsProviderImpl(hostname=hostname, protocol=protocol, port=rport, proxies=proxies)
print "Serving at: http://%s:%s/, forwarding requests to %s" % (opts.address, str(opts.port), opts.remoteurl)
httpd.serve_forever()
| 37.605634 | 129 | 0.607241 | [
"BSD-3-Clause"
] | stephenbradshaw/pentesting_stuff | helper_servers/http_forwarder.py | 8,010 | Python |
import spacy
from spacy.tokens import Doc, Span, Token
import urllib
import xml.etree.ElementTree as ET
import re
from SpacyHu.BaseSpacyHuComponent import BaseSpacyHuComponent
class HuLemmaMorph(BaseSpacyHuComponent):
def __init__(self,
nlp,
label='Morph',
url='http://hlt.bme.hu/chatbot/gate/process?run='):
necessary_modules = ['QT', 'HFSTLemm']
super().__init__(nlp, label, url, necessary_modules)
Token.set_extension('morph', default='')
Token.set_extension('lemma', default='')
def get_word_from_annotation(self, annotation):
for feature in annotation.getchildren():
if feature.find('Name').text == 'string':
return feature.find('Value').text
def get_token_by_idx(self, idx, doc):
for token in doc:
if token.idx == idx:
return token
def get_lemma_from_morph(self, morph):
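        # The anas string packs analyses such as '{lemma=alma, ...};{lemma=almát, ...}';
        # the regex below collects every value between 'lemma=' and the closing brace
        # into a set (the format shown here is illustrative, inferred from the regex).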
return set(re.findall(r'(?<=lemma=).*?(?=\})', morph))
def __call__(self, doc):
text = urllib.parse.quote_plus(doc.text)
result = urllib.request.urlopen(self.url + text).read()
annotationset = ET.fromstring(result).find('AnnotationSet')
for annotation in annotationset.getchildren():
if annotation.get('Type') != 'Token':
continue
word_index = int(annotation.get('StartNode'))
word = self.get_word_from_annotation(annotation)
for feature in annotation.getchildren():
if feature.find('Name').text == 'anas':
token = self.get_token_by_idx(word_index, doc)
anas = (feature.find('Value').text
if feature.find('Value').text is not None
else '')
token._.morph = set(anas.split(';'))
token._.lemma = self.get_lemma_from_morph(anas)
break
return doc
if __name__ == "__main__":
from Tokenizer import HuTokenizer
debug_text = 'Jó, hogy ez az alma piros, mert az olyan almákat szeretem.'
# debug_text = 'megszentségteleníthetetlenségeitekért meghalnak'
remote_url = 'http://hlt.bme.hu/chatbot/gate/process?run='
nlp = spacy.blank("en")
nlp.tokenizer = HuTokenizer(nlp.vocab, url=remote_url)
morph_analyzer = HuLemmaMorph(nlp, url=remote_url)
nlp.add_pipe(morph_analyzer, last=True)
doc = nlp(debug_text)
for token in doc:
print('Token is: ' + token.text)
print(token._.lemma)
print(token._.morph)
print()
| 36.305556 | 77 | 0.602142 | [
"MIT"
] | Prodinal/GateSpacyWrapping | SpacyHu/SpacyHu/LemmatizerMorphAnalyzer.py | 2,620 | Python |
def gen_mutants():
import tensorflow as tf
import pandas
import numpy as np
DATAFILE_TRAIN = 'mock_kaggle_edit_train.csv'
DATAFILE_VALIDATE = 'mock_kaggle_edit_validate.csv'
TRAINED_MODEL_PATH = 'savedModel'
TIME_STEPS = 10
NUMBER_OF_DAYS_TO_FORECAST = 1
BATCH_SIZE = 100
NUM_EPOCHS = 100
LSTM_UNITS = 250
TENSORBOARD_LOGDIR = 'tensorboard_log'
data_train = pandas.read_csv(DATAFILE_TRAIN)
data_validate = pandas.read_csv(DATAFILE_VALIDATE)
data_train.head()
numTrainingData = len(data_train)
numValidationData = len(data_validate)
trainingData_date = data_train['date'][0:numTrainingData]
trainingData_sales = data_train['sales'][0:numTrainingData]
trainindData_price = data_train['price'][0:numTrainingData]
validationData_date = data_validate['date'][0:numValidationData]
validationData_sales = data_validate['sales'][0:numValidationData]
validationData_price = data_validate['price'][0:numValidationData]
trainingData_sales.head()
print(len(trainingData_sales))
print(len(validationData_sales))
trainingData_sales_min = min(trainingData_sales)
trainingData_sales_max = max(trainingData_sales)
trainingData_sales_range = trainingData_sales_max - trainingData_sales_min
trainingData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in trainingData_sales]
validationData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in validationData_sales]
print('Min:', trainingData_sales_min)
print('Range:', trainingData_sales_max - trainingData_sales_min)
trainingDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
targetDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
start = 0
for i in range(TIME_STEPS, (len(trainingData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):
trainingDataSequence_sales[start,:,0] = trainingData_sales_normalised[start:i]
targetDataSequence_sales[start] = trainingData_sales_normalised[i:]
start = start + 1
[trainingDataSequence_sales[i,:,0] for i in range(3)]
[targetDataSequence_sales[i] for i in range(3)]
a = np.arange(len(targetDataSequence_sales))
np.random.shuffle(a)
trainingDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
targetDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
loc = 0
for i in a:
trainingDataSequence_sales_shuffle[loc] = trainingDataSequence_sales[i]
targetDataSequence_sales_shuffle[loc] = targetDataSequence_sales[i]
loc += 1
trainingDataSequence_sales = trainingDataSequence_sales_shuffle
targetDataSequence_sales = targetDataSequence_sales_shuffle
validationDataSequence_sales = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
validationDataSequence_sales_target = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
start = 0
for i in range(TIME_STEPS, (len(validationData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):
validationDataSequence_sales[start,:,0] = validationData_sales_normalised[start:i]
validationDataSequence_sales_target[start] = validationData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]
start += 1
tf.reset_default_graph()
inputSequencePlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, TIME_STEPS, 1), name='inputSequencePlaceholder')
targetPlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, NUMBER_OF_DAYS_TO_FORECAST), name='targetPlaceholder')
cell = tf.nn.rnn_cell.LSTMCell(num_units=LSTM_UNITS, name='LSTM_cell')
(output, state) = tf.nn.dynamic_rnn(cell=cell, inputs=inputSequencePlaceholder, dtype=tf.float32)
lastCellOutput = output[:,-1,:]
print('output:', output)
print('state:', state)
print('lastCellOutput:', lastCellOutput)
weights = tf.Variable(initial_value=tf.truncated_normal(shape=(LSTM_UNITS, NUMBER_OF_DAYS_TO_FORECAST)))
bias = tf.Variable(initial_value=tf.ones(shape=NUMBER_OF_DAYS_TO_FORECAST))
forecast = tf.add(x=tf.matmul(a=lastCellOutput, b=weights), y=bias, name='forecast_normalised_scale')
forecast_originalScale = tf.add(x=forecast * trainingData_sales_range, y=trainingData_sales_min, name='forecast_original_scale')
print(forecast)
print(forecast_originalScale)
loss = tf.reduce_mean(tf.squared_difference(x=forecast, y=targetPlaceholder), name='loss_comp')
tf.summary.scalar(tensor=loss, name='loss')
optimizer = tf.train.AdamOptimizer(learning_rate=0.1)
minimize_step = optimizer.minimize(loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
tensorboard_writer = tf.summary.FileWriter(TENSORBOARD_LOGDIR, sess.graph)
all_summary_ops = tf.summary.merge_all()
numSteps = 0
for e in range(NUM_EPOCHS):
print('starting training for epoch:', e + 1)
startLocation = 0
iteration = 0
for iteration in range(int(len(targetDataSequence_sales) / BATCH_SIZE)):
print('epoch:', e + 1, ' iteration:', iteration + 1)
trainingBatchInput = trainingDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]
trainingBatchTarget = targetDataSequence_sales[startLocation:startLocation + BATCH_SIZE]
(_, lsBatch, forecastBatch, forecastBatch_originalScale, summary_values) = sess.run([minimize_step, loss, forecast, forecast_originalScale, all_summary_ops], feed_dict={inputSequencePlaceholder: trainingBatchInput, \
targetPlaceholder: trainingBatchTarget})
tensorboard_writer.add_summary(summary_values, numSteps)
numSteps += 1
if (iteration + 1) % 1 == 0:
print('got a loss of:', lsBatch)
print('the forecast of first 5 normalised are:', forecastBatch[0:5])
print('while the actuals were normalised :', trainingBatchTarget[0:5])
print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
startLocation += BATCH_SIZE
if len(targetDataSequence_sales) > startLocation:
print('epoch:', e + 1, ' iteration:', iteration + 1)
trainingBatchInput = trainingDataSequence_sales[startLocation:len(targetDataSequence_sales),:,:]
trainingBatchTarget = targetDataSequence_sales[startLocation:len(targetDataSequence_sales)]
(_, lsBatch, forecastBatch, forecastBatch_originalScale) = sess.run([minimize_step, loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: trainingBatchInput, \
targetPlaceholder: trainingBatchTarget})
print('got a loss of:', lsBatch)
print('the forecast of first 5 normalised are:', forecastBatch[0:5])
print('while the actuals were normalised :', trainingBatchTarget[0:5])
print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
totalValidationLoss = 0
startLocation = 0
print('starting validation')
for iter in range(len(validationDataSequence_sales) // BATCH_SIZE):
validationBatchInput = validationDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]
validationBatchTarget = validationDataSequence_sales_target[startLocation:startLocation + BATCH_SIZE]
(validationLsBatch, validationForecastBatch, validationForecastBatch_originalScale) = sess.run([loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: validationBatchInput, \
targetPlaceholder: validationBatchTarget})
startLocation += BATCH_SIZE
totalValidationLoss += validationLsBatch
print('first five predictions:', validationForecastBatch[0:5])
print('first five actuals :', validationBatchTarget[0:5])
print('the forecast of first 5 orignal scale are:', validationForecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (validationBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
if startLocation < len(validationDataSequence_sales):
validationBatchInput = validationDataSequence_sales[startLocation:len(validationDataSequence_sales)]
validationBatchTarget = validationDataSequence_sales_target[startLocation:len(validationDataSequence_sales)]
(validationLsBatch, validationForecastBatch) = sess.run([loss, forecast], feed_dict={inputSequencePlaceholder: validationBatchInput, \
targetPlaceholder: validationBatchTarget})
totalValidationLoss += validationLsBatch
print('Validation completed after epoch:', e + 1, '. Total validation loss:', totalValidationLoss)
print('----------- Saving Model')
tf.saved_model.simple_save(sess, export_dir=TRAINED_MODEL_PATH, inputs=\
{'inputSequencePlaceholder': inputSequencePlaceholder, 'targetPlaceholder': targetPlaceholder}, outputs=\
{'loss': loss, 'forecast_originalScale': forecast_originalScale})
print('saved model to:', TRAINED_MODEL_PATH)
print('----------- Finis') | 31.150134 | 232 | 0.62897 | [
"Apache-2.0"
] | anuragbms/Sales-forecasting-with-RNNs | MetamorphicTests/all_mutants/sales_forecasting_file/273.py | 11,619 | Python |
__version__ = '0.3.3'
import os
import sys
import logging
import argparse
from .core import WebCrawler
from .helpers import color_logging
def main():
""" parse command line options and run commands.
"""
parser = argparse.ArgumentParser(
description='A web crawler for testing website links validation.')
parser.add_argument(
'-V', '--version', dest='version', action='store_true',
help="show version")
parser.add_argument(
'--log-level', default='INFO',
help="Specify logging level, default is INFO.")
parser.add_argument(
'--config-file', help="Specify config file path.")
parser.add_argument(
'--seeds', default='http://debugtalk.com',
help="Specify crawl seed url(s), several urls can be specified with pipe; \
if auth needed, seeds can be specified like user1:pwd1@url1|user2:pwd2@url2")
parser.add_argument(
'--include-hosts', help="Specify extra hosts to be crawled.")
parser.add_argument(
'--cookies', help="Specify cookies, several cookies can be joined by '|'. \
e.g. 'lang:en,country:us|lang:zh,country:cn'")
parser.add_argument(
'--crawl-mode', default='BFS', help="Specify crawl mode, BFS or DFS.")
parser.add_argument(
'--max-depth', default=5, type=int, help="Specify max crawl depth.")
parser.add_argument(
'--concurrency', help="Specify concurrent workers number.")
parser.add_argument(
'--save-results', default='NO', help="Specify if save results, default is NO.")
parser.add_argument("--grey-user-agent",
help="Specify grey environment header User-Agent.")
parser.add_argument("--grey-traceid",
help="Specify grey environment cookie traceid.")
parser.add_argument("--grey-view-grey",
help="Specify grey environment cookie view_gray.")
try:
from jenkins_mail_py import MailgunHelper
mailer = MailgunHelper(parser)
except ImportError:
mailer = None
args = parser.parse_args()
if args.version:
print("WebCrawler version: {}".format(__version__))
exit(0)
log_level = getattr(logging, args.log_level.upper())
logging.basicConfig(level=log_level)
color_logging("args: %s" % args)
main_crawler(args, mailer)
def main_crawler(args, mailer=None):
include_hosts = args.include_hosts.split(',') if args.include_hosts else []
cookies_list = args.cookies.split('|') if args.cookies else ['']
jenkins_build_number = args.jenkins_build_number
logs_folder = os.path.join(os.getcwd(), "logs", '{}'.format(jenkins_build_number))
web_crawler = WebCrawler(args.seeds, include_hosts, logs_folder, args.config_file)
# set grey environment
if args.grey_user_agent and args.grey_traceid and args.grey_view_grey:
web_crawler.set_grey_env(args.grey_user_agent, args.grey_traceid, args.grey_view_grey)
canceled = False
try:
for cookies_str in cookies_list:
cookies_str_list = cookies_str.split(',')
cookies = {}
for cookie_str in cookies_str_list:
if ':' not in cookie_str:
continue
key, value = cookie_str.split(':')
cookies[key.strip()] = value.strip()
web_crawler.start(
cookies,
args.crawl_mode,
args.max_depth,
args.concurrency
)
if mailer and mailer.config_ready:
subject = "%s" % args.seeds
mail_content_ordered_dict, flag_code = web_crawler.get_mail_content_ordered_dict()
mailer.send_mail(subject, mail_content_ordered_dict, flag_code)
except KeyboardInterrupt:
canceled = True
color_logging("Canceling...", color='red')
finally:
save_results = False if args.save_results.upper() == "NO" else True
web_crawler.print_result(canceled, save_results)
| 37.388889 | 94 | 0.63794 | [
"MIT"
] | debugtalk/WebCrawler | webcrawler/__init__.py | 4,038 | Python |
#!/usr/bin/env python
import bottle
import os, json
from .utils import distance, neighbours, direction
from .defensive import find_my_tail, trouble, find_enemy_tail, eat_food, find_my_tail_emergency
from .snake import Snake
from .gameboard import GameBoard
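# Grid cell values used by init(): SAFTEY marks passable cells (e.g. tails about
# to move), SNAKE marks occupied cells, FOOD marks food, and DANGER marks cells
# next to the head of an enemy that is at least as long as our snake.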
SAFTEY = 0
SNAKE = 1
FOOD = 3
DANGER = 5
def move_response(move):
assert move in ['up', 'down', 'left', 'right'], \
"Move must be one of [up, down, left, right]"
return bottle.HTTPResponse(
status=200,
headers={
"Content-Type": "application/json"
},
body=json.dumps({
"move": move
})
)
def init(data):
"""
Initialize grid and update cell values\n
@param data -> Json response from bottle\n
    @return game_id -> Game id for debugging purposes when displaying grid\n
@return grid -> Grid with updated cell values\n
@return food -> Sorted array of food by closest to charlie\n
@return charlie -> My snake\n
@return enemies -> Array of all enemy snakes\n
@return check_food -> Secondary grid to look ahead when eating food
"""
food = []
enemies = []
grid = GameBoard(data['board']['height'], data['board']['width'])
check_food = GameBoard(data['board']['height'], data['board']['width'])
charlie = Snake(data['you'])
for i in data['board']['food']:
food.append([i['x'], i['y']])
grid.set_cell([i['x'], i['y']], FOOD)
check_food.set_cell([i['x'], i['y']], FOOD)
for snake in data['board']['snakes']:
snake = Snake(snake)
for coord in snake.coords:
grid.set_cell(coord, SNAKE)
check_food.set_cell(coord, SNAKE)
if snake.health < 100 and snake.length > 2 and data['turn'] >= 3:
grid.set_cell(snake.tail, SAFTEY)
check_food.set_cell(snake.tail, SAFTEY)
if snake.id != charlie.id:
for neighbour in neighbours(snake.head, grid, 0, snake.coords, [1]):
if snake.length >= charlie.length:
grid.set_cell(neighbour, DANGER)
check_food.set_cell(neighbour, DANGER)
enemies.append(snake)
food = sorted(food, key = lambda p: distance(p, charlie.head))
game_id = data['game']['id']
# print("turn is {}".format(data['turn']))
return game_id, grid, food, charlie, enemies, check_food
@bottle.post('/ping')
def ping():
return bottle.HTTPResponse(
status=200,
headers={
"Content-Type": "application/json"
},
body=json.dumps({})
)
@bottle.post('/start')
def start():
return bottle.HTTPResponse(
status=200,
headers={
"Content-Type": "application/json"
},
body=json.dumps({
"color": '#002080',
'headType': 'pixel',
'tailType': 'pixel'
})
)
@bottle.post('/move')
def move():
data = bottle.request.json
game_id, grid, food, charlie, enemies, check_food = init(data)
# grid.display_game(game_id)
if len(enemies) > 2 or charlie.length <= 25 or charlie.health <= 60:
path = eat_food(charlie, grid, food, check_food)
if path:
# print('eat path {}'.format(path))
return move_response(direction(path[0], path[1]))
if charlie.length >= 3:
path = find_my_tail(charlie, grid)
if path:
# print('find my tail path {}'.format(path))
return move_response(direction(path[0], path[1]))
if not path:
path = find_enemy_tail(charlie, enemies, grid)
if path:
# print('find enemy tail path {}'.format(path))
return move_response(direction(path[0], path[1]))
    # if our length is greater than the threshold and no other path was available
if charlie.length >= 3:
path = find_my_tail_emergency(charlie, grid)
if path:
# print('find my tail emergency path {}'.format(path))
return move_response(direction(path[0], path[1]))
# Choose a random free space if no available enemy tail
if not path:
path = trouble(charlie, grid)
if path:
# print('trouble path {}'.format(path))
return move_response(direction(path[0], path[1]))
@bottle.post('/end')
def end():
return bottle.HTTPResponse(
status=200,
headers={
"Content-Type": "application/json"
},
body=json.dumps({})
)
application = bottle.default_app()
if __name__ == '__main__':
bottle.run(application, host=os.getenv('IP', '0.0.0.0'), port=os.getenv('PORT', '8080'), quiet = True) | 29.506329 | 103 | 0.586015 | [
"MIT"
] | ntmk/battlesnake-2019-pixelated | app/main.py | 4,662 | Python |
#!/usr/bin/python3
# -*- coding: utf8 -*-
# Copyright (c) 2020 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Procedure Params
"""
class ProcedureParams:
"""
The procedure params dict
"""
def __init__(self):
"""
The constructor of the ProcedureParams class
"""
self.paramsDict = {} # the inner data for procedure params dict
def __getitem__(self, index):
"""
Get the procedure params according to the index.
Create the register when it does not exist.
:param index:
:return: ProcedureParamStorage
"""
value = self.paramsDict.get(index)
if value is not None:
return value
value = ProcedureParamStorage(index)
self.paramsDict[index] = value
return value
class ProcedureParamStorage:
"""
The storage for procedure param
"""
def __init__(self, index):
"""
The quantum param object needs to know its index.
:param index: the quantum register index
"""
self.index = index | 25.375 | 74 | 0.647783 | [
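# Minimal usage sketch (illustrative only): indexing a ProcedureParams object
# lazily creates and caches the corresponding storage.
#
#   params = ProcedureParams()
#   storage = params[7]           # created on first access
#   assert storage.index == 7
#   assert params[7] is storage   # later accesses return the cached object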
"Apache-2.0"
] | rickyHong/Qcompute-repl | QCompute/QuantumPlatform/ProcedureParams.py | 1,624 | Python |
class TopicDTO:
name = str
description = str
popularity = int
def __init__(self, name="", popularity=0, description = ""):
self.name=name
self.popularity=popularity
self.description = description
| 26.666667 | 64 | 0.625 | [
"MIT"
] | AngelStoyanov33/Flask-Forum | DTOs/TopicDTO.py | 240 | Python |
import os
from quick2wire.gpio import pins, In, Out, PullDown, gpio_admin
import pytest
@pytest.mark.gpio
@pytest.mark.loopback
class TestGPIO:
def test_pin_must_be_opened_before_use_and_is_unusable_after_being_closed(self):
pin = pins.pin(0)
with pytest.raises(IOError):
pin.value
pin.open()
try:
pin.value
finally:
pin.close()
with pytest.raises(IOError):
pin.value
def test_opens_and_closes_itself_when_used_as_a_context_manager(self):
pin = pins.pin(0)
with pin:
pin.value
with pytest.raises(IOError):
pin.value
def test_exports_gpio_device_to_userspace_when_opened_and_unexports_when_closed(self):
with pins.pin(0) as pin:
assert os.path.exists('/sys/class/gpio/gpio17/value')
assert not os.path.exists('/sys/class/gpio/gpio17/value')
def test_can_set_and_query_direction_of_pin_when_open(self):
with pins.pin(0) as pin:
pin.direction = Out
assert pin.direction == Out
assert content_of("/sys/class/gpio/gpio17/direction") == "out\n"
pin.direction = In
assert pin.direction == In
assert content_of("/sys/class/gpio/gpio17/direction") == "in\n"
def test_can_set_direction_on_construction(self):
pin = pins.pin(0, Out)
assert pin.direction == Out
assert not os.path.exists("/sys/class/gpio/gpio17/direction")
with pin:
assert content_of("/sys/class/gpio/gpio17/direction") == "out\n"
assert pin.direction == Out
def test_setting_value_of_output_pin_writes_to_device_file(self):
with pins.pin(0) as pin:
pin.direction = Out
pin.value = 1
assert pin.value == 1
assert content_of('/sys/class/gpio/gpio17/value') == '1\n'
pin.value = 0
assert pin.value == 0
assert content_of('/sys/class/gpio/gpio17/value') == '0\n'
def test_direction_and_value_of_pin_is_reset_when_closed(self):
with pins.pin(0, Out) as pin:
pin.value = 1
gpio_admin("export", 17, PullDown)
try:
assert content_of('/sys/class/gpio/gpio17/value') == '0\n'
assert content_of('/sys/class/gpio/gpio17/direction') == 'in\n'
finally:
gpio_admin("unexport", 17)
def test_cannot_get_a_pin_with_an_invalid_index(self):
with pytest.raises(IndexError):
pins.pin(-1)
with pytest.raises(IndexError):
pins.pin(len(pins))
def content_of(filename):
with open(filename, 'r') as f:
return f.read()
| 28.563107 | 90 | 0.569001 | [
"MIT"
] | pietersartain/ipseity | usr/share/quick2wire/test_gpio.py | 2,942 | Python |
#!/usr/bin/env python
# coding=utf-8
class PyPIPackageProject:
pass
| 10.571429 | 25 | 0.702703 | [
"MIT"
] | hansnow/asgi-webdav | asgi_webdav/core.py | 74 | Python |
import FWCore.ParameterSet.Config as cms
hltEgammaHcalPFClusterIsoUnseeded = cms.EDProducer("EgammaHLTHcalPFClusterIsolationProducer",
absEtaLowEdges = cms.vdouble(0.0, 1.479),
doRhoCorrection = cms.bool(False),
drMax = cms.double(0.3),
drVetoBarrel = cms.double(0.0),
drVetoEndcap = cms.double(0.0),
effectiveAreas = cms.vdouble(0.2, 0.25),
energyBarrel = cms.double(0.0),
energyEndcap = cms.double(0.0),
etaStripBarrel = cms.double(0.0),
etaStripEndcap = cms.double(0.0),
pfClusterProducerHCAL = cms.InputTag("hltParticleFlowClusterHCALForEgamma"),
pfClusterProducerHFEM = cms.InputTag(""),
pfClusterProducerHFHAD = cms.InputTag(""),
recoEcalCandidateProducer = cms.InputTag("hltEgammaCandidatesUnseeded"),
rhoMax = cms.double(99999999.0),
rhoProducer = cms.InputTag("hltFixedGridRhoFastjetAllCaloForEGamma"),
rhoScale = cms.double(1.0),
useEt = cms.bool(True),
useHF = cms.bool(False)
)
| 40.291667 | 93 | 0.718718 | [
"Apache-2.0"
] | PKUfudawei/cmssw | HLTrigger/Configuration/python/HLT_75e33/modules/hltEgammaHcalPFClusterIsoUnseeded_cfi.py | 967 | Python |
#!/usr/bin/env python
# encoding: utf-8
"""
@Author: yangwenhao
@Contact: 874681044@qq.com
@Software: PyCharm
@File: Cosine.py
@Time: 19-6-26 9:43 PM
@Overview: Implement Cosine Score for speaker identification!
Enrollment set files will be in the 'Data/enroll_set.npy' and the classes-to-index file is 'Data/enroll_classes.npy'
Test set files are in the 'Data/test_set.npy' and the utterances-to-index file is 'Data/test_classes.npy'
"""
import numpy as np
import torch.nn as nn
ENROLL_FILE = "Data/xvector/enroll/extract_adagrad-lr0.1-wd0.0-embed512-alpha10.npy"
ENROLL_CLASS = "Data/enroll_classes.npy"
TEST_FILE = "Data/xvector/test/extract_adagrad-lr0.1-wd0.0-embed512-alpha10.npy"
TEST_CLASS = "Data/test_classes.npy"
# test_vec = np.array([1,2,3,4])
# refe_vec = np.array([8,3,3,4])
def normalize(narray, order=2, axis=1):
norm = np.linalg.norm(narray, ord=order, axis=axis, keepdims=True)
return(narray/norm + np.finfo(np.float32).eps)
def cos_dis(test_vec, refe_vec):
vec1 = normalize(test_vec, axis=0)
vec2 = normalize(refe_vec, axis=0)
dis = np.matmul(vec1, vec2.T)
return(dis)
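# Illustrative sanity check for cos_dis (hypothetical vectors):
#   cos_dis(np.array([1.0, 0.0]), np.array([1.0, 0.0])) -> ~1.0 (same direction)
#   cos_dis(np.array([1.0, 0.0]), np.array([0.0, 1.0])) -> ~0.0 (orthogonal)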
enroll_features = np.load(ENROLL_FILE, allow_pickle=True)
enroll_classes = np.load(ENROLL_CLASS, allow_pickle=True).item()
test_features = np.load(TEST_FILE, allow_pickle=True)
test_classes = np.load(TEST_CLASS, allow_pickle=True)
enroll_dict = dict()
for item in enroll_classes:
num=0
feat = np.zeros([512], dtype=float)
for (label, feature) in enroll_features:
if label==enroll_classes[item]:
feat += feature.detach().numpy()
num += 1
enroll_dict[item] = feat/num
similarity = {}
for (label, feature) in test_features:
utt = {}
for item in enroll_dict:
utt[item] = np.linalg.norm(feature.detach().numpy()-enroll_dict[item])
for utterance in test_classes:
if int(utterance[1])==label.item():
test_id = utterance[0]
similarity[test_id]=utt
print(similarity)
# cos_dis(test_vec, refe_vec)
| 31.777778 | 116 | 0.708791 | [
"MIT"
] | Wenhao-Yang/DeepSpeaker-pytorch | Score/Cosine_Score.py | 2,006 | Python |
"""
Copyright 2013 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import google
from google.appengine.ext import ndb
import google.appengine.api.memcache as google_memcache
import google.appengine.ext.deferred as google_deferred
from google.appengine.datastore.datastore_query import Cursor as GoogleCursor
def raise_(ex):
raise ex
class FutureWrapper( ndb.Future ):
state = ndb.Future.FINISHING
_done = True
def __init__( self, result ):
self.result = result
def get_result( self ):
return self.result
def done( self ):
return True
def wait( self ):
pass
def check_success( self ):
return None
def get_exception( self ):
return None
def get_traceback( self ):
return None
# TODO: wrap query for one item into a future
class FutureQueryWrapper( object ):
def __init__(self, query_fut):
self.query_fut = query_fut
def get_result( self ):
res = self.query_fut.get_result()
if res != None and len(res) > 0:
return res[0]
else:
return None
def done( self ):
return self.query_fut.done()
def wait( self):
return self.query_fut.wait()
def check_success( self ):
return self.query_fut.check_success()
def get_exception( self ):
return self.query_fut.get_exception()
def get_traceback( self ):
return self.query_fut.get_traceback()
# aliases for types
Model = ndb.Model
Integer = ndb.IntegerProperty
Float = ndb.FloatProperty
String = ndb.StringProperty
Text = ndb.TextProperty
Key = ndb.KeyProperty
Boolean = ndb.BooleanProperty
Json = ndb.JsonProperty
Blob = ndb.BlobProperty
Computed = ndb.ComputedProperty
Pickled = ndb.PickleProperty
Cursor = GoogleCursor
# aliases for keys
make_key = ndb.Key
def wait_futures( future_list ):
"""
Wait for all of a list of futures to finish.
Works with FutureWrapper.
"""
# see if any of these are NOT futures...then just wrap them into a future object
# that implements a get_result()
ret = []
futs = []
for f in future_list:
if f is None:
continue
if not isinstance( f, ndb.Future ) and not isinstance( f, FutureWrapper ):
# definitely not a future
ret.append( FutureWrapper( f ) )
else:
# a future or something compatible
futs.append( f )
ndb.Future.wait_all( futs )
return futs + ret
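# Illustrative usage (assumption: callers may mix real ndb futures with plain,
# already-computed values):
#   futs = put_multi_async([obj1, obj2])
#   results = wait_futures(futs + [precomputed_value])
#   values = [f.get_result() for f in results]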
deferred = google_deferred
concurrent = ndb.tasklet
concurrent_return = (lambda x: (raise_(ndb.Return( x ))))
# asynchronous operations
get_multi_async = ndb.get_multi_async
put_multi_async = ndb.put_multi_async
# synchronous operations
get_multi = ndb.get_multi
put_multi = ndb.put_multi
delete_multi = ndb.delete_multi
# aliases for memcache
memcache = google_memcache
# aliases for transaction
transaction = ndb.transaction
transaction_async = ndb.transaction_async
transactional = ndb.transactional
# alises for query predicates
opAND = ndb.AND
opOR = ndb.OR
# aliases for top-level asynchronous loop
toplevel = ndb.toplevel
# aliases for common exceptions
RequestDeadlineExceededError = google.appengine.runtime.DeadlineExceededError
APIRequestDeadlineExceededError = google.appengine.runtime.apiproxy_errors.DeadlineExceededError
URLRequestDeadlineExceededError = google.appengine.api.urlfetch_errors.DeadlineExceededError
TransactionFailedError = google.appengine.ext.db.TransactionFailedError
| 25.694268 | 96 | 0.715419 | [
"Apache-2.0"
] | jcnelson/syndicate | ms/storage/backends/google_appengine.py | 4,034 | Python |
import sys
import os
import math
import imageio
from moviepy.editor import *
import time
def read_video(video_name):
# Read video from file
video_name_input = 'testset/' + video_name
video = VideoFileClip(video_name_input)
return video
def video2frame(video_name):
video = read_video(video_name)
video_frame_number = int(video.duration * video.fps) ## duration: second / fps: frame per second
video_frame_ciphers = math.ceil(math.log(video_frame_number, 10)) ## ex. 720 -> 3
if not os.path.exists('testset/' + video_name):
os.makedirs('testset/' + video_name)
for i in range(0, video_frame_number):
video.save_frame('testset/' + video_name + '/frame_' + str(i).zfill(video_frame_ciphers) + '.jpg', i/video.fps)
def video2poseframe(video_name):
import numpy as np
sys.path.append(os.path.dirname(__file__) + "/../")
from scipy.misc import imread, imsave
from config import load_config
from dataset.factory import create as create_dataset
from nnet import predict
from util import visualize
from dataset.pose_dataset import data_to_input
from multiperson.detections import extract_detections
from multiperson.predict import SpatialModel, eval_graph, get_person_conf_multicut
from multiperson.visualize import PersonDraw, visualize_detections
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw
import random
cfg = load_config("demo/pose_cfg_multi.yaml")
dataset = create_dataset(cfg)
sm = SpatialModel(cfg)
sm.load()
# Load and setup CNN part detector
sess, inputs, outputs = predict.setup_pose_prediction(cfg)
################
video = read_video(video_name)
video_frame_number = int(video.duration * video.fps) ## duration: second / fps: frame per second
video_frame_ciphers = math.ceil(math.log(video_frame_number, 10)) ## ex. 720 -> 3
if not os.path.exists('testset/' + video_name):
os.makedirs('testset/' + video_name)
for i in range(0, video_frame_number):
image = video.get_frame(i/video.fps)
######################
image_batch = data_to_input(image)
# Compute prediction with the CNN
outputs_np = sess.run(outputs, feed_dict={inputs: image_batch})
scmap, locref, pairwise_diff = predict.extract_cnn_output(outputs_np, cfg, dataset.pairwise_stats)
detections = extract_detections(cfg, scmap, locref, pairwise_diff)
unLab, pos_array, unary_array, pwidx_array, pw_array = eval_graph(sm, detections)
person_conf_multi = get_person_conf_multicut(sm, unLab, unary_array, pos_array)
print('person_conf_multi: ')
print(type(person_conf_multi))
print(person_conf_multi)
# Add library to save image
image_img = Image.fromarray(image)
# Save image with points of pose
draw = ImageDraw.Draw(image_img)
people_num = 0
point_num = 17
print('person_conf_multi.size: ')
print(person_conf_multi.size)
people_num = person_conf_multi.size / (point_num * 2)
people_num = int(people_num)
print('people_num: ')
print(people_num)
point_i = 0 # index of points
point_r = 5 # radius of points
people_real_num = 0
for people_i in range(0, people_num):
point_color_r = random.randrange(0, 256)
point_color_g = random.randrange(0, 256)
point_color_b = random.randrange(0, 256)
point_color = (point_color_r, point_color_g, point_color_b, 255)
point_count = 0
for point_i in range(0, point_num):
if person_conf_multi[people_i][point_i][0] + person_conf_multi[people_i][point_i][1] != 0: # If coordinates of point is (0, 0) == meaningless data
point_count = point_count + 1
            if point_count > 5: # if a person has more than 5 points, we treat him/her as a REAL PERSON
people_real_num = people_real_num + 1
for point_i in range(0, point_num):
draw.ellipse((person_conf_multi[people_i][point_i][0] - point_r, person_conf_multi[people_i][point_i][1] - point_r, person_conf_multi[people_i][point_i][0] + point_r, person_conf_multi[people_i][point_i][1] + point_r), fill=point_color)
print('people_real_num: ')
print(people_real_num)
video_name_result = 'testset/' + video_name + '/frame_pose_' + str(i).zfill(video_frame_ciphers) + '.jpg'
        image_img.save(video_name_result, "JPEG")  # Pillow's format name is "JPEG", not "JPG"
def video2posevideo(video_name):
time_start = time.clock()
import numpy as np
sys.path.append(os.path.dirname(__file__) + "/../")
from scipy.misc import imread, imsave
from config import load_config
from dataset.factory import create as create_dataset
from nnet import predict
from util import visualize
from dataset.pose_dataset import data_to_input
from multiperson.detections import extract_detections
from multiperson.predict import SpatialModel, eval_graph, get_person_conf_multicut
from multiperson.visualize import PersonDraw, visualize_detections
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageFont
font = ImageFont.truetype("./font/NotoSans-Bold.ttf", 24)
import random
cfg = load_config("demo/pose_cfg_multi.yaml")
dataset = create_dataset(cfg)
sm = SpatialModel(cfg)
sm.load()
draw_multi = PersonDraw()
# Load and setup CNN part detector
sess, inputs, outputs = predict.setup_pose_prediction(cfg)
################
video = read_video(video_name)
video_frame_number = int(video.duration * video.fps) ## duration: second / fps: frame per second
video_frame_ciphers = math.ceil(math.log(video_frame_number, 10)) ## ex. 720 -> 3
pose_frame_list = []
point_r = 3 # radius of points
    point_min = 10 # threshold of points - if a person has more than point_min points, we treat him/her as a REAL PERSON
    part_min = 3 # threshold of parts - if a person has more than part_min parts, we treat him/her as a REAL PERSON / a part means a head, an arm or a leg
point_num = 17 # There are 17 points in 1 person
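    # Helper closures: ellipse_set/line_set build PIL coordinate tuples around the
    # detected keypoints, and draw_ellipse_and_line renders one three-joint limb.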
def ellipse_set(person_conf_multi, people_i, point_i):
return (person_conf_multi[people_i][point_i][0] - point_r, person_conf_multi[people_i][point_i][1] - point_r, person_conf_multi[people_i][point_i][0] + point_r, person_conf_multi[people_i][point_i][1] + point_r)
def line_set(person_conf_multi, people_i, point_i, point_j):
return (person_conf_multi[people_i][point_i][0], person_conf_multi[people_i][point_i][1], person_conf_multi[people_i][point_j][0], person_conf_multi[people_i][point_j][1])
def draw_ellipse_and_line(draw, person_conf_multi, people_i, a, b, c, point_color):
draw.ellipse(ellipse_set(person_conf_multi, people_i, a), fill=point_color)
draw.ellipse(ellipse_set(person_conf_multi, people_i, b), fill=point_color)
draw.ellipse(ellipse_set(person_conf_multi, people_i, c), fill=point_color)
draw.line(line_set(person_conf_multi, people_i, a, b), fill=point_color, width=5)
draw.line(line_set(person_conf_multi, people_i, b, c), fill=point_color, width=5)
for i in range(0, video_frame_number):
image = video.get_frame(i/video.fps)
######################
image_batch = data_to_input(image)
# Compute prediction with the CNN
outputs_np = sess.run(outputs, feed_dict={inputs: image_batch})
scmap, locref, pairwise_diff = predict.extract_cnn_output(outputs_np, cfg, dataset.pairwise_stats)
detections = extract_detections(cfg, scmap, locref, pairwise_diff)
unLab, pos_array, unary_array, pwidx_array, pw_array = eval_graph(sm, detections)
person_conf_multi = get_person_conf_multicut(sm, unLab, unary_array, pos_array)
# print('person_conf_multi: ')
# print(type(person_conf_multi))
# print(person_conf_multi)
# Add library to save image
image_img = Image.fromarray(image)
# Save image with points of pose
draw = ImageDraw.Draw(image_img)
people_num = 0
people_real_num = 0
people_part_num = 0
people_num = person_conf_multi.size / (point_num * 2)
people_num = int(people_num)
print('people_num: ' + str(people_num))
for people_i in range(0, people_num):
point_color_r = random.randrange(0, 256)
point_color_g = random.randrange(0, 256)
point_color_b = random.randrange(0, 256)
point_color = (point_color_r, point_color_g, point_color_b, 255)
point_list = []
point_count = 0
point_i = 0 # index of points
part_count = 0 # count of parts in THAT person
# To find rectangle which include that people - list of points x, y coordinates
people_x = []
people_y = []
for point_i in range(0, point_num):
if person_conf_multi[people_i][point_i][0] + person_conf_multi[people_i][point_i][1] != 0: # If coordinates of point is (0, 0) == meaningless data
point_count = point_count + 1
point_list.append(point_i)
# Draw each parts
if (5 in point_list) and (7 in point_list) and (9 in point_list): # Draw left arm
draw_ellipse_and_line(draw, person_conf_multi, people_i, 5, 7, 9, point_color)
part_count = part_count + 1
if (6 in point_list) and (8 in point_list) and (10 in point_list): # Draw right arm
draw_ellipse_and_line(draw, person_conf_multi, people_i, 6, 8, 10, point_color)
part_count = part_count + 1
if (11 in point_list) and (13 in point_list) and (15 in point_list): # Draw left leg
draw_ellipse_and_line(draw, person_conf_multi, people_i, 11, 13, 15, point_color)
part_count = part_count + 1
if (12 in point_list) and (14 in point_list) and (16 in point_list): # Draw right leg
draw_ellipse_and_line(draw, person_conf_multi, people_i, 12, 14, 16, point_color)
part_count = part_count + 1
if point_count >= point_min:
people_real_num = people_real_num + 1
for point_i in range(0, point_num):
if person_conf_multi[people_i][point_i][0] + person_conf_multi[people_i][point_i][1] != 0: # If coordinates of point is (0, 0) == meaningless data
draw.ellipse(ellipse_set(person_conf_multi, people_i, point_i), fill=point_color)
people_x.append(person_conf_multi[people_i][point_i][0])
people_y.append(person_conf_multi[people_i][point_i][1])
# Draw rectangle which include that people
draw.rectangle([min(people_x), min(people_y), max(people_x), max(people_y)], fill=point_color, outline=5)
if part_count >= part_min:
people_part_num = people_part_num + 1
draw.text((0, 0), 'People(by point): ' + str(people_real_num) + ' (threshold = ' + str(point_min) + ')', (0,0,0), font=font)
draw.text((0, 32), 'People(by line): ' + str(people_part_num) + ' (threshold = ' + str(part_min) + ')', (0,0,0), font=font)
draw.text((0, 64), 'Frame: ' + str(i) + '/' + str(video_frame_number), (0,0,0), font=font)
draw.text((0, 96), 'Total time required: ' + str(round(time.clock() - time_start, 1)) + 'sec', (0,0,0))
print('people_real_num: ' + str(people_real_num))
print('people_part_num: ' + str(people_part_num))
print('frame: ' + str(i))
image_img_numpy = np.asarray(image_img)
pose_frame_list.append(image_img_numpy)
video_pose = ImageSequenceClip(pose_frame_list, fps=video.fps)
video_pose.write_videofile("testset/" + video_name + "_pose.mp4", fps=video.fps)
print("Time(s): " + str(time.clock() - time_start))
| 41.554795 | 256 | 0.659469 | [
"Apache-2.0"
] | PJunhyuk/people-counting-classification | video_pose_ed.py | 12,134 | Python |
from rest_framework import fields, serializers
from db.models.repos import Repo
class RepoSerializer(serializers.ModelSerializer):
project = fields.SerializerMethodField()
class Meta:
model = Repo
fields = ('project', 'created_at', 'updated_at', 'is_public', )
def get_user(self, obj):
return obj.user.username
def get_project(self, obj):
return obj.project.name
| 23.166667 | 71 | 0.688249 | [
"MPL-2.0"
] | AntonFriberg/polyaxon | polyaxon/api/repos/serializers.py | 417 | Python |
# coding=utf-8
from pub.tables.resources import *
from pub.tables.user import *
import pub.client.login as login
from pub.permission.user import is_logged,is_owner
def is_valid_key(key, r_type):
try:
resource_type.objects.get(key=key)
return False
except:
pass
try:
resource_info.objects.get(key=key)
return False
except:
pass
if (r_type == -1):
return True
try:
if(r_type==s.RESOURCE_TYPE_CUSTOMED):
resource_customed.objects.get(key=key)
return False
elif(r_type == s.RESOURCE_TYPE_TEMPLATED):
resource_templated.objects.get(key=key)
return False
elif(r_type == s.RESOURCE_TYPE_RESTFUL_API):
resource_restful.objects.get(key=key)
return False
elif(r_type == s.RESOURCE_TYPE_IFRAME):
resource_iframe.objects.get(key=key)
return False
elif(r_type == s.RESOURCE_TYPE_SHORT_LINK):
resource_link.objects.get(key=key)
return False
else:
return False
except:
return True
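# set_permission replaces any existing permission row: if the lookup succeeds the
# old row is deleted and the deliberate raise drops into the except branch, which
# recreates the row with the new values.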
def set_permission(key,readable,writeable,modifiable,token=''):
try:
res = resource_permission.objects.get(key=key)
res.delete()
raise Exception()
except:
resource_permission.objects.create(key=key,readable=readable,writeable=writeable,modifiable=modifiable,token=token)
def can_read(request,key,token=''):
try:
readable,_,_,verify_token =__get_resource_permission(key)
return __accessibility_verfy(readable,request,key,token,verify_token)
except:
return False
def can_write(request,key,token=''):
try:
_,writeable,_,verify_token = __get_resource_permission(key)
return __accessibility_verfy(writeable,request,key,token,verify_token)
except:
return False
def can_modify(request,key,token=''):
try:
_,_,modifiable,verify_token = __get_resource_permission(key)
return __accessibility_verfy(modifiable,request,key,token,verify_token)
except:
return False
def can_create(request, r_type):
if not is_logged(request):
return False
return True
#
# try:
# user = login.get_user_by_session(request,request.session.get(s.SESSION_LOGIN))
# except:
# return False
#
# p = user_permission.objects.get(user_id=user, type=r_type).volume
#
# if p>0:
# return True
#
# return False
def did_create(request,r_type):
if is_logged(request):
user = login.get_user_by_session(request,request.session.get(s.SESSION_LOGIN))
p = user_permission.objects.get(user_id=user, type=r_type)
p.volume = p.volume - 1
p.save()
def __get_resource_permission(key):
p = resource_permission.objects.get(key=key)
readable = p.readable
writeable = p.writeable
modifiable = p.modifiable
token = p.token
return readable, writeable, modifiable, token
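# Accessibility decision helper: PUBLIC always passes; LOGIN and LOGIN_OR_TOKEN
# accept either a logged-in session or a matching token; PRIVATE requires the
# logged-in owner; TOKEN requires a matching token.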
def __accessibility_verfy(accessibility, request, key, token, verify_token):
if accessibility == s.ACCESSIBILITY_PUBLIC:
return True
elif accessibility == s.ACCESSIBILITY_LOGIN or accessibility == s.ACCESSIBILITY_LOGIN_OR_TOKEN:
if is_logged(request):
return True
else:
if token != '':
if token == verify_token:
return True
elif accessibility == s.ACCESSIBILITY_PRIVATE:
if is_logged(request):
if is_owner(request, key):
return True
return False
elif accessibility == s.ACCESSIBILITY_TOKEN:
if token != '':
if token == verify_token:
return True
| 26.384615 | 123 | 0.640074 | [
"MIT"
] | DASTUDIO/MyVHost | pub/permission/resource.py | 3,773 | Python |
import urllib2
from zope.interface import implements
from plone.portlets.interfaces import IPortletDataProvider
from plone.app.portlets.portlets import base
from Products.CMFCore.utils import getToolByName
from zope import schema
from zope.formlib import form
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from wad.blog.utils import find_portlet_assignment_context
from wad.blog.blogentry import IBlogEntry
from wad.blog import MessageFactory as _
class IBlogCategoriesPortlet(IPortletDataProvider):
"""A portlet
It inherits from IPortletDataProvider because for this portlet, the
data that is being rendered and the portlet assignment itself are the
same.
"""
archive_view = schema.TextLine(
title=_(u"Archive view"),
description=_(u"The name of the archive view"),
default=u'blog-view',
required=True
)
class Assignment(base.Assignment):
"""Portlet assignment.
This is what is actually managed through the portlets UI and associated
with columns.
"""
implements(IBlogCategoriesPortlet)
def __init__(self, archive_view=u'blog-view'):
self.archive_view = archive_view
@property
def title(self):
"""This property is used to give the title of the portlet in the
"manage portlets" screen.
"""
return _("Categories")
class Renderer(base.Renderer):
"""Portlet renderer.
This is registered in configure.zcml. The referenced page template is
rendered, and the implicit variable 'view' will refer to an instance
of this class. Other methods can be added and referenced in the template.
"""
render = ViewPageTemplateFile('categories.pt')
def keywords(self):
catalog = getToolByName(self.context, 'portal_catalog')
keywords = catalog.uniqueValuesFor('Subject')
keywords = [unicode(k, 'utf-8') for k in keywords]
return keywords
def archive_url(self, subject):
# Get the path of where the portlet is created. That's the blog.
assignment_context = find_portlet_assignment_context(self.data,
self.context)
if assignment_context is None:
assignment_context = self.context
self.folder_url = assignment_context.absolute_url()
sub = urllib2.quote(subject.encode('utf-8'))
url = '%s/%s?category=%s' % (self.folder_url,
self.data.archive_view,
sub)
return url
def blog_url(self):
assignment_context = find_portlet_assignment_context(self.data,
self.context)
if assignment_context is None:
assignment_context = self.context
return assignment_context.absolute_url()
def count_entries(self, subject):
catalog = getToolByName(self.context, 'portal_catalog')
brains = catalog(object_provides=IBlogEntry.__identifier__,
Subject=subject.encode('utf-8'))
return len(brains)
def count_all_entries(self):
catalog = getToolByName(self.context, 'portal_catalog')
brains = catalog(object_provides=IBlogEntry.__identifier__)
return len(brains)
class AddForm(base.AddForm):
"""Portlet add form.
This is registered in configure.zcml. The form_fields variable tells
zope.formlib which fields to display. The create() method actually
constructs the assignment that is being added.
"""
form_fields = form.Fields(IBlogCategoriesPortlet)
def create(self, data):
return Assignment(**data)
class EditForm(base.EditForm):
"""Portlet edit form.
This is registered with configure.zcml. The form_fields variable tells
zope.formlib which fields to display.
"""
form_fields = form.Fields(IBlogCategoriesPortlet)
| 32.694215 | 77 | 0.671638 | [
"MIT"
] | potzenheimer/buildout.wad | src/wad.blog/wad/blog/portlets/categories.py | 3,956 | Python |
from configparser import ConfigParser
import feedparser
import re
import requests
import tweepy
def get_id(xkcd_link: str) -> int:
"""
    Extract comic id from xkcd link
"""
match = re.search(r"\d+", xkcd_link)
if match:
return int(match.group())
else:
return 0
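# Illustrative behaviour (hypothetical links):
#   get_id("https://xkcd.com/2000/") -> 2000
#   get_id("no digits here")         -> 0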
def get_xkcd_rss_entries(url: str):
"""
    Load the XKCD RSS feed and return its entries
"""
# get latest rss feed
feed = feedparser.parse(url)
return feed.get("entries")
def get_latest_rss_entry(entries: list):
"""
Extract latest entry from XKCD RSS feed and
parse the ID
"""
entry = entries[0]
id_ = get_id(xkcd_link=entry.get("id"))
return id_, entry
def downdload_comic(entry: dict, filename: str) -> None:
"""
    Download the comic image referenced by the entry and
    store it under the given filename
"""
match = re.search(r'src="(.*png)"', entry["summary"])
if match:
img_url = match.groups()[0]
r = requests.get(img_url)
r.raise_for_status()
with open(filename, "wb") as f:
f.write(r.content)
return None
def initialize_twitter_api(config: ConfigParser):
"""
Do authentication and return read-to-use
twitter api object
"""
twitter_config = config["twitter"]
auth = tweepy.OAuthHandler(
twitter_config.get("consumer_key"), twitter_config.get("consumer_secret")
)
auth.set_access_token(
twitter_config.get("access_token"), twitter_config.get("access_secret")
)
api = tweepy.API(auth)
return api
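# Illustrative wiring (assumption: an INI file with a [twitter] section holding
# consumer_key, consumer_secret, access_token and access_secret):
#   config = ConfigParser()
#   config.read("config.ini")
#   api = initialize_twitter_api(config)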
def send_twitter_post(entry: dict, api: tweepy.API, img_fname: str) -> None:
"""
Post tweet on twitter
"""
match = re.search("title=(.*)/>", entry["summary"])
if match:
msg = match.groups()[0]
msg += f"\n {entry['link']}"
else:
msg = "-- No Title --"
api.update_with_media(status=msg, filename=img_fname)
return None
| 22.170455 | 81 | 0.621732 | [
"MIT"
] | lwittchen/twitter-bots | xkcd_feed/src/utils.py | 1,951 | Python |
from django.urls import path
from el_galleria import views
urlpatterns = [
path('', views.index, name="home"),
path('category/<str:selected_category>/', views.category, name="category"),
path('search/<str:search_str>/', views.search, name="search")
] | 26.5 | 79 | 0.69434 | [
"MIT"
] | kennjr/mi-galleria | el_galleria/urls.py | 265 | Python |
"""api_gw_test"""
# Remove warnings when using pytest fixtures
# pylint: disable=redefined-outer-name
import json
from test.conftest import ENDPOINT_URL
# warning disabled, this is used as a pylint fixture
from test.elasticsearch_test import ( # pylint: disable=unused-import
es_client,
populate_es_test_case_1,
)
from urllib.parse import urlencode
import boto3
import pytest
import requests
def to_localstack_url(api_id: str, url: str):
"""
    Converts an API GW url to its localstack equivalent
"""
return url.replace("4566", f"4566/restapis/{api_id}").replace(
"dev", "dev/_user_request_"
)
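# Illustrative transformation (hypothetical API id "abc123"):
#   http://localhost:4566/dev/test
#     -> http://localhost:4566/restapis/abc123/dev/_user_request_/test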
def api_gw_lambda_integrate_deploy(
api_client,
api: dict,
api_resource: dict,
lambda_func: dict,
http_method: str = "GET",
) -> str:
"""
Integrate lambda with api gw method and deploy api.
    Return the invocation URL
"""
lambda_integration_arn = (
"arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/"
f"{lambda_func['FunctionArn']}/invocations"
)
api_client.put_integration(
restApiId=api["id"],
resourceId=api_resource["id"],
httpMethod=http_method,
type="AWS",
integrationHttpMethod="POST",
uri=lambda_integration_arn,
)
api_client.create_deployment(
restApiId=api["id"], stageName="dev",
)
return f"http://localhost:4566/restapis/{api['id']}/dev/_user_request_{api_resource['path']}"
@pytest.fixture
def api_gw_method(request):
"""api gw for testing"""
marker = request.node.get_closest_marker("api_gw_method_args")
put_method_args = marker.args[0]["put_method_args"]
put_method_response_args = marker.args[0]["put_method_response_args"]
api = None
def fin():
"""fixture finalizer"""
if api:
api_client.delete_rest_api(restApiId=api["id"])
# Hook teardown (finalizer) code
request.addfinalizer(fin)
api_client = boto3.client("apigateway", endpoint_url=ENDPOINT_URL)
api = api_client.create_rest_api(name="testapi")
root_resource_id = api_client.get_resources(restApiId=api["id"])["items"][0]["id"]
api_resource = api_client.create_resource(
restApiId=api["id"], parentId=root_resource_id, pathPart="test"
)
api_client.put_method(
restApiId=api["id"],
resourceId=api_resource["id"],
authorizationType="NONE",
**put_method_args,
)
api_client.put_method_response(
restApiId=api["id"],
resourceId=api_resource["id"],
statusCode="200",
**put_method_response_args,
)
return api_client, api, api_resource
@pytest.mark.api_gw_method_args(
{
"put_method_args": {"httpMethod": "GET",},
"put_method_response_args": {"httpMethod": "GET",},
}
)
@pytest.mark.lambda_function_args(
{
"name": "stac_endpoint",
"handler": "code.handler",
"environment": {"CBERS_STAC_BUCKET": "bucket",},
"timeout": 30,
"layers": (
{
"output_dir": "./test",
"layer_dir": "./cbers2stac/layers/common",
"tag": "common",
},
),
}
)
def test_root(api_gw_method, lambda_function):
"""
test_root_endpoint
"""
# Based on
# https://stackoverflow.com/questions/58859917/creating-aws-lambda-integrated-api-gateway-resource-with-boto3
api_client, api, api_resource = api_gw_method
lambda_client, lambda_func = lambda_function # pylint: disable=unused-variable
url = api_gw_lambda_integrate_deploy(api_client, api, api_resource, lambda_func)
req = requests.get(url)
assert req.status_code == 200
@pytest.mark.api_gw_method_args(
{
"put_method_args": {"httpMethod": "GET",},
"put_method_response_args": {"httpMethod": "GET",},
}
)
@pytest.mark.lambda_function_args(
{
"name": "elasticsearch",
"handler": "es.stac_search_endpoint_handler",
"environment": {},
"timeout": 30,
"layers": (
{
"output_dir": "./test",
"layer_dir": "./cbers2stac/layers/common",
"tag": "common",
},
),
}
)
def test_item_search_get(
api_gw_method, lambda_function, es_client
): # pylint: disable=too-many-locals,too-many-statements
"""
test_item_search_get
"""
api_client, api, api_resource = api_gw_method
lambda_client, lambda_func = lambda_function # pylint: disable=unused-variable
# ES_ENDPOINT is set by lambda_function
lambda_client.update_function_configuration(
FunctionName=lambda_func["FunctionName"],
Environment={"Variables": {"ES_PORT": "4571", "ES_SSL": "NO",}},
)
populate_es_test_case_1(es_client)
# Empty GET, return all 2 items
original_url = api_gw_lambda_integrate_deploy(
api_client, api, api_resource, lambda_func
)
req = requests.get(original_url)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert len(fcol["features"]) == 2
# Single collection, return single item
url = f"{original_url}?collections=CBERS4-MUX"
req = requests.get(url)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert len(fcol["features"]) == 1
assert fcol["features"][0]["collection"] == "CBERS4-MUX"
# Two collections, return all items
url = f"{original_url}?collections=CBERS4-MUX,CBERS4-AWFI"
req = requests.get(url)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert len(fcol["features"]) == 2
# Paging, no next case
url = f"{original_url}"
req = requests.get(url)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert "links" not in fcol.keys()
# Paging, next page
url = f"{original_url}?limit=1"
req = requests.get(url)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert "links" in fcol.keys()
assert len(fcol["links"]) == 1
next_href = to_localstack_url(api["id"], fcol["links"][0]["href"])
req = requests.get(next_href)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert "links" not in fcol.keys()
assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"
# ids
url = f"{original_url}?ids=CBERS_4_MUX_20170528_090_084_L2"
req = requests.get(url)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert len(fcol["features"]) == 1
assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"
# query extension
url = f"{original_url}?"
url += urlencode({"query": '{"cbers:data_type": {"eq":"L4"}}'})
req = requests.get(url)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert len(fcol["features"]) == 1
assert fcol["features"][0]["id"] == "CBERS_4_AWFI_20170409_167_123_L4"
@pytest.mark.api_gw_method_args(
{
"put_method_args": {"httpMethod": "POST",},
"put_method_response_args": {"httpMethod": "POST",},
}
)
@pytest.mark.lambda_function_args(
{
"name": "elasticsearch",
"handler": "es.stac_search_endpoint_handler",
"environment": {},
"timeout": 30,
"layers": (
{
"output_dir": "./test",
"layer_dir": "./cbers2stac/layers/common",
"tag": "common",
},
),
}
)
def test_item_search_post(
api_gw_method, lambda_function, es_client
): # pylint: disable=too-many-locals
"""
test_item_search_post
"""
api_client, api, api_resource = api_gw_method
lambda_client, lambda_func = lambda_function # pylint: disable=unused-variable
# ES_ENDPOINT is set by lambda_function
lambda_client.update_function_configuration(
FunctionName=lambda_func["FunctionName"],
Environment={"Variables": {"ES_PORT": "4571", "ES_SSL": "NO",}},
)
populate_es_test_case_1(es_client)
url = api_gw_lambda_integrate_deploy(
api_client, api, api_resource, lambda_func, http_method="POST"
)
# POST with invalid bbox order, check error status code and message
req = requests.post(
url,
data=json.dumps(
{
"collections": ["mycollection"],
"bbox": [160.6, -55.95, -170, -25.89],
"limit": 100,
"datetime": "2019-01-01T00:00:00Z/2019-01-01T23:59:59Z",
}
),
)
assert req.status_code == 400, req.text
assert "First lon corner is not western" in req.text
# Same as above with fixed bbox
req = requests.post(
url,
data=json.dumps(
{
"collections": ["mycollection"],
"bbox": [-170, -25.89, 160.6, -55.95],
"limit": 100,
"datetime": "2019-01-01T00:00:00Z/2019-01-01T23:59:59Z",
}
),
)
assert req.status_code == 200, req.text
# Paging, no next case
req = requests.post(url)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert "links" not in fcol.keys()
# Paging, next page
body = {"limit": 1}
req = requests.post(url, data=json.dumps(body))
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert "links" in fcol.keys()
assert len(fcol["links"]) == 1
next_href = to_localstack_url(api["id"], fcol["links"][0]["href"])
req = requests.post(
next_href, data=json.dumps({**body, **fcol["links"][0]["body"]})
)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert "links" not in fcol.keys()
assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"
# ids
body = {"ids": ["CBERS_4_MUX_20170528_090_084_L2"]}
req = requests.post(url, data=json.dumps(body))
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert len(fcol["features"]) == 1
assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"
| 30.376119 | 113 | 0.622445 | [
"Apache-2.0"
] | fredliporace/cbers-2-stac | test/api_gw_test.py | 10,176 | Python |
_base_ = [
'../_base_/datasets/ffhq_flip.py',
'../_base_/models/stylegan/stylegan2_base.py',
'../_base_/default_runtime.py'
]
model = dict(
type='MSPIEStyleGAN2',
generator=dict(
type='MSStyleGANv2Generator',
head_pos_encoding=dict(type='CSG'),
deconv2conv=True,
up_after_conv=True,
head_pos_size=(4, 4),
up_config=dict(scale_factor=2, mode='bilinear', align_corners=True),
out_size=256),
discriminator=dict(
type='MSStyleGAN2Discriminator', in_size=256, with_adaptive_pool=True))
train_cfg = dict(
num_upblocks=6,
multi_input_scales=[0, 2, 4],
multi_scale_probability=[0.5, 0.25, 0.25])
data = dict(
samples_per_gpu=3,
train=dict(dataset=dict(imgs_root='./data/ffhq/ffhq_imgs/ffhq_512')))
ema_half_life = 10.
custom_hooks = [
dict(
type='VisualizeUnconditionalSamples',
output_dir='training_samples',
interval=5000),
dict(
type='ExponentialMovingAverageHook',
module_keys=('generator_ema', ),
interval=1,
interp_cfg=dict(momentum=0.5**(32. / (ema_half_life * 1000.))),
priority='VERY_HIGH')
]
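# Note: the EMA momentum above follows the usual StyleGAN2 half-life rule,
# momentum = 0.5 ** (images_per_iteration / (ema_half_life * 1000)); the
# constant 32 is presumably the reference global batch size, giving the EMA
# weights a half-life of roughly `ema_half_life` thousand images.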
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=40)
lr_config = None
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook'),
])
cudnn_benchmark = False
total_iters = 1100002
metrics = dict(
fid50k=dict(
type='FID',
num_images=50000,
inception_pkl='work_dirs/inception_pkl/ffhq-256-50k-rgb.pkl',
bgr2rgb=True),
pr10k3=dict(type='PR', num_images=10000, k=3))
| 26.714286 | 79 | 0.649436 | [
"Apache-2.0"
] | DequanWang/actnn-mmgen | configs/positional_encoding_in_gans/mspie-stylegan2_c2_config-d_ffhq_256-512_b3x8_1100k.py | 1,683 | Python |
from jsonrpc import ServiceProxy
import sys
import string
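# Simple interactive RPC helper (Python 2): run as "python bitrpc.py <command>";
# the script prompts for any arguments and forwards the call to the local
# wallet's JSON-RPC server configured below.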
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:25176")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:25176")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitmea address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitmea address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
			print access.getwork(data)
except:
			print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| 24.110769 | 79 | 0.668198 | [
"MIT"
] | bitmea-project/bitmea | contrib/bitrpc/bitrpc.py | 7,836 | Python |
#!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import sys
import tty
import termios
from Head.d3crypt import ghost
class typer:
def __init__(self):
        self.ghost = ghost()
def get_char(self):
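        # Read a single keystroke without waiting for Enter: switch stdin to
        # raw mode, read one byte, then restore the saved terminal settings.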
fd = sys.stdin.fileno()
old = termios.tcgetattr(fd)
try:
tty.setraw(fd)
return sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old)
def send_char(self, char):
self.ghost.send_command("shell", "input text " + char, False, False)
| 33.854167 | 80 | 0.720615 | [
"MIT"
] | D3crypT0r/D3crypt | Head/typer.py | 1,625 | Python |
"""
DeepChEmbed (DCE) Models
"""
from dimreducer import DeepAutoEncoder
from cluster import KMeansLayer
from cluster import KMeans
from keras import Model
from keras import optimizers
from keras.utils import normalize
import numpy as np
class DCE():
"""
The class to build a deep chemical embedding model.
Attributes:
autoencoder_dims: a list of dimensions for encoder, the first
element as input dimension, and the last one as
hidden layer dimension.
n_clusters: int, number of clusters for clustering layer.
alpha: float, parameters for soft label assigning.
        update_interval: int, every `update_interval` epochs the hardened
                         labels will be updated and/or the convergence
                         criteria will be examined.
        max_iteration: int, maximum number of iterations for the combined
                       training
        clustering_tol: float, convergence criterion for the clustering layer
model: keras Model variable
        HARDENING_FUNCS: smoothstep hardening functions for unsupervised DCE
training, up to 9th order
"""
HARDENING_FUNCS = {
1: lambda x: x,
3: lambda x: (-2*x + 3) * x**2,
5: lambda x: ((6*x - 15)*x + 10) * x**3,
7: lambda x: (((-20*x + 70)*x - 84)*x + 35) * x**4,
9: lambda x: ((((70*x - 315)*x + 540)*x -420)*x + 126) * x**5}
def __init__(self, autoencoder_dims, n_clusters, update_interval=50,
max_iteration=1e4, clustering_tol=1e-4, alpha=1.0):
"""Construtor of DCE. """
self.autoencoder_dims = autoencoder_dims
self.n_clusters = n_clusters
self.alpha = alpha
self.update_interval = update_interval
self.max_iteration = max_iteration
self.clustering_tol = clustering_tol
self.model = None
return
def build_model(self, norm=True, act='relu'):
"""Build DCE using the initialized attributes
Args:
            norm: boolean, whether to add a normalization layer at the
                  beginning of the autoencoder
act: string, keras activation function name for autoencoder
"""
autoencoder = DeepAutoEncoder(self.autoencoder_dims, act)
autoencoder.build_model(norm=norm)
embeding = autoencoder.model.get_layer(name='embedding_layer').output
clustering = KMeansLayer(self.n_clusters, alpha=self.alpha,
name='clustering')(embeding)
self.model = Model(inputs=autoencoder.model.input,
outputs=[clustering,autoencoder.model.output])
return
def train_model(self, data_train,
labels_train=None, data_test=None, labels_test=None,
verbose=1,
                    clustering_loss='kld',
                    decoder_loss='mse', clustering_loss_weight=0.5,
                    hardening_order=1, hardening_strength=2.0,
                    compiled=False,
                    optimizer='adam', lr=0.001, decay=0.0):
"""Train DCE Model:
        If labels_train is not present, train the DCE model in an unsupervised
learning process; otherwise, train DCE model in a supervised learning
process.
Args:
data_train: input training data
labels_train: true labels of traning data
data_test: input test data
            labels_test: true labels of the test data
            verbose: 0, turn off the screen prints
            clustering_loss: string, clustering layer loss function
            decoder_loss: string, decoder loss function
            clustering_loss_weight: float in [0,1], the clustering loss
                weight w_c
            hardening_order: odd int, the order of the hardening function
            hardening_strength: float >=1.0, the strength of the hardening
            compiled: boolean, indicating if the model is compiled or not
            optimizer: string, keras optimizer name
            lr: learning rate
            decay: learning rate decay
Returns:
train_loss: training loss
test_loss: only if data_test and labels_test are not None in
supervised learning process
"""
if (not compiled):
assert clustering_loss_weight <= 1 and clustering_loss_weight >= 0
if optimizer == 'adam':
dce_optimizer = optimizers.Adam(lr=lr,decay=decay)
elif optimizer == 'sgd':
dce_optimizer = optimizers.sgd(lr=lr,decay=decay)
else:
raise Exception('Input optimizer was not found')
self.model.compile(loss={'clustering': clustering_loss,
'decoder_output': decoder_loss},
loss_weights=[clustering_loss_weight,
1 - clustering_loss_weight],
optimizer=dce_optimizer)
if (labels_train is not None):
supervised_learning = True
if verbose >= 1: print('Starting supervised learning')
else:
supervised_learning = False
if verbose >= 1: print('Starting unsupervised learning')
        # initialize the model by using sklearn KMeans as the initial guess
kmeans_init = KMeans(n_clusters=self.n_clusters)
kmeans_init.build_model()
encoder = Model(inputs=self.model.input,
outputs=self.model.get_layer(\
name='embedding_layer').output)
kmeans_init.model.fit(encoder.predict(data_train))
y_pred_last = kmeans_init.model.labels_
self.model.get_layer(name='clustering').\
set_weights([kmeans_init.model.cluster_centers_])
        # Prepare training: p distribution methods
if not supervised_learning:
# Unsupervised Learning
assert hardening_order in DCE.HARDENING_FUNCS.keys()
assert hardening_strength >= 1.0
h_func = DCE.HARDENING_FUNCS[hardening_order]
else:
# Supervised Learning
assert len(labels_train) == len(data_train)
assert len(np.unique(labels_train)) == self.n_clusters
p = np.zeros(shape=(len(labels_train), self.n_clusters))
for i in range(len(labels_train)):
p[i][labels_train[i]] = 1.0
if data_test is not None:
assert len(labels_test) == len(data_test)
assert len(np.unique(labels_test)) == self.n_clusters
p_test = np.zeros(shape=(len(labels_test), self.n_clusters))
for i in range(len(labels_test)):
p_test[i][labels_test[i]] = 1.0
validation_loss = []
# training start:
loss = []
for iteration in range(int(self.max_iteration)):
if iteration % self.update_interval == 0:
# updating p for unsupervised learning process
q, _ = self.model.predict(data_train)
if not supervised_learning:
p = DCE.hardening(q, h_func, hardening_strength)
# get label change i
y_pred = q.argmax(1)
delta_label_i = np.sum(y_pred != y_pred_last).\
astype(np.float32) / y_pred.shape[0]
y_pred_last = y_pred
                # examine convergence
if iteration > 0 and delta_label_i < self.clustering_tol:
print(str(delta_label_i) +' < ' + str(self.clustering_tol))
print('Reached tolerance threshold. Stopping training.')
break
loss.append(self.model.train_on_batch(x=data_train,
y=[p,data_train]))
if supervised_learning and data_test is not None:
validation_loss.append(self.model.test_on_batch(
x=data_test, y=[p_test,data_test]))
if verbose > 0 and iteration % self.update_interval == 0:
print('Epoch: ' + str(iteration))
if verbose == 1:
print(' Total_loss = ' + str(loss[iteration][0]) +
';Delta_label = ' + str(delta_label_i))
print(' Clustering_loss = ' + str(loss[iteration][1]) +
'; Decoder_loss = ' + str(loss[iteration][2]))
if iteration == self.max_iteration - 1:
print('Reached maximum iteration. Stopping training.')
if data_test is None:
return np.array(loss).T
else:
return [np.array(loss).T, np.array(validation_loss).T]
@staticmethod
    def hardening(q, h_func, strength):
        """Harden the distribution q and return the normalized distribution p.
        Args:
            q: input distributions.
            h_func: input hardening function.
            strength: hardening strength.
        Returns:
            p: hardened and normalized distributions.
        """
        q = h_func(q)
        weight = q ** strength / q.sum(0)
return (weight.T / weight.sum(1)).T
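# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; the module name, feature dimension
# and random data below are assumptions, not part of the library):
#
#   import numpy as np
#   from dce import DCE
#
#   data = np.random.rand(1000, 64)              # 1000 samples, 64 features
#   dce = DCE(autoencoder_dims=[64, 32, 8],      # input -> ... -> embedding
#             n_clusters=5, max_iteration=1000)
#   dce.build_model()
#   loss = dce.train_model(data)                 # unsupervised: no labels
#   q, reconstruction = dce.model.predict(data)
#   cluster_assignments = q.argmax(1)
# ---------------------------------------------------------------------------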
| 41.430493 | 79 | 0.573763 | [
"MIT"
] | chembed/DeepChEmbed | deepchembed/dce.py | 9,241 | Python |
import datetime
from . import status
from .errors import InvalidAuthRequest, ProtocolVersionUnsupported, NoMutualAuthType
from .signing import Key
from .response import AuthResponse
class AuthPrincipal:
def __init__(self, userid, auth_methods, ptags=None, session_expiry=None):
self.userid = userid
self.auth_methods = auth_methods
if ptags is None:
ptags = []
self.ptags = ptags
self.session_expiry = session_expiry
class LoginService:
"""High-level interface to implement a web login service (WLS).
This class provides a convenient interface for implementing a WLS with any
authentication backend. It is intended to be instantiated with a single
private key, which is used to sign the responses it generates.
Mechanisms deemed useful for WLS implementation are provided:
- storing the list of supported authentication methods, and checking
whether the WLS and a WAA's request have an method in common
- checking whether the protocol version specified in the WAA request is
supported by `ucam_wls`
These mechanisms can optionally be turned off.
Attributes:
key (ucam_wls.signing.Key): a private key to be used to sign responses
auth_methods (list): a list of supported authentication methods
"""
def __init__(self, key, auth_methods):
if not isinstance(key, Key):
raise TypeError("key must be a ucam_wls.signing.Key instance")
self.key = key
self.auth_methods = auth_methods
def have_mutual_auth_type(self, request):
if request.aauth and any(request.aauth):
return set(request.aauth) & set(self.auth_methods) != set()
else:
return True
def _pre_response(self, request, skip_handling_check, check_auth_types=True):
if not skip_handling_check:
if not request.data_valid:
raise InvalidAuthRequest
if check_auth_types and not self.have_mutual_auth_type(request):
raise NoMutualAuthType(
"WLS supports %s; WAA wants one of %s" % (
self.auth_methods, request.aauth
)
)
if not request.version_supported:
raise ProtocolVersionUnsupported(request.ver)
def _finish_response(self, response, sign=True, force_signature=False):
if sign or response.requires_signature:
if not response.is_signed or force_signature:
self.key.sign(response)
return response
def authenticate_active(self, request, principal, auth, life=None,
sign=True, skip_handling_check=False, *args, **kwargs):
"""Generate a WLS 'success' response based on interaction with the user
This function creates a WLS response specifying that the principal was
authenticated based on 'fresh' interaction with the user (e.g. input of
a username and password).
Args:
request (AuthRequest): the original WAA request
principal (AuthPrincipal): the principal authenticated by the WLS
auth (str): the authentication method used by the principal.
life (int): if specified, the validity (in seconds) of the
principal's session with the WLS.
sign (bool): whether to sign the response or not. Recommended to
leave this at the default value of `True` (see warning below).
*args: passed to `AuthResponse.respond_to_request`
**kwargs: passed to `AuthResponse.respond_to_request`
Returns:
An `AuthResponse` instance matching the given arguments.
Warning:
Responses indicating successful authentication *MUST* be signed by
the WLS. It is recommended that you leave `sign` set to `True`, or
make sure to sign the response manually afterwards.
"""
self._pre_response(request, skip_handling_check)
if request.iact == False:
raise ValueError("WAA demanded passive authentication (iact == 'no')")
if life is None and principal.session_expiry is not None:
life = int((principal.session_expiry - datetime.datetime.utcnow()).total_seconds())
response = AuthResponse.respond_to_request(
request=request, code=status.SUCCESS, principal=principal.userid,
auth=auth, ptags=principal.ptags, life=life, *args, **kwargs
)
return self._finish_response(response=response, sign=sign)
def authenticate_passive(self, request, principal, sso=[], sign=True,
skip_handling_check=False, *args, **kwargs):
"""Generate a WLS 'success' response based on a pre-existing identity
This function creates a WLS response specifying that the principal was
authenticated based on previous successful authentication (e.g. an
existing WLS session cookie).
Args:
request (AuthRequest): the original WAA request
principal (AuthPrincipal): the principal authenticated by the WLS
sso (list): a list of strings indicating the authentication methods
previously used for authentication by the principal. If an
empty list is passed, `principal.auth_methods` will be used.
sign (bool): whether to sign the response or not. Recommended to
leave this at the default value of `True` (see warning below).
*args: passed to `AuthResponse.respond_to_request`
**kwargs: passed to `AuthResponse.respond_to_request`
Returns:
An `AuthResponse` instance matching the given arguments.
Warning:
Responses indicating successful authentication *MUST* be signed by
the WLS. It is recommended that you leave `sign` set to `True`, or
make sure to sign the response manually afterwards.
"""
self._pre_response(request, skip_handling_check)
if request.iact == True:
raise ValueError("WAA demanded active authentication (iact == 'yes')")
if len(sso) == 0:
sso = principal.auth_methods
if len(sso) == 0:
raise ValueError("no authentication methods specified for `sso`")
if principal.session_expiry is not None:
life = int((principal.session_expiry - datetime.datetime.utcnow()).total_seconds())
else:
life = None
response = AuthResponse.respond_to_request(
request=request, code=status.SUCCESS, principal=principal.userid,
sso=sso, ptags=principal.ptags, life=life, *args, **kwargs
)
return self._finish_response(response=response, sign=sign)
def generate_failure(self, code, request, msg='', sign=True,
skip_handling_check=False, *args, **kwargs):
"""Generate a response indicating failure.
This is to be used in all cases where the outcome of user interaction
is not success. This function will refuse to handle a request where
the 'fail' parameter is 'yes' (in which case the WLS must not redirect
back to the WAA).
Args:
code (int): the response status code. Values specified in the
protocol are available as constants under `ucam_wls.status`.
request (AuthRequest): the original WAA request
msg (str): an optional message that could be shown to the end user
by the WAA
sign (bool): whether to sign the response or not.
*args: passed to `AuthResponse.respond_to_request`
**kwargs: passed to `AuthResponse.respond_to_request`
Returns:
An `AuthResponse` instance matching the given arguments.
Note:
            WLS responses indicating a non-success may optionally be signed.
            In the interests of security, the default in this
function is to go ahead and sign anyway, but this can be turned off
if really desired.
"""
self._pre_response(request, skip_handling_check, check_auth_types=False)
if request.fail:
raise ValueError("WAA specified that WLS must not redirect "
"back to it on failure")
if code == status.SUCCESS:
raise ValueError("Failure responses must not have success status")
response = AuthResponse.respond_to_request(
request=request, code=code, *args, **kwargs
)
return self._finish_response(response=response, sign=sign)
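# ---------------------------------------------------------------------------
# Illustrative sketch only (how the signing key and the parsed AuthRequest
# are obtained is outside this module and assumed here):
#
#   service = LoginService(key=my_key, auth_methods=['pwd'])
#   principal = AuthPrincipal(userid='spqr2', auth_methods=['pwd'])
#
#   # the user has just authenticated interactively, e.g. with a password:
#   response = service.authenticate_active(request, principal, auth='pwd')
#
# On failure, generate_failure(code, request) builds the corresponding
# non-success response; either way the (signed) response is sent back to the
# WAA via the redirect URL.
# ---------------------------------------------------------------------------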
| 43.346535 | 95 | 0.646071 | [
"MIT"
] | edwinbalani/ucam-wls | ucam_wls/context.py | 8,756 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'TableMagneticStoreWriteProperties',
'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation',
'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration',
'TableRetentionProperties',
]
@pulumi.output_type
class TableMagneticStoreWriteProperties(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "enableMagneticStoreWrites":
suggest = "enable_magnetic_store_writes"
elif key == "magneticStoreRejectedDataLocation":
suggest = "magnetic_store_rejected_data_location"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in TableMagneticStoreWriteProperties. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
TableMagneticStoreWriteProperties.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
TableMagneticStoreWriteProperties.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
enable_magnetic_store_writes: Optional[bool] = None,
magnetic_store_rejected_data_location: Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation'] = None):
"""
:param bool enable_magnetic_store_writes: A flag to enable magnetic store writes.
:param 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationArgs' magnetic_store_rejected_data_location: The location to write error reports for records rejected asynchronously during magnetic store writes. See Magnetic Store Rejected Data Location below for more details.
"""
if enable_magnetic_store_writes is not None:
pulumi.set(__self__, "enable_magnetic_store_writes", enable_magnetic_store_writes)
if magnetic_store_rejected_data_location is not None:
pulumi.set(__self__, "magnetic_store_rejected_data_location", magnetic_store_rejected_data_location)
@property
@pulumi.getter(name="enableMagneticStoreWrites")
def enable_magnetic_store_writes(self) -> Optional[bool]:
"""
A flag to enable magnetic store writes.
"""
return pulumi.get(self, "enable_magnetic_store_writes")
@property
@pulumi.getter(name="magneticStoreRejectedDataLocation")
def magnetic_store_rejected_data_location(self) -> Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation']:
"""
The location to write error reports for records rejected asynchronously during magnetic store writes. See Magnetic Store Rejected Data Location below for more details.
"""
return pulumi.get(self, "magnetic_store_rejected_data_location")
@pulumi.output_type
class TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "s3Configuration":
suggest = "s3_configuration"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
s3_configuration: Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration'] = None):
"""
:param 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3ConfigurationArgs' s3_configuration: Configuration of an S3 location to write error reports for records rejected, asynchronously, during magnetic store writes. See S3 Configuration below for more details.
"""
if s3_configuration is not None:
pulumi.set(__self__, "s3_configuration", s3_configuration)
@property
@pulumi.getter(name="s3Configuration")
def s3_configuration(self) -> Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration']:
"""
Configuration of an S3 location to write error reports for records rejected, asynchronously, during magnetic store writes. See S3 Configuration below for more details.
"""
return pulumi.get(self, "s3_configuration")
@pulumi.output_type
class TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "bucketName":
suggest = "bucket_name"
elif key == "encryptionOption":
suggest = "encryption_option"
elif key == "kmsKeyId":
suggest = "kms_key_id"
elif key == "objectKeyPrefix":
suggest = "object_key_prefix"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
bucket_name: Optional[str] = None,
encryption_option: Optional[str] = None,
kms_key_id: Optional[str] = None,
object_key_prefix: Optional[str] = None):
"""
:param str bucket_name: Bucket name of the customer S3 bucket.
:param str encryption_option: Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key. Valid values are `SSE_KMS` and `SSE_S3`.
:param str kms_key_id: KMS key arn for the customer s3 location when encrypting with a KMS managed key.
:param str object_key_prefix: Object key prefix for the customer S3 location.
"""
if bucket_name is not None:
pulumi.set(__self__, "bucket_name", bucket_name)
if encryption_option is not None:
pulumi.set(__self__, "encryption_option", encryption_option)
if kms_key_id is not None:
pulumi.set(__self__, "kms_key_id", kms_key_id)
if object_key_prefix is not None:
pulumi.set(__self__, "object_key_prefix", object_key_prefix)
@property
@pulumi.getter(name="bucketName")
def bucket_name(self) -> Optional[str]:
"""
Bucket name of the customer S3 bucket.
"""
return pulumi.get(self, "bucket_name")
@property
@pulumi.getter(name="encryptionOption")
def encryption_option(self) -> Optional[str]:
"""
Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key. Valid values are `SSE_KMS` and `SSE_S3`.
"""
return pulumi.get(self, "encryption_option")
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> Optional[str]:
"""
KMS key arn for the customer s3 location when encrypting with a KMS managed key.
"""
return pulumi.get(self, "kms_key_id")
@property
@pulumi.getter(name="objectKeyPrefix")
def object_key_prefix(self) -> Optional[str]:
"""
Object key prefix for the customer S3 location.
"""
return pulumi.get(self, "object_key_prefix")
@pulumi.output_type
class TableRetentionProperties(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "magneticStoreRetentionPeriodInDays":
suggest = "magnetic_store_retention_period_in_days"
elif key == "memoryStoreRetentionPeriodInHours":
suggest = "memory_store_retention_period_in_hours"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in TableRetentionProperties. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
TableRetentionProperties.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
TableRetentionProperties.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
magnetic_store_retention_period_in_days: int,
memory_store_retention_period_in_hours: int):
"""
:param int magnetic_store_retention_period_in_days: The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000.
:param int memory_store_retention_period_in_hours: The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766.
"""
pulumi.set(__self__, "magnetic_store_retention_period_in_days", magnetic_store_retention_period_in_days)
pulumi.set(__self__, "memory_store_retention_period_in_hours", memory_store_retention_period_in_hours)
@property
@pulumi.getter(name="magneticStoreRetentionPeriodInDays")
def magnetic_store_retention_period_in_days(self) -> int:
"""
The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000.
"""
return pulumi.get(self, "magnetic_store_retention_period_in_days")
@property
@pulumi.getter(name="memoryStoreRetentionPeriodInHours")
def memory_store_retention_period_in_hours(self) -> int:
"""
The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766.
"""
return pulumi.get(self, "memory_store_retention_period_in_hours")
| 46.277056 | 294 | 0.710196 | [
"ECL-2.0",
"Apache-2.0"
] | chivandikwa/pulumi-aws | sdk/python/pulumi_aws/timestreamwrite/outputs.py | 10,690 | Python |
#
# PySNMP MIB module Nortel-MsCarrier-MscPassport-ExtensionsMIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Nortel-MsCarrier-MscPassport-ExtensionsMIB
# Produced by pysmi-0.3.4 at Wed May 1 14:29:54 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
RowPointer, = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-StandardTextualConventionsMIB", "RowPointer")
mscPassportMIBs, mscComponents = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-UsefulDefinitionsMIB", "mscPassportMIBs", "mscComponents")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Unsigned32, MibIdentifier, iso, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, NotificationType, Counter32, Bits, Gauge32, IpAddress, TimeTicks, Integer32, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "MibIdentifier", "iso", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "NotificationType", "Counter32", "Bits", "Gauge32", "IpAddress", "TimeTicks", "Integer32", "Counter64")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
extensionsMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5))
mscExtensions = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 4))
mscExtensionIfTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 4, 1), )
if mibBuilder.loadTexts: mscExtensionIfTable.setStatus('mandatory')
if mibBuilder.loadTexts: mscExtensionIfTable.setDescription('A table which provides enterprise extensions to the standard ifTable.')
mscExtensionIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 4, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: mscExtensionIfEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mscExtensionIfEntry.setDescription(' An entry containing enterprise extensions to the standard ifEntry.')
mscIfRowPointer = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 4, 1, 1, 1), RowPointer()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscIfRowPointer.setStatus('mandatory')
if mibBuilder.loadTexts: mscIfRowPointer.setDescription('A pointer to the RowStatus variable for the component represented by the ifTable entry.')
extensionsGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 1))
extensionsGroupCA = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 1, 1))
extensionsGroupCA01 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 1, 1, 2))
extensionsGroupCA01A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 1, 1, 2, 2))
extensionsCapabilities = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 3))
extensionsCapabilitiesCA = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 3, 1))
extensionsCapabilitiesCA01 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 3, 1, 2))
extensionsCapabilitiesCA01A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 3, 1, 2, 2))
mibBuilder.exportSymbols("Nortel-MsCarrier-MscPassport-ExtensionsMIB", extensionsGroup=extensionsGroup, extensionsGroupCA01=extensionsGroupCA01, extensionsCapabilitiesCA=extensionsCapabilitiesCA, extensionsGroupCA=extensionsGroupCA, extensionsMIB=extensionsMIB, mscIfRowPointer=mscIfRowPointer, extensionsCapabilitiesCA01A=extensionsCapabilitiesCA01A, extensionsGroupCA01A=extensionsGroupCA01A, extensionsCapabilities=extensionsCapabilities, extensionsCapabilitiesCA01=extensionsCapabilitiesCA01, mscExtensions=mscExtensions, mscExtensionIfTable=mscExtensionIfTable, mscExtensionIfEntry=mscExtensionIfEntry)
| 114.243243 | 607 | 0.772652 | [
"Apache-2.0"
] | agustinhenze/mibs.snmplabs.com | pysnmp-with-texts/Nortel-MsCarrier-MscPassport-ExtensionsMIB.py | 4,227 | Python |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py
To create the package for pypi.
1. Change the version in __init__.py, setup.py as well as docs/source/conf.py. Remove the master from the links in
the new models of the README:
(https://huggingface.co/transformers/master/model_doc/ -> https://huggingface.co/transformers/model_doc/)
then run `make fix-copies` to fix the index of the documentation.
2. Unpin specific versions from setup.py that use a git install.
3. Commit these changes with the message: "Release: VERSION"
4. Add a tag in git to mark the release: "git tag VERSION -m 'Adds tag VERSION for pypi' "
   Push the tag to git: git push --tags origin master
5. Build both the sources and the wheel. Do not change anything in setup.py between
   creating the wheel and the source distribution (obviously).
   For the wheel, run: "python setup.py bdist_wheel" in the top level directory.
   (this will build a wheel for the python version you use to build it).
   For the sources, run: "python setup.py sdist"
   You should now have a /dist directory with both .whl and .tar.gz source versions.
6. Check that everything looks correct by uploading the package to the pypi test server:
   twine upload dist/* -r pypitest
   (pypi suggests using twine as other methods upload files via plaintext.)
   You may have to specify the repository url, use the following command then:
   twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
   Check that you can install it in a virtualenv by running:
   pip install -i https://testpypi.python.org/pypi transformers
7. Upload the final version to actual pypi:
   twine upload dist/* -r pypi
8. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory.
9. Add the release version to docs/source/_static/js/custom.js and .circleci/deploy.sh
10. Update README.md to redirect to correct documentation.
11. Update the version in __init__.py, setup.py to the new version "-dev" and push to master.
"""
import os
import re
import shutil
from distutils.core import Command
from pathlib import Path
from setuptools import find_packages, setup
# Remove stale transformers.egg-info directory to avoid https://github.com/pypa/pip/issues/5466
stale_egg_info = Path(__file__).parent / "transformers.egg-info"
if stale_egg_info.exists():
print(
(
"Warning: {} exists.\n\n"
"If you recently updated transformers to 3.0 or later, this is expected,\n"
"but it may prevent transformers from installing in editable mode.\n\n"
"This directory is automatically generated by Python's packaging tools.\n"
"I will remove it now.\n\n"
"See https://github.com/pypa/pip/issues/5466 for details.\n"
).format(stale_egg_info)
)
shutil.rmtree(stale_egg_info)
# IMPORTANT:
# 1. all dependencies should be listed here with their version requirements if any
# 2. once modified, run: `make deps_table_update` to update src/transformers/dependency_versions_table.py
_deps = [
"black>=20.8b1",
"cookiecutter==1.7.2",
"dataclasses",
"datasets",
"faiss-cpu",
"fastapi",
"filelock",
"flake8>=3.8.3",
"flax>=0.2.2",
"fugashi>=1.0",
"importlib_metadata",
"ipadic>=1.0.0,<2.0",
"isort>=5.5.4",
"jax>=0.2.8",
"jaxlib>=0.1.59",
"keras2onnx",
"numpy>=1.17",
"onnxconverter-common",
"onnxruntime-tools>=1.4.2",
"onnxruntime>=1.4.0",
"packaging",
"parameterized",
"protobuf",
"psutil",
"pydantic",
"pytest",
"pytest-xdist",
"python>=3.6.0",
"recommonmark",
"regex!=2019.12.17",
"requests",
"sacremoses",
"scikit-learn",
"sentencepiece==0.1.91",
"soundfile",
"sphinx-copybutton",
"sphinx-markdown-tables",
"sphinx-rtd-theme==0.4.3", # sphinx-rtd-theme==0.5.0 introduced big changes in the style.
"sphinx==3.2.1",
"starlette",
"tensorflow-cpu>=2.3",
"tensorflow>=2.3",
"timeout-decorator",
"tokenizers>=0.10.1,<0.11",
"torch>=1.0",
"torchaudio",
"tqdm>=4.27",
"unidic>=1.0.2",
"unidic_lite>=1.0.7",
"uvicorn",
]
# this is a lookup table with items like:
#
# tokenizers: "tokenizers==0.9.4"
# packaging: "packaging"
#
# some of the values are versioned whereas others aren't.
deps = {b: a for a, b in (re.findall(r"^(([^!=<>]+)(?:[!=<>].*)?$)", x)[0] for x in _deps)}
# since we save this data in src/transformers/dependency_versions_table.py it can be easily accessed from
# anywhere. If you need to quickly access the data from this table in a shell, you can do so easily with:
#
# python -c 'import sys; from transformers.dependency_versions_table import deps; \
# print(" ".join([ deps[x] for x in sys.argv[1:]]))' tokenizers datasets
#
# Just pass the desired package names to that script as it's shown with 2 packages above.
#
# If transformers is not yet installed and the work is done from the cloned repo remember to add `PYTHONPATH=src` to the script above
#
# You can then feed this for example to `pip`:
#
# pip install -U $(python -c 'import sys; from transformers.dependency_versions_table import deps; \
# print(" ".join([ deps[x] for x in sys.argv[1:]]))' tokenizers datasets)
#
def deps_list(*pkgs):
return [deps[pkg] for pkg in pkgs]
class DepsTableUpdateCommand(Command):
"""
A custom distutils command that updates the dependency table.
usage: python setup.py deps_table_update
"""
description = "build runtime dependency table"
user_options = [
# format: (long option, short option, description).
("dep-table-update", None, "updates src/transformers/dependency_versions_table.py"),
]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
entries = "\n".join([f' "{k}": "{v}",' for k, v in deps.items()])
content = [
"# THIS FILE HAS BEEN AUTOGENERATED. To update:",
"# 1. modify the `_deps` dict in setup.py",
"# 2. run `make deps_table_update``",
"deps = {",
entries,
"}",
"",
]
target = "src/transformers/dependency_versions_table.py"
print(f"updating {target}")
with open(target, "w", encoding="utf-8", newline="\n") as f:
f.write("\n".join(content))
extras = {}
extras["ja"] = deps_list("fugashi", "ipadic", "unidic_lite", "unidic")
extras["sklearn"] = deps_list("scikit-learn")
extras["tf"] = deps_list("tensorflow", "onnxconverter-common", "keras2onnx")
extras["tf-cpu"] = deps_list("tensorflow-cpu", "onnxconverter-common", "keras2onnx")
extras["torch"] = deps_list("torch")
if os.name == "nt": # windows
extras["retrieval"] = deps_list("datasets") # faiss is not supported on windows
extras["flax"] = [] # jax is not supported on windows
else:
extras["retrieval"] = deps_list("faiss-cpu", "datasets")
extras["flax"] = deps_list("jax", "jaxlib", "flax")
extras["tokenizers"] = deps_list("tokenizers")
extras["onnxruntime"] = deps_list("onnxruntime", "onnxruntime-tools")
extras["modelcreation"] = deps_list("cookiecutter")
extras["serving"] = deps_list("pydantic", "uvicorn", "fastapi", "starlette")
extras["speech"] = deps_list("soundfile", "torchaudio")
extras["sentencepiece"] = deps_list("sentencepiece", "protobuf")
extras["testing"] = (
deps_list("pytest", "pytest-xdist", "timeout-decorator", "parameterized", "psutil", "datasets")
+ extras["retrieval"]
+ extras["modelcreation"]
)
extras["docs"] = deps_list("recommonmark", "sphinx", "sphinx-markdown-tables", "sphinx-rtd-theme", "sphinx-copybutton")
extras["quality"] = deps_list("black", "isort", "flake8")
extras["all"] = extras["tf"] + extras["torch"] + extras["flax"] + extras["sentencepiece"] + extras["tokenizers"]
extras["dev"] = (
extras["all"]
+ extras["testing"]
+ extras["quality"]
+ extras["ja"]
+ extras["docs"]
+ extras["sklearn"]
+ extras["modelcreation"]
)
extras["torchhub"] = deps_list(
"filelock",
"importlib_metadata",
"numpy",
"packaging",
"protobuf",
"regex",
"requests",
"sacremoses",
"sentencepiece",
"torch",
"tokenizers",
"tqdm",
)
# when modifying the following list, make sure to update src/transformers/dependency_versions_check.py
install_requires = [
deps["dataclasses"] + ";python_version<'3.7'", # dataclasses for Python versions that don't have it
deps["importlib_metadata"] + ";python_version<'3.8'", # importlib_metadata for Python versions that don't have it
deps["filelock"], # filesystem locks, e.g., to prevent parallel downloads
deps["numpy"],
deps["packaging"], # utilities from PyPA to e.g., compare versions
deps["regex"], # for OpenAI GPT
deps["requests"], # for downloading models over HTTPS
deps["sacremoses"], # for XLM
deps["tokenizers"],
deps["tqdm"], # progress bars in model download and training scripts
]
setup(
name="transformers",
version="4.4.0.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
author="Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Sam Shleifer, Patrick von Platen, Sylvain Gugger, Google AI Language Team Authors, Open AI team Authors, Facebook AI Authors, Carnegie Mellon University Authors",
author_email="thomas@huggingface.co",
description="State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch",
long_description=open("README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
keywords="NLP deep learning transformer pytorch tensorflow BERT GPT GPT-2 google openai CMU",
license="Apache",
url="https://github.com/huggingface/transformers",
package_dir={"": "src"},
packages=find_packages("src"),
extras_require=extras,
entry_points={"console_scripts": ["transformers-cli=transformers.commands.transformers_cli:main"]},
python_requires=">=3.6.0",
install_requires=install_requires,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
cmdclass={"deps_table_update": DepsTableUpdateCommand},
)
| 36.33121 | 233 | 0.673562 | [
"Apache-2.0"
] | Ki6an/transformers | setup.py | 11,408 | Python |
from django.db import models
# Create your models here.
class BaseView(models.Model):
title = models.CharField(max_length=256)
def __unicode__(self):
return self.title
class port1View(models.Model):
    title = models.CharField(max_length=256)
    def __unicode__(self):
return self.title
class port2View(models.Model):
title = models.CharField(max_length=256)
def __unicode__(self):
return self.title
class port3View(models.Model):
title = models.CharField(max_length=256)
def __unicode__(self):
return self.title
class port4View(models.Model):
title = models.CharField(max_length=256)
def __unicode__(self):
return self.title
class port5View(models.Model):
title = models.CharField(max_length=256)
def __unicode__(self):
return self.title
class port6View(models.Model):
title = models.CharField(max_length=256)
def __unicode__(self):
return self.title
| 18.456522 | 41 | 0.762073 | [
"MIT"
] | nandosarracino/mymainsite | mainsite/models.py | 849 | Python |
with open('/home/pi/kown_hosts') as kown_f,open('/home/pi/cache_hosts') as cache_f:
kown_hosts = kown_f.readlines()
cache_hosts = set(cache_f.readlines())
kown_hosts = [host.split() for host in kown_hosts]
with open('/etc/ansible/hosts','w') as wf:
wf.writelines([x.split()[1]+"\n" for x in cache_hosts])
| 35.444444 | 83 | 0.689655 | [
"Apache-2.0"
] | yujmo/python | rewrite_multi_pis_ansilbe_hosts.py | 319 | Python |
# import the necessary packages
import sys
import cv2
import numpy as np
import pandas as pd
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Conv2DTranspose
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Reshape
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import Input
from tensorflow.keras import Model
from tensorflow.keras import backend as K
from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
class CNNProcessData:
def __init__(self):
pass
def get_imagedatagenerator(self):
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
#rotation_range=20,
#width_shift_range=0.05,
#height_shift_range=0.05,
#horizontal_flip=True,
# vertical_flip=True,
#brightness_range=[0.8,1.2]
)
return datagen
def generate_croppings(self, testX, testY, image_size, number):
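        # Build a fixed set of 11 square crops (image_size x image_size) per
        # input image: the four corners, the centre, the left/right middle
        # bands, and the bands slightly above/below the middle; labels are
        # repeated so they stay aligned with the augmented images.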
if number != 11:
raise Exception("Only implemented for number = 11 right now")
augmented_testX_1 = []
augmented_testX_2 = []
augmented_testX_3 = []
augmented_testX_4 = []
augmented_testX_5 = []
augmented_testX_6 = []
augmented_testX_7 = []
augmented_testX_8 = []
augmented_testX_9 = []
augmented_testX_10 = []
augmented_testX_11 = []
mid_image_size = int(round(image_size/2))
for img in testX:
height = img.shape[0]
small_height = int(round(height*0.1))
mid_height = int(round(height/2))
width = img.shape[1]
mid_width = int(round(width/2))
crop_img1 = img[height-image_size:height, 0:image_size]
crop_img2 = img[height-image_size:height, width-image_size:width]
crop_img3 = img[0:image_size, width-image_size:width]
crop_img4 = img[0:image_size, 0:image_size]
crop_img5 = img[mid_height-mid_image_size:mid_height+mid_image_size, mid_width-mid_image_size:mid_width+mid_image_size]
crop_img6 = img[mid_height-mid_image_size:mid_height+mid_image_size, 0:image_size]
crop_img7 = img[mid_height-mid_image_size:mid_height+mid_image_size, width-image_size:width]
crop_img8 = img[mid_height+small_height-mid_image_size:mid_height+small_height+mid_image_size, 0:image_size]
crop_img9 = img[mid_height+small_height-mid_image_size:mid_height+small_height+mid_image_size, width-image_size:width]
crop_img10 = img[mid_height-small_height-mid_image_size:mid_height-small_height+mid_image_size, 0:image_size]
crop_img11 = img[mid_height-small_height-mid_image_size:mid_height-small_height+mid_image_size, width-image_size:width]
augmented_testX_1.append(crop_img1)
augmented_testX_2.append(crop_img2)
augmented_testX_3.append(crop_img3)
augmented_testX_4.append(crop_img4)
augmented_testX_5.append(crop_img5)
augmented_testX_6.append(crop_img6)
augmented_testX_7.append(crop_img7)
augmented_testX_8.append(crop_img8)
augmented_testX_9.append(crop_img9)
augmented_testX_10.append(crop_img10)
augmented_testX_11.append(crop_img11)
augmented_testX_1 = np.array(augmented_testX_1)
augmented_testX_2 = np.array(augmented_testX_2)
augmented_testX_3 = np.array(augmented_testX_3)
augmented_testX_4 = np.array(augmented_testX_4)
augmented_testX_5 = np.array(augmented_testX_5)
augmented_testX_6 = np.array(augmented_testX_6)
augmented_testX_7 = np.array(augmented_testX_7)
augmented_testX_8 = np.array(augmented_testX_8)
augmented_testX_9 = np.array(augmented_testX_9)
augmented_testX_10 = np.array(augmented_testX_10)
augmented_testX_11 = np.array(augmented_testX_11)
testX = np.concatenate((augmented_testX_1, augmented_testX_2, augmented_testX_3, augmented_testX_4, augmented_testX_5, augmented_testX_6, augmented_testX_7, augmented_testX_8, augmented_testX_9, augmented_testX_10, augmented_testX_11))
# testXflipped = []
# for img in testX:
# horizontal_flip = cv2.flip( img, 0 )
# testXflipped.append(horizontal_flip)
# testXflipped = np.array(testXflipped)
# testX = np.concatenate((testX, testXflipped))
testY = np.repeat(testY, number)
return (testX, testY)
def create_montages(self, images, montage_image_number, image_size, full_montage_image_size):
output = []
if montage_image_number == 4:
data = images.reshape(int(len(images)/montage_image_number), montage_image_number, image_size, image_size, 3)
for iter in range(len(data)):
img_set = data[iter]
outputImage = np.zeros((full_montage_image_size, full_montage_image_size, 3))
outputImage[0:image_size, 0:image_size, :] = img_set[0]
outputImage[0:image_size, image_size:2*image_size, :] = img_set[1]
outputImage[image_size:2*image_size, 0:image_size, :] = img_set[2]
outputImage[image_size:2*image_size, image_size:2*image_size, :] = img_set[3]
# cv2.imshow("Result", outputImage)
# cv2.waitKey(0)
# raise Exception('Exit')
output.append(outputImage)
else:
raise Exception('Only implemented to montage 4 images into one image')
return np.array(output)
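    # Shape sketch: with montage_image_number=4 and square inputs of edge image_size,
    # an input stack of 4*k images (4*k, image_size, image_size, 3) becomes k montages
    # of shape (k, full_montage_image_size, full_montage_image_size, 3), laid out as a
    # 2x2 grid (the code assumes full_montage_image_size == 2 * image_size).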
def process_cnn_data(self, images, aux_data, num_unique_stock_ids, num_unique_image_types, num_unique_time_days, image_size, keras_model_type, data_augmentation, data_augmentation_test, montage_image_number, full_montage_image_size, output_autoencoder_model_file_path, log_file_path):
if log_file_path is not None:
sys.stderr = open(log_file_path, 'a')
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
trainX = []
testX = []
trainY = []
testY = []
datagen = self.get_imagedatagenerator()
datagen.fit(images)
images = datagen.standardize(images)
aux_data["value"] = aux_data["value"].astype(float)
output_image_file = aux_data["output_image_file"].tolist()
        # LSTM models group images by time, but each group is still tied to a single label, e.g. X, Y = [img_t1, img_t2, img_t3], y1.
if keras_model_type == 'densenet121_lstm_imagenet':
            input_image_size = images.shape[1]  # edge length of the incoming (square) images
            images = images.reshape(num_unique_stock_ids * num_unique_image_types, num_unique_time_days, input_image_size, input_image_size, 3)
(train_aux_data, test_aux_data, train_images, test_images) = train_test_split(aux_data, images, test_size=0.2)
trainX_length = len(train_images)
testX_length = len(test_images)
train_images = train_images.reshape(trainX_length * num_unique_time_days, input_image_size, input_image_size, 3)
test_images = test_images.reshape(testX_length * num_unique_time_days, input_image_size, input_image_size, 3)
trainX_length_flat = len(train_images)
test_images = datagen.standardize(test_images)
# (testX, testY) = self.generate_croppings(testX, testY, image_size, data_augmentation_test)
testX_resized = []
for img in test_images:
testX_resized.append(cv2.resize(img, (image_size, image_size)))
test_images = np.array(testX_resized)
test_images = test_images.reshape(data_augmentation_test * testX_length, num_unique_time_days, image_size, image_size, 3)
# trainX_aug = []
# trainY_aug = []
# augmented = datagen.flow(train_images, train_aux_data, batch_size=trainX_length_flat)
# for i in range(0, data_augmentation):
# X, y = augmented.next()
# if len(trainX_aug) == 0:
# trainX_aug = X
# trainY_aug = y
# else:
# trainX_aug = np.concatenate((trainX_aug, X))
# trainY_aug = np.concatenate((trainY_aug, y))
#
# trainX = trainX_aug
# trainY = trainY_aug
trainX_resized = []
for img in train_images:
trainX_resized.append(cv2.resize(img, (image_size, image_size)))
train_images = np.array(trainX_resized)
train_images = train_images.reshape(data_augmentation * trainX_length, num_unique_time_days, image_size, image_size, 3)
else:
images = self.create_montages(images, montage_image_number, image_size, full_montage_image_size)
(encoder, decoder, autoencoder) = self.build_autoencoder(full_montage_image_size, full_montage_image_size, 3)
opt = Adam(lr=1e-3)
autoencoder.compile(loss="mse", optimizer=opt)
(train_aux_data, test_aux_data, train_images, test_images) = train_test_split(aux_data, images, test_size=0.2)
            checkpoint = ModelCheckpoint(filepath=output_autoencoder_model_file_path, monitor='loss', verbose=1, save_best_only=True, mode='min', save_freq='epoch', save_weights_only=False)
callbacks_list = [checkpoint]
# train the convolutional autoencoder
H = autoencoder.fit(
train_images, train_images,
validation_data=(test_images, test_images),
epochs=25,
batch_size=32,
callbacks=callbacks_list
)
decoded = autoencoder.predict(images)
output_image_counter = 0
for image in decoded:
cv2.imwrite(output_image_file[output_image_counter], image*255)
output_image_counter += 1
(train_aux_data, test_aux_data, train_images, test_images) = train_test_split(aux_data, decoded, test_size=0.2)
# testY_length = len(testY)
# (testX, testY) = self.generate_croppings(testX, testY, image_size, data_augmentation_test)
# testY = testY.reshape(data_augmentation_test * testY_length, 1)
# augmented = datagen.flow(trainX, trainY, batch_size=len(trainX))
# for i in range(0, data_augmentation):
# X, y = augmented.next()
stock_id_binarizer = LabelBinarizer().fit(aux_data["stock_id"])
train_stock_id_categorical = stock_id_binarizer.transform(train_aux_data["stock_id"])
test_stock_id_categorical = stock_id_binarizer.transform(test_aux_data["stock_id"])
accession_id_binarizer = LabelBinarizer().fit(aux_data["accession_id"])
train_accession_id_categorical = accession_id_binarizer.transform(train_aux_data["accession_id"])
test_accession_id_categorical = accession_id_binarizer.transform(test_aux_data["accession_id"])
female_id_binarizer = LabelBinarizer().fit(aux_data["female_id"])
train_female_id_categorical = female_id_binarizer.transform(train_aux_data["female_id"])
test_female_id_categorical = female_id_binarizer.transform(test_aux_data["female_id"])
male_id_binarizer = LabelBinarizer().fit(aux_data["male_id"])
train_male_id_categorical = male_id_binarizer.transform(train_aux_data["male_id"])
test_male_id_categorical = male_id_binarizer.transform(test_aux_data["male_id"])
continuous = [col for col in aux_data.columns if 'aux_trait_' in col]
cs = MinMaxScaler()
if len(continuous) > 0:
trainContinuous = cs.fit_transform(train_aux_data[continuous])
testContinuous = cs.transform(test_aux_data[continuous])
#trainX = np.hstack((train_stock_id_categorical, train_accession_id_categorical, train_female_id_categorical, train_male_id_categorical, trainContinuous))
#testX = np.hstack((test_stock_id_categorical, test_accession_id_categorical, test_female_id_categorical, test_male_id_categorical, testContinuous))
trainX = trainContinuous
testX = testContinuous
else:
trainX = []
testX = []
trainx = np.array(trainX)
testx = np.array(testX)
max_label = aux_data["value"].max()
trainY = train_aux_data["value"]/max_label
testY = test_aux_data["value"]/max_label
train_genotype_files = train_aux_data["genotype_file"].tolist()
test_genotype_files = test_aux_data["genotype_file"].tolist()
train_genotype_data = []
for f in train_genotype_files:
if log_file_path is not None:
eprint(f)
else:
print(f)
if pd.isna(f) is False:
geno_data = pd.read_csv(f, sep="\t", header=None, na_values="NA")
train_genotype_data.append(np.array(geno_data.iloc[:,0]))
test_genotype_data = []
for f in test_genotype_files:
if log_file_path is not None:
eprint(f)
else:
print(f)
if pd.isna(f) is False:
geno_data = pd.read_csv(f, sep="\t", header=None, na_values="NA")
test_genotype_data.append(np.array(geno_data.iloc[:,0]))
train_genotype_data = np.array(train_genotype_data)
test_genotype_data = np.array(test_genotype_data)
eprint(train_genotype_data)
eprint(testX)
eprint(trainX)
return (test_images, np.array(testX), testY.to_numpy(), test_genotype_data, train_images, np.array(trainX), trainY.to_numpy(), train_genotype_data)
def process_cnn_data_predictions(self, data, aux_data, num_unique_stock_ids, num_unique_image_types, num_unique_time_days, image_size, keras_model_type, input_autoencoder_model_file_path, training_data, data_augmentation_test, montage_image_number, full_montage_image_size):
trainX = []
testX = []
trainY = []
testY = []
datagen = self.get_imagedatagenerator()
datagen.fit(training_data)
data = datagen.standardize(data)
output_image_file = aux_data["output_image_file"].tolist()
data = self.create_montages(data, montage_image_number, image_size, full_montage_image_size)
autoencoder_model = load_model(input_autoencoder_model_file_path)
data = autoencoder_model.predict(data)
#ret = self.generate_croppings(data, None, image_size, data_augmentation_test)
#augmented_data = ret[0]
        # LSTM models group images by time, but each group is still tied to a single label, e.g. X, Y = [img_t1, img_t2, img_t3], y1.
if keras_model_type == 'KerasCNNLSTMDenseNet121ImageNetWeights':
data = data.reshape(data_augmentation_test * num_unique_stock_ids * num_unique_image_types, num_unique_time_days, image_size, image_size, 3)
output_image_counter = 0
for image in data:
cv2.imwrite(output_image_file[output_image_counter], image*255)
output_image_counter += 1
stock_id_binarizer = LabelBinarizer().fit(aux_data["stock_id"])
stock_id_categorical = stock_id_binarizer.transform(aux_data["stock_id"])
accession_id_binarizer = LabelBinarizer().fit(aux_data["accession_id"])
accession_id_categorical = accession_id_binarizer.transform(aux_data["accession_id"])
female_id_binarizer = LabelBinarizer().fit(aux_data["female_id"])
female_id_categorical = female_id_binarizer.transform(aux_data["female_id"])
male_id_binarizer = LabelBinarizer().fit(aux_data["male_id"])
male_id_categorical = male_id_binarizer.transform(aux_data["male_id"])
continuous = [col for col in aux_data.columns if 'aux_trait_' in col]
cs = MinMaxScaler()
if len(continuous) > 0:
fitContinuous = cs.fit_transform(aux_data[continuous])
# fitX = np.hstack([stock_id_categorical, accession_id_categorical, female_id_categorical, male_id_categorical, fitContinuous])
fitX = fitContinuous
else:
# fitX = np.hstack([stock_id_categorical, accession_id_categorical, female_id_categorical, male_id_categorical])
fitX = []
fitX = np.array(fitX)
max_label = aux_data["value"].max()
fitY = aux_data["value"]/max_label
genotype_files = aux_data["genotype_file"].tolist()
genotype_data = []
for f in genotype_files:
if pd.isna(f) is False:
geno_data = pd.read_csv(f, sep="\t", header=None, na_values="NA")
genotype_data.append(np.array(geno_data.iloc[:,0]))
genotype_data = np.array(genotype_data)
return (data, fitX, genotype_data, fitY.to_numpy())
def build_autoencoder(self, width, height, depth, filters=(32, 64), latentDim=16):
inputShape = (height, width, depth)
chanDim = -1
# define the input to the encoder
inputs = Input(shape=inputShape)
x = inputs
# loop over the number of filters
for f in filters:
# apply a CONV => RELU => BN operation
x = Conv2D(f, (3, 3), strides=2, padding="same")(x)
x = LeakyReLU(alpha=0.2)(x)
x = BatchNormalization(axis=chanDim)(x)
# flatten the network and then construct our latent vector
volumeSize = K.int_shape(x)
x = Flatten()(x)
latent = Dense(latentDim)(x)
# build the encoder model
encoder = Model(inputs, latent, name="encoder")
# start building the decoder model which will accept the
# output of the encoder as its inputs
latentInputs = Input(shape=(latentDim,))
x = Dense(np.prod(volumeSize[1:]))(latentInputs)
x = Reshape((volumeSize[1], volumeSize[2], volumeSize[3]))(x)
# loop over our number of filters again, but this time in
# reverse order
for f in filters[::-1]:
# apply a CONV_TRANSPOSE => RELU => BN operation
x = Conv2DTranspose(f, (3, 3), strides=2, padding="same")(x)
x = LeakyReLU(alpha=0.2)(x)
x = BatchNormalization(axis=chanDim)(x)
# apply a single CONV_TRANSPOSE layer used to recover the
# original depth of the image
x = Conv2DTranspose(depth, (3, 3), padding="same")(x)
outputs = Activation("sigmoid")(x)
# build the decoder model
decoder = Model(latentInputs, outputs, name="decoder")
# our autoencoder is the encoder + decoder
autoencoder = Model(inputs, decoder(encoder(inputs)), name="autoencoder")
# return a 3-tuple of the encoder, decoder, and autoencoder
return (encoder, decoder, autoencoder)
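# Minimal usage sketch for the autoencoder builder (the sizes below are illustrative
# assumptions, not values taken from the surrounding pipeline):
#
#   processor = CNNProcessData()
#   encoder, decoder, autoencoder = processor.build_autoencoder(width=128, height=128, depth=3)
#   autoencoder.compile(loss="mse", optimizer=Adam(lr=1e-3))
#   # autoencoder.fit(train_images, train_images, validation_data=(test_images, test_images),
#   #                 epochs=25, batch_size=32)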
| 46.939467 | 288 | 0.666254 | ["MIT"] | solgenomics/DroneImageScripts | CNN/CNNProcessData.py | 19,386 | Python |
from numbers import Number
import yaml
from .color_tools import hex2rgb
def __default_grid__(ax):
"""This is a temporary function"""
ax.grid(b=True, which='major', color='#000000', alpha=0.2, linestyle='-', linewidth=0.5)
ax.grid(b=True, which='minor', color='#000000', alpha=0.1, linestyle='-', linewidth=0.25)
ax.minorticks_on() # Enables minor ticks without text, only the ticks.
class FigStyle:
def __init__(self, config_file):
self.__width = None
self.__ratio = None
self.__hspace = None
self.__colors = [None]
self.__linestyles = [None]
self.__markers = [None]
self.__grid = __default_grid__
self.__main_color = None
self.read_config_file(config_file) # This is what actually initializes the values.
@property
def colors(self):
return self.__colors
@property
def width(self):
return self.__width
@property
def ratio(self):
return self.__ratio
@property
def hspace(self):
return self.__hspace
@property
def grid(self):
return self.__grid
@property
def linestyles(self):
return self.__linestyles
@property
def markers(self):
return self.__markers
@property
def main_color(self):
return self.__main_color
def read_config_file(self, filename):
if not isinstance(filename, str):
raise ValueError('"file_name" must be a string')
with open(filename, 'r') as stream:
try:
                data = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
if 'width' not in data:
raise ValueError('The "figstyle" file must have a "width" field')
self.__width = float(data['width'])
if 'ratio' not in data:
raise ValueError('The "figstyle" file must have a "ratio" field')
if isinstance(data['ratio'], list) and len(data['ratio']) == 2 and isinstance(data['ratio'][0], Number) and isinstance(data['ratio'][1], Number):
self.__ratio = data['ratio']
else:
raise ValueError('Error reading "' + filename + '": ratio must be a list of two numbers [x_ratio, y_ratio]')
if 'hspace' not in data:
raise ValueError('The "figstyle" file must have a "hspace" field')
self.__hspace = float(data['hspace'])
if isinstance(data['colors'], list):
self.__colors = [None]*len(data['colors'])
for k in range(len(data['colors'])):
self.__colors[k] = hex2rgb(data['colors'][k])
if 'linestyles' in data:
if isinstance(data['linestyles'], list):
self.__linestyles = data['linestyles']
if 'markers' in data:
if isinstance(data['markers'], list):
self.__markers = data['markers']
if 'main_color' in data:
if isinstance(data['main_color'], str):
self.__main_color = hex2rgb(data['main_color'])
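# Example of the YAML layout that read_config_file() expects (the concrete values are
# illustrative assumptions, not taken from any shipped config file):
#
#   width: 6.4
#   ratio: [4, 3]
#   hspace: 0.1
#   colors: ['#1f77b4', '#ff7f0e']
#   linestyles: ['-', '--']      # optional
#   markers: ['o', 's']          # optional
#   main_color: '#000000'        # optional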
| 29.460674 | 147 | 0.691838 | ["MIT"] | SengerM/nicenquickplotlib | nicenquickplotlib/config_types.py | 2,622 | Python |
from machine import Pin, Map, PWM # include Pin, Map and PWM functions from machine module
import time # include time module
# create PWM on WIO BUZZER with 1000Hz frequency and 250 duty cycle
BUZZER = PWM(Pin(Map.WIO_BUZZER), freq=1000, duty=250)
| 37 | 92 | 0.741313 | ["MIT"] | lakshanthad/Wio_Terminal_Classroom_Ardupy | Classroom 4/Buzzer_PWM.py | 259 | Python |
import sys
from . import pghoard
sys.exit(pghoard.main())
| 10 | 24 | 0.733333 | ["Apache-2.0"] | Adnuntius/pghoard | pghoard/__main__.py | 60 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '__first__'),
]
operations = [
migrations.CreateModel(
name='GoogleMap',
fields=[
('cmsplugin_ptr', models.OneToOneField(serialize=False, parent_link=True, auto_created=True, to='cms.CMSPlugin', primary_key=True)),
('title', models.CharField(verbose_name='map title', blank=True, null=True, max_length=100)),
('address', models.CharField(verbose_name='address', max_length=150)),
('zipcode', models.CharField(verbose_name='zip code', max_length=30)),
('city', models.CharField(verbose_name='city', max_length=100)),
('content', models.CharField(help_text='Displayed under address in the bubble.', blank=True, max_length=255, verbose_name='additional content')),
('zoom', models.PositiveSmallIntegerField(verbose_name='zoom level', default=13, choices=[(0, '0'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10'), (11, '11'), (12, '12'), (13, '13'), (14, '14'), (15, '15'), (16, '16'), (17, '17'), (18, '18'), (19, '19'), (20, '20'), (21, '21')])),
('lat', models.DecimalField(help_text='Use latitude & longitude to fine tune the map position.', blank=True, max_digits=10, verbose_name='latitude', null=True, decimal_places=6)),
('lng', models.DecimalField(max_digits=10, verbose_name='longitude', blank=True, null=True, decimal_places=6)),
('route_planer_title', models.CharField(verbose_name='route planer title', blank=True, null=True, max_length=150, default='Calculate your fastest way to here')),
('route_planer', models.BooleanField(verbose_name='route planer', default=False)),
('width', models.CharField(help_text='Plugin width (in pixels or percent).', default='100%', max_length=6, verbose_name='width')),
('height', models.CharField(help_text='Plugin height (in pixels).', default='400px', max_length=6, verbose_name='height')),
('info_window', models.BooleanField(help_text='Show textbox over marker', default=True, verbose_name='info window')),
('scrollwheel', models.BooleanField(help_text='Enable scrollwheel zooming on the map', default=True, verbose_name='scrollwheel')),
('double_click_zoom', models.BooleanField(verbose_name='double click zoom', default=True)),
('draggable', models.BooleanField(verbose_name='draggable', default=True)),
('keyboard_shortcuts', models.BooleanField(verbose_name='keyboard shortcuts', default=True)),
('pan_control', models.BooleanField(verbose_name='Pan control', default=True)),
('zoom_control', models.BooleanField(verbose_name='zoom control', default=True)),
('street_view_control', models.BooleanField(verbose_name='Street View control', default=True)),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
| 72.533333 | 352 | 0.61826 | ["Apache-2.0"] | Glasgow2015/team-10 | env/lib/python2.7/site-packages/djangocms_googlemap/migrations_django/0001_initial.py | 3,264 | Python |
import os.path
import IMLearn.learners.regressors.linear_regression
from IMLearn.learners.regressors import PolynomialFitting
from IMLearn.utils import split_train_test
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
from IMLearn.metrics.loss_functions import mean_square_error
CITY_TEMPERATURE_DATA_PATH = os.path.join(os.path.curdir, "..", "datasets", "City_Temperature.csv")
def load_data(filename: str) -> pd.DataFrame:
"""
Load city daily temperature dataset and preprocess data.
Parameters
----------
filename: str
        Path to city daily temperature dataset
Returns
-------
Design matrix and response vector (Temp)
"""
data = pd.read_csv(filename, parse_dates=["Date"]).drop_duplicates()
data = data.drop(data[data["Temp"] < -70].index) # invalid Temp
data["DayOfYear"] = data['Date'].dt.dayofyear
return data
def question_2(data):
""" Exploring data specifically in Israel """
data = data.copy()
data = data[data["Country"] == "Israel"]
data["Year"] = data["Year"].astype(str)
fig = px.scatter(data, x="DayOfYear", y="Temp", color="Year", width=1500, height=700,
labels={"DayOfYear": "Day of Year", "Temp": "Temperature"},
title="Q2(1) The relation between the day in the year and the temperature in Israel")
fig.update_xaxes(range=[0, 365], tick0=0, dtick=20)
fig.show()
std_by_month = data.groupby("Month").std().reset_index()
fig = px.bar(std_by_month, x="Month", y="Temp", width=1500, height=700,
labels={"Temp": "Std of the daily temperatures"},
title="Q2(2) The Standard Deviation of the Daily Temperatures Per Month in Israel")
fig.data[-1].text = np.round(std_by_month["Temp"], 3)
fig.update_xaxes(tick0=1, dtick=1)
fig.update_traces(textposition='outside')
fig.show()
def question_3(data):
""" Exploring differences between countries"""
agg_data_mean = data.groupby(["Country", "Month"]).mean().reset_index()
agg_data_std = data.groupby(["Country", "Month"]).std().reset_index()
fig = px.line(agg_data_mean, x="Month", y="Temp", color="Country", error_y=agg_data_std["Temp"],
width=1500, height=700, labels={"Temp": "Averaged Temperature"},
title="Q3 The Average Monthly Temperatures in Different Countries")
fig.update_xaxes(tick0=1, dtick=1)
fig.show()
def question_4(data):
""" Fitting model for different values of `k` """
data = data[data["Country"] == "Israel"]
train_X, train_y, test_X, test_y = split_train_test(data["DayOfYear"], data["Temp"])
losses = np.array([])
for k in range(1, 11):
poly_fit = PolynomialFitting(k)
poly_fit.fit(train_X.to_numpy(), train_y.to_numpy())
loss = poly_fit.loss(test_X.to_numpy(), test_y.to_numpy())
losses = np.append(losses, round(loss, 2))
print(k, loss)
fig = px.bar(x=range(1, 11), y=losses, width=1500, height=700,
labels={"x": "Polynomials Degrees (k)", "y": "Test Error (MSE)"},
title="Q4 Test Errors for Different Polynomials Degrees (k)")
fig.data[-1].text = losses
fig.update_xaxes(tick0=1, dtick=1)
fig.update_traces(textposition="outside")
fig.show()
def question_5(data):
""" Evaluating fitted model on different countries """
data_israel = data[data["Country"] == "Israel"]
poly_fit = PolynomialFitting(k=5)
poly_fit.fit(data_israel["DayOfYear"], data_israel["Temp"])
other_countries = ["Jordan", "South Africa", "The Netherlands"]
losses = np.array([])
for country in other_countries:
country_data = data[data["Country"] == country]
loss = poly_fit.loss(country_data["DayOfYear"], country_data["Temp"])
losses = np.append(losses, loss)
fig = px.bar(x=np.array(other_countries), y=losses, width=700, height=700,
labels={"x": "Country", "y": "Losses (MSE)"}, title="Q5 Losses (MSE) per Country With k=5")
fig.data[-1].text = np.round(losses, 3)
fig.update_traces(textposition="outside")
fig.show()
if __name__ == '__main__':
np.random.seed(0)
# Question 1 - Load and preprocessing of city temperature dataset
data = load_data(CITY_TEMPERATURE_DATA_PATH)
# Question 2 - Exploring data for specific country
question_2(data)
# Question 3 - Exploring differences between countries
question_3(data)
# Question 4 - Fitting model for different values of `k`
question_4(data)
# Question 5 - Evaluating fitted model on different countries
question_5(data)
| 35.179104 | 108 | 0.65507 | ["MIT"] | noamwino/IML.HUJI | exercises/city_temperature_prediction.py | 4,714 | Python |
# ABC 192 D: count how many values <= m can be obtained by reading the digit
# string of x in some base b >= (max digit + 1).
x = int(input())
m = int(input())
if x < 10:
if x <= m:
print(1)
else:
print(0)
else:
xarr = []
while x:
xarr = [x % 10] + xarr
x //= 10
n = len(xarr)
l = max(xarr) + 1
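    # check(): true while the leading digit alone, weighted by base**(n-1), does not
    # exceed m; used only to find an upper bound for the base by repeated doubling.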
def check(base, xarr):
ans = xarr[0] * (base ** (n - 1))
if ans > m:
return False
return True
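    # check1(): true if the whole digit string of x, interpreted in base `base`, is <= m.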
def check1(base, xarr):
ans = 0
for i in range(n):
ans += xarr[i] * base ** (n - 1 - i)
if ans > m:
return False
return True
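    # double r until even the leading digit overflows m, giving an upper bound for the search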
r = 1
while check(2 * r, xarr):
r *= 2
r *= 2
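    # binary search for the smallest base whose value exceeds m; valid bases are l .. ll-1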
ll, rr = l, r
while ll < rr:
mid = ll + (rr - ll) // 2
if check1(mid, xarr):
ll = mid + 1
else:
rr = mid
if ll - 1 < l:
print(0)
else:
print(ll - l)
| 18.977778 | 48 | 0.375878 | ["MIT"] | ApocalypseMac/CP | atcoder/ABC 192/D.py | 854 | Python |
def giving():
i01.moveHead(44,82)
i01.moveArm("left",15,55,68,10)
i01.moveArm("right",13,40,74,13)
i01.moveHand("left",61,0,14,0,0,180)
i01.moveHand("right",0,24,24,19,21,25)
i01.moveTorso(90,90,90)
| 25.5 | 39 | 0.681373 | ["Apache-2.0"] | Alexinator40/pyrobotlab | home/hairygael/GESTURES/giving.py | 204 | Python |
from tests.testmodels import Event, IntFields, MinRelation, Node, Reporter, Team, Tournament, Tree
from tortoise import Tortoise
from tortoise.contrib import test
from tortoise.exceptions import (
DoesNotExist,
FieldError,
IntegrityError,
MultipleObjectsReturned,
ParamsError,
)
from tortoise.expressions import F, RawSQL, Subquery
# TODO: Test the many exceptions in QuerySet
# TODO: .filter(intnum_null=None) does not work as expected
class TestQueryset(test.TestCase):
async def asyncSetUp(self):
await super().asyncSetUp()
# Build large dataset
self.intfields = [await IntFields.create(intnum=val) for val in range(10, 100, 3)]
self.db = Tortoise.get_connection("models")
async def test_all_count(self):
self.assertEqual(await IntFields.all().count(), 30)
self.assertEqual(await IntFields.filter(intnum_null=80).count(), 0)
async def test_exists(self):
ret = await IntFields.filter(intnum=0).exists()
self.assertFalse(ret)
ret = await IntFields.filter(intnum=10).exists()
self.assertTrue(ret)
ret = await IntFields.filter(intnum__gt=10).exists()
self.assertTrue(ret)
ret = await IntFields.filter(intnum__lt=10).exists()
self.assertFalse(ret)
async def test_limit_count(self):
self.assertEqual(await IntFields.all().limit(10).count(), 10)
async def test_limit_negative(self):
with self.assertRaisesRegex(ParamsError, "Limit should be non-negative number"):
await IntFields.all().limit(-10)
async def test_offset_count(self):
self.assertEqual(await IntFields.all().offset(10).count(), 20)
async def test_offset_negative(self):
with self.assertRaisesRegex(ParamsError, "Offset should be non-negative number"):
await IntFields.all().offset(-10)
async def test_join_count(self):
tour = await Tournament.create(name="moo")
await MinRelation.create(tournament=tour)
self.assertEqual(await MinRelation.all().count(), 1)
self.assertEqual(await MinRelation.filter(tournament__id=tour.id).count(), 1)
async def test_modify_dataset(self):
# Modify dataset
rows_affected = await IntFields.filter(intnum__gte=70).update(intnum_null=80)
self.assertEqual(rows_affected, 10)
self.assertEqual(await IntFields.filter(intnum_null=80).count(), 10)
self.assertEqual(await IntFields.filter(intnum_null__isnull=True).count(), 20)
await IntFields.filter(intnum_null__isnull=True).update(intnum_null=-1)
self.assertEqual(await IntFields.filter(intnum_null=None).count(), 0)
self.assertEqual(await IntFields.filter(intnum_null=-1).count(), 20)
async def test_distinct(self):
# Test distinct
await IntFields.filter(intnum__gte=70).update(intnum_null=80)
await IntFields.filter(intnum_null__isnull=True).update(intnum_null=-1)
self.assertEqual(
await IntFields.all()
.order_by("intnum_null")
.distinct()
.values_list("intnum_null", flat=True),
[-1, 80],
)
self.assertEqual(
await IntFields.all().order_by("intnum_null").distinct().values("intnum_null"),
[{"intnum_null": -1}, {"intnum_null": 80}],
)
async def test_limit_offset_values_list(self):
# Test limit/offset/ordering values_list
self.assertEqual(
await IntFields.all().order_by("intnum").limit(10).values_list("intnum", flat=True),
[10, 13, 16, 19, 22, 25, 28, 31, 34, 37],
)
self.assertEqual(
await IntFields.all()
.order_by("intnum")
.limit(10)
.offset(10)
.values_list("intnum", flat=True),
[40, 43, 46, 49, 52, 55, 58, 61, 64, 67],
)
self.assertEqual(
await IntFields.all()
.order_by("intnum")
.limit(10)
.offset(20)
.values_list("intnum", flat=True),
[70, 73, 76, 79, 82, 85, 88, 91, 94, 97],
)
self.assertEqual(
await IntFields.all()
.order_by("intnum")
.limit(10)
.offset(30)
.values_list("intnum", flat=True),
[],
)
self.assertEqual(
await IntFields.all().order_by("-intnum").limit(10).values_list("intnum", flat=True),
[97, 94, 91, 88, 85, 82, 79, 76, 73, 70],
)
self.assertEqual(
await IntFields.all()
.order_by("intnum")
.limit(10)
.filter(intnum__gte=40)
.values_list("intnum", flat=True),
[40, 43, 46, 49, 52, 55, 58, 61, 64, 67],
)
async def test_limit_offset_values(self):
# Test limit/offset/ordering values
self.assertEqual(
await IntFields.all().order_by("intnum").limit(5).values("intnum"),
[{"intnum": 10}, {"intnum": 13}, {"intnum": 16}, {"intnum": 19}, {"intnum": 22}],
)
self.assertEqual(
await IntFields.all().order_by("intnum").limit(5).offset(10).values("intnum"),
[{"intnum": 40}, {"intnum": 43}, {"intnum": 46}, {"intnum": 49}, {"intnum": 52}],
)
self.assertEqual(
await IntFields.all().order_by("intnum").limit(5).offset(30).values("intnum"), []
)
self.assertEqual(
await IntFields.all().order_by("-intnum").limit(5).values("intnum"),
[{"intnum": 97}, {"intnum": 94}, {"intnum": 91}, {"intnum": 88}, {"intnum": 85}],
)
self.assertEqual(
await IntFields.all()
.order_by("intnum")
.limit(5)
.filter(intnum__gte=40)
.values("intnum"),
[{"intnum": 40}, {"intnum": 43}, {"intnum": 46}, {"intnum": 49}, {"intnum": 52}],
)
async def test_in_bulk(self):
id_list = [item.pk for item in await IntFields.all().only("id").limit(2)]
ret = await IntFields.in_bulk(id_list=id_list)
self.assertEqual(list(ret.keys()), id_list)
async def test_first(self):
# Test first
self.assertEqual(
(await IntFields.all().order_by("intnum").filter(intnum__gte=40).first()).intnum, 40
)
self.assertEqual(
(await IntFields.all().order_by("intnum").filter(intnum__gte=40).first().values())[
"intnum"
],
40,
)
self.assertEqual(
(await IntFields.all().order_by("intnum").filter(intnum__gte=40).first().values_list())[
1
],
40,
)
self.assertEqual(
await IntFields.all().order_by("intnum").filter(intnum__gte=400).first(), None
)
self.assertEqual(
await IntFields.all().order_by("intnum").filter(intnum__gte=400).first().values(), None
)
self.assertEqual(
await IntFields.all().order_by("intnum").filter(intnum__gte=400).first().values_list(),
None,
)
async def test_get_or_none(self):
self.assertEqual((await IntFields.all().get_or_none(intnum=40)).intnum, 40)
self.assertEqual((await IntFields.all().get_or_none(intnum=40).values())["intnum"], 40)
self.assertEqual((await IntFields.all().get_or_none(intnum=40).values_list())[1], 40)
self.assertEqual(
await IntFields.all().order_by("intnum").get_or_none(intnum__gte=400), None
)
self.assertEqual(
await IntFields.all().order_by("intnum").get_or_none(intnum__gte=400).values(), None
)
self.assertEqual(
await IntFields.all().order_by("intnum").get_or_none(intnum__gte=400).values_list(),
None,
)
with self.assertRaises(MultipleObjectsReturned):
await IntFields.all().order_by("intnum").get_or_none(intnum__gte=40)
with self.assertRaises(MultipleObjectsReturned):
await IntFields.all().order_by("intnum").get_or_none(intnum__gte=40).values()
with self.assertRaises(MultipleObjectsReturned):
await IntFields.all().order_by("intnum").get_or_none(intnum__gte=40).values_list()
async def test_get(self):
await IntFields.filter(intnum__gte=70).update(intnum_null=80)
# Test get
self.assertEqual((await IntFields.all().get(intnum=40)).intnum, 40)
self.assertEqual((await IntFields.all().get(intnum=40).values())["intnum"], 40)
self.assertEqual((await IntFields.all().get(intnum=40).values_list())[1], 40)
self.assertEqual((await IntFields.all().all().all().all().all().get(intnum=40)).intnum, 40)
self.assertEqual(
(await IntFields.all().all().all().all().all().get(intnum=40).values())["intnum"], 40
)
self.assertEqual(
(await IntFields.all().all().all().all().all().get(intnum=40).values_list())[1], 40
)
self.assertEqual((await IntFields.get(intnum=40)).intnum, 40)
self.assertEqual((await IntFields.get(intnum=40).values())["intnum"], 40)
self.assertEqual((await IntFields.get(intnum=40).values_list())[1], 40)
with self.assertRaises(DoesNotExist):
await IntFields.all().get(intnum=41)
with self.assertRaises(DoesNotExist):
await IntFields.all().get(intnum=41).values()
with self.assertRaises(DoesNotExist):
await IntFields.all().get(intnum=41).values_list()
with self.assertRaises(DoesNotExist):
await IntFields.get(intnum=41)
with self.assertRaises(DoesNotExist):
await IntFields.get(intnum=41).values()
with self.assertRaises(DoesNotExist):
await IntFields.get(intnum=41).values_list()
with self.assertRaises(MultipleObjectsReturned):
await IntFields.all().get(intnum_null=80)
with self.assertRaises(MultipleObjectsReturned):
await IntFields.all().get(intnum_null=80).values()
with self.assertRaises(MultipleObjectsReturned):
await IntFields.all().get(intnum_null=80).values_list()
with self.assertRaises(MultipleObjectsReturned):
await IntFields.get(intnum_null=80)
with self.assertRaises(MultipleObjectsReturned):
await IntFields.get(intnum_null=80).values()
with self.assertRaises(MultipleObjectsReturned):
await IntFields.get(intnum_null=80).values_list()
async def test_delete(self):
# Test delete
await (await IntFields.get(intnum=40)).delete()
with self.assertRaises(DoesNotExist):
await IntFields.get(intnum=40)
self.assertEqual(await IntFields.all().count(), 29)
rows_affected = (
await IntFields.all().order_by("intnum").limit(10).filter(intnum__gte=70).delete()
)
self.assertEqual(rows_affected, 10)
self.assertEqual(await IntFields.all().count(), 19)
@test.requireCapability(support_update_limit_order_by=True)
async def test_delete_limit(self):
await IntFields.all().limit(1).delete()
self.assertEqual(await IntFields.all().count(), 29)
@test.requireCapability(support_update_limit_order_by=True)
async def test_delete_limit_order_by(self):
await IntFields.all().limit(1).order_by("-id").delete()
self.assertEqual(await IntFields.all().count(), 29)
with self.assertRaises(DoesNotExist):
await IntFields.get(intnum=97)
async def test_async_iter(self):
counter = 0
async for _ in IntFields.all():
counter += 1
self.assertEqual(await IntFields.all().count(), counter)
async def test_update_basic(self):
obj0 = await IntFields.create(intnum=2147483647)
await IntFields.filter(id=obj0.id).update(intnum=2147483646)
obj = await IntFields.get(id=obj0.id)
self.assertEqual(obj.intnum, 2147483646)
self.assertEqual(obj.intnum_null, None)
async def test_update_f_expression(self):
obj0 = await IntFields.create(intnum=2147483647)
await IntFields.filter(id=obj0.id).update(intnum=F("intnum") - 1)
obj = await IntFields.get(id=obj0.id)
self.assertEqual(obj.intnum, 2147483646)
async def test_update_badparam(self):
obj0 = await IntFields.create(intnum=2147483647)
with self.assertRaisesRegex(FieldError, "Unknown keyword argument"):
await IntFields.filter(id=obj0.id).update(badparam=1)
async def test_update_pk(self):
obj0 = await IntFields.create(intnum=2147483647)
with self.assertRaisesRegex(IntegrityError, "is PK and can not be updated"):
await IntFields.filter(id=obj0.id).update(id=1)
async def test_update_virtual(self):
tour = await Tournament.create(name="moo")
obj0 = await MinRelation.create(tournament=tour)
with self.assertRaisesRegex(FieldError, "is virtual and can not be updated"):
await MinRelation.filter(id=obj0.id).update(participants=[])
async def test_bad_ordering(self):
with self.assertRaisesRegex(FieldError, "Unknown field moo1fip for model IntFields"):
await IntFields.all().order_by("moo1fip")
async def test_duplicate_values(self):
with self.assertRaisesRegex(FieldError, "Duplicate key intnum"):
await IntFields.all().values("intnum", "intnum")
async def test_duplicate_values_list(self):
await IntFields.all().values_list("intnum", "intnum")
async def test_duplicate_values_kw(self):
with self.assertRaisesRegex(FieldError, "Duplicate key intnum"):
await IntFields.all().values("intnum", intnum="intnum_null")
async def test_duplicate_values_kw_badmap(self):
with self.assertRaisesRegex(FieldError, 'Unknown field "intnum2" for model "IntFields"'):
await IntFields.all().values(intnum="intnum2")
async def test_bad_values(self):
with self.assertRaisesRegex(FieldError, 'Unknown field "int2num" for model "IntFields"'):
await IntFields.all().values("int2num")
async def test_bad_values_list(self):
with self.assertRaisesRegex(FieldError, 'Unknown field "int2num" for model "IntFields"'):
await IntFields.all().values_list("int2num")
async def test_many_flat_values_list(self):
with self.assertRaisesRegex(
TypeError, "You can flat value_list only if contains one field"
):
await IntFields.all().values_list("intnum", "intnum_null", flat=True)
async def test_all_flat_values_list(self):
with self.assertRaisesRegex(
TypeError, "You can flat value_list only if contains one field"
):
await IntFields.all().values_list(flat=True)
async def test_all_values_list(self):
data = await IntFields.all().order_by("id").values_list()
self.assertEqual(data[2], (self.intfields[2].id, 16, None))
async def test_all_values(self):
data = await IntFields.all().order_by("id").values()
self.assertEqual(data[2], {"id": self.intfields[2].id, "intnum": 16, "intnum_null": None})
async def test_order_by_bad_value(self):
with self.assertRaisesRegex(FieldError, "Unknown field badid for model IntFields"):
await IntFields.all().order_by("badid").values_list()
async def test_annotate_order_expression(self):
data = (
await IntFields.annotate(idp=F("id") + 1)
.order_by("-idp")
.first()
.values_list("id", "idp")
)
self.assertEqual(data[0] + 1, data[1])
async def test_annotate_expression_filter(self):
count = await IntFields.annotate(intnum=F("intnum") + 1).filter(intnum__gt=30).count()
self.assertEqual(count, 23)
async def test_get_raw_sql(self):
sql = IntFields.all().sql()
self.assertRegex(sql, r"^SELECT.+FROM.+")
@test.requireCapability(support_index_hint=True)
async def test_force_index(self):
sql = IntFields.filter(pk=1).only("id").force_index("index_name").sql()
self.assertEqual(
sql,
"SELECT `id` `id` FROM `intfields` FORCE INDEX (`index_name`) WHERE `id`=1",
)
sql_again = IntFields.filter(pk=1).only("id").force_index("index_name").sql()
self.assertEqual(
sql_again,
"SELECT `id` `id` FROM `intfields` FORCE INDEX (`index_name`) WHERE `id`=1",
)
@test.requireCapability(support_index_hint=True)
async def test_force_index_avaiable_in_more_query(self):
sql_ValuesQuery = IntFields.filter(pk=1).force_index("index_name").values("id").sql()
self.assertEqual(
sql_ValuesQuery,
"SELECT `id` `id` FROM `intfields` FORCE INDEX (`index_name`) WHERE `id`=1",
)
sql_ValuesListQuery = (
IntFields.filter(pk=1).force_index("index_name").values_list("id").sql()
)
self.assertEqual(
sql_ValuesListQuery,
"SELECT `id` `0` FROM `intfields` FORCE INDEX (`index_name`) WHERE `id`=1",
)
sql_CountQuery = IntFields.filter(pk=1).force_index("index_name").count().sql()
self.assertEqual(
sql_CountQuery,
"SELECT COUNT(*) FROM `intfields` FORCE INDEX (`index_name`) WHERE `id`=1",
)
sql_ExistsQuery = IntFields.filter(pk=1).force_index("index_name").exists().sql()
self.assertEqual(
sql_ExistsQuery,
"SELECT 1 FROM `intfields` FORCE INDEX (`index_name`) WHERE `id`=1 LIMIT 1",
)
@test.requireCapability(support_index_hint=True)
async def test_use_index(self):
sql = IntFields.filter(pk=1).only("id").use_index("index_name").sql()
self.assertEqual(
sql,
"SELECT `id` `id` FROM `intfields` USE INDEX (`index_name`) WHERE `id`=1",
)
sql_again = IntFields.filter(pk=1).only("id").use_index("index_name").sql()
self.assertEqual(
sql_again,
"SELECT `id` `id` FROM `intfields` USE INDEX (`index_name`) WHERE `id`=1",
)
@test.requireCapability(support_index_hint=True)
async def test_use_index_avaiable_in_more_query(self):
sql_ValuesQuery = IntFields.filter(pk=1).use_index("index_name").values("id").sql()
self.assertEqual(
sql_ValuesQuery,
"SELECT `id` `id` FROM `intfields` USE INDEX (`index_name`) WHERE `id`=1",
)
sql_ValuesListQuery = IntFields.filter(pk=1).use_index("index_name").values_list("id").sql()
self.assertEqual(
sql_ValuesListQuery,
"SELECT `id` `0` FROM `intfields` USE INDEX (`index_name`) WHERE `id`=1",
)
sql_CountQuery = IntFields.filter(pk=1).use_index("index_name").count().sql()
self.assertEqual(
sql_CountQuery,
"SELECT COUNT(*) FROM `intfields` USE INDEX (`index_name`) WHERE `id`=1",
)
sql_ExistsQuery = IntFields.filter(pk=1).use_index("index_name").exists().sql()
self.assertEqual(
sql_ExistsQuery,
"SELECT 1 FROM `intfields` USE INDEX (`index_name`) WHERE `id`=1 LIMIT 1",
)
@test.requireCapability(support_for_update=True)
async def test_select_for_update(self):
sql1 = IntFields.filter(pk=1).only("id").select_for_update().sql()
sql2 = IntFields.filter(pk=1).only("id").select_for_update(nowait=True).sql()
sql3 = IntFields.filter(pk=1).only("id").select_for_update(skip_locked=True).sql()
sql4 = IntFields.filter(pk=1).only("id").select_for_update(of=("intfields",)).sql()
dialect = self.db.schema_generator.DIALECT
if dialect == "postgres":
self.assertEqual(
sql1,
'SELECT "id" "id" FROM "intfields" WHERE "id"=1 FOR UPDATE',
)
self.assertEqual(
sql2,
'SELECT "id" "id" FROM "intfields" WHERE "id"=1 FOR UPDATE NOWAIT',
)
self.assertEqual(
sql3,
'SELECT "id" "id" FROM "intfields" WHERE "id"=1 FOR UPDATE SKIP LOCKED',
)
self.assertEqual(
sql4,
'SELECT "id" "id" FROM "intfields" WHERE "id"=1 FOR UPDATE OF "intfields"',
)
elif dialect == "mysql":
self.assertEqual(
sql1,
"SELECT `id` `id` FROM `intfields` WHERE `id`=1 FOR UPDATE",
)
self.assertEqual(
sql2,
"SELECT `id` `id` FROM `intfields` WHERE `id`=1 FOR UPDATE NOWAIT",
)
self.assertEqual(
sql3,
"SELECT `id` `id` FROM `intfields` WHERE `id`=1 FOR UPDATE SKIP LOCKED",
)
self.assertEqual(
sql4,
"SELECT `id` `id` FROM `intfields` WHERE `id`=1 FOR UPDATE OF `intfields`",
)
async def test_select_related(self):
tournament = await Tournament.create(name="1")
reporter = await Reporter.create(name="Reporter")
event = await Event.create(name="1", tournament=tournament, reporter=reporter)
event = await Event.all().select_related("tournament", "reporter").get(pk=event.pk)
self.assertEqual(event.tournament.pk, tournament.pk)
self.assertEqual(event.reporter.pk, reporter.pk)
async def test_select_related_with_two_same_models(self):
parent_node = await Node.create(name="1")
child_node = await Node.create(name="2")
tree = await Tree.create(parent=parent_node, child=child_node)
tree = await Tree.all().select_related("parent", "child").get(pk=tree.pk)
self.assertEqual(tree.parent.pk, parent_node.pk)
self.assertEqual(tree.parent.name, parent_node.name)
self.assertEqual(tree.child.pk, child_node.pk)
self.assertEqual(tree.child.name, child_node.name)
@test.requireCapability(dialect="postgres")
async def test_postgres_search(self):
name = "hello world"
await Tournament.create(name=name)
ret = await Tournament.filter(name__search="hello").first()
self.assertEqual(ret.name, name)
async def test_subquery_select(self):
t1 = await Tournament.create(name="1")
ret = (
await Tournament.filter(pk=t1.pk)
.annotate(ids=Subquery(Tournament.filter(pk=t1.pk).values("id")))
.values("ids", "id")
)
self.assertEqual(ret, [{"id": t1.pk, "ids": t1.pk}])
async def test_subquery_access(self):
"""This test ensures that accessing a query does not modify it (#780)"""
tournament_1 = await Tournament.create(name="1")
event_1 = await Event.create(event_id=1, name="event 1", tournament=tournament_1)
event_2 = await Event.create(event_id=2, name="event 2", tournament=tournament_1)
team_1 = await Team.create(id=1, name="team 1")
team_2 = await Team.create(id=2, name="team 2")
await event_1.participants.add(team_1)
await event_2.participants.add(team_1, team_2)
self.assertEqual(await event_1.participants.all(), [team_1])
self.assertEqual(await event_2.participants.all(), [team_1, team_2])
sub_query_team_1 = Subquery(Event.filter(participants__id=1).values("event_id"))
sub_query_team_2 = Subquery(Event.filter(participants__id=2).values("event_id"))
query = Event.filter(pk__in=sub_query_team_1) # should select event 1 and event 2
query = query.filter(pk__in=sub_query_team_2) # should select only event 2
self.assertEqual(query.sql(), query.sql())
self.assertEqual(await query.count(), await query.count())
self.assertEqual(await query.count(), 1)
self.assertEqual(await query.all(), [event_2])
async def test_subquery_filter(self):
t1 = await Tournament.create(name="1")
ret = await Tournament.filter(pk=Subquery(Tournament.filter(pk=t1.pk).values("id"))).first()
self.assertEqual(ret, t1)
async def test_raw_sql_count(self):
t1 = await Tournament.create(name="1")
ret = await Tournament.filter(pk=t1.pk).annotate(count=RawSQL("count(*)")).values("count")
self.assertEqual(ret, [{"count": 1}])
async def test_raw_sql_select(self):
t1 = await Tournament.create(id=1, name="1")
ret = (
await Tournament.filter(pk=t1.pk)
.annotate(idp=RawSQL("id + 1"))
.filter(idp=2)
.values("idp")
)
self.assertEqual(ret, [{"idp": 2}])
async def test_raw_sql_filter(self):
ret = await Tournament.filter(pk=RawSQL("id + 1"))
self.assertEqual(ret, [])
async def test_annotation_field_priorior_to_model_field(self):
        # Sometimes a field name used in annotate() also exists in the model's field set,
        # and the annotation needs to take priority when the select query is constructed.
t1 = await Tournament.create(name="1")
ret = await Tournament.filter(pk=t1.pk).annotate(id=RawSQL("id + 1")).values("id")
self.assertEqual(ret, [{"id": t1.pk + 1}])
| 40.353968 | 100 | 0.619911 | ["Apache-2.0"] | spacemanspiff2007/tortoise-orm | tests/test_queryset.py | 25,423 | Python |
from data_importers.github_importer import BaseGitHubImporter
class Command(BaseGitHubImporter):
srid = 27700
districts_srid = 27700
council_id = "EPS"
elections = ["2021-05-06"]
scraper_name = "wdiv-scrapers/DC-PollingStations-EpsomAndEwell"
geom_type = "gml"
seen = set()
def district_record_to_dict(self, record):
poly = self.extract_geometry(record, self.geom_type, self.get_srid("districts"))
if record["id"] in [
"pollingdistricts.33",
"pollingdistricts.38",
"pollingdistricts.50",
]:
return None
return {
"internal_council_id": record["district"],
"name": record["district"],
"area": poly,
}
def station_record_to_dict(self, record):
postcode = " ".join(record["address"].split(" ")[-2:])
point = self.extract_geometry(record, self.geom_type, self.get_srid())
if (record["district"], postcode) in self.seen:
return None
else:
self.seen.add((record["district"], postcode))
return {
"internal_council_id": record["psnumber"],
"polling_district_id": record["district"],
"address": record["address"],
"postcode": postcode,
"location": point,
}
| 30.590909 | 88 | 0.581724 | ["BSD-3-Clause"] | DemocracyClub/UK-Polling-Stations | polling_stations/apps/data_importers/management/commands/import_epsom_and_ewell.py | 1,346 | Python |
"""
Output demo
^^^^^^^^^^^^^^
Demonstrate various output usage supported by PyWebIO
:demo_host:`Demo </?pywebio_api=output_usage>` `Source code <https://github.com/wang0618/PyWebIO/blob/dev/demos/output_usage.py>`_
"""
from pywebio import start_server
from pywebio.output import *
from pywebio.session import hold, get_info
from functools import partial
def t(eng, chinese):
"""return English or Chinese text according to the user's browser language"""
return chinese if 'zh' in get_info().user_language else eng
def code_block(code, strip_indent=4):
if strip_indent:
lines = (
i[strip_indent:] if (i[:strip_indent] == ' ' * strip_indent) else i
for i in code.splitlines()
)
code = '\n'.join(lines)
code = code.strip('\n')
def run_code(code, scope):
with use_scope(scope):
exec(code, globals())
with use_scope() as scope:
put_code(code, 'python')
put_buttons([{'label': t('Run', '运行'), 'value': '', 'color': 'success'}],
onclick=[partial(run_code, code=code, scope=scope)], small=True)
async def main():
"""PyWebIO Output demo
Demonstrate various output usage supported by PyWebIO.
演示PyWebIO输出模块的使用
"""
put_markdown(t("""# PyWebIO Output demo
You can get the source code of this demo in [here](https://github.com/wang0618/PyWebIO/blob/dev/demos/output_usage.py)
This demo only introduces part of the functions of the PyWebIO output module. For the complete features, please refer to the [User Guide](https://pywebio.readthedocs.io/zh_CN/latest/guide.html).
The output functions are all defined in the `pywebio.output` module and can be imported using `from pywebio.output import *`.
""", """# PyWebIO 输出演示
在[这里](https://github.com/wang0618/PyWebIO/blob/dev/demos/output_usage.py)可以获取本Demo的源码。
本Demo仅提供了PyWebIO输出模块的部分功能的演示,完整特性请参阅[用户指南](https://pywebio.readthedocs.io/zh_CN/latest/guide.html)。
PyWebIO的输出函数都定义在 `pywebio.output` 模块中,可以使用 `from pywebio.output import *` 引入。
### 基本输出
PyWebIO提供了一些便捷函数来输出表格、链接等格式:
"""), strip_indent=4)
code_block(t(r"""
# Text Output
put_text("Hello world!")
# Table Output
put_table([
['Commodity', 'Price'],
['Apple', '5.5'],
['Banana', '7'],
])
# Markdown Output
put_markdown('~~Strikethrough~~')
# File Output
put_file('hello_word.txt', b'hello word!')
""", r"""
# 文本输出
put_text("Hello world!")
# 表格输出
put_table([
['商品', '价格'],
['苹果', '5.5'],
['香蕉', '7'],
])
# Markdown输出
put_markdown('~~删除线~~')
# 文件输出
put_file('hello_word.txt', b'hello word!')
"""))
put_markdown(t(r"""For all output functions provided by PyWebIO, please refer to the document.
### Combined Output
The output functions whose name starts with put_ can be combined with some output functions as part of the final output:
You can pass `put_xxx()` calls to `put_table()` as cell content:
""", r"""PyWebIO提供的全部输出函数请参考PyWebIO文档
### 组合输出
函数名以 `put_` 开始的输出函数,可以与一些输出函数组合使用,作为最终输出的一部分。
比如`put_table()`支持以`put_xxx()`调用作为单元格内容:
"""), strip_indent=4)
code_block(r"""
put_table([
['Type', 'Content'],
['html', put_html('X<sup>2</sup>')],
['text', '<hr/>'], # equal to ['text', put_text('<hr/>')]
['buttons', put_buttons(['A', 'B'], onclick=toast)],
['markdown', put_markdown('`Awesome PyWebIO!`')],
['file', put_file('hello.text', b'hello world')],
['table', put_table([['A', 'B'], ['C', 'D']])]
])
""")
put_markdown(t(r"Similarly, you can pass `put_xxx()` calls to `popup()` as the popup content:",
r"类似地,`popup()`也可以将`put_xxx()`调用作为弹窗内容:"), strip_indent=4)
code_block(r"""
popup('Popup title', [
put_html('<h3>Popup Content</h3>'),
'plain html: <br/>', # equal to put_text('plain html: <br/>')
put_table([['A', 'B'], ['C', 'D']]),
put_buttons(['close_popup()'], onclick=lambda _: close_popup())
])
""")
put_markdown(t(r"For more output functions that accept `put_xxx()` calls as parameters, please refer to corresponding function documentation.",
r"更多接受`put_xxx()`作为参数的输出函数请参考函数文档。"))
put_markdown(t(r"""### Callback
PyWebIO allows you to output some buttons, and the provided callback function will be executed when the button is clicked.
This is an example:%s
The call to `put_table()` will not block. When user clicks a button, the corresponding callback function will be invoked:
""", r"""### 事件回调
PyWebIO允许你输出一些控件,当控件被点击时执行提供的回调函数,就像编写GUI程序一样。
下面是一个例子:%s
`put_table()`的调用不会阻塞。当用户点击了某行中的按钮时,PyWebIO会自动调用相应的回调函数:
""") % """
```python
from functools import partial
def edit_row(choice, row):
        put_markdown("> You click `%s` button at row `%s`" % (choice, row))
put_table([
['Idx', 'Actions'],
[1, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=1))],
[2, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=2))],
[3, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=3))],
])
```
""", strip_indent=4)
from functools import partial
@use_scope('table-callback')
def edit_row(choice, row):
        put_markdown("> You click `%s` button at row `%s`" % (choice, row))
put_table([
['Idx', 'Actions'],
[1, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=1))],
[2, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=2))],
[3, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=3))],
])
set_scope('table-callback')
    put_markdown(t("Of course, PyWebIO also supports outputting individual buttons:", "当然,PyWebIO还支持单独的按钮控件:")+r"""
```python
def btn_click(btn_val):
put_markdown("> You click `%s` button" % btn_val)
put_buttons(['A', 'B', 'C'], onclick=btn_click)
```
""", strip_indent=4)
@use_scope('button-callback')
def btn_click(btn_val):
put_markdown("> You click `%s` button" % btn_val)
put_buttons(['A', 'B', 'C'], onclick=btn_click)
set_scope('button-callback')
put_markdown(t(r"""### Output Scope
PyWebIO uses the scope model to give more control to the location of content output. The output area of PyWebIO can be divided into different output domains. The output domain is called Scope in PyWebIO.
The output domain is a container of output content, and each output domain is arranged vertically, and the output domains can also be nested.
Each output function (function name like `put_xxx()`) will output its content to a scope, the default is "current scope". "current scope" is determined by the runtime context. The output function can also manually specify the scope to output. The scope name is unique within the session.
You can use `use_scope()` to open and enter a new output scope, or enter an existing output scope: %s
The above code will generate the following Scope layout:
""", r"""### 输出域Scope
PyWebIO使用Scope模型来对内容输出的位置进行灵活地控制,PyWebIO的内容输出区可以划分出不同的输出域,PyWebIO将输出域称作`Scope`。
输出域为输出内容的容器,各个输出域之间上下排列,输出域也可以进行嵌套。
每个输出函数(函数名形如 `put_xxx()` )都会将内容输出到一个Scope,默认为”当前Scope”,”当前Scope”由运行时上下文确定,输出函数也可以手动指定输出到的Scope。Scope名在会话内唯一。
可以使用 `use_scope()` 开启并进入一个新的输出域,或进入一个已经存在的输出域: %s
以上代码将会产生如下Scope布局:
""") % """
```python
with use_scope('A'):
put_text('Text in scope A')
with use_scope('B'):
put_text('Text in scope B')
with use_scope('C'):
put_text('Text in scope C')
```
""", strip_indent=4)
with use_scope('A'):
put_text('Text in scope A')
with use_scope('B'):
put_text('Text in scope B')
with use_scope('C'):
put_text('Text in scope C')
put_html("""<style>
#pywebio-scope-A {border: 1px solid red;}
#pywebio-scope-B {border: 1px solid blue;margin:2px}
#pywebio-scope-C {border: 1px solid green;margin-top:2px}
</style><br/>""")
put_markdown(t(r"""The output function (function name like `put_xxx()`) will output the content to the "current scope" by default, and the "current scope" of the runtime context can be set by `use_scope()`.
In addition, you can use the `scope` parameter of the output function to specify the destination scope to output:
""", r"""
输出函数(函数名形如 `put_xxx()` )在默认情况下,会将内容输出到”当前Scope”,可以通过 `use_scope()` 设置运行时上下文的”当前Scope”。
此外,也可以通过输出函数的 scope 参数指定输出的目的Scope:
"""), strip_indent=4)
put_grid([
[put_code("put_text('A', scope='A')", 'python'), None, put_buttons([t('Run', '运行')], [lambda: put_text('A', scope='A')])],
[put_code("put_text('B', scope='B')", 'python'), None, put_buttons([t('Run', '运行')], [lambda: put_text('B', scope='B')])],
[put_code("put_text('C', scope='C')", 'python'), None, put_buttons([t('Run', '运行')], [lambda: put_text('C', scope='C')])],
], cell_widths='1fr 10px auto')
put_markdown(t("The output content can be inserted into any positions of the target scope by using the `position` parameter of the output function.", "输出函数可以使用`position`参数指定内容在Scope中输出的位置") + """
```python
put_text(now(), scope='A', position=...)
```
""", strip_indent=4)
import datetime
put_buttons([('position=%s' % i, i) for i in [1, 2, 3, -1, -2, -3]],
lambda i: put_text(datetime.datetime.now(), position=i, scope='A'), small=True)
put_markdown(t(r"In addition to `use_scope()`, PyWebIO also provides the following scope control functions:",
r"除了 `use_scope()` , PyWebIO同样提供了以下scope控制函数: "))
put_grid([
        [put_code("clear('B') # Clear content of Scope B", 'python'), None, put_buttons([t('Run', '运行')], [lambda: clear('B')])],
        [put_code("remove('C') # Remove Scope C", 'python'), None, put_buttons([t('Run', '运行')], [lambda: remove('C')])],
        [put_code("scroll_to('A') # Scroll the page to position of Scope A", 'python'), None, put_buttons([t('Run', '运行')], [lambda: scroll_to('A')])],
], cell_widths='1fr 10px auto')
put_markdown(t(r"""### Layout
In general, using the various output functions introduced above is enough to output what you want, but these outputs are arranged vertically. If you want to make a more complex layout (such as displaying a code block on the left side of the page and an image on the right), you need to use layout functions.
The `pywebio.output` module provides 3 layout functions, and you can create complex layouts by combining them:
- `put_row()` : Use row layout to output content. The content is arranged horizontally
- `put_column()` : Use column layout to output content. The content is arranged vertically
- `put_grid()` : Output content using grid layout
Here is an example by combining `put_row()` and `put_column()`:
""", r"""### 布局
一般情况下,使用上文介绍的各种输出函数足以完成各种内容的展示,但直接调用输出函数产生的输出之间都是竖直排列的,如果想实现更复杂的布局(比如在页 面左侧显示一个代码块,在右侧显示一个图像),就需要借助布局函数。
`pywebio.output` 模块提供了3个布局函数,通过对他们进行组合可以完成各种复杂的布局:
- `put_row()` : 使用行布局输出内容. 内容在水平方向上排列
- `put_column()` : 使用列布局输出内容. 内容在竖直方向上排列
- `put_grid()` : 使用网格布局输出内容
比如,通过通过组合 `put_row()` 和 `put_column()` 实现的布局:
"""), strip_indent=4)
code_block(r"""
put_row([
put_column([
put_code('A'),
put_row([
put_code('B1'), None, # %s
put_code('B2'), None,
put_code('B3'),
]),
put_code('C'),
]), None,
put_code('D'), None,
put_code('E')
])
""" % t('None represents the space between the output', 'None 表示输出之间的空白'))
put_markdown(t(r"""### Style
If you are familiar with CSS styles, you can use the `style()` function to set a custom style for the output.
You can set the CSS style for a single `put_xxx()` output:
""", r"""### 样式
如果你熟悉 CSS样式 ,你还可以使用 `style()` 函数给输出设定自定义样式。
可以给单个的 `put_xxx()` 输出设定CSS样式,也可以配合组合输出使用:
"""), strip_indent=4)
code_block(r"""
style(put_text('Red'), 'color: red')
put_table([
['A', 'B'],
['C', style(put_text('Red'), 'color: red')],
])
""", strip_indent=4)
put_markdown(t(r"`style()` also accepts a list of output calls:", r"`style()` 也接受列表作为输入:"))
code_block(r"""
style([
put_text('Red'),
put_markdown('~~del~~')
], 'color: red')
put_collapse('title', style([
put_text('text'),
put_markdown('~~del~~'),
], 'margin-left: 20px'))
""", strip_indent=4)
put_markdown(t("""----
    For more information about output in PyWebIO, please visit the PyWebIO [User Guide](https://pywebio.readthedocs.io/zh_CN/latest/guide.html) and the [output module documentation](https://pywebio.readthedocs.io/zh_CN/latest/output.html).
""","""----
PyWebIO的输出演示到这里就结束了,更多内容请访问PyWebIO[用户指南](https://pywebio.readthedocs.io/zh_CN/latest/guide.html)和[output模块文档](https://pywebio.readthedocs.io/zh_CN/latest/output.html)。
"""), lstrip=True)
await hold()
if __name__ == '__main__':
start_server(main, debug=True, port=8080, cdn=False)
| 37.19337 | 311 | 0.621064 | [
"MIT"
] | songshanyuwu/PyWebIO | demos/output_usage.py | 15,342 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-12-30 03:21
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.DeleteModel(
name='Subcategory',
),
migrations.AddField(
model_name='category',
name='parent_category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Category'),
),
migrations.AlterField(
model_name='salepost',
name='poster',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.DeleteModel(
name='User',
),
]
| 27.735294 | 124 | 0.61824 | [
"MIT"
] | dishad/ADD | core/migrations/0002_auto_20161229_2221.py | 943 | Python |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import flash
from flash.core.data.utils import download_data
from flash.image import ObjectDetectionData, ObjectDetector
# 1. Create the DataModule
# Dataset Credit: https://www.kaggle.com/ultralytics/coco128
download_data("https://github.com/zhiqwang/yolov5-rt-stack/releases/download/v0.3.0/coco128.zip", "data/")
datamodule = ObjectDetectionData.from_coco(
train_folder="data/coco128/images/train2017/",
train_ann_file="data/coco128/annotations/instances_train2017.json",
val_split=0.1,
batch_size=2,
)
# 2. Build the task
model = ObjectDetector(model="retinanet", num_classes=datamodule.num_classes)
# 3. Create the trainer and finetune the model
trainer = flash.Trainer(max_epochs=3, gpus=torch.cuda.device_count())
trainer.finetune(model, datamodule=datamodule)
# 4. Detect objects in a few images!
predictions = model.predict(
[
"data/coco128/images/train2017/000000000625.jpg",
"data/coco128/images/train2017/000000000626.jpg",
"data/coco128/images/train2017/000000000629.jpg",
]
)
print(predictions)
# 5. Save the model!
trainer.save_checkpoint("object_detection_model.pt")
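# To reuse the saved checkpoint later (illustrative sketch; `load_from_checkpoint`
# is the standard Lightning API, assuming the same Flash version is installed):
#   model = ObjectDetector.load_from_checkpoint("object_detection_model.pt")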
| 34.74 | 106 | 0.763961 | [
"Apache-2.0"
] | tszumowski/lightning-flash | flash_examples/object_detection.py | 1,737 | Python |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras initializers for TF 2.
"""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from keras import backend
from tensorflow.python.ops import init_ops_v2
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.initializers.Initializer')
class Initializer(object):
"""Initializer base class: all Keras initializers inherit from this class.
Initializers should implement a `__call__` method with the following
signature:
```python
def __call__(self, shape, dtype=None, **kwargs):
# returns a tensor of shape `shape` and dtype `dtype`
# containing values drawn from a distribution of your choice.
```
  Optionally, you can also implement the method `get_config` and the class
method `from_config` in order to support serialization -- just like with
any Keras object.
Here's a simple example: a random normal initializer.
```python
import tensorflow as tf
class ExampleRandomNormal(tf.keras.initializers.Initializer):
def __init__(self, mean, stddev):
self.mean = mean
self.stddev = stddev
def __call__(self, shape, dtype=None, **kwargs):
return tf.random.normal(
shape, mean=self.mean, stddev=self.stddev, dtype=dtype)
def get_config(self): # To support serialization
return {"mean": self.mean, "stddev": self.stddev}
```
Note that we don't have to implement `from_config` in the example above since
  the constructor arguments of the class and the keys in the config returned by
`get_config` are the same. In this case, the default `from_config`
works fine.
"""
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor.
**kwargs: Additional keyword arguments.
"""
raise NotImplementedError
def get_config(self):
"""Returns the configuration of the initializer as a JSON-serializable dict.
Returns:
A JSON-serializable Python dict.
"""
return {}
@classmethod
def from_config(cls, config):
"""Instantiates an initializer from a configuration dictionary.
Example:
```python
initializer = RandomUniform(-1, 1)
config = initializer.get_config()
initializer = RandomUniform.from_config(config)
```
Args:
config: A Python dictionary, the output of `get_config`.
Returns:
A `tf.keras.initializers.Initializer` instance.
"""
config.pop('dtype', None)
return cls(**config)
@keras_export('keras.initializers.Zeros', 'keras.initializers.zeros', v1=[])
class Zeros(tf.zeros_initializer, Initializer):
"""Initializer that generates tensors initialized to 0.
Also available via the shortcut function `tf.keras.initializers.zeros`.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.Zeros()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.Zeros()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
"""
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are
supported. If not specified, `tf.keras.backend.floatx()` is used,
       which defaults to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`).
**kwargs: Additional keyword arguments.
"""
return super(Zeros, self).__call__(shape, dtype=_get_dtype(dtype), **kwargs)
@keras_export('keras.initializers.Ones', 'keras.initializers.ones', v1=[])
class Ones(tf.ones_initializer, Initializer):
"""Initializer that generates tensors initialized to 1.
Also available via the shortcut function `tf.keras.initializers.ones`.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.Ones()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.Ones()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
"""
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are
supported. If not specified, `tf.keras.backend.floatx()` is used,
       which defaults to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`).
**kwargs: Additional keyword arguments.
"""
return super(Ones, self).__call__(shape, dtype=_get_dtype(dtype), **kwargs)
@keras_export('keras.initializers.Constant',
'keras.initializers.constant',
v1=[])
class Constant(Initializer):
"""Initializer that generates tensors with constant values.
Also available via the shortcut function `tf.keras.initializers.constant`.
Only scalar values are allowed.
The constant value provided must be convertible to the dtype requested
when calling the initializer.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.Constant(3.)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.Constant(3.)
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
value: A Python scalar.
"""
def __init__(self, value=0):
self.value = value
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized to `self.value`.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. If not specified,
`tf.keras.backend.floatx()` is used,
       which defaults to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`).
**kwargs: Additional keyword arguments.
"""
del kwargs
return tf.constant(
self.value, dtype=_get_dtype(dtype), shape=shape)
def get_config(self):
return {'value': self.value}
@keras_export('keras.initializers.RandomUniform',
'keras.initializers.random_uniform',
v1=[])
class RandomUniform(tf.random_uniform_initializer, Initializer):
"""Initializer that generates tensors with a uniform distribution.
Also available via the shortcut function
`tf.keras.initializers.random_uniform`.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.)
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
minval: A python scalar or a scalar tensor. Lower bound of the range of
random values to generate (inclusive).
maxval: A python scalar or a scalar tensor. Upper bound of the range of
random values to generate (exclusive).
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
"""
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point and integer
types are supported. If not specified,
`tf.keras.backend.floatx()` is used,
       which defaults to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`).
**kwargs: Additional keyword arguments.
"""
return super(RandomUniform, self).__call__(
shape, dtype=_get_dtype(dtype), **kwargs)
@keras_export('keras.initializers.RandomNormal',
'keras.initializers.random_normal',
v1=[])
class RandomNormal(tf.random_normal_initializer, Initializer):
"""Initializer that generates tensors with a normal distribution.
Also available via the shortcut function
`tf.keras.initializers.random_normal`.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.)
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
mean: a python scalar or a scalar tensor. Mean of the random values to
generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the random
values to generate.
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
"""
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized to random normal values.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `tf.keras.backend.floatx()` is used, which
       defaults to `float32` unless you configured it otherwise (via
`tf.keras.backend.set_floatx(float_dtype)`)
**kwargs: Additional keyword arguments.
"""
return super(RandomNormal, self).__call__(
shape, dtype=_get_dtype(dtype), **kwargs)
@keras_export('keras.initializers.TruncatedNormal',
'keras.initializers.truncated_normal',
v1=[])
class TruncatedNormal(init_ops_v2.TruncatedNormal, Initializer):
"""Initializer that generates a truncated normal distribution.
Also available via the shortcut function
`tf.keras.initializers.truncated_normal`.
The values generated are similar to values from a
`tf.keras.initializers.RandomNormal` initializer except that values more
than two standard deviations from the mean are
discarded and re-drawn.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.)
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
mean: a python scalar or a scalar tensor. Mean of the random values
to generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
"""
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized to random normal values (truncated).
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `tf.keras.backend.floatx()` is used, which
       defaults to `float32` unless you configured it otherwise (via
`tf.keras.backend.set_floatx(float_dtype)`)
**kwargs: Additional keyword arguments.
"""
return super(TruncatedNormal, self).__call__(
shape, dtype=_get_dtype(dtype), **kwargs)
@keras_export('keras.initializers.VarianceScaling',
'keras.initializers.variance_scaling',
v1=[])
class VarianceScaling(init_ops_v2.VarianceScaling, Initializer):
"""Initializer capable of adapting its scale to the shape of weights tensors.
Also available via the shortcut function
`tf.keras.initializers.variance_scaling`.
With `distribution="truncated_normal" or "untruncated_normal"`, samples are
drawn from a truncated/untruncated normal distribution with a mean of zero and
a standard deviation (after truncation, if used) `stddev = sqrt(scale / n)`,
where `n` is:
- number of input units in the weight tensor, if `mode="fan_in"`
- number of output units, if `mode="fan_out"`
- average of the numbers of input and output units, if `mode="fan_avg"`
With `distribution="uniform"`, samples are drawn from a uniform distribution
within `[-limit, limit]`, where `limit = sqrt(3 * scale / n)`.
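  For instance (an illustrative calculation, not part of the formal definition):
  with `scale=2.0`, `mode="fan_in"`, `distribution="truncated_normal"` and a
  kernel of shape `(8, 4)` (so `n = fan_in = 8`), the initializer uses
  `stddev = sqrt(2.0 / 8) = 0.5`.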
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.VarianceScaling(
... scale=0.1, mode='fan_in', distribution='uniform')
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.VarianceScaling(
... scale=0.1, mode='fan_in', distribution='uniform')
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
scale: Scaling factor (positive float).
mode: One of "fan_in", "fan_out", "fan_avg".
distribution: Random distribution to use. One of "truncated_normal",
"untruncated_normal" and "uniform".
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
"""
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `tf.keras.backend.floatx()` is used, which
       defaults to `float32` unless you configured it otherwise (via
`tf.keras.backend.set_floatx(float_dtype)`)
**kwargs: Additional keyword arguments.
"""
return super(VarianceScaling, self).__call__(
shape, dtype=_get_dtype(dtype), **kwargs)
@keras_export('keras.initializers.Orthogonal',
'keras.initializers.orthogonal',
v1=[])
class Orthogonal(init_ops_v2.Orthogonal, Initializer):
"""Initializer that generates an orthogonal matrix.
Also available via the shortcut function `tf.keras.initializers.orthogonal`.
If the shape of the tensor to initialize is two-dimensional, it is initialized
with an orthogonal matrix obtained from the QR decomposition of a matrix of
random numbers drawn from a normal distribution.
If the matrix has fewer rows than columns then the output will have orthogonal
rows. Otherwise, the output will have orthogonal columns.
If the shape of the tensor to initialize is more than two-dimensional,
a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])`
is initialized, where `n` is the length of the shape vector.
The matrix is subsequently reshaped to give a tensor of the desired shape.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.Orthogonal()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.Orthogonal()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
gain: multiplicative factor to apply to the orthogonal matrix
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
References:
[Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C)
([pdf](https://arxiv.org/pdf/1312.6120.pdf))
"""
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized to an orthogonal matrix.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `tf.keras.backend.floatx()` is used,
       which defaults to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`)
**kwargs: Additional keyword arguments.
"""
return super(Orthogonal, self).__call__(
shape, dtype=_get_dtype(dtype), **kwargs)
@keras_export('keras.initializers.Identity',
'keras.initializers.identity',
v1=[])
class Identity(init_ops_v2.Identity, Initializer):
"""Initializer that generates the identity matrix.
Also available via the shortcut function `tf.keras.initializers.identity`.
Only usable for generating 2D matrices.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.Identity()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.Identity()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
gain: Multiplicative factor to apply to the identity matrix.
"""
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized to a 2D identity matrix.
Args:
shape: Shape of the tensor. It should have exactly rank 2.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `tf.keras.backend.floatx()` is used,
       which defaults to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`)
**kwargs: Additional keyword arguments.
"""
return super(Identity, self).__call__(
shape, dtype=_get_dtype(dtype), **kwargs)
@keras_export('keras.initializers.GlorotUniform',
'keras.initializers.glorot_uniform',
v1=[])
class GlorotUniform(VarianceScaling):
"""The Glorot uniform initializer, also called Xavier uniform initializer.
Also available via the shortcut function
`tf.keras.initializers.glorot_uniform`.
Draws samples from a uniform distribution within `[-limit, limit]`, where
`limit = sqrt(6 / (fan_in + fan_out))` (`fan_in` is the number of input units
in the weight tensor and `fan_out` is the number of output units).
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.GlorotUniform()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.GlorotUniform()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
References:
[Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))
"""
def __init__(self, seed=None):
super(GlorotUniform, self).__init__(
scale=1.0,
mode='fan_avg',
distribution='uniform',
seed=seed)
def get_config(self):
return {'seed': self.seed}
@keras_export('keras.initializers.GlorotNormal',
'keras.initializers.glorot_normal',
v1=[])
class GlorotNormal(VarianceScaling):
"""The Glorot normal initializer, also called Xavier normal initializer.
Also available via the shortcut function
`tf.keras.initializers.glorot_normal`.
Draws samples from a truncated normal distribution centered on 0 with `stddev
= sqrt(2 / (fan_in + fan_out))` where `fan_in` is the number of input units in
the weight tensor and `fan_out` is the number of output units in the weight
tensor.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.GlorotNormal()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.GlorotNormal()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
References:
[Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))
"""
def __init__(self, seed=None):
super(GlorotNormal, self).__init__(
scale=1.0,
mode='fan_avg',
distribution='truncated_normal',
seed=seed)
def get_config(self):
return {'seed': self.seed}
@keras_export('keras.initializers.LecunNormal',
'keras.initializers.lecun_normal',
v1=[])
class LecunNormal(VarianceScaling):
"""Lecun normal initializer.
Also available via the shortcut function
`tf.keras.initializers.lecun_normal`.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Draws samples from a truncated normal distribution centered on 0 with `stddev
= sqrt(1 / fan_in)` where `fan_in` is the number of input units in the weight
tensor.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.LecunNormal()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.LecunNormal()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Arguments:
seed: A Python integer. Used to seed the random generator.
References:
- Self-Normalizing Neural Networks,
[Klambauer et al., 2017]
(https://papers.nips.cc/paper/6698-self-normalizing-neural-networks)
([pdf]
(https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))
- Efficient Backprop,
[Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
"""
def __init__(self, seed=None):
super(LecunNormal, self).__init__(
scale=1., mode='fan_in', distribution='truncated_normal', seed=seed)
def get_config(self):
return {'seed': self.seed}
@keras_export('keras.initializers.LecunUniform',
'keras.initializers.lecun_uniform',
v1=[])
class LecunUniform(VarianceScaling):
"""Lecun uniform initializer.
Also available via the shortcut function
`tf.keras.initializers.lecun_uniform`.
Draws samples from a uniform distribution within `[-limit, limit]`,
where `limit = sqrt(3 / fan_in)` (`fan_in` is the number of input units in the
weight tensor).
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.LecunUniform()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.LecunUniform()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Arguments:
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
References:
- Self-Normalizing Neural Networks,
[Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) # pylint: disable=line-too-long
([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))
- Efficient Backprop,
[Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
"""
def __init__(self, seed=None):
super(LecunUniform, self).__init__(
scale=1., mode='fan_in', distribution='uniform', seed=seed)
def get_config(self):
return {'seed': self.seed}
@keras_export('keras.initializers.HeNormal',
'keras.initializers.he_normal',
v1=[])
class HeNormal(VarianceScaling):
"""He normal initializer.
Also available via the shortcut function
`tf.keras.initializers.he_normal`.
It draws samples from a truncated normal distribution centered on 0 with
`stddev = sqrt(2 / fan_in)` where `fan_in` is the number of input units in the
weight tensor.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.HeNormal()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.HeNormal()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Arguments:
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
References:
[He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long
([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))
"""
def __init__(self, seed=None):
super(HeNormal, self).__init__(
scale=2., mode='fan_in', distribution='truncated_normal', seed=seed)
def get_config(self):
return {'seed': self.seed}
@keras_export('keras.initializers.HeUniform',
'keras.initializers.he_uniform',
v1=[])
class HeUniform(VarianceScaling):
"""He uniform variance scaling initializer.
Also available via the shortcut function
`tf.keras.initializers.he_uniform`.
Draws samples from a uniform distribution within `[-limit, limit]`, where
`limit = sqrt(6 / fan_in)` (`fan_in` is the number of input units in the
weight tensor).
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.HeUniform()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.HeUniform()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Arguments:
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
References:
[He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long
([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))
"""
def __init__(self, seed=None):
super(HeUniform, self).__init__(
scale=2., mode='fan_in', distribution='uniform', seed=seed)
def get_config(self):
return {'seed': self.seed}
def _get_dtype(dtype):
if dtype is None:
dtype = backend.floatx()
return tf.as_dtype(dtype)
| 35.041721 | 162 | 0.699111 | [
"Apache-2.0"
] | StanislavParovoy/Keras | keras/initializers/initializers_v2.py | 26,877 | Python |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import vlan
class access(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface/hundredgigabitethernet/switchport/access-mac-group-rspan-vlan-classification/access. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: The access layer characteristics of this interface.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__vlan',)
_yang_name = 'access'
_rest_name = 'access'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__vlan = YANGDynClass(base=YANGListType("access_vlan_id access_mac_group",vlan.vlan, yang_name="vlan", rest_name="rspan-vlan", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='access-vlan-id access-mac-group', extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}), is_container='list', yang_name="vlan", rest_name="rspan-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'hundredgigabitethernet', u'switchport', u'access-mac-group-rspan-vlan-classification', u'access']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'HundredGigabitEthernet', u'switchport', u'access']
def _get_vlan(self):
"""
Getter method for vlan, mapped from YANG variable /interface/hundredgigabitethernet/switchport/access_mac_group_rspan_vlan_classification/access/vlan (list)
"""
return self.__vlan
def _set_vlan(self, v, load=False):
"""
Setter method for vlan, mapped from YANG variable /interface/hundredgigabitethernet/switchport/access_mac_group_rspan_vlan_classification/access/vlan (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vlan() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("access_vlan_id access_mac_group",vlan.vlan, yang_name="vlan", rest_name="rspan-vlan", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='access-vlan-id access-mac-group', extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}), is_container='list', yang_name="vlan", rest_name="rspan-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vlan must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("access_vlan_id access_mac_group",vlan.vlan, yang_name="vlan", rest_name="rspan-vlan", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='access-vlan-id access-mac-group', extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}), is_container='list', yang_name="vlan", rest_name="rspan-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)""",
})
self.__vlan = t
if hasattr(self, '_set'):
self._set()
def _unset_vlan(self):
self.__vlan = YANGDynClass(base=YANGListType("access_vlan_id access_mac_group",vlan.vlan, yang_name="vlan", rest_name="rspan-vlan", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='access-vlan-id access-mac-group', extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}), is_container='list', yang_name="vlan", rest_name="rspan-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)
vlan = __builtin__.property(_get_vlan, _set_vlan)
_pyangbind_elements = {'vlan': vlan, }
| 64.825397 | 995 | 0.727473 | [
"Apache-2.0"
] | extremenetworks/pybind | pybind/nos/v6_0_2f/interface/hundredgigabitethernet/switchport/access_mac_group_rspan_vlan_classification/access/__init__.py | 8,168 | Python |
# Generated by Django 2.0.5 on 2018-06-07 10:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('it_purchase_app', '0030_auto_20180607_1020'),
]
operations = [
migrations.AlterField(
model_name='purchase',
name='manager_approval',
field=models.CharField(blank=True, choices=[('Not Decided', 'Not Decided'), ('Yes', 'Yes'), ('No', 'No')], max_length=500, null=True),
),
]
| 26.421053 | 146 | 0.609562 | [
"MIT"
] | gokhankaraboga/test | it_purchase_project/it_purchase_app/migrations/0031_auto_20180607_1031.py | 502 | Python |
#!/usr/bin/env python
# encoding: utf-8
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.fields import AutoSlugField
from v1.recipe.models import Recipe
class GroceryList(models.Model):
"""
The GroceryList is the core of list app.
It offers a home to many GroceryItems.
title = The name of the GroceryList.
slug = The HTML safe name of the GroceryList.
author = The User who created the GroceryList.
pub_date = The date that the GroceryList was created on.
"""
title = models.CharField(_("grocery list title"), max_length=250)
slug = AutoSlugField(_('slug'), populate_from='title')
author = models.ForeignKey(User, on_delete=models.CASCADE)
pub_date = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['pub_date']
def __str__(self):
return '%s' % self.title
def item_count(self):
"""get the number of items in the list"""
return GroceryItem.objects.filter(list=self).count()
class GroceryItem(models.Model):
"""
The GroceryItem is an item on a GroceryList.
list = The GroceryList that owns the GroceryItem.
title = The name of the GroceryItem.
completed = Whether or not the GroceryItem has been purchased or
                added to the user's shopping cart in the supermarket.
order = The order of the item in the GroceryList.
"""
list = models.ForeignKey(GroceryList, on_delete=models.CASCADE, related_name='items')
title = models.CharField(_("title"), max_length=550)
completed = models.BooleanField(_("completed"), default=False)
order = models.IntegerField(_("order"), default=0)
class Meta:
ordering = ['list_id', 'order', 'pk']
def __str__(self):
return '%s' % self.title
class GroceryShared(models.Model):
"""
    Determines whether or not a GroceryList is shared with another user.
    Shared lists allow other users to add/delete/edit the GroceryList.
list = The GroceryList to be shared.
shared_by = The User that shared the List.
shared_to = The User that is given access to a GroceryList.
"""
list = models.ForeignKey(GroceryList, on_delete=models.CASCADE)
shared_by = models.ForeignKey(User, on_delete=models.CASCADE, related_name="shared_by")
shared_to = models.ForeignKey(User, on_delete=models.CASCADE, related_name="shared_to")
def __str__(self):
return '%s' % self.list.title
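# Illustrative usage sketch (not part of the original module; assumes a configured
# Django environment and existing `User` instances `user` and `friend`):
#   grocery_list = GroceryList.objects.create(title='Weekly shop', author=user)
#   GroceryItem.objects.create(list=grocery_list, title='Milk', order=1)
#   grocery_list.item_count()  # -> 1
#   GroceryShared.objects.create(list=grocery_list, shared_by=user, shared_to=friend)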
| 34.671233 | 91 | 0.699723 | [
"MIT"
] | BitFis/openeats-api | v1/list/models.py | 2,531 | Python |
#!/usr/bin/python
import hashlib
import sys
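# Advent of Code 2016, day 5 (part 1), as inferred from the path and the loop
# below: hash "<door id><index>" with MD5 for increasing indices; whenever the
# hex digest starts with five zeros, its sixth character becomes the next
# character of the 8-character password. Written for Python 2, where
# hashlib.md5() accepts a str directly.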
v = sys.argv[1]
index = 0
pw = ''
i = 0
while True:
suffix = str(i)
h = hashlib.md5(v+suffix).hexdigest()
if h.startswith("00000"):
pw += h[5]
print(v+suffix,h,pw)
if len(pw) == 8:
break
i += 1
print(pw)
| 13.818182 | 41 | 0.519737 | [
"MIT"
] | CheyenneWills/adventofcode | 2016/day5/p1.py | 304 | Python |
from pandas import read_csv
from IPython.display import display
import numpy as np
import sys
import math
###############################
####Maria Eugenia Lopez #####
###############################
def fully_grown_depuration(number_to_remove=0.50):
return plants.loc[plants.height_m > number_to_remove]
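# The conversion below approximates degrees -> metres: latitude degrees are
# scaled by the Earth's meridional circumference (40008000 m / 360), and
# longitude degrees by the equatorial circumference (40075160 m / 360) times
# cos(latitude), since meridians converge toward the poles. Illustrative check:
# at latitude 45 degrees, one degree of longitude is roughly
# (40075160 / 360) * cos(45 deg) ~= 78,700 m.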
def convert_GPS_lat_long(df):
for index, row in df.iterrows():
lat_viejo = row["GPS_lat"]
latVal = (40008000*row["GPS_lat"])/360
        #res= div*0.001  # to convert to km
df.loc[index,"GPS_lat"] = latVal
lat_radians = math.radians(lat_viejo)
lonVal = (40075160*row["GPS_lon"])/360
lonVal = lonVal*math.cos(lat_radians)
#res = res*0.001
df.loc[index,"GPS_lon"] = lonVal
##----------------------------------------
##Part A Assembling a Data Set
##----------------------------------------
##----------------------------------------
##Input and Output: Data Frames
plants = read_csv('environmental_survey/plants2017.csv',
index_col=0)
plants.reset_index(level=0,inplace=True)
plants.drop(plants.index[plants.Plant == 'tree'], inplace=True)
#display(plants.head(n=50))
plants.reset_index(drop=True,inplace=True)
##----------------------------------------
##Functions
convert_GPS_lat_long( plants)
plants.rename(columns={'GPS_lon':'Meters_lon',
'GPS_lat':'Meters_lat'}, inplace=True)
##----------------------------------------
##Functions and Data Structures: Boolean Indexing
height_set_by_user = float(input("Set the height that you want: ") or "0.5")
plants = fully_grown_depuration(float(height_set_by_user))
#reseting the index after the depuration
plants.reset_index(drop=True,inplace=True)
display(plants)
| 27.04918 | 75 | 0.621212 | [
"MIT"
] | Maruja/MariaLopez | Assignment/Environmental_Project/part_A.py | 1,650 | Python |
#
# PySNMP MIB module EdgeSwitch-IPV6-TUNNEL-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/EdgeSwitch-IPV6-TUNNEL-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:56:15 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection")
fastPath, = mibBuilder.importSymbols("EdgeSwitch-REF-MIB", "fastPath")
InetAddressPrefixLength, InetAddressIPv4 = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressPrefixLength", "InetAddressIPv4")
Ipv6Address, Ipv6IfIndex, Ipv6AddressPrefix = mibBuilder.importSymbols("IPV6-TC", "Ipv6Address", "Ipv6IfIndex", "Ipv6AddressPrefix")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Unsigned32, ModuleIdentity, Bits, Gauge32, Integer32, NotificationType, ObjectIdentity, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, iso, Counter64, Counter32, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "ModuleIdentity", "Bits", "Gauge32", "Integer32", "NotificationType", "ObjectIdentity", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "iso", "Counter64", "Counter32", "TimeTicks")
RowStatus, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "DisplayString", "TextualConvention")
fastPathIpv6Tunnel = ModuleIdentity((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27))
fastPathIpv6Tunnel.setRevisions(('2011-01-26 00:00', '2007-05-23 00:00',))
if mibBuilder.loadTexts: fastPathIpv6Tunnel.setLastUpdated('201101260000Z')
if mibBuilder.loadTexts: fastPathIpv6Tunnel.setOrganization('Broadcom Inc')
agentTunnelIPV6Group = MibIdentifier((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1))
agentTunnelIPV6Table = MibTable((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1), )
if mibBuilder.loadTexts: agentTunnelIPV6Table.setStatus('current')
agentTunnelIPV6Entry = MibTableRow((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1), ).setIndexNames((0, "EdgeSwitch-IPV6-TUNNEL-MIB", "agentTunnelID"))
if mibBuilder.loadTexts: agentTunnelIPV6Entry.setStatus('current')
agentTunnelID = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: agentTunnelID.setStatus('current')
agentTunnelIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentTunnelIfIndex.setStatus('current')
agentTunnelMode = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("undefined", 1), ("ip6over4", 2), ("ip6to4", 3))).clone('undefined')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentTunnelMode.setStatus('current')
agentTunnelLocalIP4Addr = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 4), InetAddressIPv4()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentTunnelLocalIP4Addr.setStatus('current')
agentTunnelRemoteIP4Addr = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 5), InetAddressIPv4()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentTunnelRemoteIP4Addr.setStatus('current')
agentTunnelLocalIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentTunnelLocalIfIndex.setStatus('current')
agentTunnelStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 7), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentTunnelStatus.setStatus('current')
agentTunnelIcmpUnreachableMode = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentTunnelIcmpUnreachableMode.setStatus('current')
agentTunnelIPV6PrefixTable = MibTable((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 2), )
if mibBuilder.loadTexts: agentTunnelIPV6PrefixTable.setStatus('current')
agentTunnelIPV6PrefixEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 2, 1), ).setIndexNames((0, "EdgeSwitch-IPV6-TUNNEL-MIB", "agentTunnelID"), (0, "EdgeSwitch-IPV6-TUNNEL-MIB", "agentTunnelIPV6PrefixPrefix"), (0, "EdgeSwitch-IPV6-TUNNEL-MIB", "agentTunnelIPV6PrefixPrefixLen"))
if mibBuilder.loadTexts: agentTunnelIPV6PrefixEntry.setStatus('current')
agentTunnelIPV6PrefixPrefix = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 2, 1, 1), Ipv6AddressPrefix())
if mibBuilder.loadTexts: agentTunnelIPV6PrefixPrefix.setStatus('current')
agentTunnelIPV6PrefixPrefixLen = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 2, 1, 2), InetAddressPrefixLength())
if mibBuilder.loadTexts: agentTunnelIPV6PrefixPrefixLen.setStatus('current')
agentTunnelIPV6PrefixStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 2, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentTunnelIPV6PrefixStatus.setStatus('current')
mibBuilder.exportSymbols("EdgeSwitch-IPV6-TUNNEL-MIB", agentTunnelIPV6PrefixStatus=agentTunnelIPV6PrefixStatus, agentTunnelIPV6Entry=agentTunnelIPV6Entry, agentTunnelIPV6Table=agentTunnelIPV6Table, agentTunnelIPV6PrefixEntry=agentTunnelIPV6PrefixEntry, agentTunnelLocalIP4Addr=agentTunnelLocalIP4Addr, fastPathIpv6Tunnel=fastPathIpv6Tunnel, agentTunnelID=agentTunnelID, agentTunnelIPV6PrefixPrefix=agentTunnelIPV6PrefixPrefix, agentTunnelIPV6PrefixPrefixLen=agentTunnelIPV6PrefixPrefixLen, agentTunnelIPV6PrefixTable=agentTunnelIPV6PrefixTable, agentTunnelStatus=agentTunnelStatus, agentTunnelIPV6Group=agentTunnelIPV6Group, agentTunnelRemoteIP4Addr=agentTunnelRemoteIP4Addr, agentTunnelLocalIfIndex=agentTunnelLocalIfIndex, agentTunnelMode=agentTunnelMode, PYSNMP_MODULE_ID=fastPathIpv6Tunnel, agentTunnelIcmpUnreachableMode=agentTunnelIcmpUnreachableMode, agentTunnelIfIndex=agentTunnelIfIndex)
| 123.169811 | 896 | 0.780331 | [
"Apache-2.0"
] | agustinhenze/mibs.snmplabs.com | pysnmp/EdgeSwitch-IPV6-TUNNEL-MIB.py | 6,528 | Python |
"""Test cases for the pypfilt.io module."""
import datetime
import numpy as np
import os
from pypfilt.io import read_table, date_column
def test_read_datetime():
# Test data: sequential dates with Fibonacci sequence.
content = """
date count
2020-01-01 1
2020-01-02 1
2020-01-03 2
2020-01-04 3
2020-01-05 5
2020-01-06 8
2020-01-07 13
2020-01-08 21
2020-01-09 34
"""
expect_rows = 9
expect_count = [1, 1]
for i in range(expect_rows - 2):
expect_count.append(expect_count[i] + expect_count[i + 1])
# Save this data to a temporary data file.
path = "test_read_datetime.ssv"
with open(path, encoding='utf-8', mode='w') as f:
f.write(content)
# Read the data and then remove the data file.
columns = [
date_column('date'),
('count', np.int_),
]
df = read_table(path, columns)
os.remove(path)
# Check that we received the expected number of rows.
assert len(df) == expect_rows
# Check that each row has the expected content.
for ix, row in enumerate(df):
assert isinstance(row['date'], datetime.datetime)
assert row['date'].year == 2020
assert row['date'].month == 1
assert row['date'].day == ix + 1
assert row['count'] == expect_count[ix]
| 25.5 | 66 | 0.617647 | [
"BSD-3-Clause"
] | ruarai/epifx.covid | local_pypfilt/tests/test_io.py | 1,326 | Python |
#coding:utf8
#authors : yqq
import logging
import json
from utils import decimal_default,get_linenumber
from base_handler import BaseHandler
from .proxy import AuthServiceProxy
from cashaddress import convert
import traceback
# set Decimal precision
from decimal import Decimal
from decimal import getcontext
getcontext().prec = 8
from constants import BSV_RPC_URL as RPC_URL
STR_ADDRESS_TABLE = "t_btc_address"
class BTC_ListAccounts(BaseHandler):
@staticmethod
def addresses():
from sql import run
        accounts = run("""select * from {};""".format(STR_ADDRESS_TABLE)) #TODO: cache the addresses in redis once the data volume grows
return [account['address'] for account in accounts]
def get(self):
try:
data = BTC_ListAccounts.addresses()
self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
except Exception as e:
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
logging.error("BTC_ListAccounts error:{0} in {1}".format(e,get_linenumber()))
g_exUserAddrs = BTC_ListAccounts.addresses() # cache the exchange users' BTC addresses in a global variable 2019-06-01
class BTC_GetAccount(BaseHandler):
def get(self):
btc_rpc_connection = AuthServiceProxy(RPC_URL)
try:
commands = [["getaccount",self.get_argument("address")]]
data = btc_rpc_connection.batch_(commands)
self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
except Exception as e:
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
logging.error("BTC_GetAccount error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetAccountAddress(BaseHandler):
def get(self):
btc_rpc_connection = AuthServiceProxy(RPC_URL)
try:
commands = [["getaccountaddress",self.get_argument("account")]]
data = btc_rpc_connection.batch_(commands)
self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
except Exception as e:
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
logging.error("BTC_GetAccoutAddress error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetAccountBalance(BaseHandler):
def get(self):
btc_rpc_connection = AuthServiceProxy(RPC_URL)
try:
account = self.get_argument("account").decode("utf-8")
if account is None or len(account) == 0:
self.write(json.dumps(BaseHandler.error_ret()))
return
commands = [["getbalance", account]]
data = btc_rpc_connection.batch_(commands)
self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
except Exception as e:
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
logging.error("BTC_GetAccountBalance error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetBalance(BaseHandler):
def get(self):
btc_rpc_connection = AuthServiceProxy(RPC_URL)
try:
addr = self.get_argument("address")
data = BTC_ListUTXO.utxo(btc_rpc_connection, addr)
if not data:
self.write(json.dumps(BaseHandler.error_ret_with_data("0")))
return
from utils import accumulate
self.write(json.dumps(BaseHandler.success_ret_with_data('%.8f' % accumulate(data)), default=decimal_default))
except Exception as e:
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
logging.error("BTC_GetBalance error:{0} in {1}".format(e,get_linenumber()))
class BTC_ListUTXO(BaseHandler):
@staticmethod
def utxo(rpcconn, addrs, minconf=1, maxconf=9999999, opt=None):
argAddrs = addrs if isinstance(addrs, list) else [addrs]
        if opt is None:
commands = [["listunspent", minconf, maxconf, argAddrs, True]]
else:
commands = [["listunspent", minconf, maxconf, argAddrs, True, opt]]
utxos = rpcconn.batch_(commands)[0]
        # convert each address from cash address format to the legacy format
for i in range(len(utxos)):
cashAddr = utxos[i]['address']
legacyAddr = convert.to_legacy_address(cashAddr)
utxos[i]['address'] = legacyAddr
utxos[i]['cashaddress'] = cashAddr
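        # Each dict returned below mirrors the node's `listunspent` output
        # (typically txid, vout, amount, confirmations, scriptPubKey, ...);
        # 'address' has been rewritten to the legacy format and the original
        # cash address is kept under 'cashaddress'. Exact fields may vary
        # between node versions.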
return utxos
def post(self):
btc_rpc_connection = AuthServiceProxy(RPC_URL)
data = None
try:
minconf = int(self.get_argument("minconf")) if not self.get_argument("minconf") == "" else 1
maxconf = int(self.get_argument("maxconf")) if not self.get_argument("maxconf") == "" else 9999999
addr = self.get_argument("address")
data = BTC_ListUTXO.utxo(btc_rpc_connection,addr,minconf,maxconf)
self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
except Exception as e:
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
logging.error("BTC_GetUTXO error:{0} in {1}".format(e,get_linenumber()))
class BTC_EstimateSmartFee(BaseHandler):
@staticmethod
def process(rpcconn, nConfTarget=2, strEstimateMode='ECONOMICAL'):
# commands = [["estimatesmartfee", nConfTarget, strEstimateMode ]]
# commands = [["estimatefee", nConfTarget]] # bsv 需要根据前面的区块来计算, 和 bch, btc , ltc 不一样
# data = rpcconn.batch_(commands)
# nFeeRate = data[0] if len(data) > 0 else Decimal(0.00001)
        # return nFeeRate * 100000000 / 1000 # satoshi/Byte, i.e. in satoshis per byte
# if len(data) > 0:
        #     return data[0]['feerate'] * 100000000 / 1000 # satoshi/Byte, i.e. in satoshis per byte
return 20
@staticmethod
def calcFee(rpcconn, nIn=1, nOut = 2):
from decimal import Decimal
from decimal import getcontext
getcontext().prec = 8
rate = BTC_EstimateSmartFee.process(rpcconn)
rate = "%.8f" % (rate / Decimal(100000000.0))
return Decimal(str((148 * nIn + 34 * nOut + 10))) * Decimal(rate)
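    # Size model used by calcFee above: ~148 bytes per P2PKH input, ~34 bytes
    # per output, plus ~10 bytes of overhead. Illustrative arithmetic with the
    # hard-coded 20 sat/byte from process(): 1 input and 2 outputs give
    # 148 + 2*34 + 10 = 226 bytes, so the fee is 226 * 20 = 4520 satoshis,
    # i.e. 0.00004520 in coin units.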
def get(self):
try:
rpcconn = AuthServiceProxy(RPC_URL)
data = BTC_EstimateSmartFee.calcFee(rpcconn)
data = '%.8f' % data
self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
except Exception as e:
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % e)))
logging.error("BTC_EstimateSmartFee error:{0} in {1}".format(e, get_linenumber()))
pass
class BTC_CreateRawTransaction(BaseHandler):
@staticmethod
def process(rpcconn,from_addr,to_addr,amount):
#utxos
utxos = BTC_ListUTXO.utxo(rpcconn, from_addr)
#print(utxos)
def UtxoFilter(utxos, amount):
selected = []
from decimal import Decimal
nSum = Decimal('0')
            # minimum input utxo amount: 148 * rate, where rate is the BTC required per 1000 bytes
nFee = Decimal('0.0')
for utxo in [item for item in utxos if int(item["confirmations"]) >= 1 and float(item["amount"]) > 0.0003 ]:
selected.append(utxo)
nSum += Decimal(str((utxo["amount"])))
if nSum > Decimal(str(amount)):
nFee = BTC_EstimateSmartFee.calcFee(rpcconn, len(selected), 2)
if nSum > nFee + amount:
break
return selected, nSum, nFee
selected, nSum , fee = UtxoFilter(utxos, amount)
# check if enough
# from utils import calcFee
if not isinstance(amount, Decimal):
amount = Decimal(str(amount))
# fee = BTC_EstimateSmartFee.calcFee(rpcconn, len(selected), 2)
if nSum < fee + amount:
return False,"budget not enough"
            #return False,0  # needs testing!!!
from utils import filtered
param_in = [filtered(item,["txid","vout"]) for item in selected]
param_out = {to_addr:amount, from_addr: nSum - amount - fee}
#print("--------------param_out-------------")
#print("fee" + str(fee))
#print(param_in)
#print(param_out)
#print("--------------param_out-------------")
# create raw transaction
commands = [["createrawtransaction",param_in,param_out]]
return True, {"hex":rpcconn.batch_(commands), "utxos":selected, "txout":param_out}
def post(self):
btc_rpc_connection = AuthServiceProxy(RPC_URL)
try:
from_addr = self.get_argument("from")
to_addr = self.get_argument("to")
#amount = float(self.get_argument("amount"))
from decimal import Decimal
amount = Decimal(str(self.get_argument("amount")))
ret, rsp = BTC_CreateRawTransaction.process(btc_rpc_connection,from_addr,to_addr,amount)
if not ret:
self.write(json.dumps(BaseHandler.error_ret_with_data(rsp)))
return
self.write(json.dumps(BaseHandler.success_ret_with_data(rsp), default=decimal_default))
except Exception as e:
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
logging.error("BTC_CreatRawTransaction error:{0} in {1}".format(e,get_linenumber()))
class BTC_SendRawTransaction(BaseHandler):
def post(self):
btc_rpc_connection = AuthServiceProxy(RPC_URL)
try:
rawdata = self.get_argument("rawdata")
if not rawdata: return
commands = [["sendrawtransaction",rawdata]]
data = btc_rpc_connection.batch_(commands)
self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
except Exception as e:
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
logging.error("BTC_SendRawTransaction error:{0} in {1}".format(e,get_linenumber()))
class BTC_CreateRawTransactionEx(BaseHandler):
@staticmethod
def genearateInParam(rpcconn, src, dest):
utxos,gross,amount = [],Decimal('0'),sum(dest.values())
redundant = 0
for addr in src:
# utxos
all = BTC_ListUTXO.utxo(rpcconn,addr)
# recommend
from utils import recommended
selected,aggregate = recommended(all,amount)
# process
utxos += selected
gross += aggregate
# check if enough
redundant = gross - BTC_EstimateSmartFee.calcFee(rpcconn, len(utxos), len(dest.keys())+1) - amount
if redundant > 0:
return True,utxos,redundant
return False,utxos,redundant
@staticmethod
def generateOutParam(dest):
param_out = {}
for key,value in dest.items():
param_out[key] = Decimal(value) if isinstance(value, str) else Decimal(str(value))
return param_out
@staticmethod
def process(rpcconn, src, dest ):
# preprocess
param_out = BTC_CreateRawTransactionEx.generateOutParam(dest)
ret,utxos,redundant = BTC_CreateRawTransactionEx.genearateInParam(rpcconn,src,param_out)
if not ret: return False, "budget not enough"
# param_out refinement
param_out[src[0]] = redundant if src[0] not in param_out.keys() else param_out[src[0]] + redundant
#print(param_out)
# param_in refinement
from utils import filtered
param_in = [filtered(item,["txid","vout"]) for item in utxos]
#print(param_in)
return True, {"hex":rpcconn.batch_([["createrawtransaction",param_in,param_out]]),"utxos":utxos, "txout":param_out}
def get_argument_ex(self, str):
from utils import json2dict
str2dict = json2dict(self.request.body)
return str2dict[str] if str in str2dict.keys() else False
def post(self):
btc_rpc_connection = AuthServiceProxy(RPC_URL)
try:
src = self.get_argument_ex("src")
dest = self.get_argument_ex("dest")
if not isinstance(src, list):
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % ("src must be json list"))))
return
if not isinstance(dest, dict):
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % ("dest must be json object"))))
return
ret, rsp = BTC_CreateRawTransactionEx.process(btc_rpc_connection, src, dest)
if not ret:
self.write(json.dumps(BaseHandler.error_ret_with_data(rsp)))
return
self.write(json.dumps(BaseHandler.success_ret_with_data(rsp), default=decimal_default))
except Exception as e:
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
logging.error("BTC_CreateRawTransactionEx error:{0} in {1}".format(e,get_linenumber()))
class BTC_CreateRawTransactionEx_Collection(BaseHandler):
@staticmethod
def makeParams( rpcconn, lstSrc, lstDest):
if len(lstSrc) == 1 and lstSrc[0].strip() == "*":
lstSrcAddrs = g_exUserAddrs
else:
lstSrcAddrs = lstSrc
utxos, nSum = [], Decimal('0')
txAmount, fTxFee = 0, 0
#for addr in lstSrc:
if isinstance(lstSrc, list):
# bitcoin-cli -conf=/root/.bitcoin/bitcoin-test.conf listunspent 0 9999999 '[]' true '{ "minimumAmount": 0.005 }'
# commands = [["listunspent", 0, 99999999, [], True, {'minimumAmount':0.0003}]]
# lstUtxos = rpcconn.batch_(commands)[0]
            # BSV does not support the options argument
# opt = {'minimumAmount':0.0003}
lstUtxos = BTC_ListUTXO.utxo(rpcconn, [ ], 1, 9999999)
# print(len(lstUtxos))
for utxo in lstUtxos:
if Decimal(utxo['amount']) < 0.0003: continue
if utxo['address'].strip() in lstSrcAddrs:
utxos.append(utxo)
nSum += Decimal(str((utxo["amount"])))
fTxFee = BTC_EstimateSmartFee.calcFee(rpcconn, len(utxos), len(lstDest))
            txAmount = nSum - fTxFee  # actual transfer amount
            if txAmount <= 0.0003:  # the actual transfer amount is too small
return False, None, 0, 0
        return True, utxos, txAmount, fTxFee
@staticmethod
def process(rpcconn, lstSrc, lstDest):
#lstSrcAddrs = []
bRet, utxos, txAmount, fTxFee = BTC_CreateRawTransactionEx_Collection.makeParams(rpcconn, lstSrc, lstDest)
if not bRet:
return False, "collection amount is too small!"
strDst = lstDest[0]
vout = {strDst : txAmount}
from utils import filtered
vin = [filtered(item,["txid","vout"]) for item in utxos]
strHex = rpcconn.batch_([["createrawtransaction", vin, vout]])
return True, {"hex": strHex, "utxos":utxos, "txout":vout, "txFee":fTxFee}
def get_argument_ex(self, str):
from utils import json2dict
str2dict = json2dict(self.request.body)
return str2dict[str] if str in str2dict.keys() else False
def post(self):
btc_rpc_connection = AuthServiceProxy(RPC_URL)
try:
src = self.get_argument_ex("src")
dest = self.get_argument_ex("dest")
if not isinstance(src, list):
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % ("src must be json list"))))
return
if not isinstance(dest, list):
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % ("dest must be json list"))))
return
ret, rsp = BTC_CreateRawTransactionEx_Collection.process(btc_rpc_connection, src, dest)
if not ret:
self.write(json.dumps(BaseHandler.error_ret_with_data(rsp)))
return
self.write(json.dumps(BaseHandler.success_ret_with_data(rsp), default=decimal_default))
except Exception as e:
# traceback.print_exc()
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
logging.error("BTC_CreateRawTransactionEx error:{0} in {1}".format(e,get_linenumber()))
# query the balances of the addresses that need to be swept (collected)
class BTC_CollectionQuery(BaseHandler):
def get(self):
rpcconn = AuthServiceProxy(RPC_URL)
try:
# commands = [["listunspent", 0, 99999999, [], True, {'minimumAmount':0.0003}]]
# lstUtxos = rpcconn.batch_(commands)[0]
# opt = {'minimumAmount': 0.0003}
lstUtxos = BTC_ListUTXO.utxo(rpcconn, [], 1, 9999999)
mapRet = {}
for utxo in lstUtxos:
strAddr = utxo['address'].strip()
if Decimal(utxo['amount']) < 0.0003: continue
if strAddr not in g_exUserAddrs : continue
if strAddr not in mapRet:
mapRet[strAddr] = Decimal("0.0")
nAmount = utxo['amount']
mapRet[strAddr] = str( nAmount + Decimal( mapRet[strAddr]) )
self.write(json.dumps(BaseHandler.success_ret_with_data(mapRet), default=decimal_default))
except Exception as e:
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
logging.error("BTC_CollectionQuery error:{0} in {1}".format(e, get_linenumber()))
class BTC_ListTransactions(BaseHandler):
@staticmethod
def blktimes(rpc_connection,account="*",tx_counts=10):
commands = [["listtransactions",account,tx_counts]]
data = rpc_connection.batch_(commands)
if len(data) == 0: return []
        # fix bug: only return those txs which have been written into the blockchain @yqq 2019-03-21
return [item['blocktime'] for item in data[0] if "blocktime" in item][::-1]
    # add 'include_watchonly' to include transactions of those addresses
    # whose private keys were not imported into the wallet.  # yqq 2019-03-26
@staticmethod
def process(rpc_connection,account="*",tx_counts=10,skips=0,include_watchonly=True):
commands = [["listtransactions",account,tx_counts,skips, include_watchonly]]
data = rpc_connection.batch_(commands)
if len(data) == 0: return []
        # fix bug: only return those txs which have been written into the blockchain @yqq 2019-03-21
txs = [item for item in data[0] if "blocktime" in item and item["category"] == "receive"]
from utils import filtered
return [filtered(item,["address","category","amount","confirmations","txid","blocktime"]) for item in txs][::-1]
def get(self):
btc_rpc_connection = AuthServiceProxy(RPC_URL)
try:
account = self.get_argument("account") if self.get_argument("account") else "*"
tx_counts = int(self.get_argument("count")) if self.get_argument("count") else 10
skips = int(self.get_argument("skips")) if self.get_argument("skips") else 0
data = BTC_ListTransactions.process(btc_rpc_connection,account,tx_counts,skips)
self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
except Exception as e:
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
logging.error("BTC_ListTransActions error:{0} in {1}".format(e,get_linenumber()))
class BTC_CrawlTxData(BaseHandler):
@staticmethod
def process(rpc_connection, nblktime):
if len(g_exUserAddrs) == 0:
return []
txs = BTC_ListTransactions.process(rpc_connection, '*', 100000000)
retTxs = []
for tx in txs:
strLegacyAddr = convert.to_legacy_address(tx["address"].strip())
tx["address"] = strLegacyAddr.strip()
# print(tx)
if int(str(tx['blocktime'])) >= nblktime and tx["address"].strip() in g_exUserAddrs:
retTxs.append(tx)
return retTxs
def post(self):
rpc_connection = AuthServiceProxy(RPC_URL)
try:
lastscannedblktime = int(str(self.get_argument("blocktime")))
data = BTC_CrawlTxData.process(rpc_connection,lastscannedblktime)
for i in range(len(data)):
data[i]["amount"] = str(data[i]["amount"]) #convert to str to avoid bug
self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
except Exception as e:
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
logging.error("BTC_CrawlTxData error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetBlockCount(BaseHandler):
@staticmethod
def process(rpcconn):
commands = [["getblockcount"]]
        return int(rpcconn.batch_(commands)[0])
def get(self):
btc_rpc_connection = AuthServiceProxy(RPC_URL)
try:
blknumber = BTC_GetBlockCount.process(btc_rpc_connection)
self.write(json.dumps(BaseHandler.success_ret_with_data(blknumber), default=decimal_default))
except Exception as e:
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
logging.error("BTC_GetBlockCount error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetBlockHash(BaseHandler):
@staticmethod
def process(rpcconn,blknumber):
commands = [["getblockhash",blknumber]]
return rpcconn.batch_(commands)
def get(self):
btc_rpc_connection = AuthServiceProxy(RPC_URL)
try:
            blknumber = int(self.get_argument("blknumber")) if self.get_argument("blknumber") else BTC_GetBlockCount.process(btc_rpc_connection)
data = BTC_GetBlockHash.process(btc_rpc_connection,blknumber)
self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
except Exception as e:
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
logging.error("BTC_GetBlockHash error:{0} in {1}".format(e,get_linenumber()))
class BTC_DecodeRawTransaction(BaseHandler):
def post(self):
btc_rpc_connection = AuthServiceProxy(RPC_URL)
try:
commands = [["decoderawtransaction",self.get_argument("rawdata")]]
data = btc_rpc_connection.batch_(commands)
self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
except Exception as e:
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
logging.error("BTC_GetTransaction error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetRawTransaction(BaseHandler):
def get(self):
btc_rpc_connection = AuthServiceProxy(RPC_URL)
try:
commands = [["getrawtransaction",self.get_argument("txid"),True]]
data = btc_rpc_connection.batch_(commands)
self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
except Exception as e:
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
logging.error("BTC_GetTransaction error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetBlock(BaseHandler):
def get(self):
btc_rpc_connection = AuthServiceProxy(RPC_URL)
try:
blkhash = self.get_argument("blkhash") if self.get_argument("blkhash") else BTC_GetBlockCount.process(btc_rpc_connection)
commands = [["getblock"]]
data = btc_rpc_connection.batch_(commands)
self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
except Exception as e:
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
logging.error("BTC_GetBlockHash error:{0} in {1}".format(e,get_linenumber()))
| 43.808743 | 139 | 0.627541 | [
"MIT"
] | songning4/QBlockChainNotes | Python3/Tornado/apps/ExchangeWalletApi/ExWallet/bsv/handler.py | 24,277 | Python |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABC
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.loss.loss_helper import FSAuxCELoss, FSAuxRMILoss
from lib.utils.tools.logger import Logger as Log
class PixelContrastLoss(nn.Module, ABC):
def __init__(self, configer):
super(PixelContrastLoss, self).__init__()
self.configer = configer
self.temperature = self.configer.get('contrast', 'temperature')
self.base_temperature = self.configer.get('contrast', 'base_temperature')
self.ignore_label = -1
if self.configer.exists('loss', 'params') and 'ce_ignore_index' in self.configer.get('loss', 'params'):
self.ignore_label = self.configer.get('loss', 'params')['ce_ignore_index']
self.max_samples = self.configer.get('contrast', 'max_samples')
self.max_views = self.configer.get('contrast', 'max_views')
def _hard_anchor_sampling(self, X, y_hat, y):
batch_size, feat_dim = X.shape[0], X.shape[-1]
classes = []
total_classes = 0
for ii in range(batch_size):
this_y = y_hat[ii]
this_classes = torch.unique(this_y)
this_classes = [x for x in this_classes if x > 0 and x != self.ignore_label]
this_classes = [x for x in this_classes if (this_y == x).nonzero().shape[0] > self.max_views]
classes.append(this_classes)
total_classes += len(this_classes)
if total_classes == 0:
return None, None
n_view = self.max_samples // total_classes
n_view = min(n_view, self.max_views)
X_ = torch.zeros((total_classes, n_view, feat_dim), dtype=torch.float).cuda()
y_ = torch.zeros(total_classes, dtype=torch.float).cuda()
X_ptr = 0
for ii in range(batch_size):
this_y_hat = y_hat[ii]
this_y = y[ii]
this_classes = classes[ii]
for cls_id in this_classes:
hard_indices = ((this_y_hat == cls_id) & (this_y != cls_id)).nonzero()
easy_indices = ((this_y_hat == cls_id) & (this_y == cls_id)).nonzero()
num_hard = hard_indices.shape[0]
num_easy = easy_indices.shape[0]
if num_hard >= n_view / 2 and num_easy >= n_view / 2:
num_hard_keep = n_view // 2
num_easy_keep = n_view - num_hard_keep
elif num_hard >= n_view / 2:
num_easy_keep = num_easy
num_hard_keep = n_view - num_easy_keep
elif num_easy >= n_view / 2:
num_hard_keep = num_hard
num_easy_keep = n_view - num_hard_keep
else:
                    Log.info('this should never be touched! {} {} {}'.format(num_hard, num_easy, n_view))
raise Exception
perm = torch.randperm(num_hard)
hard_indices = hard_indices[perm[:num_hard_keep]]
perm = torch.randperm(num_easy)
easy_indices = easy_indices[perm[:num_easy_keep]]
indices = torch.cat((hard_indices, easy_indices), dim=0)
X_[X_ptr, :, :] = X[ii, indices, :].squeeze(1)
y_[X_ptr] = cls_id
X_ptr += 1
return X_, y_
def _contrastive(self, feats_, labels_):
anchor_num, n_view = feats_.shape[0], feats_.shape[1]
labels_ = labels_.contiguous().view(-1, 1)
mask = torch.eq(labels_, torch.transpose(labels_, 0, 1)).float().cuda()
contrast_count = n_view
contrast_feature = torch.cat(torch.unbind(feats_, dim=1), dim=0)
anchor_feature = contrast_feature
anchor_count = contrast_count
anchor_dot_contrast = torch.div(torch.matmul(anchor_feature, torch.transpose(contrast_feature, 0, 1)),
self.temperature)
logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)
logits = anchor_dot_contrast - logits_max.detach()
mask = mask.repeat(anchor_count, contrast_count)
neg_mask = 1 - mask
logits_mask = torch.ones_like(mask).scatter_(1,
torch.arange(anchor_num * anchor_count).view(-1, 1).cuda(),
0)
mask = mask * logits_mask
neg_logits = torch.exp(logits) * neg_mask
neg_logits = neg_logits.sum(1, keepdim=True)
exp_logits = torch.exp(logits)
log_prob = logits - torch.log(exp_logits + neg_logits)
mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)
loss = - (self.temperature / self.base_temperature) * mean_log_prob_pos
loss = loss.mean()
return loss
def forward(self, feats, labels=None, predict=None):
labels = labels.unsqueeze(1).float().clone()
labels = torch.nn.functional.interpolate(labels,
(feats.shape[2], feats.shape[3]), mode='nearest')
labels = labels.squeeze(1).long()
assert labels.shape[-1] == feats.shape[-1], '{} {}'.format(labels.shape, feats.shape)
batch_size = feats.shape[0]
labels = labels.contiguous().view(batch_size, -1)
predict = predict.contiguous().view(batch_size, -1)
feats = feats.permute(0, 2, 3, 1)
feats = feats.contiguous().view(feats.shape[0], -1, feats.shape[-1])
feats_, labels_ = self._hard_anchor_sampling(feats, labels, predict)
loss = self._contrastive(feats_, labels_)
return loss
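# Illustrative sketch (not part of the original module): a minimal smoke test for
# PixelContrastLoss on random tensors. _StubConfiger is an assumption that only mimics
# the configer calls this loss actually makes; shapes follow forward(): feats (B, D, H, W),
# labels (B, H, W), predict (B, H, W). A CUDA device is required because the loss
# allocates CUDA tensors internally.
class _StubConfiger:
    def __init__(self):
        self._vals = {('contrast', 'temperature'): 0.1,
                      ('contrast', 'base_temperature'): 0.07,
                      ('contrast', 'max_samples'): 1024,
                      ('contrast', 'max_views'): 10}
    def get(self, *keys):
        return self._vals[tuple(keys)]
    def exists(self, *keys):
        return tuple(keys) in self._vals
def _example_pixel_contrast_loss():
    configer = _StubConfiger()
    criterion = PixelContrastLoss(configer)
    feats = torch.randn(2, 64, 32, 32).cuda()  # embedding map from the segmentation network
    labels = torch.randint(1, 3, (2, 32, 32)).cuda()  # ground-truth classes in {1, 2}
    predict = torch.randint(1, 3, (2, 32, 32)).cuda()  # argmax of the segmentation head
    return criterion(feats, labels, predict)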
class ContrastAuxCELoss(nn.Module, ABC):
def __init__(self, configer=None):
super(ContrastAuxCELoss, self).__init__()
self.configer = configer
ignore_index = -1
if self.configer.exists('loss', 'params') and 'ce_ignore_index' in self.configer.get('loss', 'params'):
ignore_index = self.configer.get('loss', 'params')['ce_ignore_index']
Log.info('ignore_index: {}'.format(ignore_index))
self.loss_weight = self.configer.get('contrast', 'loss_weight')
self.use_rmi = self.configer.get('contrast', 'use_rmi')
if self.use_rmi:
self.seg_criterion = FSAuxRMILoss(configer=configer)
else:
self.seg_criterion = FSAuxCELoss(configer=configer)
self.contrast_criterion = PixelContrastLoss(configer=configer)
def forward(self, preds, target):
h, w = target.size(1), target.size(2)
assert "seg" in preds
assert "seg_aux" in preds
seg = preds['seg']
seg_aux = preds['seg_aux']
embedding = preds['embedding'] if 'embedding' in preds else None
pred = F.interpolate(input=seg, size=(h, w), mode='bilinear', align_corners=True)
pred_aux = F.interpolate(input=seg_aux, size=(h, w), mode='bilinear', align_corners=True)
loss = self.seg_criterion([pred_aux, pred], target)
if embedding is not None:
_, predict = torch.max(seg, 1)
loss_contrast = self.contrast_criterion(embedding, target, predict)
return loss + self.loss_weight * loss_contrast
return loss
| 37.783505 | 112 | 0.600136 | [
"MIT"
] | NNNNAI/ContrastiveSeg | lib/loss/loss_contrast.py | 7,330 | Python |
# File: gsgmail_process_email.py
# Copyright (c) 2017-2021 Splunk Inc.
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
import email
import tempfile
from collections import OrderedDict
import os
import re
from bs4 import BeautifulSoup, UnicodeDammit
import phantom.app as phantom
import phantom.utils as ph_utils
import mimetypes
import socket
from email.header import decode_header, make_header
import shutil
import hashlib
import json
import magic
import random
import string
import phantom.rules as phantom_rules
from gsgmail_consts import *
import sys
from requests.structures import CaseInsensitiveDict
_container_common = {
"run_automation": False # Don't run any playbooks, when this artifact is added
}
_artifact_common = {
"run_automation": False # Don't run any playbooks, when this artifact is added
}
FILE_EXTENSIONS = {
'.vmsn': ['os memory dump', 'vm snapshot file'],
'.vmss': ['os memory dump', 'vm suspend file'],
'.js': ['javascript'],
'.doc': ['doc'],
'.docx': ['doc'],
'.xls': ['xls'],
'.xlsx': ['xls'],
}
MAGIC_FORMATS = [
(re.compile('^PE.* Windows'), ['pe file', 'hash']),
(re.compile('^MS-DOS executable'), ['pe file', 'hash']),
(re.compile('^PDF '), ['pdf']),
(re.compile('^MDMP crash'), ['process dump']),
(re.compile('^Macromedia Flash'), ['flash']),
]
EWS_DEFAULT_ARTIFACT_COUNT = 100
EWS_DEFAULT_CONTAINER_COUNT = 100
HASH_FIXED_PHANTOM_VERSION = "2.0.201"
OFFICE365_APP_ID = "a73f6d32-c9d5-4fec-b024-43876700daa6"
EXCHANGE_ONPREM_APP_ID = "badc5252-4a82-4a6d-bc53-d1e503857124"
IMAP_APP_ID = "9f2e9f72-b0e5-45d6-92a7-09ef820476c1"
uri_regexc = re.compile(URI_REGEX)
email_regexc = re.compile(EMAIL_REGEX, re.IGNORECASE)
email_regexc2 = re.compile(EMAIL_REGEX2, re.IGNORECASE)
hash_regexc = re.compile(HASH_REGEX)
ip_regexc = re.compile(IP_REGEX)
ipv6_regexc = re.compile(IPV6_REGEX)
class ProcessMail:
def __init__(self, base_connector, config):
self._base_connector = base_connector
self._config = config
self._email_id_contains = list()
self._container = dict()
self._artifacts = list()
self._attachments = list()
self._python_version = None
try:
self._python_version = int(sys.version_info[0])
except Exception:
raise Exception("Error occurred while getting the Phantom server's Python major version.")
def _get_file_contains(self, file_path):
contains = []
ext = os.path.splitext(file_path)[1]
contains.extend(FILE_EXTENSIONS.get(ext, []))
magic_str = magic.from_file(file_path)
for regex, cur_contains in MAGIC_FORMATS:
if regex.match(magic_str):
contains.extend(cur_contains)
return contains
def _is_ip(self, input_ip):
if ph_utils.is_ip(input_ip):
return True
if self.is_ipv6(input_ip):
return True
return False
def is_ipv6(self, input_ip):
try:
socket.inet_pton(socket.AF_INET6, input_ip)
except Exception:
return False
return True
def _clean_url(self, url):
url = url.strip('>),.]\r\n')
        # Check before slicing, find returns -1 if not found
        # _and_ you will end up slicing on -1 (incorrectly)
if '<' in url:
url = url[:url.find('<')]
elif '>' in url:
url = url[:url.find('>')]
return url
def _extract_urls_domains(self, file_data, urls, domains):
if not self._config[PROC_EMAIL_JSON_EXTRACT_DOMAINS] and not self._config[PROC_EMAIL_JSON_EXTRACT_URLS]:
return
# try to load the email
try:
soup = BeautifulSoup(file_data, "html.parser")
except Exception as e:
self._base_connector.debug_print(e)
return
uris = []
# get all tags that have hrefs
links = soup.find_all(href=True)
if links:
# it's html, so get all the urls
uris = [x['href'] for x in links if (not x['href'].startswith('mailto:'))]
# work on the text part of the link, they might be http links different from the href
# and were either missed by the uri_regexc while parsing text or there was no text counterpart
# in the email
uri_text = [self._clean_url(x.get_text()) for x in links]
if uri_text:
uri_text = [x for x in uri_text if x.startswith('http')]
if uri_text:
uris.extend(uri_text)
else:
# Parse it as a text file
uris = re.findall(uri_regexc, file_data)
if uris:
uris = [self._clean_url(x) for x in uris]
if self._config[PROC_EMAIL_JSON_EXTRACT_URLS]:
# add the uris to the urls
urls |= set(uris)
if self._config[PROC_EMAIL_JSON_EXTRACT_DOMAINS]:
for uri in uris:
domain = phantom.get_host_from_url(uri)
if domain and not self._is_ip(domain):
domains.add(domain)
# work on any mailto urls if present
if links:
mailtos = [x['href'] for x in links if (x['href'].startswith('mailto:'))]
for curr_email in mailtos:
domain = curr_email[curr_email.find('@') + 1:]
if domain and not self._is_ip(domain):
domains.add(domain)
return
def _get_ips(self, file_data, ips):
# First extract what looks like an IP from the file, this is a faster operation
ips_in_mail = re.findall(ip_regexc, file_data)
ip6_in_mail = re.findall(ipv6_regexc, file_data)
if ip6_in_mail:
for ip6_tuple in ip6_in_mail:
ip6s = [x for x in ip6_tuple if x]
ips_in_mail.extend(ip6s)
# Now validate them
if ips_in_mail:
ips_in_mail = set(ips_in_mail)
ips_in_mail = [x for x in ips_in_mail if self._is_ip(x)]
if ips_in_mail:
ips |= set(ips_in_mail)
def _handle_body(self, body, parsed_mail, email_id):
local_file_path = body['file_path']
ips = parsed_mail[PROC_EMAIL_JSON_IPS]
hashes = parsed_mail[PROC_EMAIL_JSON_HASHES]
urls = parsed_mail[PROC_EMAIL_JSON_URLS]
domains = parsed_mail[PROC_EMAIL_JSON_DOMAINS]
file_data = None
try:
with open(local_file_path, 'r') as f:
file_data = f.read()
except Exception:
with open(local_file_path, 'rb') as f:
file_data = f.read()
self._base_connector.debug_print("Reading file data using binary mode")
if (file_data is None) or (len(file_data) == 0):
return phantom.APP_ERROR
file_data = UnicodeDammit(file_data).unicode_markup.encode('utf-8').decode('utf-8')
self._parse_email_headers_as_inline(file_data, parsed_mail, email_id)
if self._config[PROC_EMAIL_JSON_EXTRACT_DOMAINS]:
emails = []
emails.extend(re.findall(email_regexc, file_data))
emails.extend(re.findall(email_regexc2, file_data))
for curr_email in emails:
domain = curr_email[curr_email.rfind('@') + 1:]
if domain and (not ph_utils.is_ip(domain)):
domains.add(domain)
self._extract_urls_domains(file_data, urls, domains)
if self._config[PROC_EMAIL_JSON_EXTRACT_IPS]:
self._get_ips(file_data, ips)
if self._config[PROC_EMAIL_JSON_EXTRACT_HASHES]:
hashs_in_mail = re.findall(hash_regexc, file_data)
if hashs_in_mail:
hashes |= set(hashs_in_mail)
return phantom.APP_SUCCESS
def _add_artifacts(self, cef_key, input_set, artifact_name, start_index, artifacts):
added_artifacts = 0
for entry in input_set:
# ignore empty entries
if not entry:
continue
artifact = {}
artifact.update(_artifact_common)
artifact['source_data_identifier'] = start_index + added_artifacts
artifact['cef'] = {cef_key: entry}
artifact['name'] = artifact_name
self._base_connector.debug_print('Artifact:', artifact)
artifacts.append(artifact)
added_artifacts += 1
return added_artifacts
def _parse_email_headers_as_inline(self, file_data, parsed_mail, email_id):
# remove the 'Forwarded Message' from the email text and parse it
p = re.compile(r'(?<=\r\n).*Forwarded Message.*\r\n', re.IGNORECASE)
email_text = p.sub('', file_data.strip())
mail = email.message_from_string(email_text)
self._parse_email_headers(parsed_mail, mail, add_email_id=email_id)
return phantom.APP_SUCCESS
def _add_email_header_artifacts(self, email_header_artifacts, start_index, artifacts):
added_artifacts = 0
for artifact in email_header_artifacts:
artifact['source_data_identifier'] = start_index + added_artifacts
artifacts.append(artifact)
added_artifacts += 1
return added_artifacts
def _create_artifacts(self, parsed_mail):
# get all the artifact data in their own list objects
ips = parsed_mail[PROC_EMAIL_JSON_IPS]
hashes = parsed_mail[PROC_EMAIL_JSON_HASHES]
urls = parsed_mail[PROC_EMAIL_JSON_URLS]
domains = parsed_mail[PROC_EMAIL_JSON_DOMAINS]
email_headers = parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS]
# set the default artifact dict
artifact_id = 0
# add artifacts
added_artifacts = self._add_artifacts('sourceAddress', ips, 'IP Artifact', artifact_id, self._artifacts)
artifact_id += added_artifacts
added_artifacts = self._add_artifacts('fileHash', hashes, 'Hash Artifact', artifact_id, self._artifacts)
artifact_id += added_artifacts
added_artifacts = self._add_artifacts('requestURL', urls, 'URL Artifact', artifact_id, self._artifacts)
artifact_id += added_artifacts
added_artifacts = self._add_artifacts('destinationDnsDomain', domains, 'Domain Artifact', artifact_id, self._artifacts)
artifact_id += added_artifacts
added_artifacts = self._add_email_header_artifacts(email_headers, artifact_id, self._artifacts)
artifact_id += added_artifacts
return phantom.APP_SUCCESS
def _decode_uni_string(self, input_str, def_name):
# try to find all the decoded strings, we could have multiple decoded strings
# or a single decoded string between two normal strings separated by \r\n
# YEAH...it could get that messy
encoded_strings = re.findall(r'=\?.*?\?=', input_str, re.I)
# return input_str as is, no need to do any conversion
if not encoded_strings:
return input_str
# get the decoded strings
try:
decoded_strings = [decode_header(x)[0] for x in encoded_strings]
decoded_strings = [{'value': x[0], 'encoding': x[1]} for x in decoded_strings]
except Exception as e:
error_code, error_msg = self._get_error_message_from_exception(e)
self._base_connector.debug_print("Decoding: {0}. Error code: {1}. Error message: {2}".format(encoded_strings, error_code, error_msg))
return def_name
# convert to dict for safe access, if it's an empty list, the dict will be empty
decoded_strings = dict(enumerate(decoded_strings))
new_str = ''
new_str_create_count = 0
for i, encoded_string in enumerate(encoded_strings):
decoded_string = decoded_strings.get(i)
if not decoded_string:
# nothing to replace with
continue
value = decoded_string.get('value')
encoding = decoded_string.get('encoding')
if not encoding or not value:
# nothing to replace with
continue
try:
if encoding != 'utf-8':
value = str(value, encoding)
except Exception:
pass
try:
# commenting the existing approach due to a new approach being deployed below
# substitute the encoded string with the decoded one
# input_str = input_str.replace(encoded_string, value)
                # make a new string instead of replacing in the input string because of an issue found in PAPP-9531
if value:
new_str += UnicodeDammit(value).unicode_markup
new_str_create_count += 1
except Exception:
pass
        # replace the input string with the new string because of an issue found in PAPP-9531
        if new_str and new_str_create_count == len(encoded_strings):
            self._base_connector.debug_print("Creating a new string entirely from the encoded_strings and assigning into input_str")
input_str = new_str
return input_str
def _get_container_name(self, parsed_mail, email_id):
# Create the default name
def_cont_name = "Email ID: {0}".format(email_id)
# get the subject from the parsed mail
subject = parsed_mail.get(PROC_EMAIL_JSON_SUBJECT)
# if no subject then return the default
if not subject:
return def_cont_name
try:
return str(make_header(decode_header(subject)))
except Exception:
return self._decode_uni_string(subject, def_cont_name)
def _handle_if_body(self, content_disp, content_type, part, bodies, file_path, parsed_mail):
process_as_body = False
        # if content disposition is None then assume that it is the body
if content_disp is None:
process_as_body = True
# if content disposition is inline
elif content_disp.lower().strip() == 'inline':
if ('text/html' in content_type) or ('text/plain' in content_type):
process_as_body = True
if not process_as_body:
return phantom.APP_SUCCESS, True
part_payload = part.get_payload(decode=True)
if not part_payload:
return phantom.APP_SUCCESS, False
charset = part.get_content_charset()
with open(file_path, 'wb') as f: # noqa
f.write(part_payload)
bodies.append({'file_path': file_path, 'charset': part.get_content_charset()})
self._add_body_in_email_headers(parsed_mail, file_path, charset, content_type)
return phantom.APP_SUCCESS, False
def _handle_part(self, part, part_index, tmp_dir, extract_attach, parsed_mail):
bodies = parsed_mail[PROC_EMAIL_JSON_BODIES]
files = parsed_mail[PROC_EMAIL_JSON_FILES]
# get the file_name
file_name = part.get_filename()
content_disp = part.get('Content-Disposition')
content_type = part.get('Content-Type')
content_id = part.get('Content-ID')
if file_name is None:
# init name and extension to default values
name = "part_{0}".format(part_index)
extension = ".{0}".format(part_index)
# Try to create an extension from the content type if possible
if content_type is not None:
extension = mimetypes.guess_extension(re.sub(';.*', '', content_type))
# Try to create a name from the content id if possible
if content_id is not None:
name = content_id
file_name = "{0}{1}".format(name, extension)
else:
try:
file_name = str(make_header(decode_header(file_name)))
except Exception:
file_name = self._decode_uni_string(file_name, file_name)
# Remove any chars that we don't want in the name
file_path = "{0}/{1}_{2}".format(tmp_dir, part_index,
file_name.translate(str.maketrans("", "", ''.join(['<', '>', ' ']))))
self._base_connector.debug_print("file_path: {0}".format(file_path))
# is the part representing the body of the email
status, process_further = self._handle_if_body(content_disp, content_type, part, bodies, file_path, parsed_mail)
if not process_further:
return phantom.APP_SUCCESS
# is this another email as an attachment
if (content_type is not None) and (content_type.find(PROC_EMAIL_CONTENT_TYPE_MESSAGE) != -1):
return phantom.APP_SUCCESS
# This is an attachment, first check if it is another email or not
if extract_attach:
_, file_extension = os.path.splitext(file_name)
part_payload = part.get_payload(decode=True)
if not part_payload:
return phantom.APP_SUCCESS
try:
with open(file_path, 'wb') as f: # noqa
f.write(part_payload)
files.append({'file_name': file_name, 'file_path': file_path})
except IOError as e:
error_msg = str(e)
if "File name too long" in error_msg:
self.write_with_new_filename(tmp_dir, part_payload, file_extension, files, as_byte=False)
else:
self._base_connector.debug_print('Failed to write file: {}'.format(e))
return phantom.APP_SUCCESS
def _get_file_name(self, input_str):
try:
return str(make_header(decode_header(input_str)))
except Exception:
return self._decode_uni_string(input_str, input_str)
def _parse_email_headers(self, parsed_mail, part, charset=None, add_email_id=None):
email_header_artifacts = parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS]
email_headers = part.items()
if not email_headers:
return 0
# Parse email keys first
headers = self._get_email_headers_from_part(part, charset)
cef_artifact = {}
cef_types = {}
if headers.get('From'):
emails = headers['From']
if emails:
cef_artifact.update({'fromEmail': emails})
if headers.get('To'):
emails = headers['To']
if emails:
cef_artifact.update({'toEmail': emails})
message_id = headers.get('Message-ID')
# if the header did not contain any email addresses and message ID then ignore this artifact
if not cef_artifact and not message_id:
return 0
cef_types.update({'fromEmail': ['email'], 'toEmail': ['email']})
if headers:
cef_artifact['emailHeaders'] = headers
# Adding the email id as a cef artifact crashes the UI when trying to show the action dialog box
        # so not adding this right now. All the other code to process the emailId is there, but we refrain
        # from adding the emailId
# add_email_id = False
if add_email_id:
cef_artifact['emailId'] = add_email_id
if self._email_id_contains:
cef_types.update({'emailId': self._email_id_contains})
artifact = {}
artifact.update(_artifact_common)
artifact['name'] = 'Email Artifact'
artifact['cef'] = cef_artifact
artifact['cef_types'] = cef_types
email_header_artifacts.append(artifact)
return len(email_header_artifacts)
def _get_email_headers_from_part(self, part, charset=None):
email_headers = list(part.items())
# TODO: the next 2 ifs can be condensed to use 'or'
if charset is None:
charset = part.get_content_charset()
if charset is None:
charset = 'utf8'
if not email_headers:
return {}
# Convert the header tuple into a dictionary
headers = CaseInsensitiveDict()
try:
[headers.update({x[0]: self._get_string(x[1], charset)}) for x in email_headers]
except Exception as e:
error_code, error_msg = self._get_error_message_from_exception(e)
err = "Error occurred while converting the header tuple into a dictionary"
self._base_connector.debug_print("{}. {}. {}".format(err, error_code, error_msg))
        # Handle received separately
        received_headers = list()
        try:
received_headers = [self._get_string(x[1], charset) for x in email_headers if x[0].lower() == 'received']
except Exception as e:
error_code, error_msg = self._get_error_message_from_exception(e)
err = "Error occurred while handling the received header tuple separately"
self._base_connector.debug_print("{}. {}. {}".format(err, error_code, error_msg))
if received_headers:
headers['Received'] = received_headers
# handle the subject string, if required add a new key
subject = headers.get('Subject')
if subject:
try:
headers['decodedSubject'] = str(make_header(decode_header(subject)))
except Exception:
headers['decodedSubject'] = self._decode_uni_string(subject, subject)
return dict(headers)
def _get_error_message_from_exception(self, e):
""" This method is used to get appropriate error message from the exception.
:param e: Exception object
:return: error message
"""
try:
if e.args:
if len(e.args) > 1:
error_code = e.args[0]
error_msg = e.args[1]
elif len(e.args) == 1:
error_code = "Error code unavailable"
error_msg = e.args[0]
else:
error_code = "Error code unavailable"
error_msg = "Error message unavailable. Please check the asset configuration and|or action parameters."
except Exception:
error_code = "Error code unavailable"
error_msg = "Error message unavailable. Please check the asset configuration and|or action parameters."
return error_code, error_msg
def _handle_mail_object(self, mail, email_id, rfc822_email, tmp_dir, start_time_epoch):
parsed_mail = OrderedDict()
# Create a tmp directory for this email, will extract all files here
tmp_dir = tmp_dir
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
extract_attach = self._config[PROC_EMAIL_JSON_EXTRACT_ATTACHMENTS]
charset = mail.get_content_charset()
if charset is None:
charset = 'utf-8'
# Extract fields and place it in a dictionary
parsed_mail[PROC_EMAIL_JSON_SUBJECT] = mail.get('Subject', '')
parsed_mail[PROC_EMAIL_JSON_FROM] = mail.get('From', '')
parsed_mail[PROC_EMAIL_JSON_TO] = mail.get('To', '')
parsed_mail[PROC_EMAIL_JSON_DATE] = mail.get('Date', '')
parsed_mail[PROC_EMAIL_JSON_MSG_ID] = mail.get('Message-ID', '')
parsed_mail[PROC_EMAIL_JSON_FILES] = files = []
parsed_mail[PROC_EMAIL_JSON_BODIES] = bodies = []
parsed_mail[PROC_EMAIL_JSON_START_TIME] = start_time_epoch
parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS] = []
# parse the parts of the email
if mail.is_multipart():
for i, part in enumerate(mail.walk()):
add_email_id = None
if i == 0:
add_email_id = email_id
self._parse_email_headers(parsed_mail, part, add_email_id=add_email_id)
self._base_connector.debug_print("part: {0}".format(part.__dict__))
self._base_connector.debug_print("part type", type(part))
if part.is_multipart():
self.check_and_update_eml(part)
continue
try:
ret_val = self._handle_part(part, i, tmp_dir, extract_attach, parsed_mail)
except Exception as e:
self._base_connector.debug_print("ErrorExp in _handle_part # {0}".format(i), e)
continue
if phantom.is_fail(ret_val):
continue
else:
self._parse_email_headers(parsed_mail, mail, add_email_id=email_id)
# parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS].append(mail.items())
file_path = "{0}/part_1.text".format(tmp_dir)
with open(file_path, 'wb') as f: # noqa
f.write(mail.get_payload(decode=True))
bodies.append({'file_path': file_path, 'charset': charset})
self._add_body_in_email_headers(parsed_mail, file_path, mail.get_content_charset(), 'text/plain')
# get the container name
container_name = self._get_container_name(parsed_mail, email_id)
if container_name is None:
return phantom.APP_ERROR
# Add the container
# first save the container, to do that copy things from parsed_mail to a new object
container = {}
container_data = dict(parsed_mail)
# delete the header info, we dont make it a part of the container json
del (container_data[PROC_EMAIL_JSON_EMAIL_HEADERS])
container.update(_container_common)
self._container['source_data_identifier'] = email_id
self._container['name'] = container_name
self._container['data'] = {'raw_email': rfc822_email}
# Create the sets before handling the bodies If both the bodies add the same ip
# only one artifact should be created
parsed_mail[PROC_EMAIL_JSON_IPS] = set()
parsed_mail[PROC_EMAIL_JSON_HASHES] = set()
parsed_mail[PROC_EMAIL_JSON_URLS] = set()
parsed_mail[PROC_EMAIL_JSON_DOMAINS] = set()
# For bodies
for i, body in enumerate(bodies):
if not body:
continue
try:
self._handle_body(body, parsed_mail, email_id)
except Exception as e:
                self._base_connector.debug_print("ErrorExp in _handle_body # {0}: {1}".format(i, str(e)))
continue
# Files
self._attachments.extend(files)
self._create_artifacts(parsed_mail)
return phantom.APP_SUCCESS
def _add_body_in_email_headers(self, parsed_mail, file_path, charset, content_type):
# Add email_bodies to email_headers
email_headers = parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS]
try:
with open(file_path, 'r') as f:
body_content = f.read()
except Exception:
with open(file_path, 'rb') as f:
body_content = f.read()
self._base_connector.debug_print("Reading file data using binary mode")
# Add body to the last added Email artifact
body_content = UnicodeDammit(body_content).unicode_markup.encode('utf-8').decode('utf-8')
if 'text/plain' in content_type:
try:
email_headers[-1]['cef']['bodyText'] = self._get_string(
body_content, charset)
except Exception as e:
try:
email_headers[-1]['cef']['bodyText'] = str(make_header(decode_header(body_content)))
except Exception:
email_headers[-1]['cef']['bodyText'] = self._decode_uni_string(body_content, body_content)
error_code, error_msg = self._get_error_message_from_exception(e)
err = "Error occurred while parsing text/plain body content for creating artifacts"
self._base_connector.debug_print("{}. {}. {}".format(err, error_code, error_msg))
elif 'text/html' in content_type:
try:
email_headers[-1]['cef']['bodyHtml'] = self._get_string(
body_content, charset)
except Exception as e:
try:
email_headers[-1]['cef']['bodyHtml'] = str(make_header(decode_header(body_content)))
except Exception:
email_headers[-1]['cef']['bodyHtml'] = self._decode_uni_string(body_content, body_content)
error_code, error_msg = self._get_error_message_from_exception(e)
err = "Error occurred while parsing text/html body content for creating artifacts"
self._base_connector.debug_print("{}. {}. {}".format(err, error_code, error_msg))
else:
if not email_headers[-1]['cef'].get('bodyOther'):
email_headers[-1]['cef']['bodyOther'] = {}
try:
email_headers[-1]['cef']['bodyOther'][content_type] = self._get_string(
body_content, charset)
except Exception as e:
try:
email_headers[-1]['cef']['bodyOther'][content_type] = str(make_header(decode_header(body_content)))
except Exception:
email_headers[-1]['cef']['bodyOther'][content_type] = self._decode_uni_string(body_content, body_content)
error_code, error_msg = self._get_error_message_from_exception(e)
err = "Error occurred while parsing bodyOther content for creating artifacts"
self._base_connector.debug_print("{}. {}. {}".format(err, error_code, error_msg))
def _get_string(self, input_str, charset):
try:
if input_str:
if self._python_version == 2:
input_str = UnicodeDammit(input_str).unicode_markup.encode(charset)
else:
input_str = UnicodeDammit(input_str).unicode_markup.encode(charset).decode(charset)
except Exception:
try:
input_str = str(make_header(decode_header(input_str)))
except Exception:
input_str = self._decode_uni_string(input_str, input_str)
self._base_connector.debug_print(
"Error occurred while converting to string with specific encoding {}".format(input_str))
return input_str
def _set_email_id_contains(self, email_id):
if not self._base_connector:
return
try:
email_id = self._get_string(email_id, 'utf-8')
except Exception:
email_id = str(email_id)
if self._base_connector.get_app_id() == EXCHANGE_ONPREM_APP_ID and email_id.endswith('='):
self._email_id_contains = ["exchange email id"]
elif self._base_connector.get_app_id() == OFFICE365_APP_ID and email_id.endswith('='):
self._email_id_contains = ["office 365 email id"]
elif self._base_connector.get_app_id() == IMAP_APP_ID and email_id.isdigit():
self._email_id_contains = ["imap email id"]
elif ph_utils.is_sha1(email_id):
self._email_id_contains = ["vault id"]
return
def _int_process_email(self, rfc822_email, email_id, start_time_epoch):
mail = email.message_from_string(rfc822_email)
tmp_dir = tempfile.mkdtemp(prefix='ph_email')
try:
ret_val = self._handle_mail_object(mail, email_id, rfc822_email, tmp_dir, start_time_epoch)
except Exception as e:
message = "ErrorExp in _handle_mail_object: {0}".format(e)
self._base_connector.debug_print(message)
return phantom.APP_ERROR, message, []
results = [{'container': self._container, 'artifacts': self._artifacts, 'files': self._attachments, 'temp_directory': tmp_dir}]
return ret_val, PROC_EMAIL_PARSED, results
def check_and_update_eml(self, part):
if self._config[PROC_EMAIL_JSON_EXTRACT_EMAIL_ATTACHMENTS]:
tmp_dir = None
msg = None
file_extension = ''
try:
tmp_dir = tempfile.mkdtemp(prefix='ph_email')
filename = self._get_file_name(part.get_filename())
_, file_extension = os.path.splitext(filename)
if filename.endswith('.eml'):
file_path = os.path.join(tmp_dir, filename)
msg = part.get_payload()[0]
with open(file_path, 'wb') as f: # noqa
f.write(msg.as_bytes())
self._attachments.append({'file_name': filename, 'file_path': file_path})
except IOError as e:
error_msg = str(e)
if "File name too long" in error_msg:
self.write_with_new_filename(tmp_dir, msg, file_extension, self._attachments, as_byte=True)
else:
self._base_connector.debug_print('Failed to write file: {}'.format(e))
except Exception as e:
self._base_connector.debug_print("Exception occurred: {}".format(e))
def write_with_new_filename(self, tmp_dir, data, file_extension, dict_to_fill, as_byte=False):
try:
random_suffix = '_' + ''.join(random.SystemRandom().choice(string.ascii_lowercase) for _ in range(16))
new_file_name = "ph_long_file_name_{0}{1}".format(random_suffix, file_extension)
file_path = os.path.join(tmp_dir, new_file_name)
with open(file_path, 'wb') as f:
if as_byte:
f.write(data.as_bytes())
else:
f.write(data)
dict_to_fill.append({'file_name': new_file_name, 'file_path': file_path})
except Exception as e:
self._base_connector.debug_print('Exception while writing file: {}'.format(e))
def process_email(self, rfc822_email, email_id, epoch):
try:
self._set_email_id_contains(email_id)
except Exception:
pass
ret_val, message, results = self._int_process_email(rfc822_email, email_id, epoch)
if not ret_val:
return phantom.APP_ERROR, message
self._parse_results(results)
return phantom.APP_SUCCESS, PROC_EMAIL_PROCESSED
def _parse_results(self, results):
param = self._base_connector.get_current_param()
container_count = EWS_DEFAULT_CONTAINER_COUNT
artifact_count = EWS_DEFAULT_ARTIFACT_COUNT
if param:
container_count = param.get(phantom.APP_JSON_CONTAINER_COUNT, EWS_DEFAULT_CONTAINER_COUNT)
artifact_count = param.get(phantom.APP_JSON_ARTIFACT_COUNT, EWS_DEFAULT_ARTIFACT_COUNT)
results = results[:container_count]
for result in results:
container = result.get('container')
if not container:
continue
container.update(_container_common)
try:
ret_val, message, container_id = self._base_connector.save_container(container)
except Exception as e:
self._base_connector.debug_print("Exception: ", e)
continue
self._base_connector.debug_print(PROC_EMAIL_SAVE_CONTAINER.format(ret_val, message, container_id))
if phantom.is_fail(ret_val):
message = PROC_EMAIL_FAILED_CONTAINER.format(container['source_data_identifier'], message)
self._base_connector.debug_print(message)
continue
if not container_id:
message = PROC_EMAIL_SAVE_CONTAINER_FAILED
self._base_connector.debug_print(message)
continue
files = result.get('files')
vault_artifacts_added = 0
for curr_file in files:
ret_val, added_to_vault = self._handle_file(curr_file, container_id)
if added_to_vault:
vault_artifacts_added += 1
artifacts = result.get('artifacts')
if not artifacts:
continue
if not self._base_connector.is_poll_now():
artifacts = artifacts[:artifact_count]
len_artifacts = len(artifacts)
for j, artifact in enumerate(artifacts):
if not artifact:
continue
# add the container id to the artifact
artifact['container_id'] = container_id
self._set_sdi(artifact)
# if it is the last artifact of the last container
if (j + 1) == len_artifacts:
# mark it such that active playbooks get executed
artifact['run_automation'] = True
ret_val, status_string, artifact_id = self._base_connector.save_artifact(artifact)
self._base_connector.debug_print(PROC_EMAIL_SAVE_CONT_PASSED.format(ret_val, status_string, artifact_id))
# delete any temp directories that were created by the email parsing function
[shutil.rmtree(x['temp_directory'], ignore_errors=True) for x in results if x.get('temp_directory')]
return self._base_connector.set_status(phantom.APP_SUCCESS)
def _add_vault_hashes_to_dictionary(self, cef_artifact, vault_id):
success, message, vault_info = phantom_rules.vault_info(vault_id=vault_id)
if not vault_info:
return phantom.APP_ERROR, "Vault ID not found"
# The return value is a list, each item represents an item in the vault
# matching the vault id, the info that we are looking for (the hashes)
# will be the same for every entry, so just access the first one
try:
metadata = vault_info[0].get('metadata')
except Exception:
return phantom.APP_ERROR, PROC_EMAIL_FAILED_VAULT_CONT_DATA
try:
cef_artifact['fileHashSha256'] = metadata['sha256']
except Exception:
pass
try:
cef_artifact['fileHashMd5'] = metadata['md5']
except Exception:
pass
try:
cef_artifact['fileHashSha1'] = metadata['sha1']
except Exception:
pass
return phantom.APP_SUCCESS, PROC_EMAIL_MAPPED_HASH_VAL
def _handle_file(self, curr_file, container_id):
file_name = curr_file.get('file_name')
local_file_path = curr_file['file_path']
contains = self._get_file_contains(local_file_path)
# lets move the data into the vault
vault_attach_dict = {}
if not file_name:
file_name = os.path.basename(local_file_path)
self._base_connector.debug_print("Vault file name: {0}".format(file_name))
vault_attach_dict[phantom.APP_JSON_ACTION_NAME] = self._base_connector.get_action_name()
vault_attach_dict[phantom.APP_JSON_APP_RUN_ID] = self._base_connector.get_app_run_id()
file_name = self._decode_uni_string(file_name, file_name)
# success, message, vault_id = phantom_rules.vault_add(container_id, local_file_path, file_name)
try:
success, message, vault_id = phantom_rules.vault_add(file_location=local_file_path, container=container_id, file_name=file_name, metadata=vault_attach_dict)
except Exception as e:
self._base_connector.debug_print(phantom.APP_ERR_FILE_ADD_TO_VAULT.format(e))
return phantom.APP_ERROR, phantom.APP_ERROR
if not success:
self._base_connector.debug_print(PROC_EMAIL_FAILED_VAULT_ADD_FILE.format(message))
return phantom.APP_ERROR, phantom.APP_ERROR
# add the vault id artifact to the container
cef_artifact = {}
if file_name:
cef_artifact.update({'fileName': file_name})
if vault_id:
cef_artifact.update({'vaultId': vault_id,
'cs6': vault_id,
'cs6Label': 'Vault ID'})
# now get the rest of the hashes and add them to the cef artifact
self._add_vault_hashes_to_dictionary(cef_artifact, vault_id)
if not cef_artifact:
return phantom.APP_SUCCESS, phantom.APP_ERROR
artifact = {}
artifact.update(_artifact_common)
artifact['container_id'] = container_id
artifact['name'] = 'Vault Artifact'
artifact['cef'] = cef_artifact
if contains:
artifact['cef_types'] = {'vaultId': contains, 'cs6': contains}
self._set_sdi(artifact)
ret_val, status_string, artifact_id = self._base_connector.save_artifact(artifact)
self._base_connector.debug_print(PROC_EMAIL_SAVE_CONT_PASSED.format(ret_val, status_string, artifact_id))
return phantom.APP_SUCCESS, ret_val
def cmp2(self, a, b):
return (a > b) - (a < b)
def _set_sdi(self, input_dict):
if 'source_data_identifier' in input_dict:
del input_dict['source_data_identifier']
dict_hash = None
# first get the phantom version
phantom_version = self._base_connector.get_product_version()
if not phantom_version:
dict_hash = self._create_dict_hash(input_dict)
else:
ver_cmp = self.cmp2(phantom_version, HASH_FIXED_PHANTOM_VERSION)
if ver_cmp == -1:
dict_hash = self._create_dict_hash(input_dict)
if dict_hash:
input_dict['source_data_identifier'] = dict_hash
else:
# Remove this code once the backend has fixed PS-4216 _and_ it has been
            # merged into next so that 2.0 and 2.1 have the code
input_dict['source_data_identifier'] = self._create_dict_hash(input_dict)
return phantom.APP_SUCCESS
def _create_dict_hash(self, input_dict):
try:
input_dict_str = json.dumps(input_dict, sort_keys=True)
except Exception as e:
self._base_connector.debug_print('Exception: ', e)
return None
return hashlib.md5(input_dict_str.encode('utf-8')).hexdigest()
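# Illustrative sketch (not part of the original app): shows, on made-up literal inputs, the
# two helper patterns used above -- decoding an RFC 2047 encoded Subject header the way
# _decode_uni_string/_get_container_name do, and hashing a sorted-key JSON dump the way
# _create_dict_hash builds a deterministic source_data_identifier.
def _example_header_decode_and_dict_hash():
    encoded_subject = "=?utf-8?B?SGVsbG8gV29ybGQ=?="  # base64 for "Hello World"
    decoded_subject = str(make_header(decode_header(encoded_subject)))
    artifact = {"name": "Email Artifact", "cef": {"fromEmail": "[email protected]"}}
    sdi = hashlib.md5(json.dumps(artifact, sort_keys=True).encode('utf-8')).hexdigest()
    return decoded_subject, sdi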
| 38.744015 | 168 | 0.621304 | [
"Apache-2.0"
] | chunmanjimmyf/phantom-apps | Apps/phgsgmail/gsgmail_process_email.py | 42,076 | Python |
# Copyright 2021 ETH Zurich, Media Technology Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import pandas as pd
"""
This module is mainly used to transform the data from the partners into our desired format.
In the end only load_data and get_metadata are used in the algorithms.
"""
def load_data(folder, input_path='user_item', cut=40,high_cut=1000000, seed=None):
"""
loads the training,validation,test set from the folder, restricts the users with at least "cut" read articles and
returns the sets. The Format of the sets is pd.Series with index the UserID and value a list of ArticleIDs
:param folder/input_path: {folder}/{input_path} is the path to look for the *_train.pkl files
:param cut: value to cut off users with less than "cut" read articles
:return: three pd.Series. Index of each series is the UserID. The value is a list of ArticleIDs.
             (look in create_split to see how the split is defined)
"""
# cut cuts off users that read less than cut articles
user_item_train, user_item_test, user_item_validation = pd.read_pickle(
f'{folder}/{input_path}_train.pkl'), pd.read_pickle(f'{folder}/{input_path}_test.pkl'), pd.read_pickle(
f'{folder}/{input_path}_validation.pkl')
user_item_train = user_item_train[user_item_train.str.len() > cut * 0.7]
user_item_train = user_item_train[user_item_train.str.len() < high_cut * 0.7]
user_item_test = user_item_test.loc[user_item_train.index]
user_item_validation = user_item_validation.loc[user_item_train.index]
return user_item_train, user_item_test, user_item_validation
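# Illustrative sketch (not part of the original module): typical call pattern for load_data.
# The folder name and cut value are made up, and the *_train/_test/_validation pickles are
# assumed to already exist, e.g. produced by create_split below.
def _example_load_data_usage(folder='processed'):
    train, test, validation = load_data(folder, input_path='user_item', cut=40)
    # each Series is indexed by UserID and holds the ordered list of clicked ArticleIDs
    return train.head(), test.head(), validation.head()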
def load_data_vertical(folder, input_path='user_item_vertical', cut=40):
"""
loads the training,validation,test set from the folder, restricts the users with at least "cut" read articles and
returns the sets. The Format of the sets is pd.Series with index the UserID and value a list of ArticleIDs
:param folder/input_path: {folder}/{input_path} is the path to look for the *_train.pkl files
:param cut: value to cut off users with less than "cut" read articles
:return: three pd.Series. Index of each series is the UserID. The value is a list of ArticleIDs.
             (look in create_split to see how the split is defined)
"""
# cut cuts off users that read less than cut articles
user_item_train, user_item_test, user_item_validation = pd.read_parquet(
f'{folder}/{input_path}_train.pq'), pd.read_parquet(f'{folder}/{input_path}_test.pq'), pd.read_parquet(
f'{folder}/{input_path}_validation.pq')
user_item_train = user_item_train[user_item_train['count'] >cut]
user_item_test =user_item_test[user_item_test['count'] >cut]
user_item_validation = user_item_validation[user_item_validation['count'] >cut]
user_item_train['resource_id']=user_item_train['article_id']
user_item_test['resource_id']=user_item_test['article_id']
user_item_validation['resource_id']=user_item_validation['article_id']
return user_item_train, user_item_test, user_item_validation
def load_data_cv(folder, input_path='user_item', cut=40, high_cut=100000,seed=1):
"""
Same as load_data but only returns random 80% of the training set
"""
# cut cuts off users that read less than cut articles
user_item_train, user_item_test, user_item_validation = load_data(folder, input_path=input_path, cut=cut,high_cut=high_cut)
user_item_train = user_item_train.sample(frac=0.8,random_state=seed)
user_item_test = user_item_test.sample(frac=1, random_state=seed)
return user_item_train, user_item_test, user_item_validation
def load_data_vertical_cv(folder, input_path='user_item_vertical', cut=40, high_cut=100000,seed=1):
"""
Same as load_data but only returns random 80% of the training set
"""
# cut cuts off users that read less than cut articles
user_item_train, user_item_test, user_item_validation = load_data_vertical(folder, input_path=input_path, cut=cut)
user_item_train = user_item_train.sample(frac=0.8,random_state=seed)
user_item_test = user_item_test.sample(frac=1, random_state=seed)
return user_item_train, user_item_test, user_item_validation
def get_metadata(folder, usecols=[]):
"""
Loads and returns the article metadata.
The algorithms expect the format to be a Dataframe with two columns:
- "resource_id": unique id for the article
- "text": full text of the article (without html tags)
"""
if not usecols:
usecols = ['text', 'resource_id']
metadata = pd.read_csv(f"{folder}/meta.csv", usecols=usecols)
return metadata.dropna(subset=['text'])
def transform_item_matrix_to_horizontal_format(folder, output_path='user_item_matrix.pkl',
input_path='user_item_matrix_vertical.pq', sortby='ts'):
"""
    Transforms the vertical User-Item matrix, where each row is one click, into a horizontal User-Item matrix where we have
one row for each user and each row contains a (sorted) list of articles she/he clicked on.
:param folder: Input folder
:param output_path: Filename/path for outputfile
    :param input_path: Filename/path for the input file. This parquet file contains a DataFrame with three columns:
"user_ix": the UserID and "article_id" the ArticleID and "<sortby>" which should be timestamp
to sort by. Each UserID ArticleID pair indicates a click of the user on the article at a time.
:param sortby: Columnname of the timestamp column to sort by
:return: returns a Series where the index is the UserID and values is the by timestamp
sorted list of clicked ArticleIDs
"""
now = datetime.datetime.now()
matrices = pd.read_parquet(f"{folder}/{input_path}")
grouped = matrices.sort_values(sortby).groupby(['user_ix']).apply(lambda x: list(x['article_id']))
grouped.to_pickle(f"{folder}/{output_path}")
print(f"Data transformed {datetime.datetime.now() - now}")
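# Illustration (added for clarity, values made up): a vertical input with the rows
#   (user_ix=u1, article_id=a2, ts=10), (u1, a1, 3), (u2, a5, 7)
# is grouped above into the horizontal Series {u1: [a1, a2], u2: [a5]}, i.e. one entry per user holding the
# clicked ArticleIDs sorted by timestamp.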
def create_split(folder, input_path='user_item_matrix.pkl', ouput_path='user_item', cut_dump=10):
"""
    Loads the horizontal user-item data from folder and creates a user-wise 70% train, 20% test, 10% validation split.
    This means for each user the first 70% of read articles go to the train set, the next 20% to the test set and the
    last 10% to the validation set. We remove users with "cut_dump" (default 10) or fewer clicked articles.
    This is the data that is loaded to train/test the models in the end.
"""
now = datetime.datetime.now()
user_item = pd.read_pickle(f"{folder}/{input_path}")
user_item = user_item[user_item.str.len() > (cut_dump)]
user_item_train = user_item.apply(lambda x: x[:int(len(x) * 0.7)])
user_item_test = user_item.apply(lambda x: x[int(len(x) * 0.7):int(len(x) * 0.9)])
user_item_validation = user_item.apply(lambda x: x[int(len(x) * 0.9):])
user_item_train.name = 'article_id'
user_item_test.name = 'article_id'
user_item_validation.name = 'article_id'
user_item_train.to_pickle(f'{folder}/{ouput_path}_train.pkl')
user_item_test.to_pickle(f'{folder}/{ouput_path}_test.pkl')
user_item_validation.to_pickle(f'{folder}/{ouput_path}_validation.pkl')
print(f"Split created {datetime.datetime.now() - now}")
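# Worked example (added for clarity): a user with 20 clicked articles [a1, ..., a20] ends up with
# a1..a14 in train (70%), a15..a18 in test (20%) and a19, a20 in validation (10%); users with
# cut_dump or fewer clicks were dropped before the split.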
def create_split_vertical(folder, input_path='user_item_matrix_vertical.pq', ouput_path='user_item_vertical', cut_dump=10,time_column='ts'):
"""
    Loads the vertical user-item data from folder and creates a user-wise 70% train, 20% test, 10% validation split.
    This means for each user the first 70% of read articles go to the train set, the next 20% to the test set and the
    last 10% to the validation set. We remove users with "cut_dump" (default 10) or fewer clicked articles.
    This is the data that is loaded to train/test the models in the end.
"""
now = datetime.datetime.now()
user_item = pd.read_parquet(f"{folder}/{input_path}").sort_values(time_column)
user_item['count']=user_item.groupby(['user_ix']).article_id.transform('count')
user_item = user_item[user_item['count']>cut_dump]
grouped = user_item.groupby(['user_ix'])
user_item['percentile'] = (grouped.article_id.cumcount() + 1) / grouped.article_id.transform('count')
user_item_train = user_item[user_item['percentile']<=0.7]
user_item_test = user_item[(user_item['percentile']>0.7) & (user_item['percentile']<0.9)]
user_item_validation = user_item[user_item['percentile']>0.9]
user_item_train.to_parquet(f'{folder}/{ouput_path}_train.pq')
user_item_test.to_parquet(f'{folder}/{ouput_path}_test.pq')
user_item_validation.to_parquet(f'{folder}/{ouput_path}_validation.pq')
print(f"Split created {datetime.datetime.now() - now}")
def transform_horizontal_to_vertical(df):
"""
    Transforms the horizontal format into the vertical format
    :param df: pd.Series (or DataFrame) in horizontal format, index UserID and value a list of ArticleIDs
    :return: pd.DataFrame in vertical format with one row per (user, article) pair
"""
return df.explode().reset_index()
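# Illustration (added for clarity, values made up): {u1: [a1, a2]} explodes into the two rows (u1, a1) and (u1, a2).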
if __name__ == "__main__":
import pandas as pd
folder = os.getenv('DATA_FOLDER','processed')
# Transforms the user-item-matrix into a user-series. For each user we store the articles read as one sorted list.
# Save the new format.
# This format is more convenient for creating the split and for training some of the algorithms.
transform_item_matrix_to_horizontal_format(folder=folder)
    # Create a train (70%), test (20%), validation (10%) split and save it
create_split(folder=folder, cut_dump=10)
create_split_vertical(folder=folder, cut_dump=10)
# loads the saved train,validation,test split
train, test, validation = load_data(folder=folder, cut=40)
# # if you wish to transform into normal user-item-format
# train_vertical = transform_horizontal_to_vertical(train)
| 49.591346 | 140 | 0.728938 | [
"Apache-2.0"
] | MTC-ETH/RecommenderSystems | preprocessing.py | 10,315 | Python |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from foundations.step import Step
from training.metric_logger import MetricLogger
from testing import test_case
class TestMetricLogger(test_case.TestCase):
def test_create(self):
MetricLogger()
@staticmethod
def create_logger():
logger = MetricLogger()
logger.add('train_accuracy', Step.from_iteration(0, 400), 0.5)
logger.add('train_accuracy', Step.from_iteration(1, 400), 0.6)
logger.add('test_accuracy', Step.from_iteration(0, 400), 0.4)
return logger
def test_add_get(self):
logger = TestMetricLogger.create_logger()
self.assertEqual(logger.get_data('train_accuracy'), [(0, 0.5), (1, 0.6)])
self.assertEqual(logger.get_data('test_accuracy'), [(0, 0.4)])
self.assertEqual(logger.get_data('test_loss'), [])
def test_overwrite(self):
logger = TestMetricLogger.create_logger()
logger.add('train_accuracy', Step.from_iteration(0, 400), 1.0)
self.assertEqual(logger.get_data('train_accuracy'), [(0, 1.0), (1, 0.6)])
def test_sorting(self):
logger = TestMetricLogger.create_logger()
logger.add('train_accuracy', Step.from_iteration(5, 400), 0.9)
logger.add('train_accuracy', Step.from_iteration(3, 400), 0.7)
logger.add('train_accuracy', Step.from_iteration(4, 400), 0.8)
self.assertEqual(logger.get_data('train_accuracy'),
[(0, 0.5), (1, 0.6), (3, 0.7), (4, 0.8), (5, 0.9)])
def test_str(self):
logger = TestMetricLogger.create_logger()
expected = ['train_accuracy,0,0.5', 'train_accuracy,1,0.6', 'test_accuracy,0,0.4']
self.assertEqual(str(logger), '\n'.join(expected))
def test_create_from_string(self):
logger = TestMetricLogger.create_logger()
logger2 = MetricLogger.create_from_string(str(logger))
self.assertEqual(logger.get_data('train_accuracy'), logger2.get_data('train_accuracy'))
self.assertEqual(logger.get_data('test_accuracy'), logger2.get_data('test_accuracy'))
self.assertEqual(str(logger), str(logger2))
def test_file_operations(self):
logger = TestMetricLogger.create_logger()
save_loc = os.path.join(self.root, 'temp_logger')
logger.save(save_loc)
logger2 = MetricLogger.create_from_file(save_loc)
self.assertEqual(logger.get_data('train_accuracy'), logger2.get_data('train_accuracy'))
self.assertEqual(logger.get_data('test_accuracy'), logger2.get_data('test_accuracy'))
self.assertEqual(str(logger), str(logger2))
test_case.main()
| 41.231884 | 96 | 0.660105 | [
"MIT"
] | sbam13/open_lth | training/test/test_metric_logger.py | 2,845 | Python |
import logging
import os
from enum import Enum
from functools import lru_cache
from typing import Optional
from pydantic import BaseSettings, PostgresDsn
logger = logging.getLogger(__name__)
class EnvironmentEnum(str, Enum):
PRODUCTION = "production"
LOCAL = "local"
class GlobalConfig(BaseSettings):
TITLE: str = "Endorser"
DESCRIPTION: str = "An endorser service for aca-py wallets"
ENVIRONMENT: EnvironmentEnum
DEBUG: bool = False
TESTING: bool = False
TIMEZONE: str = "UTC"
# the following defaults match up with default values in scripts/.env.example
# these MUST be all set in non-local environments.
PSQL_HOST: str = os.environ.get("ENDORSER_POSTGRESQL_HOST", "localhost")
PSQL_PORT: int = os.environ.get("ENDORSER_POSTGRESQL_PORT", 5432)
PSQL_DB: str = os.environ.get("ENDORSER_POSTGRESQL_DB", "traction")
PSQL_USER: str = os.environ.get("ENDORSER_DB_USER", "tractionuser")
PSQL_PASS: str = os.environ.get("ENDORSER_DB_USER_PWD", "tractionPass")
PSQL_ADMIN_USER: str = os.environ.get("ENDORSER_DB_ADMIN", "tractionadminuser")
PSQL_ADMIN_PASS: str = os.environ.get("ENDORSER_DB_ADMIN_PWD", "tractionadminPass")
# application connection is async
# fmt: off
SQLALCHEMY_DATABASE_URI: PostgresDsn = (
f"postgresql+asyncpg://{PSQL_USER}:{PSQL_PASS}@{PSQL_HOST}:{PSQL_PORT}/{PSQL_DB}" # noqa: E501
)
# migrations connection uses owner role and is synchronous
SQLALCHEMY_DATABASE_ADMIN_URI: PostgresDsn = (
f"postgresql://{PSQL_ADMIN_USER}:{PSQL_ADMIN_PASS}@{PSQL_HOST}:{PSQL_PORT}/{PSQL_DB}" # noqa: E501
)
# fmt: on
ACAPY_ADMIN_URL: str = os.environ.get(
"ENDORSER_ACAPY_ADMIN_URL", "http://localhost:9031"
)
ACAPY_ADMIN_URL_API_KEY: str = os.environ.get(
"ENDORSER_ACAPY_ADMIN_URL_API_KEY", "change-me"
)
ENDORSER_API_ADMIN_USER: str = os.environ.get("ENDORSER_API_ADMIN_USER", "endorser")
ENDORSER_API_ADMIN_KEY: str = os.environ.get("ENDORSER_API_ADMIN_KEY", "change-me")
ENDORSER_WEBHOOK_URL: str = os.environ.get(
"ENDORSER_WEBHOOK_URL", "http://endorser-api:5000/webhook"
)
ACAPY_WEBHOOK_URL_API_KEY_NAME = "x-api-key"
ACAPY_WEBHOOK_URL_API_KEY: str = os.environ.get("ACAPY_WEBHOOK_URL_API_KEY", "")
DB_ECHO_LOG: bool = False
# Api V1 prefix
API_V1_STR = "/v1"
# openssl rand -hex 32
JWT_SECRET_KEY = "09d25e094faa6ca2556c818166b7a9563b93f7099f6f0f4caa6cf63b88e8d3e7"
JWT_ALGORITHM = "HS256"
JWT_ACCESS_TOKEN_EXPIRE_MINUTES = 300
class Config:
case_sensitive = True
class LocalConfig(GlobalConfig):
"""Local configurations."""
DEBUG: bool = True
ENVIRONMENT: EnvironmentEnum = EnvironmentEnum.LOCAL
class ProdConfig(GlobalConfig):
"""Production configurations."""
DEBUG: bool = False
ENVIRONMENT: EnvironmentEnum = EnvironmentEnum.PRODUCTION
class FactoryConfig:
def __init__(self, environment: Optional[str]):
self.environment = environment
def __call__(self) -> GlobalConfig:
if self.environment == EnvironmentEnum.LOCAL.value:
return LocalConfig()
return ProdConfig()
@lru_cache()
def get_configuration() -> GlobalConfig:
return FactoryConfig(os.environ.get("ENVIRONMENT"))()
settings = get_configuration()
| 30.669725 | 107 | 0.714029 | [
"Apache-2.0"
] | Open-Earth-Foundation/traction | services/endorser/api/core/config.py | 3,343 | Python |
import _plotly_utils.basevalidators
class InsidetextanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="insidetextanchor", parent_name="funnel", **kwargs):
super(InsidetextanchorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
values=kwargs.pop("values", ["end", "middle", "start"]),
**kwargs,
)
| 38.153846 | 87 | 0.669355 | [
"MIT"
] | labaran1/plotly.py | packages/python/plotly/plotly/validators/funnel/_insidetextanchor.py | 496 | Python |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet accounts properly when there are cloned transactions with malleated scriptsigs."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class TxnMallTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
super(TxnMallTest, self).setup_network()
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
def run_test(self):
# All nodes should start with 12,500 WEI:
starting_balance = 12500
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress() # bug workaround, coins generated assigned to first getnewaddress!
self.nodes[0].settxfee(.001)
node0_address1 = self.nodes[0].getnewaddress()
node0_txid1 = self.nodes[0].sendtoaddress(node0_address1, 12190)
node0_tx1 = self.nodes[0].gettransaction(node0_txid1)
node0_address2 = self.nodes[0].getnewaddress()
node0_txid2 = self.nodes[0].sendtoaddress(node0_address2, 290)
node0_tx2 = self.nodes[0].gettransaction(node0_txid2)
assert_equal(self.nodes[0].getbalance(),
starting_balance + node0_tx1["fee"] + node0_tx2["fee"])
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress()
# Send tx1, and another transaction tx2 that won't be cloned
txid1 = self.nodes[0].sendtoaddress(node1_address, 400)
txid2 = self.nodes[0].sendtoaddress(node1_address, 200)
# Construct a clone of tx1, to be malleated
rawtx1 = self.nodes[0].getrawtransaction(txid1,1)
clone_inputs = [{"txid":rawtx1["vin"][0]["txid"],"vout":rawtx1["vin"][0]["vout"]}]
clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][0]["value"],
rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][1]["value"]}
clone_locktime = rawtx1["locktime"]
clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs, clone_locktime)
# createrawtransaction randomizes the order of its outputs, so swap them if necessary.
# output 0 is at version+#inputs+input+sigstub+sequence+#outputs
# 400 WEI serialized is 00902f5009000000
pos0 = 2*(4+1+36+1+4+1)
hex400 = "00902f5009000000"
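        # Worked out for clarity (the constants above are unchanged): the unsigned serialization is
        # 4-byte version + 1-byte input count + 36-byte outpoint + 1-byte empty-scriptSig stub +
        # 4-byte sequence + 1-byte output count = 47 bytes, so output 0 starts at 2*47 = 94 hex chars.
        # The value field is a 64-bit little-endian integer: 400 * 1e8 = 40,000,000,000 = 0x09502f9000,
        # which serializes to the byte string 00902f5009000000 checked below.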
output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16 : pos0 + 16 + 2], 0)
if (rawtx1["vout"][0]["value"] == 400 and clone_raw[pos0 : pos0 + 16] != hex400 or rawtx1["vout"][0]["value"] != 400 and clone_raw[pos0 : pos0 + 16] == hex400):
output0 = clone_raw[pos0 : pos0 + output_len]
output1 = clone_raw[pos0 + output_len : pos0 + 2 * output_len]
clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:]
# Use a different signature hash type to sign. This creates an equivalent but malleated clone.
# Don't send the clone anywhere yet
tx1_clone = self.nodes[0].signrawtransactionwithwallet(clone_raw, None, "ALL|ANYONECANPAY")
assert_equal(tx1_clone["complete"], True)
# Have node0 mine a block, if requested:
if (self.options.mine_block):
self.nodes[0].generate(1)
self.sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 500WEI for another
# matured block, minus tx1 and tx2 amounts, and minus transaction fees:
expected = starting_balance + node0_tx1["fee"] + node0_tx2["fee"]
if self.options.mine_block:
expected += 500
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Send clone and its parent to miner
self.nodes[2].sendrawtransaction(node0_tx1["hex"])
txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].sendrawtransaction(node0_tx2["hex"])
self.nodes[2].sendrawtransaction(tx2["hex"])
self.nodes[2].generate(1) # Mine another block to make sure we sync
self.sync_blocks()
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx1_clone = self.nodes[0].gettransaction(txid1_clone)
tx2 = self.nodes[0].gettransaction(txid2)
# Verify expected confirmations
assert_equal(tx1["confirmations"], -2)
assert_equal(tx1_clone["confirmations"], 2)
assert_equal(tx2["confirmations"], 1)
# Check node0's total balance; should be same as before the clone, + 1000 WEI for 2 matured,
# less possible orphaned matured subsidy
expected += 1000
if (self.options.mine_block):
expected -= 500
assert_equal(self.nodes[0].getbalance(), expected)
if __name__ == '__main__':
TxnMallTest().main()
| 45.587786 | 168 | 0.64501 | [
"MIT"
] | weicrypto/wei | test/functional/wallet_txn_clone.py | 5,972 | Python |
import collections
from collections import defaultdict
import sys
import json
import random
from jsmin import jsmin
from io import StringIO
import numpy as np
import copy
import os
script_n = os.path.basename(__file__).split('.')[0]
script_n = script_n.split('_', 1)[1]
def to_ng(loc):
return (int(loc[0]/4), int(loc[1]/4), int(loc[2]/40))
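# Worked example: to_ng((690576, 453872, 3560)) == (172644, 113468, 89). Raw synapse locations appear to be
# stored in nm (4 nm x/y pixels, 40 nm sections); to_ng converts them to the voxel coordinates used by the
# white/blacklists below.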
'''Load data'''
import compress_pickle
fname = 'gen_210518_setup01_v2_syndb_threshold_20_coalesced.gz'
grc_mfs_locs = compress_pickle.load(fname)
mfs_locs = defaultdict(list)
for grc in grc_mfs_locs:
for mf in grc_mfs_locs[grc]:
for syn in grc_mfs_locs[grc][mf]:
mfs_locs[mf].append(syn['syn_loc0'])
# print(mfs_locs[mf]); asdf
asdff = (172644, 113468, 89)
asdfff = (137580, 101824, 369)
# white list for big boutons
whitelist = set([
(172644, 113468, 89),
(163520, 98364, 83),
(113008, 109372, 1154),
(70424, 116512, 71),
(186536, 100020, 130),
(86780, 110184, 81),
(177992, 108528, 1164),
(127368, 101716, 1143),
(155036, 103252, 71),
(97884, 104152, 1160),
(109476, 104808, 76),
(82936, 122484, 76),
(113532, 104660, 1150),
(78904, 115540, 1158),
(190684, 91276, 1015),
(160500, 99828, 1165),
(109020, 115476, 74),
(93516, 101476, 858),
(126728, 104988, 86),
(173456, 106376, 71),
(197436, 95688, 898),
(122752, 110608, 85),
(122192, 119344, 70),
(122396, 118840, 83),
(204868, 103452, 145),
(94212, 107860, 1137),
(92360, 105844, 1162),
(84704, 115452, 119),
(54036, 105484, 394),
(110624, 105800, 70),
(170512, 99132, 107),
(71200, 114308, 1123),
(106588, 98692, 1160),
(70164, 107908, 1015),
(144772, 106812, 105),
(asdff),
(asdff),
(asdff),
])
blacklist = set([
(137580, 101824, 369),
(127384, 115252, 746),
(155268, 99276, 918),
(182000, 91966, 716),
(119828, 107400, 312),
(171384, 94244, 573),
(asdfff),
(asdfff),
(asdfff),
(asdfff),
(asdfff),
(asdfff),
])
'''Cluster and extract locations of MF boutons'''
from sklearn.cluster import DBSCAN
mfs_bouton_locs = {}
'''if a bouton location has this many synapses or fewer then it won't be considered, in order to reduce false positives'''
# bouton_synapse_threshold = 6 # safe for determining big bouton locations
bouton_synapse_threshold = 2
bouton_synapse_threshold = 3
bouton_synapse_threshold = 4 # 4 is a bit iffy, since it has some semi big boutons
bouton_synapse_threshold = 5
# bouton_synapse_threshold = 6 # this threshold has quite a bit of FPs
for mf in mfs_locs:
dbscan = DBSCAN(eps=8000, min_samples=2) # max dist set to 8um
    # dbscan = DBSCAN(eps=10000, min_samples=2) # max dist set to 10um
dbscan.fit(mfs_locs[mf])
loc_by_label = defaultdict(list)
for loc, label in zip(mfs_locs[mf], dbscan.labels_):
loc_by_label[label].append(loc)
mf_bouton_locs = []
for label in loc_by_label:
if len(loc_by_label[label]) <= bouton_synapse_threshold:
whitelisted = False
for loc in loc_by_label[label]:
if to_ng(loc) in whitelist:
whitelisted = True
if not whitelisted:
if len(loc_by_label[label]) >= 2:
print(f'Ignoring {mf} due to insufficient synapses')
for loc in loc_by_label[label]:
print(to_ng(loc))
continue
sum = [0, 0, 0]
for loc in loc_by_label[label]:
sum = [sum[0]+loc[0], sum[1]+loc[1], sum[2]+loc[2]]
center = [
int(sum[0]/len(loc_by_label[label])),
int(sum[1]/len(loc_by_label[label])),
int(sum[2]/len(loc_by_label[label])),
]
mf_bouton_locs.append(center)
mfs_bouton_locs[mf] = mf_bouton_locs
# print(mf_bouton_locs)
# for loc in mf_bouton_locs:
# print([int(loc[0]/4), int(loc[1]/4), int(loc[2]/40)])
mfs_bouton_count = defaultdict(list)
for mf in mfs_bouton_locs:
mfs_bouton_count[len(mfs_bouton_locs[mf])].append(mf)
for count in sorted(mfs_bouton_count.keys()):
print(f'{count}: {mfs_bouton_count[count]}')
'''save mfs_bouton_locs'''
import compress_pickle
compress_pickle.dump((
mfs_bouton_locs
), f"{script_n}.gz")
asdf
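# The bare name above presumably acts as an intentional stop (it raises a NameError) so that the ad-hoc
# debugging prints below are never executed when the script runs end to end.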
for loc in mfs_bouton_locs['mf_431']:
print([int(loc[0]/4), int(loc[1]/4), int(loc[2]/40)])
for loc in mfs_locs['mf_41']:
print([int(loc[0]/4), int(loc[1]/4), int(loc[2]/40)])
| 28.427673 | 122 | 0.623009 | [
"MIT"
] | htem/cb2_project_analysis | analysis/gen_db/mf_grc/gen_mf_locs_210518.py | 4,520 | Python |
import unittest
class TestPython(unittest.TestCase):
def test_float_to_int_coercion(self):
self.assertEqual(1, int(1.0))
def test_get_empty_dict(self):
self.assertIsNone({}.get('key'))
def test_trueness(self):
self.assertTrue(bool(10))
| 19.785714 | 41 | 0.67509 | [
"MIT"
] | microcoder/course-python-mipt | 4/tests/test_python.py | 277 | Python |
# third-party
from flask import render_template, url_for, request, jsonify
# locals
from . import warehouse
@warehouse.route('/element_types', methods=['GET'])
def index():
return render_template("warehouse/element_types.html")
@warehouse.route('/element_type', methods=['POST'])
def create_new_element_type():
print(request.__dict__)
print(request.data)
print(request.get_json())
return jsonify({
"success": True
})
# @warehouse.route('/element_type', methods=['GET'])
# @warehouse.route('/element_type/<element_type_id>', methods=['GET'])
# def element_type(element_type_id=None):
# pass
# @warehouse.route('/element_type', methods=['POST'])
# def new_element_type()
| 26.296296 | 70 | 0.707042 | [
"MIT"
] | thiagolcmelo/dynamic | warehouse/views.py | 710 | Python |
# copyright (c) 2018 paddlepaddle authors. all rights reserved.
#
# licensed under the apache license, version 2.0 (the "license");
# you may not use this file except in compliance with the license.
# you may obtain a copy of the license at
#
# http://www.apache.org/licenses/license-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the license is distributed on an "as is" basis,
# without warranties or conditions of any kind, either express or implied.
# see the license for the specific language governing permissions and
# limitations under the license.
import os
import unittest
import random
import numpy as np
import paddle.fluid as fluid
import six
import paddle
from paddle.fluid.framework import IrGraph
from paddle.fluid.contrib.slim.quantization import QuantizationTransformPass
from paddle.fluid.contrib.slim.quantization import QuantizationFreezePass
from paddle.fluid.contrib.slim.quantization import ConvertToInt8Pass
from paddle.fluid.contrib.slim.quantization import TransformForMobilePass
from paddle.fluid.contrib.slim.quantization import AddQuantDequantPass
from paddle.fluid import core
paddle.enable_static()
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
os.environ["CPU_NUM"] = "1"
def linear_fc(num):
data = fluid.layers.data(name='image', shape=[1, 32, 32], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
hidden = data
for _ in six.moves.xrange(num):
hidden = fluid.layers.fc(hidden, size=128, act='relu')
loss = fluid.layers.cross_entropy(input=hidden, label=label)
loss = fluid.layers.mean(loss)
return loss
def residual_block(num, quant_skip_pattern=None):
def conv_bn_layer(input,
ch_out,
filter_size,
stride,
padding,
act='relu',
bias_attr=False):
tmp = fluid.layers.conv2d(
input=input,
filter_size=filter_size,
num_filters=ch_out,
stride=stride,
padding=padding,
act=None,
bias_attr=bias_attr)
return fluid.layers.batch_norm(input=tmp, act=act)
data = fluid.layers.data(
name='image',
shape=[1, 1, 32, 32],
dtype='float32',
append_batch_size=False)
label = fluid.layers.data(
name='label', shape=[1, 1], dtype='int64', append_batch_size=False)
hidden = data
for _ in six.moves.xrange(num):
conv = conv_bn_layer(hidden, 16, 3, 1, 1, act=None, bias_attr=True)
short = conv_bn_layer(hidden, 16, 1, 1, 0, act=None)
hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu')
matmul_weight = fluid.layers.create_parameter(
shape=[1, 16, 32, 32], dtype='float32')
hidden = fluid.layers.matmul(hidden, matmul_weight, True, True)
if quant_skip_pattern:
with fluid.name_scope(quant_skip_pattern):
pool = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='avg', pool_stride=2)
else:
pool = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='avg', pool_stride=2)
fc = fluid.layers.fc(input=pool, size=10)
loss = fluid.layers.cross_entropy(input=fc, label=label)
loss = fluid.layers.mean(loss)
return loss
def conv_net(img, label, quant_skip_pattern):
conv_pool_1 = fluid.nets.simple_img_conv_pool(
input=img,
filter_size=5,
num_filters=20,
pool_size=2,
pool_stride=2,
pool_type='max',
act="relu")
conv_pool_1 = fluid.layers.batch_norm(conv_pool_1)
conv_pool_2 = fluid.nets.simple_img_conv_pool(
input=conv_pool_1,
filter_size=5,
num_filters=50,
pool_size=2,
pool_stride=2,
pool_type='avg',
act="relu")
hidden = fluid.layers.fc(input=conv_pool_2, size=100, act='relu')
with fluid.name_scope(quant_skip_pattern):
prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=prediction, label=label)
avg_loss = fluid.layers.mean(loss)
return avg_loss
class TestQuantizationTransformPass(unittest.TestCase):
def setUp(self):
self.quantizable_op_and_inputs = {
'conv2d': ['Input', 'Filter'],
'depthwise_conv2d': ['Input', 'Filter'],
'mul': ['X', 'Y']
}
self.quantizable_grad_op_inputs = {
'conv2d_grad': ['Input', 'Filter'],
'depthwise_conv2d_grad': ['Input', 'Filter'],
'mul_grad': ['X', 'Y']
}
def check_program(self, program):
quantized_ops = set()
for block in program.blocks:
for op in block.ops:
# check forward
if op.type in self.quantizable_op_and_inputs:
for arg_name in op.input_arg_names:
self.assertTrue(
arg_name.endswith('.quantized.dequantized'))
quantized_ops.add(arg_name)
for op in block.ops:
# check backward
if op.type in self.quantizable_grad_op_inputs:
for pname in self.quantizable_grad_op_inputs[op.type]:
arg_name = op.input(pname)[0]
self.assertTrue(
arg_name.endswith('.quantized.dequantized'))
self.assertTrue(arg_name in quantized_ops)
def linear_fc_quant(self,
activation_quant_type,
weight_quantize_type,
for_ci=True):
main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
loss = linear_fc(3)
opt = fluid.optimizer.Adam(learning_rate=0.001)
opt.minimize(loss)
place = fluid.CPUPlace()
graph = IrGraph(core.Graph(main.desc), for_test=False)
transform_pass = QuantizationTransformPass(
scope=fluid.global_scope(),
place=place,
activation_quantize_type=activation_quant_type,
weight_quantize_type=weight_quantize_type)
transform_pass.apply(graph)
if not for_ci:
marked_nodes = set()
for op in graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
graph.draw('.', 'quantize_fc_' + activation_quant_type,
marked_nodes)
program = graph.to_program()
self.check_program(program)
val_graph = IrGraph(core.Graph(program.desc), for_test=False)
if not for_ci:
val_marked_nodes = set()
for op in val_graph.all_op_nodes():
if op.name().find('quantize') > -1:
val_marked_nodes.add(op)
val_graph.draw('.', 'val_fc_' + activation_quant_type,
val_marked_nodes)
def test_linear_fc_quant_abs_max(self):
self.linear_fc_quant('abs_max', 'abs_max', for_ci=True)
def test_linear_fc_quant_range_abs_max(self):
self.linear_fc_quant('range_abs_max', 'abs_max', for_ci=True)
def test_linear_fc_quant_moving_average_abs_max(self):
self.linear_fc_quant(
'moving_average_abs_max', 'channel_wise_abs_max', for_ci=True)
def residual_block_quant(self,
activation_quant_type,
weight_quantize_type,
quantizable_op_type,
for_ci=True):
main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
loss = residual_block(2)
opt = fluid.optimizer.Adam(learning_rate=0.001)
opt.minimize(loss)
place = fluid.CPUPlace()
graph = IrGraph(core.Graph(main.desc), for_test=False)
transform_pass = QuantizationTransformPass(
scope=fluid.global_scope(),
place=place,
activation_quantize_type=activation_quant_type,
weight_quantize_type=weight_quantize_type,
quantizable_op_type=quantizable_op_type)
transform_pass.apply(graph)
if not for_ci:
marked_nodes = set()
for op in graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
graph.draw('.', 'quantize_residual_' + activation_quant_type,
marked_nodes)
program = graph.to_program()
self.check_program(program)
val_graph = IrGraph(core.Graph(program.desc), for_test=False)
if not for_ci:
val_marked_nodes = set()
for op in val_graph.all_op_nodes():
if op.name().find('quantize') > -1:
val_marked_nodes.add(op)
val_graph.draw('.', 'val_residual_' + activation_quant_type,
val_marked_nodes)
def test_residual_block_abs_max(self):
quantizable_op_type = ['conv2d', 'depthwise_conv2d', 'mul', 'matmul']
self.residual_block_quant(
'abs_max', 'abs_max', quantizable_op_type, for_ci=True)
def test_residual_block_range_abs_max(self):
quantizable_op_type = ['conv2d', 'depthwise_conv2d', 'mul', 'matmul']
self.residual_block_quant(
'range_abs_max', 'abs_max', quantizable_op_type, for_ci=True)
def test_residual_block_moving_average_abs_max(self):
quantizable_op_type = ['conv2d', 'depthwise_conv2d', 'mul', 'matmul']
self.residual_block_quant(
'moving_average_abs_max',
'channel_wise_abs_max',
quantizable_op_type,
for_ci=True)
class TestQuantizationFreezePass(unittest.TestCase):
def freeze_graph(self,
use_cuda,
seed,
activation_quant_type,
bias_correction=False,
weight_quant_type='abs_max',
for_ci=True,
quant_skip_pattern='skip_quant'):
def build_program(main, startup, is_test):
main.random_seed = seed
startup.random_seed = seed
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
img = fluid.layers.data(
name='image', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(
name='label', shape=[1], dtype='int64')
loss = conv_net(img, label, quant_skip_pattern)
if not is_test:
opt = fluid.optimizer.Adam(learning_rate=0.001)
opt.minimize(loss)
return [img, label], loss
random.seed(0)
np.random.seed(0)
main = fluid.Program()
startup = fluid.Program()
test_program = fluid.Program()
feeds, loss = build_program(main, startup, False)
build_program(test_program, startup, True)
test_program = test_program.clone(for_test=True)
main_graph = IrGraph(core.Graph(main.desc), for_test=False)
test_graph = IrGraph(core.Graph(test_program.desc), for_test=True)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
scope = fluid.Scope()
with fluid.scope_guard(scope):
exe.run(startup)
transform_pass = QuantizationTransformPass(
scope=scope,
place=place,
activation_quantize_type=activation_quant_type,
weight_quantize_type=weight_quant_type,
skip_pattern=quant_skip_pattern)
transform_pass.apply(main_graph)
transform_pass.apply(test_graph)
dev_name = '_gpu_' if use_cuda else '_cpu_'
if not for_ci:
marked_nodes = set()
for op in main_graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
main_graph.draw('.', 'main' + dev_name + activation_quant_type + '_'
+ weight_quant_type, marked_nodes)
marked_nodes = set()
for op in test_graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
test_graph.draw('.', 'test' + dev_name + activation_quant_type + '_'
+ weight_quant_type, marked_nodes)
build_strategy = fluid.BuildStrategy()
build_strategy.memory_optimize = False
build_strategy.enable_inplace = False
build_strategy.fuse_all_reduce_ops = False
binary = fluid.CompiledProgram(main_graph.graph).with_data_parallel(
loss_name=loss.name, build_strategy=build_strategy)
quantized_test_program = test_graph.to_program()
iters = 5
batch_size = 8
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.mnist.train(), buf_size=500),
batch_size=batch_size)
test_reader = paddle.batch(
paddle.dataset.mnist.test(), batch_size=batch_size)
feeder = fluid.DataFeeder(feed_list=feeds, place=place)
with fluid.scope_guard(scope):
for _ in range(iters):
data = next(train_reader())
loss_v = exe.run(binary,
feed=feeder.feed(data),
fetch_list=[loss])
if not for_ci:
print('{}: {}'.format('loss' + dev_name +
activation_quant_type + '_' +
weight_quant_type, loss_v))
test_data = next(test_reader())
with fluid.program_guard(quantized_test_program):
w_var = fluid.framework._get_var('conv2d_1.w_0.quantized',
quantized_test_program)
# Testing
with fluid.scope_guard(scope):
test_loss1, w_quant = exe.run(program=quantized_test_program,
feed=feeder.feed(test_data),
fetch_list=[loss, w_var])
# Freeze graph for inference, but the weight of fc/conv is still float type.
freeze_pass = QuantizationFreezePass(
scope=scope, place=place, bias_correction=bias_correction, \
weight_quantize_type=weight_quant_type)
freeze_pass.apply(test_graph)
if not for_ci:
marked_nodes = set()
for op in test_graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
test_graph.draw('.', 'test_freeze' + dev_name +
activation_quant_type + '_' + weight_quant_type,
marked_nodes)
server_program = test_graph.to_program()
with fluid.scope_guard(scope):
test_loss2, = exe.run(program=server_program,
feed=feeder.feed(test_data),
fetch_list=[loss])
self.assertAlmostEqual(test_loss1, test_loss2, delta=5e-3)
if not for_ci:
print(
'{}: {}'.format('test_loss1' + dev_name + activation_quant_type
+ '_' + weight_quant_type, test_loss1))
print(
'{}: {}'.format('test_loss2' + dev_name + activation_quant_type
+ '_' + weight_quant_type, test_loss2))
w_freeze = np.array(scope.find_var('conv2d_1.w_0').get_tensor())
# Maybe failed, this is due to the calculation precision
# self.assertAlmostEqual(np.sum(w_freeze), np.sum(w_quant))
if not for_ci:
print('{}: {}'.format('w_freeze' + dev_name + activation_quant_type
+ '_' + weight_quant_type, np.sum(w_freeze)))
print('{}: {}'.format('w_quant' + dev_name + activation_quant_type +
'_' + weight_quant_type, np.sum(w_quant)))
# Convert parameter to 8-bit.
convert_int8_pass = ConvertToInt8Pass(scope=scope, place=place)
convert_int8_pass.apply(test_graph)
if not for_ci:
marked_nodes = set()
for op in test_graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
test_graph.draw('.', 'test_int8' + dev_name + activation_quant_type
+ '_' + weight_quant_type, marked_nodes)
server_program_int8 = test_graph.to_program()
# Save the 8-bit parameter and model file.
with fluid.scope_guard(scope):
fluid.io.save_inference_model(
'server_int8' + dev_name + activation_quant_type + '_' +
weight_quant_type, ['image', 'label'], [loss], exe,
server_program_int8)
# Test whether the 8-bit parameter and model file can be loaded successfully.
[infer, feed, fetch] = fluid.io.load_inference_model(
'server_int8' + dev_name + activation_quant_type + '_' +
weight_quant_type, exe)
# Check the loaded 8-bit weight.
w_8bit = np.array(scope.find_var('conv2d_1.w_0.int8').get_tensor())
self.assertEqual(w_8bit.dtype, np.int8)
self.assertEqual(np.sum(w_8bit), np.sum(w_freeze))
if not for_ci:
print('{}: {}'.format('w_8bit' + dev_name + activation_quant_type +
'_' + weight_quant_type, np.sum(w_8bit)))
print('{}: {}'.format('w_freeze' + dev_name + activation_quant_type
+ '_' + weight_quant_type, np.sum(w_freeze)))
mobile_pass = TransformForMobilePass()
mobile_pass.apply(test_graph)
if not for_ci:
marked_nodes = set()
for op in test_graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
test_graph.draw('.', 'test_mobile' + dev_name +
activation_quant_type + '_' + weight_quant_type,
marked_nodes)
mobile_program = test_graph.to_program()
with fluid.scope_guard(scope):
fluid.io.save_inference_model(
'mobile_int8' + dev_name + activation_quant_type + '_' +
weight_quant_type, ['image', 'label'], [loss], exe,
mobile_program)
def test_freeze_graph_cuda_dynamic(self):
if fluid.core.is_compiled_with_cuda():
with fluid.unique_name.guard():
self.freeze_graph(
True,
seed=1,
activation_quant_type='abs_max',
weight_quant_type='abs_max',
for_ci=True)
with fluid.unique_name.guard():
self.freeze_graph(
True,
seed=1,
activation_quant_type='abs_max',
weight_quant_type='channel_wise_abs_max',
for_ci=True)
def test_freeze_graph_cpu_dynamic(self):
with fluid.unique_name.guard():
self.freeze_graph(
False,
seed=2,
activation_quant_type='abs_max',
weight_quant_type='abs_max',
for_ci=True)
self.freeze_graph(
False,
seed=2,
activation_quant_type='abs_max',
weight_quant_type='channel_wise_abs_max',
for_ci=True)
def test_freeze_graph_cuda_static(self):
if fluid.core.is_compiled_with_cuda():
with fluid.unique_name.guard():
self.freeze_graph(
True,
seed=1,
activation_quant_type='range_abs_max',
bias_correction=True,
weight_quant_type='abs_max',
for_ci=True)
self.freeze_graph(
True,
seed=1,
activation_quant_type='range_abs_max',
weight_quant_type='abs_max',
for_ci=True)
self.freeze_graph(
True,
seed=1,
activation_quant_type='moving_average_abs_max',
weight_quant_type='abs_max',
for_ci=True)
self.freeze_graph(
True,
seed=1,
activation_quant_type='range_abs_max',
weight_quant_type='channel_wise_abs_max',
for_ci=True)
self.freeze_graph(
True,
seed=1,
activation_quant_type='moving_average_abs_max',
weight_quant_type='channel_wise_abs_max',
for_ci=True)
self.freeze_graph(
True,
seed=1,
activation_quant_type='moving_average_abs_max',
bias_correction=True,
weight_quant_type='channel_wise_abs_max',
for_ci=True)
def test_freeze_graph_cpu_static(self):
with fluid.unique_name.guard():
self.freeze_graph(
False,
seed=2,
activation_quant_type='range_abs_max',
weight_quant_type='abs_max',
for_ci=True)
self.freeze_graph(
False,
seed=2,
activation_quant_type='moving_average_abs_max',
weight_quant_type='abs_max',
for_ci=True)
self.freeze_graph(
False,
seed=2,
activation_quant_type='range_abs_max',
weight_quant_type='channel_wise_abs_max',
for_ci=True)
self.freeze_graph(
False,
seed=2,
activation_quant_type='moving_average_abs_max',
weight_quant_type='channel_wise_abs_max',
for_ci=True)
def quant_dequant_residual_block(num, quant_skip_pattern=None):
def conv_bn_layer(input,
ch_out,
filter_size,
stride,
padding,
act='relu',
bias_attr=False):
tmp = fluid.layers.conv2d(
input=input,
filter_size=filter_size,
num_filters=ch_out,
stride=stride,
padding=padding,
act=None,
bias_attr=bias_attr)
return fluid.layers.batch_norm(input=tmp, act=act)
data1 = fluid.layers.data(name='image', shape=[1, 32, 32], dtype='float32')
data2 = fluid.layers.data(
name='matmul_input', shape=[16, 32, 32], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
hidden = data1
for _ in six.moves.xrange(num):
conv = conv_bn_layer(hidden, 16, 3, 1, 1, act=None, bias_attr=True)
short = conv_bn_layer(hidden, 16, 1, 1, 0, act=None)
hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu')
hidden = fluid.layers.matmul(hidden, data2, True, True)
if isinstance(quant_skip_pattern, str):
with fluid.name_scope(quant_skip_pattern):
pool1 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='avg', pool_stride=2)
pool2 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='max', pool_stride=2)
pool_add = fluid.layers.elementwise_add(
x=pool1, y=pool2, act='relu')
elif isinstance(quant_skip_pattern, list):
assert len(
quant_skip_pattern
) > 1, 'test config error: the len of quant_skip_pattern list should be greater than 1.'
with fluid.name_scope(quant_skip_pattern[0]):
pool1 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='avg', pool_stride=2)
pool2 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='max', pool_stride=2)
with fluid.name_scope(quant_skip_pattern[1]):
pool_add = fluid.layers.elementwise_add(
x=pool1, y=pool2, act='relu')
else:
pool1 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='avg', pool_stride=2)
pool2 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='max', pool_stride=2)
pool_add = fluid.layers.elementwise_add(x=pool1, y=pool2, act='relu')
fc = fluid.layers.fc(input=pool_add, size=10)
loss = fluid.layers.cross_entropy(input=fc, label=label)
loss = fluid.layers.mean(loss)
return loss
class TestAddQuantDequantPass(unittest.TestCase):
def setUp(self):
self._target_ops = {'elementwise_add', 'pool2d'}
self._target_grad_ops = {'elementwise_add_grad', 'pool2d_grad'}
def check_graph(self, graph, skip_pattern=None):
ops = graph.all_op_nodes()
for op_node in ops:
if op_node.name() in self._target_ops:
user_skipped = False
if isinstance(skip_pattern, list):
user_skipped = op_node.op().has_attr("op_namescope") and \
any(pattern in op_node.op().attr("op_namescope") for pattern in skip_pattern)
elif isinstance(skip_pattern, str):
user_skipped = op_node.op().has_attr("op_namescope") and \
op_node.op().attr("op_namescope").find(skip_pattern) != -1
if user_skipped:
continue
in_nodes_all_not_persistable = True
for input_name in op_node.input_arg_names():
in_node = graph._find_node_by_name(op_node.inputs,
input_name)
in_nodes_all_not_persistable = (
in_nodes_all_not_persistable and
not in_node.persistable())
if not in_nodes_all_not_persistable:
continue
input_names = op_node.input_arg_names()
for input_name in input_names:
self.assertTrue(input_name.endswith('.quant_dequant'))
def residual_block_quant(self,
quantizable_op_type,
skip_pattern=None,
for_ci=True):
main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
loss = quant_dequant_residual_block(2, skip_pattern)
opt = fluid.optimizer.Adam(learning_rate=0.001)
opt.minimize(loss)
place = fluid.CPUPlace()
graph = IrGraph(core.Graph(main.desc), for_test=False)
add_quant_dequant_pass = AddQuantDequantPass(
scope=fluid.global_scope(),
place=place,
skip_pattern=skip_pattern,
quantizable_op_type=quantizable_op_type)
add_quant_dequant_pass.apply(graph)
if not for_ci:
marked_nodes = set()
for op in graph.all_op_nodes():
if op.name().find('quant') > -1:
marked_nodes.add(op)
graph.draw('.', 'add_quant_dequant_graph', marked_nodes)
self.check_graph(graph, skip_pattern)
program = graph.to_program()
val_graph = IrGraph(core.Graph(program.desc), for_test=False)
if not for_ci:
val_marked_nodes = set()
for op in val_graph.all_op_nodes():
if op.name().find('quant') > -1:
val_marked_nodes.add(op)
val_graph.draw('.', 'val_add_quant_dequant_graph', val_marked_nodes)
def test_residual_block(self):
quantizable_op_type = ['elementwise_add', 'pool2d', 'mul', 'matmul']
self.residual_block_quant(
quantizable_op_type, skip_pattern=None, for_ci=True)
def test_residual_block_skip_pattern(self):
quantizable_op_type = ['elementwise_add', 'pool2d', 'mul', 'matmul']
self.residual_block_quant(
quantizable_op_type, skip_pattern='skip_quant', for_ci=True)
    def test_residual_block_skip_pattern_list(self):
quantizable_op_type = ['elementwise_add', 'pool2d', 'mul', 'matmul']
self.residual_block_quant(
quantizable_op_type,
skip_pattern=['skip_quant1', 'skip_quant2'],
for_ci=True)
if __name__ == '__main__':
unittest.main()
| 42.29233 | 112 | 0.56919 | [
"Apache-2.0"
] | 0x45f/Paddle | python/paddle/fluid/contrib/slim/tests/test_quantization_pass.py | 29,224 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def black(string):
    return '\033[30m' + string + '\033[0m'
def blue(string):
    return '\033[94m' + string + '\033[0m'
def gray(string):
    return '\033[1;30m' + string + '\033[0m'
def green(string):
    return '\033[92m' + string + '\033[0m'
def cyan(string):
    return '\033[96m' + string + '\033[0m'
def lightPurple(string):
    return '\033[94m' + string + '\033[0m'
def purple(string):
    return '\033[95m' + string + '\033[0m'
def red(string):
    return '\033[91m' + string + '\033[0m'
def underline(string):
    return '\033[4m' + string + '\033[0m'
def white(string):
    return '\033[0m' + string + '\033[0m'
def white_2(string):
    return '\033[1m' + string + '\033[0m'
def yellow(string):
    return '\033[93m' + string + '\033[0m'
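# Example usage (added for illustration; any helper can be combined in a normal print call):
#   print(red('error'), yellow('warning'), green('ok'))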
| 19.205128 | 39 | 0.635514 | [
"MIT"
] | Mattewn99/Instagram-Autogagement | utils/color.py | 749 | Python |
import cProfile
import json
import logging
import os
import pstats
import signal
import tempfile
import time
import traceback
from django.conf import settings
from django.utils.timezone import now as tz_now
from django.db import DatabaseError, OperationalError, connection as django_connection
from django.db.utils import InterfaceError, InternalError
import psutil
import redis
from awx.main.consumers import emit_channel_notification
from awx.main.models import (JobEvent, AdHocCommandEvent, ProjectUpdateEvent,
InventoryUpdateEvent, SystemJobEvent, UnifiedJob,
Job)
from awx.main.tasks import handle_success_and_failure_notifications
from awx.main.models.events import emit_event_detail
from .base import BaseWorker
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
class CallbackBrokerWorker(BaseWorker):
'''
A worker implementation that deserializes callback event data and persists
it into the database.
The code that *generates* these types of messages is found in the
ansible-runner display callback plugin.
'''
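    # For orientation (inferred from the code below, not from upstream docs): each body popped off redis is
    # a JSON dict with an "event" key plus an id key such as "job_id"/"ad_hoc_command_id"/..., and the final
    # "EOF" message may carry a "final_counter"; perform_work() maps the id key to the matching *Event model.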
MAX_RETRIES = 2
last_stats = time.time()
total = 0
last_event = ''
prof = None
def __init__(self):
self.buff = {}
self.pid = os.getpid()
self.redis = redis.Redis.from_url(settings.BROKER_URL)
for key in self.redis.keys('awx_callback_receiver_statistics_*'):
self.redis.delete(key)
def read(self, queue):
try:
res = self.redis.blpop(settings.CALLBACK_QUEUE, timeout=settings.JOB_EVENT_BUFFER_SECONDS)
if res is None:
return {'event': 'FLUSH'}
self.total += 1
return json.loads(res[1])
except redis.exceptions.RedisError:
logger.exception("encountered an error communicating with redis")
time.sleep(1)
except (json.JSONDecodeError, KeyError):
logger.exception("failed to decode JSON message from redis")
finally:
self.record_statistics()
return {'event': 'FLUSH'}
def record_statistics(self):
# buffer stat recording to once per (by default) 5s
if time.time() - self.last_stats > settings.JOB_EVENT_STATISTICS_INTERVAL:
try:
self.redis.set(f'awx_callback_receiver_statistics_{self.pid}', self.debug())
self.last_stats = time.time()
except Exception:
logger.exception("encountered an error communicating with redis")
self.last_stats = time.time()
def debug(self):
return f'. worker[pid:{self.pid}] sent={self.total} rss={self.mb}MB {self.last_event}'
@property
def mb(self):
return '{:0.3f}'.format(
psutil.Process(self.pid).memory_info().rss / 1024.0 / 1024.0
)
def toggle_profiling(self, *args):
if self.prof:
self.prof.disable()
filename = f'callback-{self.pid}.pstats'
filepath = os.path.join(tempfile.gettempdir(), filename)
with open(filepath, 'w') as f:
pstats.Stats(self.prof, stream=f).sort_stats('cumulative').print_stats()
pstats.Stats(self.prof).dump_stats(filepath + '.raw')
self.prof = False
logger.error(f'profiling is disabled, wrote {filepath}')
else:
self.prof = cProfile.Profile()
self.prof.enable()
logger.error('profiling is enabled')
def work_loop(self, *args, **kw):
if settings.AWX_CALLBACK_PROFILE:
signal.signal(signal.SIGUSR1, self.toggle_profiling)
return super(CallbackBrokerWorker, self).work_loop(*args, **kw)
def flush(self, force=False):
now = tz_now()
if (
force or
any([len(events) >= 1000 for events in self.buff.values()])
):
for cls, events in self.buff.items():
logger.debug(f'{cls.__name__}.objects.bulk_create({len(events)})')
for e in events:
if not e.created:
e.created = now
e.modified = now
try:
cls.objects.bulk_create(events)
except Exception:
# if an exception occurs, we should re-attempt to save the
# events one-by-one, because something in the list is
# broken/stale
for e in events:
try:
e.save()
except Exception:
logger.exception('Database Error Saving Job Event')
for e in events:
emit_event_detail(e)
self.buff = {}
def perform_work(self, body):
try:
flush = body.get('event') == 'FLUSH'
if flush:
self.last_event = ''
if not flush:
event_map = {
'job_id': JobEvent,
'ad_hoc_command_id': AdHocCommandEvent,
'project_update_id': ProjectUpdateEvent,
'inventory_update_id': InventoryUpdateEvent,
'system_job_id': SystemJobEvent,
}
job_identifier = 'unknown job'
for key, cls in event_map.items():
if key in body:
job_identifier = body[key]
break
self.last_event = f'\n\t- {cls.__name__} for #{job_identifier} ({body.get("event", "")} {body.get("uuid", "")})' # noqa
if body.get('event') == 'EOF':
try:
final_counter = body.get('final_counter', 0)
logger.info('Event processing is finished for Job {}, sending notifications'.format(job_identifier))
# EOF events are sent when stdout for the running task is
# closed. don't actually persist them to the database; we
# just use them to report `summary` websocket events as an
# approximation for when a job is "done"
emit_channel_notification(
'jobs-summary',
dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter)
)
# Additionally, when we've processed all events, we should
# have all the data we need to send out success/failure
# notification templates
uj = UnifiedJob.objects.get(pk=job_identifier)
if isinstance(uj, Job):
# *actual playbooks* send their success/failure
# notifications in response to the playbook_on_stats
# event handling code in main.models.events
pass
elif hasattr(uj, 'send_notification_templates'):
handle_success_and_failure_notifications.apply_async([uj.id])
except Exception:
logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
return
event = cls.create_from_data(**body)
self.buff.setdefault(cls, []).append(event)
retries = 0
while retries <= self.MAX_RETRIES:
try:
self.flush(force=flush)
break
except (OperationalError, InterfaceError, InternalError):
if retries >= self.MAX_RETRIES:
logger.exception('Worker could not re-establish database connectivity, giving up on one or more events.')
return
delay = 60 * retries
logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(
i=retries + 1,
delay=delay
))
django_connection.close()
time.sleep(delay)
retries += 1
except DatabaseError:
logger.exception('Database Error Saving Job Event')
break
except Exception as exc:
tb = traceback.format_exc()
logger.error('Callback Task Processor Raised Exception: %r', exc)
logger.error('Detail: {}'.format(tb))
| 40.840376 | 136 | 0.550178 | [
"Apache-2.0"
] | EzzioMoreira/awx | awx/main/dispatch/worker/callback.py | 8,699 | Python |
from flask import render_template, flash, redirect, url_for, request
from flask.views import MethodView
from app.middleware import auth
from app.models.user import User
from app.validators.register_form import RegisterForm
from app.services import avatar_service
class RegisterController(MethodView):
@auth.optional
def get(self):
"""
Show register form
Returns:
Register template with form
"""
return render_template('auth/register.html', form=RegisterForm())
@auth.optional
def post(self):
"""
Handle the POST request and sign up the user if form validation passes
Returns:
A redirect or a template with the validation errors
"""
form = RegisterForm()
if form.validate_on_submit():
form.validate_username(form.username)
avatar = 'no-image.png'
if 'avatar' in request.files and request.files['avatar']:
avatar = avatar_service.save(form.avatar.data)
User.create(form.username.data, form.password.data, avatar)
flash('Your account has been created. You may now login.', 'info')
return redirect(url_for('login'))
return render_template('auth/register.html', form=form)
| 25.934783 | 74 | 0.709975 | [
"MIT"
] | TheSynt4x/flask-blog | app/controllers/auth/register.py | 1,193 | Python |