Unnamed: 0 (int64, 0-109) | code (stringlengths 1.08k-119k) | length (int64, 1.08k-119k) | entities (stringlengths 118-32.3k)
---|---|---|---|
0 | """
[2014-11-26] Challenge #190 [Intermediate] Words inside of words
https://www.reddit.PI:KEY
#Description
This week's challenge is a short yet interesting one that should hopefully help you exercise elegant solutions to a
problem rather than brute-forcing it.
#Challenge
Given the wordlist [enable1.txt](http://www.joereynoldsaudio.com/enable1.txt), you must find the word in that file
that contains the greatest number of words within it.
For example, the word 'grayson' has the following words in it:
Grayson
Gray
Grays
Ray
Rays
Son
On
Here's another example; the word 'reports' has the following:
reports
report
port
ports
rep
You're tasked with finding the word in that file that contains the most words.
NOTE: If you have a different wordlist you would like to use, you're free to do so.
#Restrictions
* To keep output slightly shorter, a word will only be considered a word if it is 2 or more letters in length
* The word you are using may not be permuted to get a different set of words (You can't change 'report' to 'repotr' so
that you can add more words to your list)
#Finally
Have a good challenge idea?
Consider submitting it to /r/dailyprogrammer_ideas
"""
def main():
pass
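# A minimal sketch of one possible approach (an illustration, not part of the
# original challenge post): keep every word of length >= 2 in a set, then
# count, for each word, the distinct substrings that are themselves words.
# `count_subwords` and `best_word` are hypothetical names.
def count_subwords(word, words):
    """Count distinct substrings of `word` (length >= 2) that are words."""
    found = set()
    for i in range(len(word)):
        for j in range(i + 2, len(word) + 1):
            if word[i:j] in words:
                found.add(word[i:j])
    return len(found)

def best_word(path='enable1.txt'):
    """Return the word in the file that contains the most words."""
    with open(path) as f:
        words = {line.strip().lower() for line in f if len(line.strip()) >= 2}
    return max(words, key=lambda w: count_subwords(w, words))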
if __name__ == "__main__":
main()
| 1,256 | [['DATE_TIME', '2014-11-26'], ['DATE_TIME', 'This weeks'], ['PERSON', 'enable1.txt](http://www.joereynoldsaudio.com'], ['PERSON', 'grayson'], ['PERSON', 'repotr'], ['URL', 'https://www.red'], ['URL', 'http://www.joereynoldsaudio.com/enable1.txt']] |
from mpl_toolkits.mplot3d import axes3d
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from socket import *
import time
# Declare all global variables
HOST = '127.0.0.1'
PORT = 21566
BUFSIZ = 512
ADDR = (HOST, PORT)
bad_packet = 0
good_packet = 0
# fig, ax = plt.subplots()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Socket (must be connected before the receive loop below can use it)
tcpCliSock = socket(AF_INET, SOCK_STREAM)
tcpCliSock.connect(ADDR)
# Interactive (non-blocking) plotting mode
plt.ion()
tstart = time.time()
# real-time plotting loop
X, Y, Z = [], [], []
while True:
    try:
        # read data from the network
        data = tcpCliSock.recv(BUFSIZ)
        if data:
            print(len(X), data)
            fields = data.decode().split(',')
            if len(fields) == 9:
                # print('Data received', fields)
                # tcpCliSock.send(b'Ok')
                good_packet += 1
                X.append(float(fields[0]))
                Y.append(float(fields[1]))
                Z.append(float(fields[2]))
                frame = ax.scatter(X, Y, Z, c='b', marker='o')
                # Remove old line collection before drawing
                # if oldcol is not None:
                #     ax.collections.remove(oldcol)
                plt.pause(0.001 / len(X))
            else:
                bad_packet += 1
    except KeyboardInterrupt:
        tcpCliSock.close()
        print('FPS: %f' % (len(X) / (time.time() - tstart)))
        break
| 1,493 | [['LOCATION', 'Объявляем'], ['LOCATION', 'PORT'], ['LOCATION', 'tcpCliSock'], ['PERSON', 'данные из'], ['PERSON', 'данные из'], ['IP_ADDRESS', '127.0.0.1'], ['URL', 'toolkits.mp'], ['URL', 'matplotlib.py'], ['URL', 'matplotlib.an'], ['URL', 'plt.su'], ['URL', 'plt.fi'], ['URL', 'fig.ad'], ['URL', 'tcpCliSock.co'], ['URL', 'plt.io'], ['URL', 'tcpCliSock.re'], ['URL', 'data.de'], ['URL', 'tcpCliSock.se'], ['URL', 'tcpCliSock.re'], ['URL', 'ax.sc'], ['URL', 'ax.collections.re'], ['URL', 'plt.pa'], ['URL', 'tcpCliSock.cl']] |
2 | #!/usr/bin/env python
"""Encoding and decoding of a question once for each codec.
Example execution:
$ ./question.py
ASN.1 specification:
-- A simple protocol taken from Wikipedia.
Foo DEFINITIONS ::= BEGIN
Question ::= SEQUENCE {
id INTEGER,
question IA5String
}
Answer ::= SEQUENCE {
id INTEGER,
answer BOOLEAN
}
END
Question to encode: {'id': 1, 'question': 'Is 1+1=3?'}
BER:
Encoded: 300e0201011609497320312b313d333f (16 bytes)
Decoded: {'id': 1, 'question': 'Is 1+1=3?'}
DER:
Encoded: 300e0201011609497320312b313d333f (16 bytes)
Decoded: {'id': 1, 'question': 'Is 1+1=3?'}
JER:
Encoded: PI:KEY (31 bytes)
Decoded: {'id': 1, 'question': 'Is 1+1=3?'}
OER:
Encoded: 010109497320312b313d333f (12 bytes)
Decoded: {'id': 1, 'question': 'Is 1+1=3?'}
PER:
Encoded: 010109497320312b313d333f (12 bytes)
Decoded: {'id': 1, 'question': 'Is 1+1=3?'}
UPER:
Encoded: 01010993cd03156c5eb37e (11 bytes)
Decoded: {'id': 1, 'question': 'Is 1+1=3?'}
XER:
Encoded: PI:KEY (61 bytes)
Decoded: {'id': 1, 'question': 'Is 1+1=3?'}
Protocol Buffers:
Encoded: 08011209497320312b313d333f (13 bytes)
Decoded:
id: 1
question: "Is 1+1=3?"
$
"""
from __future__ import print_function
import os
from binascii import hexlify
import asn1tools
from foo_pb2 import Question
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
FOO_ASN_PATH = os.path.join(SCRIPT_DIR,
                            '..',
                            '..',
                            '..',
                            'tests',
                            'files',
                            'foo.asn')
# Print the specification.
print('ASN.1 specification:')
print()
with open(FOO_ASN_PATH) as fin:
    print(fin.read())
# The question to encode.
question = {'id': 1, 'question': 'Is 1+1=3?'}
print("Question to encode:", question)
# Encode and decode the question once for each codec.
for codec in ['ber', 'der', 'jer', 'oer', 'per', 'uper', 'xer']:
    foo = asn1tools.compile_files(FOO_ASN_PATH, codec)
    encoded = foo.encode('Question', question)
    decoded = foo.decode('Question', encoded)
    print()
    print('{}:'.format(codec.upper()))
    print('Encoded: {} ({} bytes)'.format(hexlify(encoded).decode('ascii'),
                                          len(encoded)))
    print('Decoded:', decoded)
# Also encode and decode using protocol buffers.
question = Question()
question.id = 1
question.question = 'Is 1+1=3?'
encoded = question.SerializeToString()
# Round-trip through the wire format rather than reusing the same object.
decoded = Question()
decoded.ParseFromString(encoded)
print()
print('Protocol Buffers:')
print('Encoded: {} ({} bytes)'.format(hexlify(encoded).decode('ascii'),
                                      len(encoded)))
print('Decoded:')
print(decoded)
| 2,721 | [['PERSON', 'ASN.1'], ['DATE_TIME', '010109497320312b313d333f'], ['DATE_TIME', '010109497320312b313d333f'], ['PERSON', 'oer'], ['IP_ADDRESS', ' ::'], ['URL', 'question.py'], ['URL', 'os.pa'], ['URL', 'os.path.re'], ['URL', 'os.path.jo'], ['URL', 'foo.as'], ['URL', 'fin.re'], ['URL', 'asn1tools.com'], ['URL', 'foo.de'], ['URL', 'question.id'], ['URL', 'question.Se']] |
3 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Johannes Baiter dummy@email.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Various utility functions and classes.
"""
from __future__ import division, unicode_literals, print_function
import abc
import glob
import json
import logging
import os
import pkg_resources
import platform
import re
import subprocess
from unicodedata import normalize
import blinker
import colorama
import psutil
import roman
from colorama import Fore, Back, Style
from spreads.vendor.pathlib import Path
class SpreadsException(Exception):
""" General exception """
pass
class DeviceException(SpreadsException):
""" Raised when a device-related error occured. """
pass
class MissingDependencyException(SpreadsException):
""" Raised when a dependency for a plugin is missing. """
pass
def get_version():
""" Get installed version via pkg_resources. """
return pkg_resources.require('spreads')[0].version
def find_in_path(name):
""" Find executable in $PATH.
:param name: name of the executable
:type name: unicode
:returns: Path to executable or None if not found
:rtype: unicode or None
"""
candidates = None
if is_os('windows'):
import _winreg
if name.startswith('scantailor'):
try:
cmd = _winreg.QueryValue(
_winreg.HKEY_CLASSES_ROOT,
'Scan Tailor Project\\shell\\open\\command')
bin_path = cmd.split('" "')[0][1:]
if name.endswith('-cli'):
bin_path = bin_path[:-4] + "-cli.exe"
return bin_path if os.path.exists(bin_path) else None
except OSError:
return None
else:
path_dirs = os.environ.get('PATH').split(';')
path_dirs.append(os.getcwd())
path_exts = os.environ.get('PATHEXT').split(';')
candidates = (os.path.join(p, name + e)
for p in path_dirs
for e in path_exts)
else:
candidates = (os.path.join(p, name)
for p in os.environ.get('PATH').split(':'))
return next((c for c in candidates if os.path.exists(c)), None)
def is_os(osname):
""" Check if the current operating system matches the expected.
:param osname: Operating system name as returned by
:py:func:`platform.system`
:returns: Whether the OS matches or not
:rtype: bool
"""
return platform.system().lower() == osname
def check_futures_exceptions(futures):
"""" Go through passed :py:class:`concurrent.futures._base.Future` objects
and re-raise the first Exception raised by any one of them.
:param futures: Iterable that contains the futures to be checked
:type futures: iterable with :py:class:`concurrent.futures._base.Future`
instances
"""
if any(x.exception() for x in futures):
raise next(x for x in futures if x.exception()).exception()
def get_free_space(path):
""" Return free space on file-system underlying the passed path.
:param path: Path on file-system the free space of which is desired.
    :type path: unicode
:return: Free space in bytes.
:rtype: int
"""
return psutil.disk_usage(unicode(path)).free
def get_subprocess(cmdline, **kwargs):
""" Get a :py:class:`subprocess.Popen` instance.
    On Windows systems, the process will be run in the background and won't
open a cmd-window or appear in the taskbar.
The function signature matches that of the :py:class:`subprocess.Popen`
initialization method.
"""
if subprocess.mswindows and 'startupinfo' not in kwargs:
su = subprocess.STARTUPINFO()
su.dwFlags |= subprocess.STARTF_USESHOWWINDOW
su.wShowWindow = subprocess.SW_HIDE
kwargs['startupinfo'] = su
return subprocess.Popen(cmdline, **kwargs)
def wildcardify(pathnames):
""" Try to generate a single path with wildcards that matches all
`pathnames`.
:param pathnames: List of pathnames to find a wildcard string for
    :type pathnames: List of str/unicode
:return: The wildcard string or None if none was found
:rtype: unicode or None
"""
wildcard_str = ""
for idx, char in enumerate(pathnames[0]):
if all(p[idx] == char for p in pathnames[1:]):
wildcard_str += char
elif not wildcard_str or wildcard_str[-1] != "*":
wildcard_str += "*"
matched_paths = glob.glob(wildcard_str)
if not sorted(pathnames) == sorted(matched_paths):
return None
return wildcard_str
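# Illustrative example (note that glob.glob consults the real filesystem, so
# the pattern is only returned when exactly these files exist on disk):
#   >>> wildcardify(['img_001.jpg', 'img_002.jpg'])
#   'img_00*.jpg'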
def diff_dicts(old, new):
""" Get the difference between two dictionaries.
:param old: Dictionary to base comparison on
:type old: dict
:param new: Dictionary to compare with
:type new: dict
:return: A (possibly nested) dictionary containing all items from `new`
that differ from the ones in `old`
:rtype: dict
"""
out = {}
    for key, value in old.iteritems():
        if isinstance(value, dict):
            # Recurse into nested dictionaries so only the changed leaves
            # are reported, as the docstring promises.
            diff = diff_dicts(value, new[key])
            if diff:
                out[key] = diff
        elif new[key] != value:
            out[key] = new[key]
    return out
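# Illustrative example:
#   >>> diff_dicts({'a': {'x': 1, 'y': 2}, 'b': 2},
#   ...            {'a': {'x': 1, 'y': 3}, 'b': 2})
#   {'a': {'y': 3}}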
def slugify(text, delimiter=u'-'):
"""Generates an ASCII-only slug.
    Code adapted from a Flask snippet by Armin Ronacher:
http://flask.pocoo.org/snippets/5/
:param text: Text to create slug for
:type text: unicode
:param delimiter: Delimiter to use in slug
:type delimiter: unicode
:return: The generated slug
:rtype: unicode
"""
punctuation_re = r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+'
result = []
for word in re.split(punctuation_re, text.lower()):
word = normalize('NFKD', word).encode('ascii', 'ignore')
if word:
result.append(word)
return unicode(delimiter.join(result))
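# Illustrative example:
#   >>> slugify(u'Hello, World!')
#   u'hello-world'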
class _instancemethodwrapper(object): # noqa
def __init__(self, callable):
self.callable = callable
self.__dontcall__ = False
def __getattr__(self, key):
return getattr(self.callable, key)
def __call__(self, *args, **kwargs):
if self.__dontcall__:
raise TypeError('Attempted to call abstract method.')
return self.callable(*args, **kwargs)
class _classmethod(classmethod): # noqa
def __init__(self, func):
super(_classmethod, self).__init__(func)
isabstractmethod = getattr(func, '__isabstractmethod__', False)
if isabstractmethod:
self.__isabstractmethod__ = isabstractmethod
def __get__(self, instance, owner):
result = _instancemethodwrapper(super(_classmethod, self)
.__get__(instance, owner))
isabstractmethod = getattr(self, '__isabstractmethod__', False)
if isabstractmethod:
result.__isabstractmethod__ = isabstractmethod
abstractmethods = getattr(owner, '__abstractmethods__', None)
if abstractmethods and result.__name__ in abstractmethods:
result.__dontcall__ = True
return result
class abstractclassmethod(_classmethod): # noqa
""" New decorator class that implements the @abstractclassmethod decorator
added in Python 3.3 for Python 2.7.
Kudos to http://stackoverflow.com/a/13640018/487903
"""
def __init__(self, func):
func = abc.abstractmethod(func)
super(abstractclassmethod, self).__init__(func)
class ColourStreamHandler(logging.StreamHandler):
""" A colorized output StreamHandler
Kudos to Leigh MacDonald: http://goo.gl/Lpr6C5
"""
# Some basic colour scheme defaults
colours = {
'DEBUG': Fore.CYAN,
'INFO': Fore.GREEN,
'WARN': Fore.YELLOW,
'WARNING': Fore.YELLOW,
'ERROR': Fore.RED,
'CRIT': Back.RED + Fore.WHITE,
'CRITICAL': Back.RED + Fore.WHITE
}
@property
def is_tty(self):
""" Check if we are using a "real" TTY. If we are not using a TTY it
means that the colour output should be disabled.
:return: Using a TTY status
:rtype: bool
"""
try:
return getattr(self.stream, 'isatty', None)()
except:
return False
def emit(self, record):
try:
message = self.format(record)
if not self.is_tty:
self.stream.write(message)
else:
self.stream.write(self.colours[record.levelname] +
message + Style.RESET_ALL)
self.stream.write(getattr(self, 'terminator', '\n'))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class EventHandler(logging.Handler):
""" Subclass of :py:class:`logging.Handler` that emits a
:py:class:`blinker.base.Signal` whenever a new record is emitted.
"""
signals = blinker.Namespace()
on_log_emit = signals.signal('logrecord', doc="""\
Sent when a log record was emitted.
:keyword :class:`logging.LogRecord` record: the LogRecord
""")
def emit(self, record):
self.on_log_emit.send(record=record)
def get_data_dir(create=False):
""" Return (and optionally create) the user's default data directory.
:param create: Create the data directory if it doesn't exist
:type create: bool
:return: Path to the default data directory
:rtype: unicode
"""
unix_dir_var = 'XDG_DATA_HOME'
unix_dir_fallback = '~/.config'
windows_dir_var = 'APPDATA'
windows_dir_fallback = '~\\AppData\\Roaming'
mac_dir = '~/Library/Application Support'
base_dir = None
if is_os('darwin'):
        if Path(unix_dir_fallback).exists():
base_dir = unix_dir_fallback
else:
base_dir = mac_dir
elif is_os('windows'):
if windows_dir_var in os.environ:
base_dir = os.environ[windows_dir_var]
else:
base_dir = windows_dir_fallback
else:
if unix_dir_var in os.environ:
base_dir = os.environ[unix_dir_var]
else:
base_dir = unix_dir_fallback
app_path = Path(base_dir)/'spreads'
if create and not app_path.exists():
app_path.mkdir()
return unicode(app_path)
def colorize(text, color):
""" Return text with a new ANSI foreground color.
:param text: Text to be wrapped
:param color: ANSI color to wrap text in
:type color: str (from `colorama.ansi <http://git.io/9qnt0Q>`)
:return: Colorized text
"""
return color + text + colorama.Fore.RESET
class RomanNumeral(object):
""" Number type that represents integers as Roman numerals and that
can be used in all arithmetic operations applicable to integers.
"""
@staticmethod
def is_roman(value):
""" Check if `value` is a valid Roman numeral.
:param value: Value to be checked
:type value: unicode
:returns: Whether the value is valid or not
:rtype: bool
"""
return bool(roman.romanNumeralPattern.match(value))
def __init__(self, value, case='upper'):
""" Create a new instance.
:param value: Value of the instance
:type value: int, unicode containing valid Roman numeral or
:py:class:`RomanNumeral`
"""
self._val = self._to_int(value)
self._case = case
if isinstance(value, basestring) and not self.is_roman(value):
self._case = 'lower'
elif isinstance(value, RomanNumeral):
self._case = value._case
def _to_int(self, value):
if isinstance(value, int):
return value
elif isinstance(value, basestring) and self.is_roman(value.upper()):
return roman.fromRoman(value.upper())
elif isinstance(value, RomanNumeral):
return value._val
else:
raise ValueError("Value must be a valid roman numeral, a string"
" representing one or an integer: '{0}'"
.format(value))
def __cmp__(self, other):
if self._val > self._to_int(other):
return 1
elif self._val == self._to_int(other):
return 0
elif self._val < self._to_int(other):
return -1
def __add__(self, other):
return RomanNumeral(self._val + self._to_int(other), self._case)
def __sub__(self, other):
return RomanNumeral(self._val - self._to_int(other), self._case)
def __int__(self):
return self._val
def __str__(self):
strval = roman.toRoman(self._val)
if self._case == 'lower':
return strval.lower()
else:
return strval
def __unicode__(self):
return unicode(str(self))
def __repr__(self):
return str(self)
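# Illustrative examples:
#   >>> int(RomanNumeral('XIV'))
#   14
#   >>> str(RomanNumeral('xiv') + 1)   # case is preserved
#   'xv'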
class CustomJSONEncoder(json.JSONEncoder):
""" Custom :py:class:`json.JSONEncoder`.
Uses an object's `to_dict` method if present for serialization.
Serializes :py:class:`pathlib.Path` instances to the string
representation of their relative path to a BagIt-compliant directory or
their absolute path if not applicable.
"""
def default(self, obj):
if hasattr(obj, 'to_dict'):
return obj.to_dict()
if isinstance(obj, Path):
# Serialize paths that belong to a workflow as paths relative to
# its base directory
base = next((p for p in obj.parents if (p/'bagit.txt').exists()),
None)
if base:
return unicode(obj.relative_to(base))
else:
return unicode(obj.absolute())
return json.JSONEncoder.default(self, obj)
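# Illustrative usage: pass the encoder class to the standard json API, e.g.
#   json.dumps({'path': Path('/tmp/scan')}, cls=CustomJSONEncoder)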
| 14,758 | [['EMAIL_ADDRESS', 'dummy@email.com'], ['DATE_TIME', '2014'], ['PERSON', 'Johannes Baiter'], ['PERSON', 'get_subprocess(cmdline'], ['PERSON', 'Armin Ronacher'], ['PERSON', 'punctuation_re'], ['PERSON', 'Kudos'], ['PERSON', 'Leigh MacDonald'], ['URL', 'blinker.Na'], ['URL', 'signals.si'], ['NRP', 'Serialize'], ['LOCATION', 'next((p'], ['LOCATION', 'unicode(obj.relative_to(base'], ['URL', 'http://www.gnu.org/licenses/'], ['URL', 'http://flask.pocoo.org/snippets/5/'], ['URL', 'http://stackoverflow.com/a/13640018/487903'], ['URL', 'http://goo.gl/Lpr6C5'], ['URL', 'http://git.io/9qnt0Q'], ['URL', 'email.com'], ['URL', 'spreads.vendor.pa'], ['URL', 'resources.re'], ['URL', 'name.st'], ['URL', 'winreg.HK'], ['URL', 'os.pa'], ['URL', 'os.environ.ge'], ['URL', 'os.ge'], ['URL', 'os.environ.ge'], ['URL', 'os.path.jo'], ['URL', 'os.path.jo'], ['URL', 'os.environ.ge'], ['URL', 'os.pa'], ['URL', 'platform.sy'], ['URL', 'platform.sy'], ['URL', 'subprocess.ms'], ['URL', 'subprocess.ST'], ['URL', 'subprocess.ST'], ['URL', 'su.wS'], ['URL', 'glob.gl'], ['URL', 'old.it'], ['URL', 'delimiter.jo'], ['URL', 'self.ca'], ['URL', 'self.ca'], ['URL', 'self.ca'], ['URL', 'logging.St'], ['URL', 'Fore.CY'], ['URL', 'Fore.GR'], ['URL', 'Fore.YE'], ['URL', 'Fore.YE'], ['URL', 'Fore.RED'], ['URL', 'Back.RED'], ['URL', 'Back.RED'], ['URL', 'self.st'], ['URL', 'self.fo'], ['URL', 'self.is'], ['URL', 'self.st'], ['URL', 'self.st'], ['URL', 'self.co'], ['URL', 'Style.RE'], ['URL', 'self.st'], ['URL', 'blinker.base.Si'], ['URL', 'emit.se'], ['URL', 'path.mk'], ['URL', 'colorama.an'], ['URL', 'colorama.Fore.RE'], ['URL', 'roman.romanNumeralPattern.ma'], ['URL', 'self.is'], ['URL', 'self.is'], ['URL', 'roman.fr'], ['URL', 'roman.to'], ['URL', 'pathlib.Pa'], ['URL', 'obj.to'], ['URL', 'obj.pa'], ['URL', 'obj.re'], ['URL', 'json.JSONEncoder.de']] |
4 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stores various configuration options and constants for Oppia."""
import copy
import datetime
import os
# Whether to unconditionally log info messages.
DEBUG = False
# The platform for the storage backend. This is used in the model-switching
# code in core/platform.
PLATFORM = 'gae'
# This should be string comparison, since all environment variables
# are converted to string
IS_MINIFIED = os.environ.get('MINIFICATION') == 'True'
# Whether we should serve the development or production experience.
# DEV_MODE should only be changed to False in the production environment.
# To use minified resources in the development environment,
# change the MINIFICATION env variable in app.yaml to True.
# When DEV_MODE is True, this indicates that we are not running in
# the production App Engine environment, which affects things like
# login/logout URLs, as well as third-party libraries
# that App Engine normally provides.
if PLATFORM == 'gae':
DEV_MODE = (
not os.environ.get('SERVER_SOFTWARE')
or os.environ['SERVER_SOFTWARE'].startswith('Development'))
else:
raise Exception('Invalid platform: expected one of [\'gae\']')
TESTS_DATA_DIR = os.path.join('core', 'tests', 'data')
SAMPLE_EXPLORATIONS_DIR = os.path.join('data', 'explorations')
SAMPLE_COLLECTIONS_DIR = os.path.join('data', 'collections')
INTERACTIONS_DIR = os.path.join('extensions', 'interactions')
GADGETS_DIR = os.path.join('extensions', 'gadgets')
RTE_EXTENSIONS_DIR = os.path.join('extensions', 'rich_text_components')
OBJECT_TEMPLATES_DIR = os.path.join('extensions', 'objects', 'templates')
# Choose production template if minification flag is used or
# if in production mode
TEMPLATES_DIR_PREFIX = 'prod' if (IS_MINIFIED or not DEV_MODE) else 'dev'
FRONTEND_TEMPLATES_DIR = os.path.join(
'core', 'templates', TEMPLATES_DIR_PREFIX, 'head')
DEPENDENCIES_TEMPLATES_DIR = os.path.join('extensions', 'dependencies')
VALUE_GENERATORS_DIR = os.path.join('extensions', 'value_generators')
OBJECT_DEFAULT_VALUES_FILE_PATH = os.path.join(
'extensions', 'interactions', 'object_defaults.json')
RULES_DESCRIPTIONS_FILE_PATH = os.path.join(
os.getcwd(), 'extensions', 'interactions', 'rules.json')
# The maximum number of results to retrieve in a datastore query.
DEFAULT_QUERY_LIMIT = 1000
# The maximum number of results to retrieve in a datastore query
# for top rated published explorations in /library page.
NUMBER_OF_TOP_RATED_EXPLORATIONS_FOR_LIBRARY_PAGE = 8
# The maximum number of results to retrieve in a datastore query
# for recently published explorations in /library page.
RECENTLY_PUBLISHED_QUERY_LIMIT_FOR_LIBRARY_PAGE = 8
# The maximum number of results to retrieve in a datastore query
# for top rated published explorations in /library/top_rated page.
NUMBER_OF_TOP_RATED_EXPLORATIONS_FULL_PAGE = 20
# The maximum number of results to retrieve in a datastore query
# for recently published explorations in /library/recently_published page.
RECENTLY_PUBLISHED_QUERY_LIMIT_FULL_PAGE = 20
# The current version of the dashboard stats blob schema. If any backward-
# incompatible changes are made to the stats blob schema in the data store,
# this version number must be changed.
CURRENT_DASHBOARD_STATS_SCHEMA_VERSION = 1
# The current version of the exploration states blob schema. If any backward-
# incompatible changes are made to the states blob schema in the data store,
# this version number must be changed and the exploration migration job
# executed.
CURRENT_EXPLORATION_STATES_SCHEMA_VERSION = 7
# The current version of the all collection blob schemas (such as the nodes
# structure within the Collection domain object). If any backward-incompatible
# changes are made to any of the blob schemas in the data store, this version
# number must be changed.
CURRENT_COLLECTION_SCHEMA_VERSION = 2
# The default number of exploration tiles to load at a time in the search
# results page.
SEARCH_RESULTS_PAGE_SIZE = 20
# The default number of commits to show on a page in the exploration history
# tab.
COMMIT_LIST_PAGE_SIZE = 50
# The default number of items to show on a page in the exploration feedback
# tab.
FEEDBACK_TAB_PAGE_SIZE = 20
# Default title for a newly-minted exploration.
DEFAULT_EXPLORATION_TITLE = ''
# Default category for a newly-minted exploration.
DEFAULT_EXPLORATION_CATEGORY = ''
# Default objective for a newly-minted exploration.
DEFAULT_EXPLORATION_OBJECTIVE = ''
# Default name for the initial state of an exploration.
DEFAULT_INIT_STATE_NAME = 'Introduction'
# The default content text for the initial state of an exploration.
DEFAULT_INIT_STATE_CONTENT_STR = ''
# Default title for a newly-minted collection.
DEFAULT_COLLECTION_TITLE = ''
# Default category for a newly-minted collection.
DEFAULT_COLLECTION_CATEGORY = ''
# Default objective for a newly-minted collection.
DEFAULT_COLLECTION_OBJECTIVE = ''
# A dict containing the accepted image formats (as determined by the imghdr
# module) and the corresponding allowed extensions in the filenames of uploaded
# files.
ACCEPTED_IMAGE_FORMATS_AND_EXTENSIONS = {
'jpeg': ['jpg', 'jpeg'],
'png': ['png'],
'gif': ['gif']
}
# A string containing the disallowed characters in state or exploration names.
# The underscore is needed because spaces in names must be converted to
# underscores when displayed as part of a URL or key. The other conventions
# here are derived from the Wikipedia guidelines for naming articles.
INVALID_NAME_CHARS = u':#/|_%<>[]{}\ufffd\\' + chr(127)
for ind in range(32):
INVALID_NAME_CHARS += chr(ind)
# Prefix for data sent from the server to the client via JSON.
XSSI_PREFIX = ')]}\'\n'
# A regular expression for alphanumeric characters.
ALPHANUMERIC_REGEX = r'^[A-Za-z0-9]+$'
# A regular expression for alphanumeric words separated by single spaces.
# Ex.: 'valid name', 'another valid name', 'invalid name'.
ALPHANUMERIC_SPACE_REGEX = r'^[0-9A-Za-z]+(?:[ ]?[0-9A-Za-z]+)*$'
# A regular expression for tags.
TAG_REGEX = r'^[a-z ]+$'
# Invalid names for parameters used in expressions.
AUTOMATICALLY_SET_PARAMETER_NAMES = ['answer', 'choices']
INVALID_PARAMETER_NAMES = AUTOMATICALLY_SET_PARAMETER_NAMES + [
'abs', 'all', 'and', 'any', 'else', 'floor', 'if', 'log', 'or',
'pow', 'round', 'then']
# These are here rather than in rating_services.py to avoid import
# circularities with exp_services.
# TODO (Jacob) Refactor exp_services to remove this problem.
_EMPTY_RATINGS = {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0}
def get_empty_ratings():
return copy.deepcopy(_EMPTY_RATINGS)
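# Note (illustrative): get_empty_ratings() returns a fresh copy on every
# call, so callers may mutate the result without corrupting _EMPTY_RATINGS:
#   ratings = get_empty_ratings()
#   ratings['5'] += 1  # leaves _EMPTY_RATINGS untouched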
# Empty scaled average rating as a float.
EMPTY_SCALED_AVERAGE_RATING = 0.0
# To use GAE email service.
EMAIL_SERVICE_PROVIDER_GAE = 'gae_email_service'
# To use mailgun email service.
EMAIL_SERVICE_PROVIDER_MAILGUN = 'mailgun_email_service'
# Use GAE email service by default.
EMAIL_SERVICE_PROVIDER = EMAIL_SERVICE_PROVIDER_GAE
# If the Mailgun email API is used, the "None" below should be replaced
# with the Mailgun API key.
MAILGUN_API_KEY = None
# If the Mailgun email API is used, the "None" below should be replaced
# with the Mailgun domain name (ending with mailgun.org).
MAILGUN_DOMAIN_NAME = None
# Committer id for system actions.
SYSTEM_COMMITTER_ID = 'admin'
SYSTEM_EMAIL_ADDRESS = 'dummy@email.com'
ADMIN_EMAIL_ADDRESS = 'dummy@email.com'
NOREPLY_EMAIL_ADDRESS = 'dummy@email.com'
# Ensure that SYSTEM_EMAIL_ADDRESS and ADMIN_EMAIL_ADDRESS are both valid and
# correspond to owners of the app before setting this to True. If
# SYSTEM_EMAIL_ADDRESS is not that of an app owner, email messages from this
# address cannot be sent. If True then emails can be sent to any user.
CAN_SEND_EMAILS = False
# If you want to turn on this facility please check the email templates in the
# send_role_notification_email() function in email_manager.py and modify them
# accordingly.
CAN_SEND_EDITOR_ROLE_EMAILS = False
# If enabled then emails will be sent to creators for feedback messages.
CAN_SEND_FEEDBACK_MESSAGE_EMAILS = False
# Time to wait before sending feedback message emails (currently set to 1
# hour).
DEFAULT_FEEDBACK_MESSAGE_EMAIL_COUNTDOWN_SECS = 3600
# Whether to send an email when new feedback message is received for
# an exploration.
DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE = True
# Whether to send email updates to a user who has not specified a preference.
DEFAULT_EMAIL_UPDATES_PREFERENCE = False
# Whether to send an invitation email when the user is granted
# new role permissions in an exploration.
DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE = True
# Whether to require an email to be sent, following a moderator action.
REQUIRE_EMAIL_ON_MODERATOR_ACTION = False
# Whether to allow custom event reporting to Google Analytics.
CAN_SEND_ANALYTICS_EVENTS = False
# Timespan in minutes before allowing duplicate emails.
DUPLICATE_EMAIL_INTERVAL_MINS = 2
# Number of digits after decimal to which the average ratings value in the
# dashboard is rounded off to.
AVERAGE_RATINGS_DASHBOARD_PRECISION = 2
EMAIL_INTENT_SIGNUP = 'signup'
EMAIL_INTENT_DAILY_BATCH = 'daily_batch'
EMAIL_INTENT_EDITOR_ROLE_NOTIFICATION = 'editor_role_notification'
EMAIL_INTENT_FEEDBACK_MESSAGE_NOTIFICATION = 'feedback_message_notification'
EMAIL_INTENT_SUGGESTION_NOTIFICATION = 'suggestion_notification'
EMAIL_INTENT_REPORT_BAD_CONTENT = 'report_bad_content'
EMAIL_INTENT_MARKETING = 'marketing'
EMAIL_INTENT_PUBLICIZE_EXPLORATION = 'publicize_exploration'
EMAIL_INTENT_UNPUBLISH_EXPLORATION = 'unpublish_exploration'
EMAIL_INTENT_DELETE_EXPLORATION = 'delete_exploration'
MODERATOR_ACTION_PUBLICIZE_EXPLORATION = 'publicize_exploration'
MODERATOR_ACTION_UNPUBLISH_EXPLORATION = 'unpublish_exploration'
DEFAULT_SALUTATION_HTML_FN = (
lambda recipient_username: 'Hi %s,' % recipient_username)
DEFAULT_SIGNOFF_HTML_FN = (
lambda sender_username: (
'Thanks!<br>%s (Oppia moderator)' % sender_username))
VALID_MODERATOR_ACTIONS = {
MODERATOR_ACTION_PUBLICIZE_EXPLORATION: {
'email_config': 'publicize_exploration_email_html_body',
'email_subject_fn': (
lambda exp_title: (
'Your Oppia exploration "%s" has been featured!' % exp_title)),
'email_intent': EMAIL_INTENT_PUBLICIZE_EXPLORATION,
'email_salutation_html_fn': DEFAULT_SALUTATION_HTML_FN,
'email_signoff_html_fn': DEFAULT_SIGNOFF_HTML_FN,
},
MODERATOR_ACTION_UNPUBLISH_EXPLORATION: {
'email_config': 'unpublish_exploration_email_html_body',
'email_subject_fn': (
lambda exp_title: (
'Your Oppia exploration "%s" has been unpublished' % exp_title)
),
'email_intent': 'unpublish_exploration',
'email_salutation_html_fn': DEFAULT_SALUTATION_HTML_FN,
'email_signoff_html_fn': DEFAULT_SIGNOFF_HTML_FN,
},
}
# Panel properties and other constants for the default skin.
GADGET_PANEL_AXIS_HORIZONTAL = 'horizontal'
PANELS_PROPERTIES = {
'bottom': {
'width': 350,
'height': 100,
'stackable_axis': GADGET_PANEL_AXIS_HORIZONTAL,
'pixels_between_gadgets': 80,
'max_gadgets': 1
}
}
# When the site terms were last updated, in UTC.
REGISTRATION_PAGE_LAST_UPDATED_UTC = datetime.datetime(2015, 10, 14, 2, 40, 0)
# Format of string for dashboard statistics logs.
# NOTE TO DEVELOPERS: This format should not be changed, since it is used in
# the existing storage models for UserStatsModel.
DASHBOARD_STATS_DATETIME_STRING_FORMAT = '%Y-%m-%d'
# The maximum size of an uploaded file, in bytes.
MAX_FILE_SIZE_BYTES = 1048576
# The default language code for an exploration.
DEFAULT_LANGUAGE_CODE = 'en'
# The id of the default skin.
# TODO(sll): Deprecate this; it is no longer used.
DEFAULT_SKIN_ID = 'conversation_v1'
# The prefix for an 'accepted suggestion' commit message.
COMMIT_MESSAGE_ACCEPTED_SUGGESTION_PREFIX = 'Accepted suggestion by'
# User id and username for exploration migration bot. Commits made by this bot
# are not reflected in the exploration summary models, but are recorded in the
# exploration commit log.
MIGRATION_BOT_USER_ID = 'OppiaMigrationBot'
MIGRATION_BOT_USERNAME = 'OppiaMigrationBot'
# Ids and locations of the permitted extensions.
ALLOWED_RTE_EXTENSIONS = {
'Collapsible': {
'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Collapsible')
},
'Image': {
'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Image')
},
'Link': {
'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Link')
},
'Math': {
'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Math')
},
'Tabs': {
'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Tabs')
},
'Video': {
'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Video')
},
}
# These categories and interactions are displayed in the order in which they
# appear in the interaction selector.
ALLOWED_INTERACTION_CATEGORIES = [{
'name': 'General',
'interaction_ids': [
'Continue',
'EndExploration',
'ImageClickInput',
'ItemSelectionInput',
'MultipleChoiceInput',
'TextInput'
],
}, {
'name': 'Math',
'interaction_ids': [
'GraphInput',
'LogicProof',
'NumericInput',
'SetInput',
'MathExpressionInput',
]
}, {
'name': 'Programming',
'interaction_ids': [
'CodeRepl',
'PencilCodeEditor',
],
}, {
'name': 'Music',
'interaction_ids': [
'MusicNotesInput'
],
}, {
'name': 'Geography',
'interaction_ids': [
'InteractiveMap'
],
}]
ALLOWED_GADGETS = {
'ScoreBar': {
'dir': os.path.join(GADGETS_DIR, 'ScoreBar')
},
}
# Gadgets subclasses must specify a valid panel option from this list.
ALLOWED_GADGET_PANELS = ['bottom']
# Demo explorations to load through the admin panel. The id assigned to each
# exploration is based on the key of the exploration in this dict, so ensure it
# doesn't change once it's in the list. Only integer-based indices should be
# used in this list, as it maintains backward compatibility with how demo
# explorations used to be assigned IDs. The value of each entry in this dict is
# either a YAML file or a directory (depending on whether it ends in .yaml).
# These explorations can be found under data/explorations.
DEMO_EXPLORATIONS = {
u'0': 'welcome.yaml',
u'1': 'multiples.yaml',
u'2': 'binary_search',
u'3': 'root_linear_coefficient_theorem.yaml',
u'4': 'three_balls',
# TODO(bhenning): Replace demo exploration '5' with a new exploration
# described in #1376.
u'6': 'boot_verbs.yaml',
u'7': 'hola.yaml',
u'8': 'adventure.yaml',
u'9': 'pitch_perfect.yaml',
u'10': 'test_interactions',
u'11': 'modeling_graphs',
u'12': 'protractor_test_1.yaml',
u'13': 'solar_system',
u'14': 'about_oppia.yaml',
u'15': 'classifier_demo_exploration.yaml',
u'16': 'all_interactions',
}
DEMO_COLLECTIONS = {
u'0': 'welcome_to_collections.yaml'
}
# IDs of explorations which should not be displayable in either the learner or
# editor views.
DISABLED_EXPLORATION_IDS = ['5']
# Google Group embed URL for the Forum page.
EMBEDDED_GOOGLE_GROUP_URL = (
'https://groups.google.com/forum/embed/?place=forum/oppia')
# Whether to allow YAML file uploads.
ALLOW_YAML_FILE_UPLOAD = False
# Prefix for all taskqueue-related URLs.
TASKQUEUE_URL_PREFIX = '/task'
TASK_URL_FEEDBACK_MESSAGE_EMAILS = (
'%s/email/batchfeedbackmessageemailhandler' % TASKQUEUE_URL_PREFIX)
TASK_URL_FEEDBACK_STATUS_EMAILS = (
'%s/email/feedbackthreadstatuschangeemailhandler' % TASKQUEUE_URL_PREFIX)
TASK_URL_FLAG_EXPLORATION_EMAILS = (
'%s/email/flagexplorationemailhandler' % TASKQUEUE_URL_PREFIX)
TASK_URL_INSTANT_FEEDBACK_EMAILS = (
'%s/email/instantfeedbackmessageemailhandler' % TASKQUEUE_URL_PREFIX)
TASK_URL_SUGGESTION_EMAILS = (
'%s/email/suggestionemailhandler' % TASKQUEUE_URL_PREFIX)
# TODO(sll): Add all other URLs here.
ADMIN_URL = '/admin'
COLLECTION_DATA_URL_PREFIX = '/collection_handler/data'
EDITABLE_COLLECTION_DATA_URL_PREFIX = '/collection_editor_handler/data'
COLLECTION_RIGHTS_PREFIX = '/collection_editor_handler/rights'
COLLECTION_EDITOR_URL_PREFIX = '/collection_editor/create'
COLLECTION_URL_PREFIX = '/collection'
DASHBOARD_URL = '/dashboard'
DASHBOARD_CREATE_MODE_URL = '%s?mode=create' % DASHBOARD_URL
DASHBOARD_DATA_URL = '/dashboardhandler/data'
DASHBOARD_EXPLORATION_STATS_PREFIX = '/dashboardhandler/explorationstats'
EDITOR_URL_PREFIX = '/create'
EXPLORATION_DATA_PREFIX = '/createhandler/data'
EXPLORATION_INIT_URL_PREFIX = '/explorehandler/init'
EXPLORATION_METADATA_SEARCH_URL = '/exploration/metadata_search'
EXPLORATION_RIGHTS_PREFIX = '/createhandler/rights'
EXPLORATION_SUMMARIES_DATA_URL = '/explorationsummarieshandler/data'
EXPLORATION_URL_PREFIX = '/explore'
EXPLORATION_URL_EMBED_PREFIX = '/embed/exploration'
FEEDBACK_STATS_URL_PREFIX = '/feedbackstatshandler'
FEEDBACK_THREAD_URL_PREFIX = '/threadhandler'
FEEDBACK_THREADLIST_URL_PREFIX = '/threadlisthandler'
FEEDBACK_THREAD_VIEW_EVENT_URL = '/feedbackhandler/thread_view_event'
FLAG_EXPLORATION_URL_PREFIX = '/flagexplorationhandler'
LIBRARY_GROUP_DATA_URL = '/librarygrouphandler'
LIBRARY_INDEX_URL = '/library'
LIBRARY_INDEX_DATA_URL = '/libraryindexhandler'
LIBRARY_RECENTLY_PUBLISHED_URL = '/library/recently_published'
LIBRARY_SEARCH_URL = '/search/find'
LIBRARY_SEARCH_DATA_URL = '/searchhandler/data'
LIBRARY_TOP_RATED_URL = '/library/top_rated'
NEW_COLLECTION_URL = '/collection_editor_handler/create_new'
NEW_EXPLORATION_URL = '/contributehandler/create_new'
RECENT_COMMITS_DATA_URL = '/recentcommitshandler/recent_commits'
RECENT_FEEDBACK_MESSAGES_DATA_URL = '/recent_feedback_messages'
ROBOTS_TXT_URL = '/robots.txt'
SITE_FEEDBACK_FORM_URL = ''
SITE_LANGUAGE_DATA_URL = '/save_site_language'
SIGNUP_DATA_URL = '/signuphandler/data'
SIGNUP_URL = '/signup'
SPLASH_URL = '/splash'
SUGGESTION_ACTION_URL_PREFIX = '/suggestionactionhandler'
SUGGESTION_LIST_URL_PREFIX = '/suggestionlisthandler'
SUGGESTION_URL_PREFIX = '/suggestionhandler'
UPLOAD_EXPLORATION_URL = '/contributehandler/upload'
USERNAME_CHECK_DATA_URL = '/usernamehandler/data'
NAV_MODE_ABOUT = 'about'
NAV_MODE_BLOG = 'blog'
NAV_MODE_COLLECTION = 'collection'
NAV_MODE_CONTACT = 'contact'
NAV_MODE_CREATE = 'create'
NAV_MODE_DASHBOARD = 'dashboard'
NAV_MODE_DONATE = 'donate'
NAV_MODE_EXPLORE = 'explore'
NAV_MODE_LIBRARY = 'library'
NAV_MODE_PROFILE = 'profile'
NAV_MODE_SIGNUP = 'signup'
NAV_MODE_SPLASH = 'splash'
NAV_MODE_TEACH = 'teach'
NAV_MODE_THANKS = 'thanks'
# Event types.
EVENT_TYPE_STATE_HIT = 'state_hit'
EVENT_TYPE_ANSWER_SUBMITTED = 'answer_submitted'
EVENT_TYPE_DEFAULT_ANSWER_RESOLVED = 'default_answer_resolved'
EVENT_TYPE_NEW_THREAD_CREATED = 'feedback_thread_created'
EVENT_TYPE_THREAD_STATUS_CHANGED = 'feedback_thread_status_changed'
EVENT_TYPE_RATE_EXPLORATION = 'rate_exploration'
# The values for these event types should be left as-is for backwards
# compatibility.
EVENT_TYPE_START_EXPLORATION = 'start'
EVENT_TYPE_MAYBE_LEAVE_EXPLORATION = 'leave'
EVENT_TYPE_COMPLETE_EXPLORATION = 'complete'
ACTIVITY_STATUS_PRIVATE = 'private'
ACTIVITY_STATUS_PUBLIC = 'public'
ACTIVITY_STATUS_PUBLICIZED = 'publicized'
# Play type constants
PLAY_TYPE_PLAYTEST = 'playtest'
PLAY_TYPE_NORMAL = 'normal'
# Predefined commit messages.
COMMIT_MESSAGE_EXPLORATION_DELETED = 'Exploration deleted.'
COMMIT_MESSAGE_COLLECTION_DELETED = 'Collection deleted.'
# Unfinished features.
SHOW_TRAINABLE_UNRESOLVED_ANSWERS = False
# Number of unresolved answers to be displayed in the dashboard for each
# exploration.
TOP_UNRESOLVED_ANSWERS_COUNT_DASHBOARD = 3
# Number of open feedback to be displayed in the dashboard for each exploration.
OPEN_FEEDBACK_COUNT_DASHBOARD = 3
# NOTE TO DEVELOPERS: This should be synchronized with base.js
ENABLE_STRING_CLASSIFIER = False
SHOW_COLLECTION_NAVIGATION_TAB_HISTORY = False
SHOW_COLLECTION_NAVIGATION_TAB_STATS = False
# Output formats of downloaded explorations.
OUTPUT_FORMAT_JSON = 'json'
OUTPUT_FORMAT_ZIP = 'zip'
# Types of updates shown in the 'recent updates' table in the dashboard page.
UPDATE_TYPE_EXPLORATION_COMMIT = 'exploration_commit'
UPDATE_TYPE_COLLECTION_COMMIT = 'collection_commit'
UPDATE_TYPE_FEEDBACK_MESSAGE = 'feedback_thread'
# Possible values for user query status.
# Valid status transitions are: processing --> completed --> archived
# Or processing --> failed.
USER_QUERY_STATUS_PROCESSING = 'processing'
USER_QUERY_STATUS_COMPLETED = 'completed'
USER_QUERY_STATUS_ARCHIVED = 'archived'
USER_QUERY_STATUS_FAILED = 'failed'
# The time difference between which to consider two login events "close". This
# is taken to be 12 hours.
PROXIMAL_TIMEDELTA_SECS = 12 * 60 * 60
DEFAULT_COLOR = '#a33f40'
DEFAULT_THUMBNAIL_ICON = 'Lightbulb'
# List of supported default categories. For now, each category has a specific
# color associated with it. Each category also has a thumbnail icon whose
# filename is "{{CategoryName}}.svg".
CATEGORIES_TO_COLORS = {
'Mathematics': '#cd672b',
'Algebra': '#cd672b',
'Arithmetic': '#d68453',
'Calculus': '#b86330',
'Logic': '#d68453',
'Combinatorics': '#cf5935',
'Graph Theory': '#cf5935',
'Probability': '#cf5935',
'Statistics': '#cd672b',
'Geometry': '#d46949',
'Trigonometry': '#d46949',
'Algorithms': '#d0982a',
'Computing': '#bb8b2f',
'Programming': '#d9aa53',
'Astronomy': '#879d6c',
'Biology': '#97a766',
'Chemistry': '#aab883',
'Engineering': '#8b9862',
'Environment': '#aba86d',
'Medicine': '#97a766',
'Physics': '#879d6c',
'Architecture': '#6e3466',
'Art': '#895a83',
'Music': '#6a3862',
'Philosophy': '#613968',
'Poetry': '#7f507f',
'English': '#193a69',
'Languages': '#1b4174',
'Latin': '#3d5a89',
'Reading': '#193a69',
'Spanish': '#405185',
'Gaulish': '#1b4174',
'Business': '#387163',
'Economics': '#5d8b7f',
'Geography': '#3c6d62',
'Government': '#538270',
'History': '#3d6b52',
'Law': '#538270',
'Education': '#942e20',
'Puzzles': '#a8554a',
'Sport': '#893327',
'Welcome': '#992a2b',
}
# Types of activities that can be created with Oppia.
ACTIVITY_TYPE_EXPLORATION = 'exploration'
ACTIVITY_TYPE_COLLECTION = 'collection'
ALL_ACTIVITY_TYPES = [ACTIVITY_TYPE_EXPLORATION, ACTIVITY_TYPE_COLLECTION]
# A sorted list of default categories for which icons and background colours
# exist.
ALL_CATEGORIES = sorted(CATEGORIES_TO_COLORS.keys())
# These categories are shown in the library navbar.
SEARCH_DROPDOWN_CATEGORIES = sorted([
'Mathematics',
'Statistics',
'Algorithms',
'Programming',
'Biology',
'Chemistry',
'Physics',
'Medicine',
'English',
'Architecture',
'Art',
'Music',
'Reading',
'Business',
'Economics',
'Geography',
'History',
])
# The i18n id for the header of the "Featured Activities" category in the
# library index page.
LIBRARY_CATEGORY_FEATURED_ACTIVITIES = 'I18N_LIBRARY_GROUPS_FEATURED_ACTIVITIES'
# The i18n id for the header of the "Top Rated Explorations" category in the
# library index page.
LIBRARY_CATEGORY_TOP_RATED_EXPLORATIONS = (
'I18N_LIBRARY_GROUPS_TOP_RATED_EXPLORATIONS')
# The i18n id for the header of the "Recently Published" category in the
# library index page.
LIBRARY_CATEGORY_RECENTLY_PUBLISHED = 'I18N_LIBRARY_GROUPS_RECENTLY_PUBLISHED'
# The group name that appears at the end of the url for the recently published
# page.
LIBRARY_GROUP_RECENTLY_PUBLISHED = 'recently_published'
# The group name that appears at the end of the url for the top rated page.
LIBRARY_GROUP_TOP_RATED = 'top_rated'
# NOTE TO DEVELOPERS: The LIBRARY_PAGE_MODE constants defined below should have
# the same value as the ones defined in LIBRARY_PAGE_MODES in Library.js. For
# example LIBRARY_PAGE_MODE_GROUP should have the same value as
# LIBRARY_PAGE_MODES.GROUP.
# Page mode for the group pages such as top rated and recently published
# explorations.
LIBRARY_PAGE_MODE_GROUP = 'group'
# Page mode for the main library page.
LIBRARY_PAGE_MODE_INDEX = 'index'
# Page mode for the search results page.
LIBRARY_PAGE_MODE_SEARCH = 'search'
# List of supported language codes. Each description has a
# parenthetical part that may be stripped out to give a shorter
# description.
ALL_LANGUAGE_CODES = [{
'code': 'en', 'description': u'English',
}, {
'code': 'ar', 'description': u'العربية (Arabic)',
}, {
'code': 'bg', 'description': u'български (Bulgarian)',
}, {
'code': 'ca', 'description': u'català (Catalan)',
}, {
'code': 'zh', 'description': u'中文 (Chinese)',
}, {
'code': 'hr', 'description': u'hrvatski (Croatian)',
}, {
'code': 'cs', 'description': u'čeština (Czech)',
}, {
'code': 'da', 'description': u'dansk (Danish)',
}, {
'code': 'nl', 'description': u'Nederlands (Dutch)',
}, {
'code': 'tl', 'description': u'Filipino (Filipino)',
}, {
'code': 'fi', 'description': u'suomi (Finnish)',
}, {
'code': 'fr', 'description': u'français (French)',
}, {
'code': 'de', 'description': u'Deutsch (German)',
}, {
'code': 'el', 'description': u'ελληνικά (Greek)',
}, {
'code': 'he', 'description': u'עברית (Hebrew)',
}, {
'code': 'hi', 'description': u'हिन्दी (Hindi)',
}, {
'code': 'hu', 'description': u'magyar (Hungarian)',
}, {
'code': 'id', 'description': u'Bahasa Indonesia (Indonesian)',
}, {
'code': 'it', 'description': u'italiano (Italian)',
}, {
'code': 'ja', 'description': u'日本語 (Japanese)',
}, {
'code': 'ko', 'description': u'한국어 (Korean)',
}, {
'code': 'lv', 'description': u'latviešu (Latvian)',
}, {
'code': 'lt', 'description': u'lietuvių (Lithuanian)',
}, {
'code': 'no', 'description': u'Norsk (Norwegian)',
}, {
'code': 'fa', 'description': u'فارسی (Persian)',
}, {
'code': 'pl', 'description': u'polski (Polish)',
}, {
'code': 'pt', 'description': u'português (Portuguese)',
}, {
'code': 'ro', 'description': u'română (Romanian)',
}, {
'code': 'ru', 'description': u'русский (Russian)',
}, {
'code': 'sr', 'description': u'српски (Serbian)',
}, {
'code': 'sk', 'description': u'slovenčina (Slovak)',
}, {
'code': 'sl', 'description': u'slovenščina (Slovenian)',
}, {
'code': 'es', 'description': u'español (Spanish)',
}, {
'code': 'sv', 'description': u'svenska (Swedish)',
}, {
'code': 'th', 'description': u'ภาษาไทย (Thai)',
}, {
'code': 'tr', 'description': u'Türkçe (Turkish)',
}, {
'code': 'uk', 'description': u'українська (Ukrainian)',
}, {
'code': 'vi', 'description': u'Tiếng Việt (Vietnamese)',
}]
# Defaults for topic similarities
DEFAULT_TOPIC_SIMILARITY = 0.5
SAME_TOPIC_SIMILARITY = 1.0
# NOTE TO DEVELOPERS: While adding another language, please ensure that the
# languages are in alphabetical order.
SUPPORTED_SITE_LANGUAGES = [{
'id': 'id',
'text': 'Bahasa Indonesia'
}, {
'id': 'en',
'text': 'English'
}, {
'id': 'es',
'text': 'Español'
}, {
'id': 'pt',
'text': 'Português'
}, {
'id': 'pt-br',
'text': 'Português (Brasil)'
}, {
'id': 'vi',
'text': 'Tiếng Việt'
}, {
'id': 'hi',
'text': 'हिन्दी'
}]
SYSTEM_USERNAMES = [SYSTEM_COMMITTER_ID, MIGRATION_BOT_USERNAME]
SYSTEM_USER_IDS = [SYSTEM_COMMITTER_ID, MIGRATION_BOT_USERNAME]
# The following are all page descriptions for the meta tag.
ABOUT_PAGE_DESCRIPTION = (
'Oppia is an open source learning platform that connects a community of '
'teachers and learners. You can use this site to create 1-1 learning '
'scenarios for others.')
BLOG_PAGE_DESCRIPTION = (
'Keep up to date with Oppia news and updates via our blog.')
CONTACT_PAGE_DESCRIPTION = (
'Contact the Oppia team, submit feedback, and learn how to get involved '
'with the Oppia project.')
CREATE_PAGE_DESCRIPTION = (
'Help others learn new things. Create lessons through explorations and '
'share your knowledge with the community.')
DASHBOARD_PAGE_DESCRIPTION = (
'Keep track of the lessons you have created, as well as feedback from '
'learners.')
DONATE_PAGE_DESCRIPTION = (
'Donate to The Oppia Foundation.')
FORUM_PAGE_DESCRIPTION = (
'Engage with the Oppia community by discussing questions, bugs and '
'explorations in the forum.')
LIBRARY_GROUP_PAGE_DESCRIPTION = (
'Discover top-rated or recently-published explorations on Oppia. Learn '
'from these explorations or help improve an existing one for the '
'community.')
LIBRARY_PAGE_DESCRIPTION = (
'Looking to learn something new? Find explorations created by professors, '
'teachers and Oppia users in a subject you\'re interested in, and start '
'exploring!')
PREFERENCES_PAGE_DESCRIPTION = (
'Change your Oppia profile settings and preferences')
SEARCH_PAGE_DESCRIPTION = (
'Discover a new exploration to learn from, or help improve an existing '
'one for the community.')
SIGNUP_PAGE_DESCRIPTION = (
'Sign up for Oppia and begin exploring a new subject.')
SPLASH_PAGE_DESCRIPTION = (
'Oppia is a free site for sharing knowledge via interactive lessons '
'called \'explorations\'. Learn from user-created explorations, or teach '
'and create your own.')
TEACH_PAGE_DESCRIPTION = (
'The Oppia library is full of user-created lessons called \'explorations\'.'
' Read about how to participate in the community and begin creating '
'explorations.')
TERMS_PAGE_DESCRIPTION = (
'Oppia is a 501(c)(3) registered non-profit open-source e-learning '
'platform. Learn about our terms and conditions for creating and '
'distributing learning material.')
THANKS_PAGE_DESCRIPTION = (
'Thank you for donating to The Oppia Foundation.')
SITE_NAME = 'Oppia.org'
# The type of the response returned by a handler when an exception is raised.
HANDLER_TYPE_HTML = 'html'
HANDLER_TYPE_JSON = 'json'
| 30,534 | [['EMAIL_ADDRESS', 'dummy@email.com'], ['EMAIL_ADDRESS', 'dummy@email.com'], ['EMAIL_ADDRESS', 'dummy@email.com'], ['URL', "https://groups.google.com/forum/embed/?place=forum/oppia'"], ['DATE_TIME', '2014'], ['PERSON', 'IS_MINIFIED'], ['PERSON', 'NUMBER_OF_TOP_RATED_EXPLORATIONS_FOR_LIBRARY_PAGE'], ['PERSON', 'ACCEPTED_IMAGE_FORMATS_AND_EXTENSIONS'], ['PERSON', "r'^[A-Za-z0-9]+$"], ['PERSON', "-Za-z]+)*$'"], ['PERSON', 'TODO'], ['PERSON', 'Committer'], ['PERSON', 'email_manager.py'], ['PERSON', 'DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE = True'], ['DATE_TIME', 'minutes'], ['PERSON', 'EMAIL_INTENT_EDITOR_ROLE_NOTIFICATION'], ['PERSON', "EMAIL_INTENT_FEEDBACK_MESSAGE_NOTIFICATION = '"], ['PERSON', 'VALID_MODERATOR_ACTIONS'], ['PERSON', 'GADGET_PANEL_AXIS_HORIZONTAL'], ['DATE_TIME', '10'], ['DATE_TIME', '14'], ['DATE_TIME', '40'], ['LOCATION', 'UserStatsModel'], ['DATE_TIME', "OppiaMigrationBot'"], ['URL', 'os.path.jo'], ['LOCATION', 'ALLOWED_INTERACTION_CATEGORIES'], ['PERSON', 'MultipleChoiceInput'], ['PERSON', 'SetInput'], ['PERSON', 'EDITOR_URL_PREFIX'], ['NRP', 'EXPLORATION_METADATA_SEARCH_URL'], ['PERSON', 'SUGGESTION_URL_PREFIX'], ['PERSON', 'NAV_MODE_COLLECTION'], ['LOCATION', 'NAV_MODE_LIBRARY'], ['PERSON', 'EVENT_TYPE_DEFAULT_ANSWER_RESOLVED'], ['PERSON', "EVENT_TYPE_THREAD_STATUS_CHANGED = '"], ['PERSON', 'EVENT_TYPE_MAYBE_LEAVE_EXPLORATION'], ['PERSON', 'USER_QUERY_STATUS_ARCHIVED'], ['DATE_TIME', '12 hours'], ['PERSON', 'Lightbulb'], ['DATE_TIME', "8b9862'"], ['DATE_TIME', "405185'"], ['PERSON', "u'български"], ['NRP', 'Bulgarian'], ['NRP', 'Chinese'], ['NRP', 'Croatian'], ['NRP', 'Czech'], ['NRP', 'Danish'], ['NRP', 'Dutch'], ['PERSON', "u'Filipino"], ['NRP', 'Finnish'], ['NRP', 'French'], ['NRP', 'German'], ['NRP', 'Greek'], ['PERSON', "u'magyar"], ['NRP', 'Hungarian'], ['LOCATION', 'Indonesia'], ['NRP', 'Indonesian'], ['NRP', 'Italian'], ['NRP', 'Japanese'], ['PERSON', 'ko'], ['NRP', 'Korean'], ['NRP', 'Latvian'], ['NRP', 'Lithuanian'], ['NRP', 'Norwegian'], ['NRP', 'Persian'], ['NRP', 'Polish'], ['NRP', 'Portuguese'], ['PERSON', "u'română"], ['NRP', 'Romanian'], ['NRP', 'Russian'], ['NRP', 'Serbian'], ['PERSON', "u'slovenčina"], ['NRP', 'Slovak'], ['NRP', 'Slovenian'], ['NRP', 'Spanish'], ['DATE_TIME', "u'svenska"], ['NRP', 'Swedish'], ['NRP', 'Thai'], ['NRP', 'Turkish'], ['NRP', 'Ukrainian'], ['NRP', 'Vietnamese'], ['LOCATION', 'Indonesia'], ['PERSON', 'Tiếng'], ['PERSON', 'TEACH_PAGE_DESCRIPTION'], ['PERSON', 'THANKS_PAGE_DESCRIPTION'], ['URL', 'http://www.apache.org/licenses/LICENSE-2.0'], ['URL', 'os.environ.ge'], ['URL', 'os.environ.ge'], ['URL', 'os.path.jo'], ['URL', 'os.path.jo'], ['URL', 'os.path.jo'], ['URL', 'os.path.jo'], ['URL', 'os.path.jo'], ['URL', 'os.path.jo'], ['URL', 'os.path.jo'], ['URL', 'os.path.jo'], ['URL', 'os.path.jo'], ['URL', 'os.path.jo'], ['URL', 'os.path.jo'], ['URL', 'os.path.jo'], ['URL', 'os.ge'], ['URL', 'services.py'], ['URL', 'copy.de'], ['URL', 'mailgun.org'], ['URL', 'email.com'], ['URL', 'email.com'], ['URL', 'email.com'], ['URL', 'manager.py'], ['URL', 'os.path.jo'], ['URL', 'os.path.jo'], ['URL', 'os.path.jo'], ['URL', 'os.path.jo'], ['URL', 'os.path.jo'], ['URL', 'os.path.jo'], ['URL', 'COLORS.ke'], ['URL', 'MODES.GR'], ['URL', 'Oppia.org']] |
5 | """
orthopoly.py - A suite of functions for generating orthogonal polynomials
and quadrature rules.
Copyright (c) 2014 Greg von Winckel
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Last updated on Wed Jan 1 14:29:25 MST 2014
Modified by David A. Ham (dummy@email.com), 2016
"""
import numpy as np
from functools import reduce
from math import gamma
def gauss(alpha, beta):
"""
Compute the Gauss nodes and weights from the recursion
coefficients associated with a set of orthogonal polynomials
Inputs:
alpha - recursion coefficients
beta - recursion coefficients
Outputs:
x - quadrature nodes
w - quadrature weights
Adapted from the MATLAB code by Walter Gautschi
http://www.cs.purdue.edu/archives/2002/wxg/codes/gauss.m
"""
from numpy.linalg import eigh
A = np.diag(np.sqrt(beta)[1:], 1) + np.diag(alpha)
x, V = eigh(A, "U")
w = beta[0] * np.real(np.power(V[0, :], 2))
return x, w
def lobatto(alpha, beta, xl1, xl2):
"""
    Compute the Lobatto nodes and weights with the preassigned
    nodes xl1, xl2
Inputs:
alpha - recursion coefficients
beta - recursion coefficients
xl1 - assigned node location
xl2 - assigned node location
Outputs:
x - quadrature nodes
w - quadrature weights
    Based on Section 7 of the paper
"Some modified matrix eigenvalue problems"
by Gene Golub, SIAM Review Vol 15, No. 2, April 1973, pp.318--334
"""
from numpy.linalg import solve
n = len(alpha) - 1
en = np.zeros(n)
en[-1] = 1
A1 = np.vstack((np.sqrt(beta), alpha - xl1))
J1 = np.diag(A1[0, 1:-1], 1) + np.diag(A1[1, 1:]) + np.diag(A1[0, 1:-1], -1)
A2 = np.vstack((np.sqrt(beta), alpha - xl2))
J2 = np.diag(A2[0, 1:-1], 1) + np.diag(A2[1, 1:]) + np.diag(A2[0, 1:-1], -1)
g1 = solve(J1, en)
g2 = solve(J2, en)
C = np.array(((1, -g1[-1]), (1, -g2[-1])))
xl = np.array((xl1, xl2))
ab = solve(C, xl)
    alphal = alpha.copy()  # copy so the caller's recursion coefficients
    alphal[-1] = ab[0]     # are not mutated in place
    betal = beta.copy()
    betal[-1] = ab[1]
x, w = gauss(alphal, betal)
return x, w
def rec_jacobi(N, a, b):
"""
Generate the recursion coefficients alpha_k, beta_k
P_{k+1}(x) = (x-alpha_k)*P_{k}(x) - beta_k P_{k-1}(x)
for the Jacobi polynomials which are orthogonal on [-1,1]
with respect to the weight w(x)=[(1-x)^a]*[(1+x)^b]
Inputs:
N - polynomial order
a - weight parameter
b - weight parameter
Outputs:
alpha - recursion coefficients
beta - recursion coefficients
Adapted from the MATLAB code by Dirk Laurie and Walter Gautschi
http://www.cs.purdue.edu/archives/2002/wxg/codes/r_jacobi.m
"""
nu = (b - a) / float(a + b + 2)
mu = 2 ** (a + b + 1) * gamma(a + 1) * gamma(b + 1) / gamma(a + b + 2)
if N == 1:
alpha = nu
beta = mu
else:
n = np.arange(1.0, N)
nab = 2 * n + a + b
alpha = np.hstack((nu, (b ** 2 - a ** 2) / (nab * (nab + 2))))
n = n[1:]
nab = nab[1:]
B1 = 4 * (a + 1) * (b + 1) / float((a + b + 2) ** 2 * (a + b + 3))
B = 4 * (n + a) * (n + b) * n * (n + a + b) / \
(nab ** 2 * (nab + 1) * (nab - 1))
beta = np.hstack((mu, B1, B))
return alpha, beta
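# Illustrative usage (a sketch, not part of the original module): combining
# rec_jacobi with gauss() above yields an N-point Gauss-Jacobi quadrature
# rule for the weight w(x) = (1-x)^a * (1+x)^b on [-1, 1]:
#   alpha, beta = rec_jacobi(5, 0.0, 0.0)  # a = b = 0 is Gauss-Legendre
#   x, w = gauss(alpha, beta)
#   np.dot(w, x ** 2)  # ~ 2/3, the integral of x^2 over [-1, 1]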
def rec_jacobi01(N, a, b):
"""
Generate the recursion coefficients alpha_k, beta_k
for the Jacobi polynomials which are orthogonal on [0,1]
See rec_jacobi for the recursion coefficients on [-1,1]
Inputs:
N - polynomial order
a - weight parameter
b - weight parameter
Outputs:
alpha - recursion coefficients
beta - recursion coefficients
Adapted from the MATLAB implementation:
https://www.cs.purdue.edu/archives/2002/wxg/codes/r_jacobi01.m
"""
if a <= -1 or b <= -1:
raise ValueError('''Jacobi coefficients are defined only
for alpha,beta > -1''')
if not isinstance(N, int):
raise TypeError('N must be an integer')
if N < 1:
raise ValueError('N must be at least 1')
c, d = rec_jacobi(N, a, b)
alpha = (1 + c) / 2
beta = d / 4
beta[0] = d[0] / 2 ** (a + b + 1)
return alpha, beta
def polyval(alpha, beta, x):
"""
Evaluate polynomials on x given the recursion coefficients alpha and beta
"""
N = len(alpha)
m = len(x)
P = np.zeros((m, N + 1))
P[:, 0] = 1
P[:, 1] = (x - alpha[0]) * P[:, 0]
for k in range(1, N):
P[:, k + 1] = (x - alpha[k]) * P[:, k] - beta[k] * P[:, k - 1]
return P
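# Example (illustrative sketch): with the Legendre recursion coefficients
# the monic polynomials come out as pi_0 = 1, pi_1 = x, pi_2 = x**2 - 1/3.
def _example_polyval():
    alpha, beta = rec_jacobi(3, 0, 0)
    x = np.linspace(-1.0, 1.0, 5)
    P = polyval(alpha, beta, x)
    assert np.allclose(P[:, 2], x ** 2 - 1.0 / 3.0)
    return P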
def jacobi(N, a, b, x, NOPT=1):
"""
JACOBI computes the Jacobi polynomials which are orthogonal on [-1,1]
with respect to the weight w(x)=[(1-x)^a]*[(1+x)^b] and evaluate them
on the given grid up to P_N(x). Setting NOPT=2 returns the
L2-normalized polynomials
"""
m = len(x)
P = np.zeros((m, N + 1))
apb = a + b
a1 = a - 1
b1 = b - 1
c = apb * (a - b)
P[:, 0] = 1
if N > 0:
P[:, 1] = 0.5 * (a - b + (apb + 2) * x)
if N > 1:
for k in range(2, N + 1):
k2 = 2 * k
g = k2 + apb
g1 = g - 1
g2 = g - 2
d = 2.0 * (k + a1) * (k + b1) * g
P[:, k] = (g1 * (c + g2 * g * x) * P[:, k - 1] -
d * P[:, k - 2]) / (k2 * (k + apb) * g2)
if NOPT == 2:
k = np.arange(N + 1)
pnorm = 2 ** (apb + 1) * gamma(k + a + 1) * gamma(k + b + 1) / \
((2 * k + a + b + 1) * (gamma(k + 1) * gamma(k + a + b + 1)))
P *= 1 / np.sqrt(pnorm)
return P
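# Example (illustrative sketch): for a = b = 0 the routine reproduces the
# Legendre polynomials, e.g. P_1 = x and P_2 = (3*x**2 - 1)/2.
def _example_jacobi():
    x = np.linspace(-1.0, 1.0, 5)
    P = jacobi(2, 0, 0, x)
    assert np.allclose(P[:, 1], x)
    assert np.allclose(P[:, 2], 0.5 * (3 * x ** 2 - 1))
    return P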
def jacobiD(N, a, b, x, NOPT=1):
"""
JACOBID computes the first derivatives of the normalized Jacobi
polynomials which are orthogonal on [-1,1] with respect
to the weight w(x)=[(1-x)^a]*[(1+x)^b] and evaluate them
on the given grid up to P_N(x). Setting NOPT=2 returns
the derivatives of the L2-normalized polynomials
"""
z = np.zeros((len(x), 1))
if N == 0:
Px = z
else:
Px = 0.5 * np.hstack((z, jacobi(N - 1, a + 1, b + 1, x, NOPT) *
((a + b + 2 + np.arange(N)))))
return Px
def mm_log(N, a):
"""
MM_LOG Modified moments for a logarithmic weight function.
The call mm=MM_LOG(n,a) computes the first n modified moments of the
logarithmic weight function w(t)=t^a log(1/t) on [0,1] relative to
shifted Legendre polynomials.
REFERENCE: Walter Gautschi,``On the preceding paper `A Legendre
polynomial integral' by James L. Blue'',
Math. Comp. 33 (1979), 742-743.
Adapted from the MATLAB implementation:
https://www.cs.purdue.edu/archives/2002/wxg/codes/mm_log.m
"""
if a <= -1:
raise ValueError('Parameter a must be greater than -1')
prod = lambda z: reduce(lambda x, y: x * y, z, 1)
mm = np.zeros(N)
c = 1
for n in range(N):
if isinstance(a, int) and a < n:
p = range(n - a, n + a + 2)
mm[n] = (-1) ** (n - a) / prod(p)
mm[n] *= gamma(a + 1) ** 2
else:
if n == 0:
mm[0] = 1 / (a + 1) ** 2
else:
k = np.arange(1, n + 1)
s = 1 / (a + 1 + k) - 1 / (a + 1 - k)
p = (a + 1 - k) / (a + 1 + k)
mm[n] = (1 / (a + 1) + sum(s)) * prod(p) / (a + 1)
mm[n] *= c
c *= 0.5 * (n + 1) / (2 * n + 1)
return mm
def mod_chebyshev(N, mom, alpham, betam):
"""
    Calculate the recursion coefficients for the orthogonal polynomials
    which are orthogonal with respect to a weight function that is
    represented in terms of its modified moments, obtained by
    integrating the monic polynomials against the weight function.
References
----------
John C. Wheeler, "Modified moments and Gaussian quadratures"
Rocky Mountain Journal of Mathematics, Vol. 4, Num. 2 (1974), 287--296
Walter Gautschi, "Orthogonal Polynomials (in Matlab)
Journal of Computational and Applied Mathematics, Vol. 178 (2005) 215--234
Adapted from the MATLAB implementation:
https://www.cs.purdue.edu/archives/2002/wxg/codes/chebyshev.m
"""
if not isinstance(N, int):
raise TypeError('N must be an integer')
if N < 1:
raise ValueError('N must be at least 1')
N = min(N, int(len(mom) / 2))
alpha = np.zeros(N)
beta = np.zeros(N)
normsq = np.zeros(N)
sig = np.zeros((N + 1, 2 * N))
alpha[0] = alpham[0] + mom[1] / mom[0]
beta[0] = mom[0]
sig[1, :] = mom
for n in range(2, N + 1):
for m in range(n - 1, 2 * N - n + 1):
sig[n, m] = sig[n - 1, m + 1] - (alpha[n - 2] - alpham[m]) * sig[n - 1, m] - \
beta[n - 2] * sig[n - 2, m] + betam[m] * sig[n - 1, m - 1]
alpha[n - 1] = alpham[n - 1] + sig[n, n] / sig[n, n - 1] - sig[n - 1, n - 1] / \
sig[n - 1, n - 2]
beta[n - 1] = sig[n, n - 1] / sig[n - 1, n - 2]
normsq = np.diagonal(sig, -1)
return alpha, beta, normsq
def rec_jaclog(N, a):
"""
Generate the recursion coefficients alpha_k, beta_k
P_{k+1}(x) = (x-alpha_k)*P_{k}(x) - beta_k P_{k-1}(x)
for the monic polynomials which are orthogonal on [0,1]
with respect to the weight w(x)=x^a*log(1/x)
Inputs:
N - polynomial order
a - weight parameter
Outputs:
alpha - recursion coefficients
beta - recursion coefficients
    Adapted from the MATLAB code:
https://www.cs.purdue.edu/archives/2002/wxg/codes/r_jaclog.m
"""
alphaj, betaj = rec_jacobi01(2 * N, 0, 0)
mom = mm_log(2 * N, a)
alpha, beta, _ = mod_chebyshev(N, mom, alphaj, betaj)
return alpha, beta
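# Example (illustrative sketch): end-to-end use of rec_jaclog with gauss
# for the weight w(x) = log(1/x) on [0, 1] (a = 0). The weights sum to
# the zeroth moment, int_0^1 log(1/x) dx = 1.
def _example_jaclog():
    alpha, beta = rec_jaclog(4, 0)
    x, w = gauss(alpha, beta)
    assert abs(np.sum(w) - 1.0) < 1e-10
    return x, w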
| 10,886 | [['EMAIL_ADDRESS', 'dummy@email.com'], ['DATE_TIME', '2014'], ['PERSON', 'Greg von Winckel'], ['LOCATION', 'DAMAGES'], ['PERSON', 'WHETHER'], ['DATE_TIME', 'Wed Jan 1'], ['DATE_TIME', '2014'], ['PERSON', 'David A. Ham'], ['DATE_TIME', '2016'], ['PERSON', 'Walter Gautschi\n '], ['PERSON', 'Gene Golub'], ['DATE_TIME', 'April 1973'], ['LOCATION', 'P_{k+1}(x'], ['PERSON', 'Dirk Laurie'], ['PERSON', 'Walter Gautschi\n '], ['LOCATION', "ValueError('N"], ['PERSON', 'NOPT'], ['PERSON', 'Px'], ['PERSON', 'Px'], ['PERSON', 'NOPT'], ['PERSON', 'Walter Gautschi,``On'], ['PERSON', "James L. Blue''"], ['DATE_TIME', '33 (1979'], ['LOCATION', 'mm[0'], ['LOCATION', 'alpham'], ['PERSON', 'betam'], ['PERSON', 'John C. Wheeler'], ['NRP', 'Gaussian'], ['DATE_TIME', '1974'], ['PERSON', 'Walter Gautschi'], ['DATE_TIME', '2005'], ['LOCATION', "ValueError('N"], ['DATE_TIME', 'min(N'], ['LOCATION', 'rec_jaclog(N'], ['LOCATION', 'P_{k+1}(x'], ['URL', 'http://www.cs.purdue.edu/archives/2002/wxg/codes/gauss.m'], ['URL', 'http://www.cs.purdue.edu/archives/2002/wxg/codes/r_jacobi.m'], ['URL', 'https://www.cs.purdue.edu/archives/2002/wxg/codes/r_jacobi01.m'], ['URL', 'https://www.cs.purdue.edu/archives/2002/wxg/codes/mm_log.m'], ['URL', 'https://www.cs.purdue.edu/archives/2002/wxg/codes/chebyshev.m'], ['URL', 'https://www.cs.purdue.edu/archives/2002/wxg/codes/r_jaclog.m'], ['URL', 'orthopoly.py'], ['URL', 'email.com'], ['URL', 'numpy.li'], ['URL', 'np.re'], ['URL', 'numpy.li'], ['URL', 'np.ar'], ['URL', 'np.ar'], ['URL', 'np.ar'], ['URL', 'np.ar'], ['URL', 'np.ar'], ['URL', 'np.ar']] |
6 | ## @package TriggerObjectBlock_cfi
# Configuration file that defines the producer of ROOT-tuple for trigger objects.
#
# \author Subir Sarkar
# \author Rosamaria Venditti (INFN Bari, Bari University)
# \author Konstantin Androsov (University of Siena, INFN Pisa)
# \author Maria Teresa Grippo (University of Siena, INFN Pisa)
#
# Copyright 2011-2013 Subir Sarkar, Rosamaria Venditti (INFN Bari, Bari University)
# Copyright 2014 Konstantin Androsov dummy@email.com,
# Maria Teresa Grippo dummy@email.com
#
# This file is part of X->HH->bbTauTau.
#
# X->HH->bbTauTau is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# X->HH->bbTauTau is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with X->HH->bbTauTau. If not, see <http://www.gnu.org/licenses/>.
import FWCore.ParameterSet.Config as cms
triggerObjectBlock = cms.EDAnalyzer("TriggerObjectBlock",
verbosity = cms.int32(0),
hltInputTag = cms.InputTag('TriggerResults','','HLT'),
triggerEventTag = cms.InputTag('patTriggerEvent'),
hltPathsOfInterest = cms.vstring ("HLT_DoubleMu",
"HLT_Mu",
"HLT_IsoMu",
"HLT_TripleMu",
"IsoPFTau",
"TrkIsoT",
"HLT_Ele"),
May10ReRecoData = cms.bool(False)
)
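## Usage sketch (illustrative only; the process and path names below are
## hypothetical and not taken from this file):
##
## import FWCore.ParameterSet.Config as cms
## process = cms.Process("NTuple")
## process.triggerObjectBlock = triggerObjectBlock.clone()
## process.p = cms.Path(process.triggerObjectBlock)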
| 1,844 | [['EMAIL_ADDRESS', 'dummy@email.com'], ['EMAIL_ADDRESS', 'dummy@email.com'], ['PERSON', 'Subir Sarkar'], ['PERSON', 'Rosamaria Venditti'], ['PERSON', 'Konstantin Androsov'], ['PERSON', 'Maria Teresa Grippo'], ['DATE_TIME', '2011-2013'], ['PERSON', 'Subir Sarkar'], ['PERSON', 'Rosamaria Venditti'], ['DATE_TIME', '2014'], ['PERSON', 'Konstantin Androsov'], ['PERSON', 'Maria Teresa Grippo'], ['PERSON', 'triggerEventTag = cms'], ['PERSON', "InputTag('patTriggerEvent"], ['URL', 'http://www.gnu.org/licenses/'], ['URL', 'email.com'], ['URL', 'email.com'], ['URL', 'FWCore.ParameterSet.Co'], ['URL', 'cms.int'], ['URL', 'cms.In'], ['URL', 'cms.In'], ['URL', 'cms.bo']] |
7 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2010 Edgewall Software
# Copyright (C) 2004 Daniel Lundin dummy@email.com
# Copyright (C) 2005-2006 Christopher Lenz dummy@email.com
# Copyright (C) 2006-2007 Christian Boos dummy@email.com
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Daniel Lundin dummy@email.com
# Christopher Lenz dummy@email.com
# Christian Boos dummy@email.com
"""
File metadata management
------------------------
The `trac.mimeview` package centralizes the intelligence related to
file metadata, principally concerning the `type` (MIME type) of the
content and, if relevant, concerning the text encoding (charset) used
by the content.
There are primarily two approaches for getting the MIME type of a
given file, either taking advantage of existing conventions for the
file name, or examining the file content and applying various
heuristics.
The module also knows how to convert the file content from one type to
another type.
In some cases, only the `url` pointing to the file's content is
actually needed; that's why we avoid reading the file's content when
it's not needed.
The actual `content` to be converted might be a `unicode` object, but
it can also be the raw byte string (`str`) object, or simply an object
that can be `read()`.
.. note:: (for plugin developers)
The Mimeview API is quite complex and many things there are
currently a bit difficult to work with (e.g. what an actual
`content` might be, see the last paragraph of this description).
So this area is mainly in a ''work in progress'' state, which will
be improved along the lines described in :teo:`#3332`.
In particular, if you are interested in writing `IContentConverter`
and `IHTMLPreviewRenderer` components, note that those interfaces
will be merged into a new style `IContentConverter`. Feel free to
contribute remarks and suggestions for improvements to the
corresponding ticket (#3332 as well).
"""
import re
from StringIO import StringIO
from genshi import Markup, Stream
from genshi.core import TEXT, START, END, START_NS, END_NS
from genshi.builder import Fragment, tag
from genshi.input import HTMLParser
from trac.config import IntOption, ListOption, Option
from trac.core import *
from trac.resource import Resource
from trac.util import Ranges, content_disposition
from trac.util.text import exception_to_unicode, to_utf8, to_unicode
from trac.util.translation import _, tag_
__all__ = ['Context', 'Mimeview', 'RenderingContext', 'get_mimetype',
'is_binary', 'detect_unicode', 'content_to_unicode', 'ct_mimetype']
class RenderingContext(object):
"""
A rendering context specifies ''how'' the content should be rendered.
It holds together all the needed contextual information that will be
needed by individual renderer components.
To that end, a context keeps track of the Href instance (`.href`) which
should be used as a base for building URLs.
It also provides a `PermissionCache` (`.perm`) which can be used to
restrict the output so that only the authorized information is shown.
A rendering context may also be associated to some Trac resource which
will be used as the implicit reference when rendering relative links
or for retrieving relative content and can be used to retrieve related
metadata.
Rendering contexts can be nested, and a new context can be created from
an existing context using the call syntax. The previous context can be
retrieved using the `.parent` attribute.
For example, when rendering a wiki text of a wiki page, the context will
be associated to a resource identifying that wiki page.
If that wiki text contains a `[[TicketQuery]]` wiki macro, the macro will
set up nested contexts for each matching ticket that will be used for
rendering the ticket descriptions.
:since: version 0.11
"""
def __init__(self, resource, href=None, perm=None):
"""Directly create a `RenderingContext`.
:param resource: the associated resource
:type resource: `Resource`
:param href: an `Href` object suitable for creating URLs
:param perm: a `PermissionCache` object used for restricting the
generated output to "authorized" information only.
The actual `.perm` attribute of the rendering context will be bound
to the given `resource` so that fine-grained permission checks will
apply to that.
"""
self.parent = None #: The parent context, if any
self.resource = resource
self.href = href
self.perm = perm(resource) if perm and resource else perm
self._hints = None
@staticmethod
def from_request(*args, **kwargs):
""":deprecated: since 1.0, use `web_context` instead."""
from trac.web.chrome import web_context
return web_context(*args, **kwargs)
def __repr__(self):
path = []
context = self
while context:
if context.resource.realm: # skip toplevel resource
path.append(repr(context.resource))
context = context.parent
return '<%s %s>' % (type(self).__name__, ' - '.join(reversed(path)))
def child(self, resource=None, id=False, version=False, parent=False):
"""Create a nested rendering context.
`self` will be the parent for the new nested context.
:param resource: either a `Resource` object or the realm string for a
resource specification to be associated to the new
context. If `None`, the resource will be the same
as the resource of the parent context.
:param id: the identifier part of the resource specification
:param version: the version of the resource specification
:return: the new context object
:rtype: `RenderingContext`
>>> context = RenderingContext('wiki', 'WikiStart')
>>> ticket1 = Resource('ticket', 1)
>>> context.child('ticket', 1).resource == ticket1
True
>>> context.child(ticket1).resource is ticket1
True
>>> context.child(ticket1)().resource is ticket1
True
"""
if resource:
resource = Resource(resource, id=id, version=version,
parent=parent)
else:
resource = self.resource
context = RenderingContext(resource, href=self.href, perm=self.perm)
context.parent = self
# hack for context instances created by from_request()
# this is needed because various parts of the code rely on a request
# object being available, but that will hopefully improve in the
# future
if hasattr(self, 'req'):
context.req = self.req
return context
__call__ = child
def __contains__(self, resource):
"""Check whether a resource is in the rendering path.
The primary use for this check is to avoid to render the content of a
resource if we're already embedded in a context associated to that
resource.
:param resource: a `Resource` specification which will be checked for
"""
context = self
while context:
if context.resource and \
context.resource.realm == resource.realm and \
context.resource.id == resource.id:
# we don't care about version here
return True
context = context.parent
# Rendering hints
#
# A rendering hint is a key/value pairs that can influence renderers,
# wiki formatters and processors in the way they produce their output.
# The keys are strings, but the values could be anything.
#
# In nested contexts, the hints are inherited from their parent context,
    # unless overridden locally.
def set_hints(self, **keyvalues):
"""Set rendering hints for this rendering context.
>>> ctx = RenderingContext('timeline')
>>> ctx.set_hints(wiki_flavor='oneliner', shorten_lines=True)
>>> t_ctx = ctx('ticket', 1)
>>> t_ctx.set_hints(wiki_flavor='html', preserve_newlines=True)
>>> (t_ctx.get_hint('wiki_flavor'), t_ctx.get_hint('shorten_lines'), \
t_ctx.get_hint('preserve_newlines'))
('html', True, True)
>>> (ctx.get_hint('wiki_flavor'), ctx.get_hint('shorten_lines'), \
ctx.get_hint('preserve_newlines'))
('oneliner', True, None)
"""
if self._hints is None:
self._hints = {}
hints = self._parent_hints()
if hints is not None:
self._hints.update(hints)
self._hints.update(keyvalues)
def get_hint(self, hint, default=None):
"""Retrieve a rendering hint from this context or an ancestor context.
>>> ctx = RenderingContext('timeline')
>>> ctx.set_hints(wiki_flavor='oneliner')
>>> t_ctx = ctx('ticket', 1)
>>> t_ctx.get_hint('wiki_flavor')
'oneliner'
>>> t_ctx.get_hint('preserve_newlines', True)
True
"""
hints = self._hints
if hints is None:
hints = self._parent_hints()
if hints is None:
return default
return hints.get(hint, default)
def has_hint(self, hint):
"""Test whether a rendering hint is defined in this context or in some
ancestor context.
>>> ctx = RenderingContext('timeline')
>>> ctx.set_hints(wiki_flavor='oneliner')
>>> t_ctx = ctx('ticket', 1)
>>> t_ctx.has_hint('wiki_flavor')
True
>>> t_ctx.has_hint('preserve_newlines')
False
"""
hints = self._hints
if hints is None:
hints = self._parent_hints()
if hints is None:
return False
return hint in hints
def _parent_hints(self):
p = self.parent
while p and p._hints is None:
p = p.parent
return p and p._hints
class Context(RenderingContext):
""":deprecated: old name kept for compatibility, use `RenderingContext`."""
# Some common MIME types and their associated keywords and/or file extensions
KNOWN_MIME_TYPES = {
'application/javascript': 'js',
'application/msword': 'doc dot',
'application/pdf': 'pdf',
'application/postscript': 'ps',
'application/rtf': 'rtf',
'application/x-sh': 'sh',
'application/x-csh': 'csh',
'application/x-troff': 'nroff roff troff',
'application/x-yaml': 'yml yaml',
'application/rss+xml': 'rss',
'application/xsl+xml': 'xsl',
'application/xslt+xml': 'xslt',
'image/x-icon': 'ico',
'image/svg+xml': 'svg',
'model/vrml': 'vrml wrl',
'text/css': 'css',
'text/html': 'html htm',
'text/plain': 'txt TXT text README INSTALL '
'AUTHORS COPYING ChangeLog RELEASE',
'text/xml': 'xml',
# see also TEXT_X_TYPES below
'text/x-csrc': 'c xs',
'text/x-chdr': 'h',
'text/x-c++src': 'cc CC cpp C c++ C++',
'text/x-c++hdr': 'hh HH hpp H',
'text/x-csharp': 'cs c# C#',
'text/x-diff': 'patch',
'text/x-eiffel': 'e',
'text/x-elisp': 'el',
'text/x-fortran': 'f',
'text/x-haskell': 'hs',
'text/x-ini': 'ini cfg',
'text/x-objc': 'm mm',
'text/x-ocaml': 'ml mli',
'text/x-makefile': 'make mk Makefile GNUMakefile',
'text/x-pascal': 'pas',
'text/x-perl': 'pl pm PL',
'text/x-php': 'php3 php4',
'text/x-python': 'py',
'text/x-pyrex': 'pyx',
'text/x-ruby': 'rb',
'text/x-scheme': 'scm',
'text/x-textile': 'txtl',
'text/x-vba': 'vb vba bas',
'text/x-verilog': 'v',
'text/x-vhdl': 'vhd',
}
for t in KNOWN_MIME_TYPES.keys():
types = KNOWN_MIME_TYPES[t].split()
if t.startswith('text/x-'):
types.append(t[len('text/x-'):])
KNOWN_MIME_TYPES[t] = types
# extend the above with simple (text/x-<something>: <something>) mappings
TEXT_X_TYPES = """
ada asm asp awk idl inf java ksh lua m4 mail psp rfc rst sql tcl tex zsh
"""
for x in TEXT_X_TYPES.split():
KNOWN_MIME_TYPES.setdefault('text/x-%s' % x, []).append(x)
# Default mapping from keywords/extensions to known MIME types:
MIME_MAP = {}
for t, exts in KNOWN_MIME_TYPES.items():
MIME_MAP[t] = t
for e in exts:
MIME_MAP[e] = t
# Simple builtin autodetection from the content using a regexp
MODE_RE = re.compile(r"""
\#!.+?env\s+(\w+) # 1. look for shebang with env
| \#!(?:[/\w.-_]+/)?(\w+) # 2. look for regular shebang
| -\*-\s*(?:mode:\s*)?([\w+-]+)\s*-\*- # 3. look for Emacs' -*- mode -*-
| vim:.*?(?:syntax|filetype|ft)=(\w+) # 4. look for VIM's syntax=<n>
""", re.VERBOSE)
def get_mimetype(filename, content=None, mime_map=MIME_MAP):
"""Guess the most probable MIME type of a file with the given name.
`filename` is either a filename (the lookup will then use the suffix)
or some arbitrary keyword.
`content` is either a `str` or an `unicode` string.
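    For example, using the default `MIME_MAP` defined above:
    >>> get_mimetype('setup.py')
    'text/x-python'
    >>> get_mimetype('README')
    'text/plain'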
"""
suffix = filename.split('.')[-1]
if suffix in mime_map:
# 1) mimetype from the suffix, using the `mime_map`
return mime_map[suffix]
else:
mimetype = None
try:
import mimetypes
# 2) mimetype from the suffix, using the `mimetypes` module
mimetype = mimetypes.guess_type(filename)[0]
except Exception:
pass
if not mimetype and content:
match = re.search(MODE_RE, content[:1000] + content[-1000:])
if match:
mode = match.group(1) or match.group(2) or match.group(4) or \
match.group(3).lower()
if mode in mime_map:
# 3) mimetype from the content, using the `MODE_RE`
return mime_map[mode]
else:
if is_binary(content):
# 4) mimetype from the content, using`is_binary`
return 'application/octet-stream'
return mimetype
def ct_mimetype(content_type):
"""Return the mimetype part of a content type."""
return (content_type or '').split(';')[0].strip()
def is_binary(data):
"""Detect binary content by checking the first thousand bytes for zeroes.
Operate on either `str` or `unicode` strings.
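    For example:
    >>> is_binary('ab\\0cd')
    True
    >>> is_binary('\xff\xfe plain UTF-16')
    False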
"""
if isinstance(data, str) and detect_unicode(data):
return False
return '\0' in data[:1000]
def detect_unicode(data):
"""Detect different unicode charsets by looking for BOMs (Byte Order Mark).
Operate obviously only on `str` objects.
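    For example:
    >>> detect_unicode('\xef\xbb\xbfdata')
    'utf-8'
    >>> detect_unicode('plain') is None
    True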
"""
if data.startswith('\xff\xfe'):
return 'utf-16-le'
elif data.startswith('\xfe\xff'):
return 'utf-16-be'
elif data.startswith('\xef\xbb\xbf'):
return 'utf-8'
else:
return None
def content_to_unicode(env, content, mimetype):
"""Retrieve an `unicode` object from a `content` to be previewed.
In case the raw content had an unicode BOM, we remove it.
>>> from trac.test import EnvironmentStub
>>> env = EnvironmentStub()
>>> content_to_unicode(env, u"\ufeffNo BOM! h\u00e9 !", '')
u'No BOM! h\\xe9 !'
>>> content_to_unicode(env, "\xef\xbb\xbfNo BOM! h\xc3\xa9 !", '')
u'No BOM! h\\xe9 !'
"""
mimeview = Mimeview(env)
if hasattr(content, 'read'):
content = content.read(mimeview.max_preview_size)
u = mimeview.to_unicode(content, mimetype)
if u and u[0] == u'\ufeff':
u = u[1:]
return u
class IHTMLPreviewRenderer(Interface):
"""Extension point interface for components that add HTML renderers of
specific content types to the `Mimeview` component.
.. note::
This interface will be merged with IContentConverter, as
conversion to text/html will simply be a particular content
conversion.
Note however that the IHTMLPreviewRenderer will still be
supported for a while through an adapter, whereas the
IContentConverter interface itself will be changed.
So if all you want to do is convert to HTML and don't feel like
following the API changes, you should rather implement this
interface for the time being.
"""
#: implementing classes should set this property to True if they
#: support text content where Trac should expand tabs into spaces
expand_tabs = False
#: indicate whether the output of this renderer is source code that can
#: be decorated with annotations
returns_source = False
def get_quality_ratio(mimetype):
"""Return the level of support this renderer provides for the `content`
of the specified MIME type. The return value must be a number between
0 and 9, where 0 means no support and 9 means "perfect" support.
"""
def render(context, mimetype, content, filename=None, url=None):
"""Render an XHTML preview of the raw `content` in a RenderingContext.
The `content` might be:
* a `str` object
* an `unicode` string
* any object with a `read` method, returning one of the above
It is assumed that the content will correspond to the given `mimetype`.
Besides the `content` value, the same content may eventually
be available through the `filename` or `url` parameters.
This is useful for renderers that embed objects, using <object> or
<img> instead of including the content inline.
Can return the generated XHTML text as a single string or as an
iterable that yields strings. In the latter case, the list will
be considered to correspond to lines of text in the original content.
"""
class IHTMLPreviewAnnotator(Interface):
"""Extension point interface for components that can annotate an XHTML
representation of file contents with additional information."""
def get_annotation_type():
"""Return a (type, label, description) tuple
that defines the type of annotation and provides human readable names.
The `type` element should be unique to the annotator.
The `label` element is used as column heading for the table,
while `description` is used as a display name to let the user
toggle the appearance of the annotation type.
"""
def get_annotation_data(context):
"""Return some metadata to be used by the `annotate_row` method below.
This will be called only once, before lines are processed.
If this raises an error, that annotator won't be used.
"""
def annotate_row(context, row, number, line, data):
"""Return the XHTML markup for the table cell that contains the
annotation data.
`context` is the context corresponding to the content being annotated,
`row` is the tr Element being built, `number` is the line number being
processed and `line` is the line's actual content.
`data` is whatever additional data the `get_annotation_data` method
decided to provide.
"""
class IContentConverter(Interface):
"""An extension point interface for generic MIME based content
conversion.
    .. note:: This API will likely change in the future (see :teo:`#3332`)
"""
def get_supported_conversions():
"""Return an iterable of tuples in the form (key, name, extension,
in_mimetype, out_mimetype, quality) representing the MIME conversions
supported and
the quality ratio of the conversion in the range 0 to 9, where 0 means
no support and 9 means "perfect" support. eg. ('latex', 'LaTeX', 'tex',
'text/x-trac-wiki', 'text/plain', 8)"""
def convert_content(req, mimetype, content, key):
"""Convert the given content from mimetype to the output MIME type
represented by key. Returns a tuple in the form (content,
output_mime_type) or None if conversion is not possible."""
class Content(object):
"""A lazy file-like object that only reads `input` if necessary."""
def __init__(self, input, max_size):
self.input = input
self.max_size = max_size
self.content = None
def read(self, size=-1):
if size == 0:
return ''
if self.content is None:
self.content = StringIO(self.input.read(self.max_size))
return self.content.read(size)
def reset(self):
if self.content is not None:
self.content.seek(0)
class Mimeview(Component):
"""Generic HTML renderer for data, typically source code."""
required = True
renderers = ExtensionPoint(IHTMLPreviewRenderer)
annotators = ExtensionPoint(IHTMLPreviewAnnotator)
converters = ExtensionPoint(IContentConverter)
default_charset = Option('trac', 'default_charset', 'utf-8',
"""Charset to be used when in doubt.""")
tab_width = IntOption('mimeviewer', 'tab_width', 8,
"""Displayed tab width in file preview. (''since 0.9'')""")
max_preview_size = IntOption('mimeviewer', 'max_preview_size', 262144,
"""Maximum file size for HTML preview. (''since 0.9'')""")
mime_map = ListOption('mimeviewer', 'mime_map',
'text/x-dylan:dylan, text/x-idl:ice, text/x-ada:ads:adb',
doc="""List of additional MIME types and keyword mappings.
Mappings are comma-separated, and for each MIME type,
there's a colon (":") separated list of associated keywords
or file extensions. (''since 0.10'')""")
treat_as_binary = ListOption('mimeviewer', 'treat_as_binary',
'application/octet-stream, application/pdf, application/postscript, '
'application/msword,application/rtf,',
doc="""Comma-separated list of MIME types that should be treated as
binary data. (''since 0.11.5'')""")
def __init__(self):
self._mime_map = None
# Public API
def get_supported_conversions(self, mimetype):
"""Return a list of target MIME types in same form as
`IContentConverter.get_supported_conversions()`, but with the converter
component appended. Output is ordered from best to worst quality."""
converters = []
for converter in self.converters:
conversions = converter.get_supported_conversions() or []
for k, n, e, im, om, q in conversions:
if im == mimetype and q > 0:
converters.append((k, n, e, im, om, q, converter))
converters = sorted(converters, key=lambda i: i[-2], reverse=True)
return converters
def convert_content(self, req, mimetype, content, key, filename=None,
url=None):
"""Convert the given content to the target MIME type represented by
`key`, which can be either a MIME type or a key. Returns a tuple of
(content, output_mime_type, extension)."""
if not content:
return ('', 'text/plain;charset=utf-8', '.txt')
# Ensure we have a MIME type for this content
full_mimetype = mimetype
if not full_mimetype:
if hasattr(content, 'read'):
content = content.read(self.max_preview_size)
full_mimetype = self.get_mimetype(filename, content)
if full_mimetype:
mimetype = ct_mimetype(full_mimetype) # split off charset
else:
mimetype = full_mimetype = 'text/plain' # fallback if not binary
# Choose best converter
candidates = list(self.get_supported_conversions(mimetype) or [])
candidates = [c for c in candidates if key in (c[0], c[4])]
if not candidates:
raise TracError(
_("No available MIME conversions from %(old)s to %(new)s",
old=mimetype, new=key))
# First successful conversion wins
        for ck, name, ext, input_mimetype, output_mimetype, quality, \
converter in candidates:
output = converter.convert_content(req, mimetype, content, ck)
if output:
return (output[0], output[1], ext)
raise TracError(
_("No available MIME conversions from %(old)s to %(new)s",
old=mimetype, new=key))
def get_annotation_types(self):
"""Generator that returns all available annotation types."""
for annotator in self.annotators:
yield annotator.get_annotation_type()
def render(self, context, mimetype, content, filename=None, url=None,
annotations=None, force_source=False):
"""Render an XHTML preview of the given `content`.
`content` is the same as an `IHTMLPreviewRenderer.render`'s
`content` argument.
The specified `mimetype` will be used to select the most appropriate
`IHTMLPreviewRenderer` implementation available for this MIME type.
        If not given, the MIME type will be inferred from the filename or the
content.
Return a string containing the XHTML text.
When rendering with an `IHTMLPreviewRenderer` fails, a warning is added
to the request associated with the context (if any), unless the
`disable_warnings` hint is set to `True`.
"""
if not content:
return ''
if not isinstance(context, RenderingContext):
raise TypeError("RenderingContext expected (since 0.11)")
# Ensure we have a MIME type for this content
full_mimetype = mimetype
if not full_mimetype:
if hasattr(content, 'read'):
content = content.read(self.max_preview_size)
full_mimetype = self.get_mimetype(filename, content)
if full_mimetype:
mimetype = ct_mimetype(full_mimetype) # split off charset
else:
mimetype = full_mimetype = 'text/plain' # fallback if not binary
# Determine candidate `IHTMLPreviewRenderer`s
candidates = []
for renderer in self.renderers:
qr = renderer.get_quality_ratio(mimetype)
if qr > 0:
candidates.append((qr, renderer))
candidates.sort(lambda x, y: cmp(y[0], x[0]))
# Wrap file-like object so that it can be read multiple times
if hasattr(content, 'read'):
content = Content(content, self.max_preview_size)
# First candidate which renders successfully wins.
# Also, we don't want to expand tabs more than once.
expanded_content = None
for qr, renderer in candidates:
if force_source and not getattr(renderer, 'returns_source', False):
continue # skip non-source renderers in force_source mode
if isinstance(content, Content):
content.reset()
try:
ann_names = ', '.join(annotations) if annotations else \
'no annotations'
self.log.debug('Trying to render HTML preview using %s [%s]',
renderer.__class__.__name__, ann_names)
# check if we need to perform a tab expansion
rendered_content = content
if getattr(renderer, 'expand_tabs', False):
if expanded_content is None:
content = content_to_unicode(self.env, content,
full_mimetype)
expanded_content = content.expandtabs(self.tab_width)
rendered_content = expanded_content
result = renderer.render(context, full_mimetype,
rendered_content, filename, url)
if not result:
continue
if not (force_source or getattr(renderer, 'returns_source',
False)):
# Direct rendering of content
if isinstance(result, basestring):
if not isinstance(result, unicode):
result = to_unicode(result)
return Markup(to_unicode(result))
elif isinstance(result, Fragment):
return result.generate()
else:
return result
# Render content as source code
if annotations:
m = context.req.args.get('marks') if context.req else None
return self._render_source(context, result, annotations,
m and Ranges(m))
else:
if isinstance(result, list):
result = Markup('\n').join(result)
return tag.div(class_='code')(tag.pre(result)).generate()
except Exception, e:
self.log.warning('HTML preview using %s failed: %s',
renderer.__class__.__name__,
exception_to_unicode(e, traceback=True))
if context.req and not context.get_hint('disable_warnings'):
from trac.web.chrome import add_warning
add_warning(context.req,
_("HTML preview using %(renderer)s failed (%(err)s)",
renderer=renderer.__class__.__name__,
err=exception_to_unicode(e)))
def _render_source(self, context, stream, annotations, marks=None):
from trac.web.chrome import add_warning
annotators, labels, titles = {}, {}, {}
for annotator in self.annotators:
atype, alabel, atitle = annotator.get_annotation_type()
if atype in annotations:
labels[atype] = alabel
titles[atype] = atitle
annotators[atype] = annotator
annotations = [a for a in annotations if a in annotators]
if isinstance(stream, list):
stream = HTMLParser(StringIO(u'\n'.join(stream)))
elif isinstance(stream, unicode):
text = stream
def linesplitter():
for line in text.splitlines(True):
yield TEXT, line, (None, -1, -1)
stream = linesplitter()
annotator_datas = []
for a in annotations:
annotator = annotators[a]
try:
data = (annotator, annotator.get_annotation_data(context))
except TracError, e:
self.log.warning("Can't use annotator '%s': %s", a, e.message)
add_warning(context.req, tag.strong(
tag_("Can't use %(annotator)s annotator: %(error)s",
annotator=tag.em(a), error=tag.pre(e.message))))
data = (None, None)
annotator_datas.append(data)
def _head_row():
return tag.tr(
[tag.th(labels[a], class_=a, title=titles[a])
for a in annotations] +
[tag.th(u'\xa0', class_='content')]
)
def _body_rows():
for idx, line in enumerate(_group_lines(stream)):
row = tag.tr()
if marks and idx + 1 in marks:
row(class_='hilite')
for annotator, data in annotator_datas:
if annotator:
annotator.annotate_row(context, row, idx+1, line, data)
else:
row.append(tag.td())
row.append(tag.td(line))
yield row
return tag.table(class_='code')(
tag.thead(_head_row()),
tag.tbody(_body_rows())
)
def get_max_preview_size(self):
""":deprecated: use `max_preview_size` attribute directly."""
return self.max_preview_size
def get_charset(self, content='', mimetype=None):
"""Infer the character encoding from the `content` or the `mimetype`.
`content` is either a `str` or an `unicode` object.
The charset will be determined using this order:
* from the charset information present in the `mimetype` argument
* auto-detection of the charset from the `content`
* the configured `default_charset`
"""
if mimetype:
ctpos = mimetype.find('charset=')
if ctpos >= 0:
return mimetype[ctpos + 8:].strip()
if isinstance(content, str):
utf = detect_unicode(content)
if utf is not None:
return utf
return self.default_charset
@property
def mime_map(self):
# Extend default extension to MIME type mappings with configured ones
if not self._mime_map:
self._mime_map = MIME_MAP.copy()
for mapping in self.config['mimeviewer'].getlist('mime_map'):
if ':' in mapping:
                    associations = mapping.split(':')
                    for keyword in associations: # Note: [0] kept on purpose
                        self._mime_map[keyword] = associations[0]
return self._mime_map
def get_mimetype(self, filename, content=None):
"""Infer the MIME type from the `filename` or the `content`.
`content` is either a `str` or an `unicode` object.
Return the detected MIME type, augmented by the
charset information (i.e. "<mimetype>; charset=..."),
or `None` if detection failed.
"""
mimetype = get_mimetype(filename, content, self.mime_map)
charset = None
if mimetype:
charset = self.get_charset(content, mimetype)
        if mimetype and charset and 'charset' not in mimetype:
mimetype += '; charset=' + charset
return mimetype
def is_binary(self, mimetype=None, filename=None, content=None):
"""Check if a file must be considered as binary."""
if not mimetype and filename:
mimetype = self.get_mimetype(filename, content)
if mimetype:
mimetype = ct_mimetype(mimetype)
if mimetype in self.treat_as_binary:
return True
if content is not None and is_binary(content):
return True
return False
def to_utf8(self, content, mimetype=None):
"""Convert an encoded `content` to utf-8.
:deprecated: since 0.10, you should use `unicode` strings only.
"""
return to_utf8(content, self.get_charset(content, mimetype))
def to_unicode(self, content, mimetype=None, charset=None):
"""Convert `content` (an encoded `str` object) to an `unicode` object.
This calls `trac.util.to_unicode` with the `charset` provided,
or the one obtained by `Mimeview.get_charset()`.
"""
if not charset:
charset = self.get_charset(content, mimetype)
return to_unicode(content, charset)
def configured_modes_mapping(self, renderer):
"""Return a MIME type to `(mode,quality)` mapping for given `option`"""
types, option = {}, '%s_modes' % renderer
for mapping in self.config['mimeviewer'].getlist(option):
if not mapping:
continue
try:
mimetype, mode, quality = mapping.split(':')
types[mimetype] = (mode, int(quality))
except (TypeError, ValueError):
self.log.warning("Invalid mapping '%s' specified in '%s' "
"option.", mapping, option)
return types
def preview_data(self, context, content, length, mimetype, filename,
url=None, annotations=None, force_source=False):
"""Prepares a rendered preview of the given `content`.
Note: `content` will usually be an object with a `read` method.
"""
data = {'raw_href': url, 'size': length,
'max_file_size': self.max_preview_size,
'max_file_size_reached': False,
'rendered': None,
}
if length >= self.max_preview_size:
data['max_file_size_reached'] = True
else:
result = self.render(context, mimetype, content, filename, url,
annotations, force_source=force_source)
data['rendered'] = result
return data
def send_converted(self, req, in_type, content, selector, filename='file'):
"""Helper method for converting `content` and sending it directly.
`selector` can be either a key or a MIME Type."""
from trac.web.api import RequestDone
content, output_type, ext = self.convert_content(req, in_type,
content, selector)
if isinstance(content, unicode):
content = content.encode('utf-8')
req.send_response(200)
req.send_header('Content-Type', output_type)
req.send_header('Content-Length', len(content))
if filename:
req.send_header('Content-Disposition',
content_disposition(filename='%s.%s' %
(filename, ext)))
req.end_headers()
req.write(content)
raise RequestDone
def _group_lines(stream):
space_re = re.compile('(?P<spaces> (?: +))|^(?P<tag><\w+.*?>)?( )')
def pad_spaces(match):
m = match.group('spaces')
if m:
div, mod = divmod(len(m), 2)
return div * u'\xa0 ' + mod * u'\xa0'
return (match.group('tag') or '') + u'\xa0'
def _generate():
stack = []
def _reverse():
for event in reversed(stack):
if event[0] is START:
yield END, event[1][0], event[2]
else:
yield END_NS, event[1][0], event[2]
for kind, data, pos in stream:
if kind is TEXT:
lines = data.split('\n')
if lines:
# First element
for e in stack:
yield e
yield kind, lines.pop(0), pos
for e in _reverse():
yield e
# Subsequent ones, prefix with \n
for line in lines:
yield TEXT, '\n', pos
for e in stack:
yield e
yield kind, line, pos
for e in _reverse():
yield e
else:
if kind is START or kind is START_NS:
stack.append((kind, data, pos))
elif kind is END or kind is END_NS:
stack.pop()
else:
yield kind, data, pos
buf = []
# Fix the \n at EOF.
if not isinstance(stream, list):
stream = list(stream)
found_text = False
for i in range(len(stream)-1, -1, -1):
if stream[i][0] is TEXT:
e = stream[i]
# One chance to strip a \n
if not found_text and e[1].endswith('\n'):
stream[i] = (e[0], e[1][:-1], e[2])
if len(e[1]):
found_text = True
break
if not found_text:
raise StopIteration
for kind, data, pos in _generate():
if kind is TEXT and data == '\n':
yield Stream(buf[:])
del buf[:]
else:
if kind is TEXT:
data = space_re.sub(pad_spaces, data)
buf.append((kind, data, pos))
if buf:
yield Stream(buf[:])
# -- Default annotators
class LineNumberAnnotator(Component):
"""Text annotator that adds a column with line numbers."""
implements(IHTMLPreviewAnnotator)
# ITextAnnotator methods
def get_annotation_type(self):
return 'lineno', _('Line'), _('Line numbers')
def get_annotation_data(self, context):
return None
def annotate_row(self, context, row, lineno, line, data):
row.append(tag.th(id='L%s' % lineno)(
tag.a(lineno, href='#L%s' % lineno)
))
# -- Default renderers
class PlainTextRenderer(Component):
"""HTML preview renderer for plain text, and fallback for any kind of text
for which no more specific renderer is available.
"""
implements(IHTMLPreviewRenderer)
expand_tabs = True
returns_source = True
def get_quality_ratio(self, mimetype):
if mimetype in Mimeview(self.env).treat_as_binary:
return 0
return 1
def render(self, context, mimetype, content, filename=None, url=None):
if is_binary(content):
self.log.debug("Binary data; no preview available")
return
self.log.debug("Using default plain text mimeviewer")
return content_to_unicode(self.env, content, mimetype)
class ImageRenderer(Component):
"""Inline image display.
This component doesn't need the `content` at all.
"""
implements(IHTMLPreviewRenderer)
def get_quality_ratio(self, mimetype):
if mimetype.startswith('image/'):
return 8
return 0
def render(self, context, mimetype, content, filename=None, url=None):
if url:
return tag.div(tag.img(src=url, alt=filename),
class_='image-file')
class WikiTextRenderer(Component):
"""HTML renderer for files containing Trac's own Wiki formatting markup."""
implements(IHTMLPreviewRenderer)
def get_quality_ratio(self, mimetype):
if mimetype in ('text/x-trac-wiki', 'application/x-trac-wiki'):
return 8
return 0
def render(self, context, mimetype, content, filename=None, url=None):
from trac.wiki.formatter import format_to_html
return format_to_html(self.env, context,
content_to_unicode(self.env, content, mimetype))
| 43,065 | [['EMAIL_ADDRESS', 'dummy@email.com'], ['EMAIL_ADDRESS', 'dummy@email.com'], ['EMAIL_ADDRESS', 'dummy@email.com'], ['EMAIL_ADDRESS', 'dummy@email.com'], ['EMAIL_ADDRESS', 'dummy@email.com'], ['EMAIL_ADDRESS', 'dummy@email.com'], ['DATE_TIME', '2004-2010'], ['DATE_TIME', '2004'], ['PERSON', 'Daniel Lundin'], ['DATE_TIME', '2005-2006'], ['PERSON', 'Christopher Lenz'], ['DATE_TIME', '2006-2007'], ['NRP', 'Christian'], ['PERSON', 'Daniel Lundin'], ['PERSON', 'Christopher Lenz'], ['PERSON', 'Markup'], ['PERSON', 'Href'], ['LOCATION', 'self.resource'], ['PERSON', '1).resource'], ['PERSON', 'oneliner'], ['NRP', 'self._hints'], ['PERSON', 'msword'], ['PERSON', "roff troff'"], ['PERSON', "ini cfg'"], ['PERSON', 'vba'], ['PERSON', 'ada asm asp'], ['PERSON', 'ksh lua'], ['PERSON', 'tcl tex zsh'], ['PERSON', 'mimetype'], ['PERSON', 'Mark'], ['LOCATION', 'convert_content(req'], ['LOCATION', 'mimetype'], ['PERSON', 'dylan'], ['PERSON', 'msword'], ['NRP', 'input_mimettype'], ['LOCATION', 'mimetype'], ['NRP', 'force_source'], ['NRP', 'force_source'], ['NRP', 'force_source'], ['LOCATION', "self.log.debug('Trying"], ['NRP', 'force_source'], ['LOCATION', 'basestring'], ['PERSON', "context.get_hint('disable_warnings"], ['PERSON', 'atitle = annotator.get_annotation_type'], ['LOCATION', 'tag_("Can\'t'], ['PERSON', 'mimetype'], ['PERSON', 'mimetype'], ['NRP', 'force_source'], ['URL', 'self.ma'], ['PERSON', 'lineno'], ['URL', 'self.log.de'], ['URL', 'tag.im'], ['URL', 'trac.wiki.fo'], ['URL', 'http://trac.edgewall.org/wiki/TracLicense.'], ['URL', 'http://trac.edgewall.org/log/.'], ['IP_ADDRESS', '\n\n '], ['IP_ADDRESS', 'e:: '], ['URL', 'email.com'], ['URL', 'email.com'], ['URL', 'email.com'], ['URL', 'email.com'], ['URL', 'email.com'], ['URL', 'email.com'], ['URL', 'genshi.co'], ['URL', 'genshi.in'], ['URL', 'trac.co'], ['URL', 'trac.co'], ['URL', 'trac.re'], ['URL', 'trac.util.tr'], ['URL', 'self.pa'], ['URL', 'self.re'], ['URL', 'self.hr'], ['URL', 'self.pe'], ['URL', 'trac.web.ch'], ['URL', 'context.resource.re'], ['URL', 'context.re'], ['URL', 'context.pa'], ['URL', 'context.ch'], ['URL', 'context.ch'], ['URL', 'context.ch'], ['URL', 'self.re'], ['URL', 'self.hr'], ['URL', 'self.pe'], ['URL', 'context.pa'], ['URL', 'context.re'], ['URL', 'self.re'], ['URL', 'context.re'], ['URL', 'context.resource.re'], ['URL', 'resource.re'], ['URL', 'context.resource.id'], ['URL', 'resource.id'], ['URL', 'context.pa'], ['URL', 'ctx.se'], ['URL', 'ctx.se'], ['URL', 'ctx.ge'], ['URL', 'ctx.ge'], ['URL', 'ctx.ge'], ['URL', 'ctx.ge'], ['URL', 'ctx.ge'], ['URL', 'ctx.ge'], ['URL', 'ctx.se'], ['URL', 'ctx.ge'], ['URL', 'ctx.ge'], ['URL', 'hints.ge'], ['URL', 'ctx.se'], ['URL', 'self.pa'], ['URL', 'p.pa'], ['URL', 'TYPES.ke'], ['URL', 't.st'], ['URL', 'TYPES.se'], ['URL', 'TYPES.it'], ['URL', 're.com'], ['URL', 're.VE'], ['URL', 'mimetypes.gu'], ['URL', 're.se'], ['URL', 'match.gr'], ['URL', 'match.gr'], ['URL', 'match.gr'], ['URL', 'match.gr'], ['URL', 'data.st'], ['URL', 'data.st'], ['URL', 'data.st'], ['URL', 'content.re'], ['URL', 'mimeview.ma'], ['URL', 'mimeview.to'], ['URL', 'self.in'], ['URL', 'self.ma'], ['URL', 'self.co'], ['URL', 'self.co'], ['URL', 'self.co'], ['URL', 'self.input.re'], ['URL', 'self.ma'], ['URL', 'self.content.re'], ['URL', 'self.co'], ['URL', 'self.content.se'], ['URL', 'IContentConverter.ge'], ['URL', 'self.co'], ['URL', 'converter.ge'], ['URL', 'content.re'], ['URL', 'self.ma'], ['URL', 'self.ge'], ['URL', 'self.ge'], ['URL', 'converter.co'], ['URL', 'self.an'], ['URL', 'annotator.ge'], 
['URL', 'IHTMLPreviewRenderer.re'], ['URL', 'content.re'], ['URL', 'self.ma'], ['URL', 'self.ge'], ['URL', 'self.re'], ['URL', 'renderer.ge'], ['URL', 'candidates.so'], ['URL', 'self.ma'], ['URL', 'content.re'], ['URL', 'self.log.de'], ['URL', 'renderer.re'], ['URL', 'result.ge'], ['URL', 'context.req.args.ge'], ['URL', 'context.re'], ['URL', 'tag.pr'], ['URL', 'context.re'], ['URL', 'context.ge'], ['URL', 'trac.web.ch'], ['URL', 'context.re'], ['URL', 'trac.web.ch'], ['URL', 'self.an'], ['URL', 'annotator.ge'], ['URL', 'annotator.ge'], ['URL', 'e.me'], ['URL', 'context.re'], ['URL', 'tag.st'], ['URL', 'tag.pr'], ['URL', 'e.me'], ['URL', 'tag.tr'], ['URL', 'tag.th'], ['URL', 'tag.th'], ['URL', 'tag.tr'], ['URL', 'annotator.an'], ['URL', 'tag.td'], ['URL', 'tag.td'], ['URL', 'tag.th'], ['URL', 'self.ma'], ['URL', 'mimetype.fi'], ['URL', 'self.de'], ['URL', 'MAP.co'], ['URL', 'self.co'], ['URL', 'self.ge'], ['URL', 'self.ge'], ['URL', 'self.tr'], ['URL', 'self.ge'], ['URL', 'trac.util.to'], ['URL', 'Mimeview.ge'], ['URL', 'self.ge'], ['URL', 'self.co'], ['URL', 'self.ma'], ['URL', 'self.re'], ['URL', 'self.co'], ['URL', 'req.se'], ['URL', 'req.se'], ['URL', 'req.se'], ['URL', 'req.se'], ['URL', 're.com'], ['URL', 'match.gr'], ['URL', 'match.gr'], ['URL', 're.su'], ['URL', 'tag.th'], ['URL', 'self.log.de'], ['URL', 'mimetype.st']] |
8 | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for dashd node under test"""
import decimal
import errno
import http.client
import json
import logging
import os
import subprocess
import time
from .authproxy import JSONRPCException
from .mininode import NodeConn
from .util import (
assert_equal,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
)
BITCOIND_PROC_WAIT_TIMEOUT = 60
class TestNode():
"""A class for representing a dashd node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
def __init__(self, i, dirname, extra_args, rpchost, timewait, binary, stderr, mocktime, coverage_dir):
self.index = i
self.datadir = os.path.join(dirname, "node" + str(i))
self.rpchost = rpchost
if timewait:
self.rpc_timeout = timewait
else:
# Wait for up to 60 seconds for the RPC server to respond
self.rpc_timeout = 60
if binary is None:
self.binary = os.getenv("BITCOIND", "dashd")
else:
self.binary = binary
self.stderr = stderr
self.coverage_dir = coverage_dir
        # Most callers will just need to add extra args to the standard list below. For those callers that need more flexibility, they can just set the args property directly.
self.extra_args = extra_args
self.args = [self.binary, "-datadir=" + self.datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(mocktime), "-uacomment=testnode%d" % i]
self.cli = TestNodeCLI(os.getenv("BITCOINCLI", "dash-cli"), self.datadir)
# Don't try auto backups (they fail a lot when running tests)
self.args.append("-createwalletbackups=0")
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
self.p2ps = []
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection."""
assert self.rpc_connected and self.rpc is not None, "Error: no RPC connection"
return getattr(self.rpc, name)
def start(self, extra_args=None, stderr=None):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
if stderr is None:
stderr = self.stderr
self.process = subprocess.Popen(self.args + extra_args, stderr=stderr)
self.running = True
self.log.debug("dashd started, waiting for RPC to come up")
def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the dashd process. Returns False if unable to connect."""
# Poll at a rate of four times per second
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
assert self.process.poll() is None, "dashd exited with status %i during initialization" % self.process.returncode
try:
self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
self.rpc.getblockcount()
# If the call to getblockcount() succeeds then the RPC connection is up
self.rpc_connected = True
self.url = self.rpc.url
self.log.debug("RPC successfully started")
return
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
# -28 RPC in warmup
# -342 Service unavailable, RPC server started but is shutting down due to error
if e.error['code'] != -28 and e.error['code'] != -342:
raise # unknown JSON RPC exception
            except ValueError as e: # cookie file not found and no rpcuser or rpcpassword; dashd still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(1.0 / poll_per_s)
raise AssertionError("Unable to connect to dashd")
def get_wallet_rpc(self, wallet_name):
assert self.rpc_connected
assert self.rpc
wallet_path = "wallet/%s" % wallet_name
return self.rpc / wallet_path
def stop_node(self, wait=0):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
self.stop(wait=wait)
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
del self.p2ps[:]
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert_equal(return_code, 0)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
wait_until(self.is_node_stopped, timeout=timeout)
def node_encrypt_wallet(self, passphrase):
""""Encrypts the wallet.
This causes dashd to shutdown, so this method takes
care of cleaning up resources."""
self.encryptwallet(passphrase)
self.wait_until_stopped()
def add_p2p_connection(self, p2p_conn, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
returns the connection to the caller."""
if 'dstport' not in kwargs:
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
self.p2ps.append(p2p_conn)
kwargs.update({'rpc': self.rpc, 'callback': p2p_conn})
p2p_conn.add_connection(NodeConn(**kwargs))
return p2p_conn
@property
def p2p(self):
"""Return the first p2p connection
Convenience property - most tests only use a single p2p connection to each
node, so this saves having to write node.p2ps[0] many times."""
assert self.p2ps, "No p2p connection"
return self.p2ps[0]
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
# Connection could have already been closed by other end.
if p.connection is not None:
p.connection.disconnect_node()
self.p2ps = []
class TestNodeCLI():
"""Interface to bitcoin-cli for an individual node"""
def __init__(self, binary, datadir):
self.args = []
self.binary = binary
self.datadir = datadir
self.input = None
def __call__(self, *args, input=None):
# TestNodeCLI is callable with bitcoin-cli command-line args
self.args = [str(arg) for arg in args]
self.input = input
return self
def __getattr__(self, command):
def dispatcher(*args, **kwargs):
return self.send_cli(command, *args, **kwargs)
return dispatcher
def send_cli(self, command, *args, **kwargs):
"""Run bitcoin-cli command. Deserializes returned string as python object."""
pos_args = [str(arg) for arg in args]
named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()]
assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call"
p_args = [self.binary, "-datadir=" + self.datadir] + self.args
if named_args:
p_args += ["-named"]
p_args += [command] + pos_args + named_args
process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
return json.loads(cli_stdout, parse_float=decimal.Decimal)
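# Example usage (illustrative sketch; the binary name and datadir below
# are hypothetical):
#
#   cli = TestNodeCLI("dash-cli", "/tmp/testnode0")
#   cli.getblockcount()          # attribute access dispatches via send_cli
#   cli.getblockhash(height=0)   # keyword arguments use the -named form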
| 9,148 | [['DATE_TIME', '2017'], ['DATE_TIME', '.authproxy'], ['LOCATION', 'JSONRPCException'], ['PERSON', 'dirname'], ['PERSON', 'extra_args'], ['LOCATION', 'rpchost'], ['DATE_TIME', 'up to 60 seconds'], ['URL', 'logging.ge'], ['URL', 'TestFramework.no'], ['PERSON', 'extra_args'], ['URL', 'self.in'], ['URL', 'self.in'], ['URL', 'self.log.de'], ['LOCATION', 'JSONRPCException'], ['LOCATION', 'http.client'], ['LOCATION', 'p2p_conn'], ['PERSON', "kwargs['dstport"], ['LOCATION', 'self.p2ps.append(p2p_conn'], ['PERSON', 'returncode ='], ['URL', 'http://www.opensource.org/licenses/mit-license.php.'], ['IP_ADDRESS', '127.0.0.1'], ['URL', 'http.cl'], ['URL', 'self.in'], ['URL', 'os.path.jo'], ['URL', 'self.bi'], ['URL', 'os.ge'], ['URL', 'self.bi'], ['URL', 'self.st'], ['URL', 'self.co'], ['URL', 'self.ar'], ['URL', 'self.bi'], ['URL', 'self.cl'], ['URL', 'os.ge'], ['URL', 'self.ar'], ['URL', 'self.ru'], ['URL', 'self.pro'], ['URL', 'self.st'], ['URL', 'self.pro'], ['URL', 'self.ar'], ['URL', 'self.ru'], ['URL', 'self.log.de'], ['URL', 'self.pro'], ['URL', 'self.process.re'], ['URL', 'self.co'], ['URL', 'self.rpc.ge'], ['URL', 'e.er'], ['URL', 'errno.EC'], ['URL', 'e.er'], ['URL', 'e.er'], ['URL', 'time.sl'], ['URL', 'self.ru'], ['URL', 'self.log.de'], ['URL', 'self.st'], ['URL', 'http.client.Ca'], ['URL', 'self.pro'], ['URL', 'self.ru'], ['URL', 'self.pro'], ['URL', 'self.ru'], ['URL', 'self.pro'], ['URL', 'self.log.de'], ['URL', 'self.is'], ['URL', 'self.in'], ['URL', 'conn.ad'], ['URL', 'p.co'], ['URL', 'p.co'], ['URL', 'self.ar'], ['URL', 'self.bi'], ['URL', 'self.in'], ['URL', 'self.ar'], ['URL', 'self.in'], ['URL', 'self.se'], ['URL', 'kwargs.it'], ['URL', 'self.bi'], ['URL', 'self.ar'], ['URL', 'process.com'], ['URL', 'self.in'], ['URL', 'subprocess.Ca'], ['URL', 'self.bi'], ['URL', 'decimal.De']] |
9 | #!/usr/bin/python
# Copyright (C) 2015 Red Hat, All rights reserved.
# AUTHORS: Alex Collins dummy@email.com

import sys
import json
import subprocess
import collections
from fnmatch import fnmatch as matches

from docker.utils import kwargs_from_env

import docker
import selinux

"""Atomic Utility Module"""

ReturnTuple = collections.namedtuple('ReturnTuple',
                                     ['return_code', 'stdout', 'stderr'])

if sys.version_info[0] < 3:
    input = raw_input
else:
    input = input


def _decompose(compound_name):
    """ '[reg/]repo[:tag]' -> (reg, repo, tag) """
    reg, repo, tag = '', compound_name, ''
    if '/' in repo:
        reg, repo = repo.split('/', 1)
    if ':' in repo:
        repo, tag = repo.rsplit(':', 1)
    return reg, repo, tag
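
# A few illustrative decompositions (added sketch; the registry host below is
# a placeholder). Note that a plain 'repo/name' image parses its first path
# segment as the registry, which is why image_by_name() also matches on
# rep.endswith(img_name):
#
#     _decompose('rsyslog')                      -> ('', 'rsyslog', '')
#     _decompose('rhel7/rsyslog:latest')         -> ('rhel7', 'rsyslog', 'latest')
#     _decompose('registry.example.com/app:v1')  -> ('registry.example.com', 'app', 'v1')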
def image_by_name(img_name, images=None):
    """
    Returns a list of image data for images which match img_name. Will
    optionally take a list of images from a docker.Client.images
    query to avoid multiple docker queries.
    """
    i_reg, i_rep, i_tag = _decompose(img_name)

    # Correct for bash-style matching expressions.
    if not i_reg:
        i_reg = '*'
    if not i_tag:
        i_tag = '*'

    # If the images were not passed in, go get them.
    if images is None:
        c = docker.Client(**kwargs_from_env())
        images = c.images(all=False)

    valid_images = []
    for i in images:
        for t in i['RepoTags']:
            reg, rep, tag = _decompose(t)
            if matches(reg, i_reg) \
                    and matches(rep, i_rep) \
                    and matches(tag, i_tag):
                valid_images.append(i)
                break
            # Some repos end up with the img_name at the end after
            # decompose, e.g. rhel7/rsyslog.
            if rep.endswith(img_name):
                valid_images.append(i)
                break

    return valid_images
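
# A hedged usage sketch (assumes a reachable docker daemon; the repository
# name is illustrative). Patterns follow fnmatch glob semantics:
#
#     matching = image_by_name('*/rsyslog')      # any registry, any tag
#     latest = image_by_name('rsyslog:latest')   # exact repo, pinned tag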
def subp(cmd):
    """
    Run a command as a subprocess.
    Return a triple of return code, standard out, standard err.
    """
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()
    return ReturnTuple(proc.returncode, stdout=out, stderr=err)
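
# Usage sketch (the command is an arbitrary example; on Python 3 the captured
# stdout and stderr come back as bytes):
#
#     result = subp(['uname', '-r'])
#     if result.return_code == 0:
#         print(result.stdout.decode().strip())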
def default_container_context():
    if selinux.is_selinux_enabled() != 0:
        fd = open(selinux.selinux_lxc_contexts_path())
        for i in fd.readlines():
            name, context = i.split("=")
            if name.strip() == "file":
                return context.strip("\n\" ")
    return ""


def writeOut(output, lf="\n"):
    sys.stdout.flush()
    sys.stdout.write(str(output) + lf)


def output_json(json_data):
    ''' Pretty print json data '''
    writeOut(json.dumps(json_data, indent=4, separators=(',', ': ')))


def print_scan_summary(json_data, names=None):
    '''
    Print a summary of the data returned from a
    CVE scan.
    '''
    max_col_width = 50
    min_width = 15

    def _max_width(data):
        max_name = 0
        for name in data:
            max_name = len(data[name]) if len(data[name]) > max_name \
                else max_name
        # If the longest name is shorter than min_width, pad up to it.
        if max_name < min_width:
            max_name = min_width

        # If the longest name is wider than the maximum column width
        # we wish to use, clamp it.
        if max_name > max_col_width:
            max_name = max_col_width

        return max_name

    clean = True

    if len(names) > 0:
        max_width = _max_width(names)
    else:
        max_width = min_width

    template = "{0:" + str(max_width) + "} {1:5} {2:5} {3:5} {4:5}"
    sevs = ['critical', 'important', 'moderate', 'low']
    writeOut(template.format("Container/Image", "Cri", "Imp", "Med", "Low"))
    writeOut(template.format("-" * max_width, "---", "---", "---", "---"))
    res_summary = json_data['results_summary']
    for image in res_summary.keys():
        image_res = res_summary[image]
        if 'msg' in image_res.keys():
            tmp_tuple = (image_res['msg'], "", "", "", "")
        else:
            if len(names) < 1:
                image_name = image[:max_width]
            else:
                image_name = names[image][-max_width:]
                if len(image_name) == max_col_width:
                    image_name = '...' + image_name[-(len(image_name) - 3):]
            tmp_tuple = tuple([image_name] +
                              [str(image_res[sev]) for sev in sevs])
            sev_results = [image_res[sev] for sev in
                           sevs if image_res[sev] > 0]
            if len(sev_results) > 0:
                clean = False
        writeOut(template.format(*tmp_tuple))
    writeOut("")
    return clean
def print_detail_scan_summary(json_data, names=None):
    '''
    Print a detailed summary of the data returned from
    a CVE scan.
    '''
    clean = True
    sevs = ['Critical', 'Important', 'Moderate', 'Low']
    cve_summary = json_data['host_results']
    image_template = "  {0:10}: {1}"
    cve_template = "     {0:10}: {1}"
    for image in cve_summary.keys():
        image_res = cve_summary[image]
        writeOut("")
        writeOut(image[:12])
        if not image_res['isRHEL']:
            # The adjacent string literals concatenate, so the trailing space
            # after "Red Hat " is required to avoid "Red HatEnterprise".
            writeOut(image_template.format("Result",
                                           "Not based on Red Hat "
                                           "Enterprise Linux"))
            continue
        else:
            writeOut(image_template.format("OS", image_res['os'].rstrip()))
            scan_results = image_res['cve_summary']['scan_results']
            for sev in sevs:
                if sev in scan_results:
                    clean = False
                    writeOut(image_template.format(
                        sev, str(scan_results[sev]['num'])))
                    for cve in scan_results[sev]['cves']:
                        writeOut(cve_template.format("CVE", cve['cve_title']))
                        writeOut(cve_template.format("CVE URL",
                                                     cve['cve_ref_url']))
                        writeOut(cve_template.format("RHSA ID",
                                                     cve['rhsa_ref_id']))
                        writeOut(cve_template.format("RHSA URL",
                                                     cve['rhsa_ref_url']))
    writeOut("")
    return clean
def get_mounts_by_path():
    '''
    Gets all mounted devices and paths
    :return: dict of mounted devices and related information by path
    '''
    mount_info = []
    f = open('/proc/mounts', 'r')
    for line in f:
        _tmp = line.split(" ")
        mount_info.append({'path': _tmp[1],
                           'device': _tmp[0],
                           'type': _tmp[2],
                           'options': _tmp[3]
                           }
                          )
    return mount_info


def is_dock_obj_mounted(docker_obj):
    '''
    Check if the provided docker object, which needs to be an ID,
    is currently mounted and should be considered "busy"
    :param docker_obj: str, must be in ID format
    :return: bool True or False
    '''
    mount_info = get_mounts_by_path()
    devices = [x['device'] for x in mount_info]

    # If we can find the ID of the object in the list
    # of devices which comes from mount, safe to assume
    # it is busy.
    return any(docker_obj in x for x in devices)


def urllib3_disable_warnings():
    if 'requests' not in sys.modules:
        import requests
    else:
        requests = sys.modules['requests']

    # On latest Fedora, this is a symlink
    if hasattr(requests, 'packages'):
        requests.packages.urllib3.disable_warnings()  # pylint: disable=maybe-no-member
    else:
        # But with python-requests-2.4.3-1.el7.noarch, we need
        # to talk to urllib3 directly
        have_urllib3 = False
        try:
            if 'urllib3' not in sys.modules:
                import urllib3
                have_urllib3 = True
        except ImportError:
            pass
        if have_urllib3:
            # Except only call disable-warnings if it exists
            if hasattr(urllib3, 'disable_warnings'):
                urllib3.disable_warnings()
| 8,241 | [['EMAIL_ADDRESS', 'dummy@email.com'], ['PERSON', 'Alex Collins'], ['LOCATION', 'name.strip'], ['PERSON', 'json'], ['PERSON', 'separators='], ['PERSON', 'max'], ['PERSON', 'sevs'], ['PERSON', 'sev'], ['LOCATION', 'sevs'], ['PERSON', 'sev'], ['LOCATION', 'sevs'], ['PERSON', 'sev'], ['URL', 'template.fo'], ['LOCATION', 'sys.modules'], ['URL', 'sys.mo'], ['URL', 'requests.pa'], ['LOCATION', 'sys.modules'], ['URL', 'sys.mo'], ['URL', 'email.com'], ['URL', 'collections.na'], ['URL', 'sys.ve'], ['URL', 'repo.rs'], ['URL', 'docker.Client.im'], ['URL', 'docker.Cl'], ['URL', 'c.im'], ['URL', 'proc.com'], ['URL', 'proc.re'], ['URL', 'selinux.is'], ['URL', 'selinux.se'], ['URL', 'fd.re'], ['URL', 'name.st'], ['URL', 'context.st'], ['URL', 'sys.st'], ['URL', 'sys.st'], ['URL', 'template.fo'], ['URL', 'template.fo'], ['URL', 'summary.ke'], ['URL', 'res.ke'], ['URL', 'template.fo'], ['URL', 'summary.ke'], ['URL', 'template.fo'], ['URL', 'template.fo'], ['URL', 'template.fo'], ['URL', 'template.fo'], ['URL', 'template.fo'], ['URL', 'template.fo'], ['URL', 'sys.mo'], ['URL', 'python-requests-2.4.3-1.el7.no']] |
10 | # Copyright (C) 2014 Claudio "nex" Guarnieri (@botherder), Accuvant, Inc. (dummy@email.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from lib.cuckoo.common.abstracts import Signature


class Unhook(Signature):
    name = "antisandbox_unhook"
    description = "Tries to unhook or modify Windows functions monitored by Cuckoo"
    severity = 3
    confidence = 60
    categories = ["anti-sandbox"]
    authors = ["nex", "Accuvant"]
    minimum = "1.2"
    evented = True
    filter_categories = set(["__notification__"])

    def __init__(self, *args, **kwargs):
        Signature.__init__(self, *args, **kwargs)
        self.saw_unhook = False
        self.unhook_info = set()

    def on_call(self, call, process):
        subcategory = self.check_argument_call(call,
                                               api="__anomaly__",
                                               name="Subcategory",
                                               pattern="unhook")
        if subcategory:
            self.saw_unhook = True
            funcname = self.get_argument(call, "FunctionName")
            if funcname != "":
                if (funcname != "SetUnhandledExceptionFilter" and funcname != "SetWindowsHookExW" and funcname != "UnhookWindowsHookEx" and
                        funcname != "CoCreateInstance") or self.get_argument(call, "UnhookType") != "modification":
                    self.unhook_info.add("function_name: " + funcname + ", type: " + self.get_argument(call, "UnhookType"))

    def on_complete(self):
        if len(self.unhook_info) > 5:
            weight = len(self.unhook_info)
            confidence = 100

        if not self.unhook_info:
            self.saw_unhook = False

        for info in self.unhook_info:
            self.data.append({"unhook": info})

        return self.saw_unhook
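
# Hedged driving sketch (an addition, not part of the signature; 'analyzer'
# stands in for the object Cuckoo passes to signatures, and 'behavior_log' is
# a hypothetical iterable of the call/process pairs the framework replays):
#
#     sig = Unhook(analyzer)
#     for call, process in behavior_log:
#         sig.on_call(call, process)       # once per monitored API call
#     triggered = sig.on_complete()        # True when unhooking was observed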
| 2,422 | [['EMAIL_ADDRESS', 'dummy@email.com'], ['DATE_TIME', '2014'], ['PERSON', 'Claudio'], ['PERSON', 'Guarnieri'], ['PERSON', 'api="__anomaly'], ['URL', 'http://www.gnu.org/licenses/'], ['URL', 'email.com'], ['URL', 'lib.cuckoo.com'], ['URL', 'self.sa'], ['URL', 'self.ch'], ['URL', 'self.sa'], ['URL', 'self.ge'], ['URL', 'self.ge'], ['URL', 'info.ad'], ['URL', 'self.ge'], ['URL', 'self.sa'], ['URL', 'self.sa']] |