patch | y | oldf | idx | id | msg | proj | lang |
---|---|---|---|---|---|---|---|
@@ -26,7 +26,8 @@ import org.apache.iceberg.io.CloseableIterable;
class StaticTableScan extends BaseTableScan {
private final Function<StaticTableScan, DataTask> buildTask;
- StaticTableScan(TableOperations ops, Table table, Schema schema, Function<StaticTableScan, DataTask> buildTask) {
+ StaticTableScan(TableOperations ops, Table table, Schema schema,
+ Function<StaticTableScan, DataTask> buildTask) {
super(ops, table, schema);
this.buildTask = buildTask;
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.util.function.Function;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.io.CloseableIterable;
class StaticTableScan extends BaseTableScan {
private final Function<StaticTableScan, DataTask> buildTask;
StaticTableScan(TableOperations ops, Table table, Schema schema, Function<StaticTableScan, DataTask> buildTask) {
super(ops, table, schema);
this.buildTask = buildTask;
}
private StaticTableScan(TableOperations ops, Table table, Schema schema,
Function<StaticTableScan, DataTask> buildTask, TableScanContext context) {
super(ops, table, schema, context);
this.buildTask = buildTask;
}
@Override
public long targetSplitSize() {
return tableOps().current().propertyAsLong(
TableProperties.METADATA_SPLIT_SIZE, TableProperties.METADATA_SPLIT_SIZE_DEFAULT);
}
@Override
protected TableScan newRefinedScan(TableOperations ops, Table table, Schema schema, TableScanContext context) {
return new StaticTableScan(
ops, table, schema, buildTask, context);
}
@Override
protected CloseableIterable<FileScanTask> planFiles(
TableOperations ops, Snapshot snapshot, Expression rowFilter,
boolean ignoreResiduals, boolean caseSensitive, boolean colStats) {
return CloseableIterable.withNoopClose(buildTask.apply(this));
}
}
| 1 | 37,769 | Looks like this didn't need to change. | apache-iceberg | java |
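The `StaticTableScan` above defers task construction to a `Function<StaticTableScan, DataTask>` that it applies to itself in `planFiles`. Below is a minimal Python sketch of that same "build task via callable" pattern (hypothetical names, not the Iceberg API), just to show how the scan and the injected builder relate:

```python
# Hypothetical illustration of the pattern used by StaticTableScan:
# the scan hands itself to a caller-supplied task-building function.

class StaticScan:
    def __init__(self, schema, build_task):
        # build_task: callable taking the scan and returning a task object
        self.schema = schema
        self._build_task = build_task

    def plan_files(self):
        # A static metadata scan has exactly one task, produced by the builder.
        return [self._build_task(self)]


# Usage: the caller decides what a "task" means for this metadata table.
scan = StaticScan(schema=["snapshot_id", "manifest"],
                  build_task=lambda s: {"columns": s.schema, "rows": []})
print(scan.plan_files())  # [{'columns': ['snapshot_id', 'manifest'], 'rows': []}]
```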
@@ -595,6 +595,10 @@ class PyLinter(config.OptionsManagerMixIn,
for msg_id in self._checker_messages('python3'):
if msg_id.startswith('E'):
self.enable(msg_id)
+ config_parser = self.cfgfile_parser
+ if config_parser.has_option('MESSAGES CONTROL', 'disable'):
+ value = config_parser.get('MESSAGES CONTROL', 'disable')
+ self.global_set_option('disable', value)
else:
self.disable('python3')
self.set_option('reports', False) | 1 | # Copyright (c) 2006-2015 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2011-2014 Google, Inc.
# Copyright (c) 2012 FELD Boris <lothiraldan@gmail.com>
# Copyright (c) 2014-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014-2015 Michal Nowikowski <godfryd@gmail.com>
# Copyright (c) 2015 Mihai Balint <balint.mihai@gmail.com>
# Copyright (c) 2015 Simu Toni <simutoni@gmail.com>
# Copyright (c) 2015 Aru Sahni <arusahni@gmail.com>
# Copyright (c) 2015-2016 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2016 Glenn Matthews <glenn@e-dad.net>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
""" %prog [options] modules_or_packages
Check that module(s) satisfy a coding standard (and more !).
%prog --help
Display this help message and exit.
%prog --help-msg <msg-id>[,<msg-id>]
Display help messages about given message identifiers and exit.
"""
from __future__ import print_function
import collections
import contextlib
import operator
import os
try:
import multiprocessing
except ImportError:
multiprocessing = None
import sys
import tokenize
import warnings
import six
import astroid
from astroid.__pkginfo__ import version as astroid_version
from astroid import modutils
from pylint import checkers
from pylint import interfaces
from pylint import reporters
from pylint import exceptions
from pylint import utils
from pylint import config
from pylint.__pkginfo__ import version
from pylint.reporters.ureports import nodes as report_nodes
MANAGER = astroid.MANAGER
def _get_new_args(message):
location = (
message.abspath,
message.path,
message.module,
message.obj,
message.line,
message.column,
)
return (
message.msg_id,
message.symbol,
location,
message.msg,
message.confidence,
)
def _get_python_path(filepath):
dirname = os.path.realpath(os.path.expanduser(filepath))
if not os.path.isdir(dirname):
dirname = os.path.dirname(dirname)
while True:
if not os.path.exists(os.path.join(dirname, "__init__.py")):
return dirname
old_dirname = dirname
dirname = os.path.dirname(dirname)
if old_dirname == dirname:
return os.getcwd()
return None
def _merge_stats(stats):
merged = {}
by_msg = collections.Counter()
for stat in stats:
message_stats = stat.pop('by_msg', {})
by_msg.update(message_stats)
for key, item in six.iteritems(stat):
if key not in merged:
merged[key] = item
else:
if isinstance(item, dict):
merged[key].update(item)
else:
merged[key] = merged[key] + item
merged['by_msg'] = by_msg
return merged
@contextlib.contextmanager
def _patch_sysmodules():
# Context manager that permits running pylint, on Windows, with -m switch
# and with --jobs, as in 'python -2 -m pylint .. --jobs'.
# For more details why this is needed,
# see Python issue http://bugs.python.org/issue10845.
mock_main = __name__ != '__main__' # -m switch
if mock_main:
sys.modules['__main__'] = sys.modules[__name__]
try:
yield
finally:
if mock_main:
sys.modules.pop('__main__')
# Python Linter class #########################################################
MSGS = {
'F0001': ('%s',
'fatal',
'Used when an error occurred preventing the analysis of a \
module (unable to find it for instance).'),
'F0002': ('%s: %s',
'astroid-error',
'Used when an unexpected error occurred while building the '
'Astroid representation. This is usually accompanied by a '
'traceback. Please report such errors !'),
'F0010': ('error while code parsing: %s',
'parse-error',
'Used when an exception occurred while building the Astroid '
'representation which could be handled by astroid.'),
'I0001': ('Unable to run raw checkers on built-in module %s',
'raw-checker-failed',
'Used to inform that a built-in module has not been checked '
'using the raw checkers.'),
'I0010': ('Unable to consider inline option %r',
'bad-inline-option',
'Used when an inline option is either badly formatted or can\'t '
'be used inside modules.'),
'I0011': ('Locally disabling %s (%s)',
'locally-disabled',
'Used when an inline option disables a message or a messages '
'category.'),
'I0012': ('Locally enabling %s (%s)',
'locally-enabled',
'Used when an inline option enables a message or a messages '
'category.'),
'I0013': ('Ignoring entire file',
'file-ignored',
'Used to inform that the file will not be checked'),
'I0020': ('Suppressed %s (from line %d)',
'suppressed-message',
'A message was triggered on a line, but suppressed explicitly '
'by a disable= comment in the file. This message is not '
'generated for messages that are ignored due to configuration '
'settings.'),
'I0021': ('Useless suppression of %s',
'useless-suppression',
'Reported when a message is explicitly disabled for a line or '
'a block of code, but never triggered.'),
'I0022': ('Pragma "%s" is deprecated, use "%s" instead',
'deprecated-pragma',
'Some inline pylint options have been renamed or reworked, '
'only the most recent form should be used. '
'NOTE:skip-all is only available with pylint >= 0.26',
{'old_names': [('I0014', 'deprecated-disable-all')]}),
'E0001': ('%s',
'syntax-error',
'Used when a syntax error is raised for a module.'),
'E0011': ('Unrecognized file option %r',
'unrecognized-inline-option',
'Used when an unknown inline option is encountered.'),
'E0012': ('Bad option value %r',
'bad-option-value',
'Used when a bad value for an inline option is encountered.'),
}
if multiprocessing is not None:
class ChildLinter(multiprocessing.Process):
def run(self):
# pylint: disable=no-member, unbalanced-tuple-unpacking
tasks_queue, results_queue, self._config = self._args
self._config["jobs"] = 1 # Child does not parallelize any further.
self._python3_porting_mode = self._config.pop(
'python3_porting_mode', None)
self._plugins = self._config.pop('plugins', None)
# Run linter for received files/modules.
for file_or_module in iter(tasks_queue.get, 'STOP'):
try:
result = self._run_linter(file_or_module[0])
results_queue.put(result)
except Exception as ex:
print("internal error with sending report for module %s" %
file_or_module, file=sys.stderr)
print(ex, file=sys.stderr)
results_queue.put({})
def _run_linter(self, file_or_module):
linter = PyLinter()
# Register standard checkers.
linter.load_default_plugins()
# Load command line plugins.
if self._plugins:
linter.load_plugin_modules(self._plugins)
linter.load_configuration_from_config(self._config)
linter.set_reporter(reporters.CollectingReporter())
# Enable the Python 3 checker mode. This option is
# passed down from the parent linter up to here, since
# the Python 3 porting flag belongs to the Run class,
# instead of the Linter class.
if self._python3_porting_mode:
linter.python3_porting_mode()
# Run the checks.
linter.check(file_or_module)
msgs = [_get_new_args(m) for m in linter.reporter.messages]
return (file_or_module, linter.file_state.base_name, linter.current_name,
msgs, linter.stats, linter.msg_status)
class PyLinter(config.OptionsManagerMixIn,
utils.MessagesHandlerMixIn,
utils.ReportsHandlerMixIn,
checkers.BaseTokenChecker):
"""lint Python modules using external checkers.
This is the main checker controlling the other ones and the reports
generation. It is itself both a raw checker and an astroid checker in order
to:
* handle message activation / deactivation at the module level
* handle some basic but necessary stats'data (number of classes, methods...)
IDE plugin developers: you may have to call
`astroid.builder.MANAGER.astroid_cache.clear()` across runs if you want
to ensure the latest code version is actually checked.
"""
__implements__ = (interfaces.ITokenChecker, )
name = 'master'
priority = 0
level = 0
msgs = MSGS
@staticmethod
def make_options():
return (('ignore',
{'type' : 'csv', 'metavar' : '<file>[,<file>...]',
'dest' : 'black_list', 'default' : ('CVS',),
'help' : 'Add files or directories to the blacklist. '
'They should be base names, not paths.'}),
('ignore-patterns',
{'type' : 'regexp_csv', 'metavar' : '<pattern>[,<pattern>...]',
'dest' : 'black_list_re', 'default' : (),
'help' : 'Add files or directories matching the regex patterns to the'
' blacklist. The regex matches against base names, not paths.'}),
('persistent',
{'default': True, 'type' : 'yn', 'metavar' : '<y_or_n>',
'level': 1,
'help' : 'Pickle collected data for later comparisons.'}),
('load-plugins',
{'type' : 'csv', 'metavar' : '<modules>', 'default' : (),
'level': 1,
'help' : 'List of plugins (as comma separated values of '
'python modules names) to load, usually to register '
'additional checkers.'}),
('output-format',
{'default': 'text', 'type': 'string', 'metavar' : '<format>',
'short': 'f',
'group': 'Reports',
'help' : 'Set the output format. Available formats are text,'
' parseable, colorized, json and msvs (visual studio).'
'You can also give a reporter class, eg mypackage.mymodule.'
'MyReporterClass.'}),
('reports',
{'default': False, 'type' : 'yn', 'metavar' : '<y_or_n>',
'short': 'r',
'group': 'Reports',
'help' : 'Tells whether to display a full report or only the '
'messages'}),
('evaluation',
{'type' : 'string', 'metavar' : '<python_expression>',
'group': 'Reports', 'level': 1,
'default': '10.0 - ((float(5 * error + warning + refactor + '
'convention) / statement) * 10)',
'help' : 'Python expression which should return a note less '
'than 10 (10 is the highest note). You have access '
'to the variables error, warning and statement, which '
'respectively contain the number of errors / '
'warnings messages and the total number of '
'statements analyzed. This is used by the global '
'evaluation report (RP0004).'}),
('score',
{'default': True, 'type': 'yn', 'metavar': '<y_or_n>',
'short': 's',
'group': 'Reports',
'help': 'Activate the evaluation score.'}),
('confidence',
{'type' : 'multiple_choice', 'metavar': '<levels>',
'default': '',
'choices': [c.name for c in interfaces.CONFIDENCE_LEVELS],
'group': 'Messages control',
'help' : 'Only show warnings with the listed confidence levels.'
' Leave empty to show all. Valid levels: %s' % (
', '.join(c.name for c in interfaces.CONFIDENCE_LEVELS),)}),
('enable',
{'type' : 'csv', 'metavar': '<msg ids>',
'short': 'e',
'group': 'Messages control',
'help' : 'Enable the message, report, category or checker with the '
'given id(s). You can either give multiple identifier '
'separated by comma (,) or put this option multiple time '
'(only on the command line, not in the configuration file '
'where it should appear only once). '
'See also the "--disable" option for examples. '}),
('disable',
{'type' : 'csv', 'metavar': '<msg ids>',
'short': 'd',
'group': 'Messages control',
'help' : 'Disable the message, report, category or checker '
'with the given id(s). You can either give multiple identifiers'
' separated by comma (,) or put this option multiple times '
'(only on the command line, not in the configuration file '
'where it should appear only once).'
'You can also use "--disable=all" to disable everything first '
'and then reenable specific checks. For example, if you want '
'to run only the similarities checker, you can use '
'"--disable=all --enable=similarities". '
'If you want to run only the classes checker, but have no '
'Warning level messages displayed, use'
'"--disable=all --enable=classes --disable=W"'}),
('msg-template',
{'type' : 'string', 'metavar': '<template>',
'group': 'Reports',
'help' : ('Template used to display messages. '
'This is a python new-style format string '
'used to format the message information. '
'See doc for all details')
}),
('jobs',
{'type' : 'int', 'metavar': '<n-processes>',
'short': 'j',
'default': 1,
'help' : '''Use multiple processes to speed up Pylint.''',
}),
('unsafe-load-any-extension',
{'type': 'yn', 'metavar': '<yn>', 'default': False, 'hide': True,
'help': ('Allow loading of arbitrary C extensions. Extensions'
' are imported into the active Python interpreter and'
' may run arbitrary code.')}),
('extension-pkg-whitelist',
{'type': 'csv', 'metavar': '<pkg[,pkg]>', 'default': [],
'help': ('A comma-separated list of package or module names'
' from where C extensions may be loaded. Extensions are'
' loading into the active Python interpreter and may run'
' arbitrary code')}),
('suggestion-mode',
{'type': 'yn', 'metavar': '<yn>', 'default': True,
'help': ('When enabled, pylint would attempt to guess common '
'misconfiguration and emit user-friendly hints instead '
'of false-positive error messages')}),
)
option_groups = (
('Messages control', 'Options controlling analysis messages'),
('Reports', 'Options related to output formatting and reporting'),
)
def __init__(self, options=(), reporter=None, option_groups=(),
pylintrc=None):
# some stuff has to be done before ancestors initialization...
#
# messages store / checkers / reporter / astroid manager
self.msgs_store = utils.MessagesStore()
self.reporter = None
self._reporter_name = None
self._reporters = {}
self._checkers = collections.defaultdict(list)
self._pragma_lineno = {}
self._ignore_file = False
# visit variables
self.file_state = utils.FileState()
self.current_name = None
self.current_file = None
self.stats = None
# init options
self._external_opts = options
self.options = options + PyLinter.make_options()
self.option_groups = option_groups + PyLinter.option_groups
self._options_methods = {
'enable': self.enable,
'disable': self.disable}
self._bw_options_methods = {'disable-msg': self.disable,
'enable-msg': self.enable}
full_version = '%%prog %s, \nastroid %s\nPython %s' % (
version, astroid_version, sys.version)
utils.MessagesHandlerMixIn.__init__(self)
utils.ReportsHandlerMixIn.__init__(self)
super(PyLinter, self).__init__(
usage=__doc__,
version=full_version,
config_file=pylintrc or config.PYLINTRC)
checkers.BaseTokenChecker.__init__(self)
# provided reports
self.reports = (('RP0001', 'Messages by category',
report_total_messages_stats),
('RP0002', '% errors / warnings by module',
report_messages_by_module_stats),
('RP0003', 'Messages',
report_messages_stats),
)
self.register_checker(self)
self._dynamic_plugins = set()
self._python3_porting_mode = False
self._error_mode = False
self.load_provider_defaults()
if reporter:
self.set_reporter(reporter)
def load_default_plugins(self):
checkers.initialize(self)
reporters.initialize(self)
# Make sure to load the default reporter, because
# the option has been set before the plugins had been loaded.
if not self.reporter:
self._load_reporter()
def load_plugin_modules(self, modnames):
"""take a list of module names which are pylint plugins and load
and register them
"""
for modname in modnames:
if modname in self._dynamic_plugins:
continue
self._dynamic_plugins.add(modname)
module = modutils.load_module_from_name(modname)
module.register(self)
def _load_reporter(self):
name = self._reporter_name.lower()
if name in self._reporters:
self.set_reporter(self._reporters[name]())
else:
try:
reporter_class = self._load_reporter_class()
except (ImportError, AttributeError):
raise exceptions.InvalidReporterError(name)
else:
self.set_reporter(reporter_class())
def _load_reporter_class(self):
qname = self._reporter_name
module = modutils.load_module_from_name(
modutils.get_module_part(qname))
class_name = qname.split('.')[-1]
reporter_class = getattr(module, class_name)
return reporter_class
def set_reporter(self, reporter):
"""set the reporter used to display messages and reports"""
self.reporter = reporter
reporter.linter = self
def set_option(self, optname, value, action=None, optdict=None):
"""overridden from config.OptionsProviderMixin to handle some
special options
"""
if optname in self._options_methods or \
optname in self._bw_options_methods:
if value:
try:
meth = self._options_methods[optname]
except KeyError:
meth = self._bw_options_methods[optname]
warnings.warn('%s is deprecated, replace it by %s' % (optname,
optname.split('-')[0]),
DeprecationWarning)
value = utils._check_csv(value)
if isinstance(value, (list, tuple)):
for _id in value:
meth(_id, ignore_unknown=True)
else:
meth(value)
return # no need to call set_option, disable/enable methods do it
elif optname == 'output-format':
self._reporter_name = value
# If the reporters are already available, load
# the reporter class.
if self._reporters:
self._load_reporter()
try:
checkers.BaseTokenChecker.set_option(self, optname,
value, action, optdict)
except config.UnsupportedAction:
print('option %s can\'t be read from config file' % \
optname, file=sys.stderr)
def register_reporter(self, reporter_class):
self._reporters[reporter_class.name] = reporter_class
def report_order(self):
reports = sorted(self._reports, key=lambda x: getattr(x, 'name', ''))
try:
# Remove the current reporter and add it
# at the end of the list.
reports.pop(reports.index(self))
except ValueError:
pass
else:
reports.append(self)
return reports
# checkers manipulation methods ############################################
def register_checker(self, checker):
"""register a new checker
checker is an object implementing IRawChecker or / and IAstroidChecker
"""
assert checker.priority <= 0, 'checker priority can\'t be >= 0'
self._checkers[checker.name].append(checker)
for r_id, r_title, r_cb in checker.reports:
self.register_report(r_id, r_title, r_cb, checker)
self.register_options_provider(checker)
if hasattr(checker, 'msgs'):
self.msgs_store.register_messages(checker)
checker.load_defaults()
# Register the checker, but disable all of its messages.
# TODO(cpopa): we should have a better API for this.
if not getattr(checker, 'enabled', True):
self.disable(checker.name)
def disable_noerror_messages(self):
for msgcat, msgids in six.iteritems(self.msgs_store._msgs_by_category):
# enable only messages with 'error' severity and above ('fatal')
if msgcat in ['E', 'F']:
for msgid in msgids:
self.enable(msgid)
else:
for msgid in msgids:
self.disable(msgid)
def disable_reporters(self):
"""disable all reporters"""
for _reporters in six.itervalues(self._reports):
for report_id, _, _ in _reporters:
self.disable_report(report_id)
def error_mode(self):
"""error mode: enable only errors; no reports, no persistent"""
self._error_mode = True
self.disable_noerror_messages()
self.disable('miscellaneous')
if self._python3_porting_mode:
self.disable('all')
for msg_id in self._checker_messages('python3'):
if msg_id.startswith('E'):
self.enable(msg_id)
else:
self.disable('python3')
self.set_option('reports', False)
self.set_option('persistent', False)
self.set_option('score', False)
def python3_porting_mode(self):
"""Disable all other checkers and enable Python 3 warnings."""
self.disable('all')
self.enable('python3')
if self._error_mode:
# The error mode was activated, using the -E flag.
# So we'll need to enable only the errors from the
# Python 3 porting checker.
for msg_id in self._checker_messages('python3'):
if msg_id.startswith('E'):
self.enable(msg_id)
else:
self.disable(msg_id)
self._python3_porting_mode = True
# block level option handling #############################################
#
# see func_block_disable_msg.py test case for expected behaviour
def process_tokens(self, tokens):
"""process tokens from the current module to search for module/block
level options
"""
control_pragmas = {'disable', 'enable'}
for (tok_type, content, start, _, _) in tokens:
if tok_type != tokenize.COMMENT:
continue
match = utils.OPTION_RGX.search(content)
if match is None:
continue
if match.group(1).strip() == "disable-all" or \
match.group(1).strip() == 'skip-file':
if match.group(1).strip() == "disable-all":
self.add_message('deprecated-pragma', line=start[0],
args=('disable-all', 'skip-file'))
self.add_message('file-ignored', line=start[0])
self._ignore_file = True
return
try:
opt, value = match.group(1).split('=', 1)
except ValueError:
self.add_message('bad-inline-option', args=match.group(1).strip(),
line=start[0])
continue
opt = opt.strip()
if opt in self._options_methods or opt in self._bw_options_methods:
try:
meth = self._options_methods[opt]
except KeyError:
meth = self._bw_options_methods[opt]
# found a "(dis|en)able-msg" pragma deprecated suppression
self.add_message('deprecated-pragma', line=start[0],
args=(opt, opt.replace('-msg', '')))
for msgid in utils._splitstrip(value):
# Add the line where a control pragma was encountered.
if opt in control_pragmas:
self._pragma_lineno[msgid] = start[0]
try:
if (opt, msgid) == ('disable', 'all'):
self.add_message('deprecated-pragma', line=start[0],
args=('disable=all', 'skip-file'))
self.add_message('file-ignored', line=start[0])
self._ignore_file = True
return
meth(msgid, 'module', start[0])
except exceptions.UnknownMessageError:
self.add_message('bad-option-value', args=msgid, line=start[0])
else:
self.add_message('unrecognized-inline-option', args=opt, line=start[0])
# code checking methods ###################################################
def get_checkers(self):
"""return all available checkers as a list"""
return [self] + [c for _checkers in six.itervalues(self._checkers)
for c in _checkers if c is not self]
def prepare_checkers(self):
"""return checkers needed for activated messages and reports"""
if not self.config.reports:
self.disable_reporters()
# get needed checkers
neededcheckers = [self]
for checker in self.get_checkers()[1:]:
messages = set(msg for msg in checker.msgs
if self.is_message_enabled(msg))
if (messages or
any(self.report_is_enabled(r[0]) for r in checker.reports)):
neededcheckers.append(checker)
# Sort checkers by priority
neededcheckers = sorted(neededcheckers,
key=operator.attrgetter('priority'),
reverse=True)
return neededcheckers
# pylint: disable=unused-argument
@staticmethod
def should_analyze_file(modname, path, is_argument=False):
"""Returns whether or not a module should be checked.
This implementation returns True for all python source files, indicating
that all files should be linted.
Subclasses may override this method to indicate that modules satisfying
certain conditions should not be linted.
:param str modname: The name of the module to be checked.
:param str path: The full path to the source code of the module.
:param bool is_argument: Whether the file is an argument to pylint or not.
Files which respect this property are always
checked, since the user requested it explicitly.
:returns: True if the module should be checked.
:rtype: bool
"""
if is_argument:
return True
return path.endswith('.py')
# pylint: enable=unused-argument
def check(self, files_or_modules):
"""main checking entry: check a list of files or modules from their
name.
"""
# initialize msgs_state now that all messages have been registered into
# the store
for msg in self.msgs_store.messages:
if not msg.may_be_emitted():
self._msgs_state[msg.msgid] = False
if not isinstance(files_or_modules, (list, tuple)):
files_or_modules = (files_or_modules,)
if self.config.jobs == 1:
self._do_check(files_or_modules)
else:
with _patch_sysmodules():
self._parallel_check(files_or_modules)
def _get_jobs_config(self):
child_config = collections.OrderedDict()
filter_options = {'long-help'}
filter_options.update((opt_name for opt_name, _ in self._external_opts))
for opt_providers in six.itervalues(self._all_options):
for optname, optdict, val in opt_providers.options_and_values():
if optdict.get('deprecated'):
continue
if optname not in filter_options:
child_config[optname] = utils._format_option_value(
optdict, val)
child_config['python3_porting_mode'] = self._python3_porting_mode
child_config['plugins'] = self._dynamic_plugins
return child_config
def _parallel_task(self, files_or_modules):
# Prepare configuration for child linters.
child_config = self._get_jobs_config()
children = []
manager = multiprocessing.Manager()
tasks_queue = manager.Queue()
results_queue = manager.Queue()
# Send files to child linters.
expanded_files = self.expand_files(files_or_modules)
# do not start more jobs than needed
for _ in range(min(self.config.jobs, len(expanded_files))):
child_linter = ChildLinter(args=(tasks_queue, results_queue,
child_config))
child_linter.start()
children.append(child_linter)
for files_or_module in expanded_files:
path = files_or_module['path']
tasks_queue.put([path])
# collect results from child linters
failed = False
for _ in expanded_files:
try:
result = results_queue.get()
except Exception as ex:
print("internal error while receiving results from child linter",
file=sys.stderr)
print(ex, file=sys.stderr)
failed = True
break
yield result
# Stop child linters and wait for their completion.
for _ in range(self.config.jobs):
tasks_queue.put('STOP')
for child in children:
child.join()
if failed:
print("Error occurred, stopping the linter.", file=sys.stderr)
sys.exit(32)
def _parallel_check(self, files_or_modules):
# Reset stats.
self.open()
all_stats = []
module = None
for result in self._parallel_task(files_or_modules):
if not result:
continue
(
_,
self.file_state.base_name,
module,
messages,
stats,
msg_status
) = result
for msg in messages:
msg = utils.Message(*msg)
self.set_current_module(module)
self.reporter.handle_message(msg)
all_stats.append(stats)
self.msg_status |= msg_status
self.stats = _merge_stats(all_stats)
self.current_name = module
# Insert stats data to local checkers.
for checker in self.get_checkers():
if checker is not self:
checker.stats = self.stats
def _do_check(self, files_or_modules):
walker = utils.PyLintASTWalker(self)
_checkers = self.prepare_checkers()
tokencheckers = [c for c in _checkers
if interfaces.implements(c, interfaces.ITokenChecker)
and c is not self]
rawcheckers = [c for c in _checkers
if interfaces.implements(c, interfaces.IRawChecker)]
# notify global begin
for checker in _checkers:
checker.open()
if interfaces.implements(checker, interfaces.IAstroidChecker):
walker.add_checker(checker)
# build ast and check modules or packages
for descr in self.expand_files(files_or_modules):
modname, filepath, is_arg = descr['name'], descr['path'], descr['isarg']
if not self.should_analyze_file(modname, filepath, is_argument=is_arg):
continue
self.set_current_module(modname, filepath)
# get the module representation
ast_node = self.get_ast(filepath, modname)
if ast_node is None:
continue
# XXX to be correct we need to keep module_msgs_state for every
# analyzed module (the problem stands with localized messages which
# are only detected in the .close step)
self.file_state = utils.FileState(descr['basename'])
self._ignore_file = False
# fix the current file (if the source file was not available or
# if it's actually a c extension)
self.current_file = ast_node.file # pylint: disable=maybe-no-member
self.check_astroid_module(ast_node, walker, rawcheckers, tokencheckers)
# warn about spurious inline messages handling
spurious_messages = self.file_state.iter_spurious_suppression_messages(self.msgs_store)
for msgid, line, args in spurious_messages:
self.add_message(msgid, line, None, args)
# notify global end
self.stats['statement'] = walker.nbstatements
for checker in reversed(_checkers):
checker.close()
def expand_files(self, modules):
"""get modules and errors from a list of modules and handle errors
"""
result, errors = utils.expand_modules(modules, self.config.black_list,
self.config.black_list_re)
for error in errors:
message = modname = error["mod"]
key = error["key"]
self.set_current_module(modname)
if key == "fatal":
message = str(error["ex"]).replace(os.getcwd() + os.sep, '')
self.add_message(key, args=message)
return result
def set_current_module(self, modname, filepath=None):
"""set the name of the currently analyzed module and
init statistics for it
"""
if not modname and filepath is None:
return
self.reporter.on_set_current_module(modname, filepath)
self.current_name = modname
self.current_file = filepath or modname
self.stats['by_module'][modname] = {}
self.stats['by_module'][modname]['statement'] = 0
for msg_cat in six.itervalues(utils.MSG_TYPES):
self.stats['by_module'][modname][msg_cat] = 0
def get_ast(self, filepath, modname):
"""return a ast(roid) representation for a module"""
try:
return MANAGER.ast_from_file(filepath, modname, source=True)
except astroid.AstroidSyntaxError as ex:
self.add_message('syntax-error',
line=getattr(ex.error, 'lineno', 0),
args=str(ex.error))
except astroid.AstroidBuildingException as ex:
self.add_message('parse-error', args=ex)
except Exception as ex: # pylint: disable=broad-except
import traceback
traceback.print_exc()
self.add_message('astroid-error', args=(ex.__class__, ex))
def check_astroid_module(self, ast_node, walker,
rawcheckers, tokencheckers):
"""Check a module from its astroid representation."""
try:
tokens = utils.tokenize_module(ast_node)
except tokenize.TokenError as ex:
self.add_message('syntax-error', line=ex.args[1][0], args=ex.args[0])
return None
if not ast_node.pure_python:
self.add_message('raw-checker-failed', args=ast_node.name)
else:
#assert astroid.file.endswith('.py')
# invoke ITokenChecker interface on self to fetch module/block
# level options
self.process_tokens(tokens)
if self._ignore_file:
return False
# walk ast to collect line numbers
self.file_state.collect_block_lines(self.msgs_store, ast_node)
# run raw and tokens checkers
for checker in rawcheckers:
checker.process_module(ast_node)
for checker in tokencheckers:
checker.process_tokens(tokens)
# generate events to astroid checkers
walker.walk(ast_node)
return True
# IAstroidChecker interface #################################################
def open(self):
"""initialize counters"""
self.stats = {'by_module' : {},
'by_msg' : {},
}
MANAGER.always_load_extensions = self.config.unsafe_load_any_extension
MANAGER.extension_package_whitelist.update(
self.config.extension_pkg_whitelist)
for msg_cat in six.itervalues(utils.MSG_TYPES):
self.stats[msg_cat] = 0
def generate_reports(self):
"""close the whole package /module, it's time to make reports !
if persistent run, pickle results for later comparison
"""
# Display whatever messages are left on the reporter.
self.reporter.display_messages(report_nodes.Section())
if self.file_state.base_name is not None:
# load previous results if any
previous_stats = config.load_results(self.file_state.base_name)
# XXX code below needs refactoring to be more reporter agnostic
self.reporter.on_close(self.stats, previous_stats)
if self.config.reports:
sect = self.make_reports(self.stats, previous_stats)
else:
sect = report_nodes.Section()
if self.config.reports:
self.reporter.display_reports(sect)
self._report_evaluation()
# save results if persistent run
if self.config.persistent:
config.save_results(self.stats, self.file_state.base_name)
else:
self.reporter.on_close(self.stats, {})
def _report_evaluation(self):
"""make the global evaluation report"""
# only compute the note when at least 1 statement was analysed (the count is
# usually 0 when a syntax error prevented pylint from further processing)
previous_stats = config.load_results(self.file_state.base_name)
if self.stats['statement'] == 0:
return
# get a global note for the code
evaluation = self.config.evaluation
try:
note = eval(evaluation, {}, self.stats) # pylint: disable=eval-used
except Exception as ex: # pylint: disable=broad-except
msg = 'An exception occurred while rating: %s' % ex
else:
self.stats['global_note'] = note
msg = 'Your code has been rated at %.2f/10' % note
pnote = previous_stats.get('global_note')
if pnote is not None:
msg += ' (previous run: %.2f/10, %+.2f)' % (pnote, note - pnote)
if self.config.score:
sect = report_nodes.EvaluationSection(msg)
self.reporter.display_reports(sect)
# some reporting functions ####################################################
def report_total_messages_stats(sect, stats, previous_stats):
"""make total errors / warnings report"""
lines = ['type', 'number', 'previous', 'difference']
lines += checkers.table_lines_from_stats(stats, previous_stats,
('convention', 'refactor',
'warning', 'error'))
sect.append(report_nodes.Table(children=lines, cols=4, rheaders=1))
def report_messages_stats(sect, stats, _):
"""make messages type report"""
if not stats['by_msg']:
# don't print this report when we didn't detect any messages
raise exceptions.EmptyReportError()
in_order = sorted([(value, msg_id)
for msg_id, value in six.iteritems(stats['by_msg'])
if not msg_id.startswith('I')])
in_order.reverse()
lines = ('message id', 'occurrences')
for value, msg_id in in_order:
lines += (msg_id, str(value))
sect.append(report_nodes.Table(children=lines, cols=2, rheaders=1))
def report_messages_by_module_stats(sect, stats, _):
"""make errors / warnings by modules report"""
if len(stats['by_module']) == 1:
# don't print this report when we are analysing a single module
raise exceptions.EmptyReportError()
by_mod = collections.defaultdict(dict)
for m_type in ('fatal', 'error', 'warning', 'refactor', 'convention'):
total = stats[m_type]
for module in six.iterkeys(stats['by_module']):
mod_total = stats['by_module'][module][m_type]
if total == 0:
percent = 0
else:
percent = float((mod_total)*100) / total
by_mod[module][m_type] = percent
sorted_result = []
for module, mod_info in six.iteritems(by_mod):
sorted_result.append((mod_info['error'],
mod_info['warning'],
mod_info['refactor'],
mod_info['convention'],
module))
sorted_result.sort()
sorted_result.reverse()
lines = ['module', 'error', 'warning', 'refactor', 'convention']
for line in sorted_result:
# Don't report clean modules.
if all(entry == 0 for entry in line[:-1]):
continue
lines.append(line[-1])
for val in line[:-1]:
lines.append('%.2f' % val)
if len(lines) == 5:
raise exceptions.EmptyReportError()
sect.append(report_nodes.Table(children=lines, cols=5, rheaders=1))
# utilities ###################################################################
class ArgumentPreprocessingError(Exception):
"""Raised if an error occurs during argument preprocessing."""
def preprocess_options(args, search_for):
"""look for some options (keys of <search_for>) which have to be processed
before others
values of <search_for> are callback functions to call when the option is
found
"""
i = 0
while i < len(args):
arg = args[i]
if arg.startswith('--'):
try:
option, val = arg[2:].split('=', 1)
except ValueError:
option, val = arg[2:], None
try:
cb, takearg = search_for[option]
except KeyError:
i += 1
else:
del args[i]
if takearg and val is None:
if i >= len(args) or args[i].startswith('-'):
msg = 'Option %s expects a value' % option
raise ArgumentPreprocessingError(msg)
val = args[i]
del args[i]
elif not takearg and val is not None:
msg = "Option %s doesn't expects a value" % option
raise ArgumentPreprocessingError(msg)
cb(option, val)
else:
i += 1
@contextlib.contextmanager
def fix_import_path(args):
"""Prepare sys.path for running the linter checks.
Within this context, each of the given arguments is importable.
Paths are added to sys.path in corresponding order to the arguments.
We avoid adding duplicate directories to sys.path.
`sys.path` is reset to its original value upon exiting this context.
"""
orig = list(sys.path)
changes = []
for arg in args:
path = _get_python_path(arg)
if path in changes:
continue
else:
changes.append(path)
sys.path[:] = changes + ["."] + sys.path
try:
yield
finally:
sys.path[:] = orig
class Run(object):
"""helper class to use as main for pylint :
run(*sys.argv[1:])
"""
LinterClass = PyLinter
option_groups = (
('Commands', 'Options which are actually commands. Options in this \
group are mutually exclusive.'),
)
def __init__(self, args, reporter=None, exit=True):
self._rcfile = None
self._plugins = []
try:
preprocess_options(args, {
# option: (callback, takearg)
'init-hook': (cb_init_hook, True),
'rcfile': (self.cb_set_rcfile, True),
'load-plugins': (self.cb_add_plugins, True),
})
except ArgumentPreprocessingError as ex:
print(ex, file=sys.stderr)
sys.exit(32)
self.linter = linter = self.LinterClass((
('rcfile',
{'action' : 'callback', 'callback' : lambda *args: 1,
'type': 'string', 'metavar': '<file>',
'help' : 'Specify a configuration file.'}),
('init-hook',
{'action' : 'callback', 'callback' : lambda *args: 1,
'type' : 'string', 'metavar': '<code>',
'level': 1,
'help' : 'Python code to execute, usually for sys.path '
'manipulation such as pygtk.require().'}),
('help-msg',
{'action' : 'callback', 'type' : 'string', 'metavar': '<msg-id>',
'callback' : self.cb_help_message,
'group': 'Commands',
'help' : 'Display a help message for the given message id and '
'exit. The value may be a comma separated list of message ids.'}),
('list-msgs',
{'action' : 'callback', 'metavar': '<msg-id>',
'callback' : self.cb_list_messages,
'group': 'Commands', 'level': 1,
'help' : "Generate pylint's messages."}),
('list-conf-levels',
{'action' : 'callback',
'callback' : cb_list_confidence_levels,
'group': 'Commands', 'level': 1,
'help' : "Generate pylint's messages."}),
('full-documentation',
{'action' : 'callback', 'metavar': '<msg-id>',
'callback' : self.cb_full_documentation,
'group': 'Commands', 'level': 1,
'help' : "Generate pylint's full documentation."}),
('generate-rcfile',
{'action' : 'callback', 'callback' : self.cb_generate_config,
'group': 'Commands',
'help' : 'Generate a sample configuration file according to '
'the current configuration. You can put other options '
'before this one to get them in the generated '
'configuration.'}),
('generate-man',
{'action' : 'callback', 'callback' : self.cb_generate_manpage,
'group': 'Commands',
'help' : "Generate pylint's man page.", 'hide': True}),
('errors-only',
{'action' : 'callback', 'callback' : self.cb_error_mode,
'short': 'E',
'help' : 'In error mode, checkers without error messages are '
'disabled and for others, only the ERROR messages are '
'displayed, and no reports are done by default'''}),
('py3k',
{'action' : 'callback', 'callback' : self.cb_python3_porting_mode,
'help' : 'In Python 3 porting mode, all checkers will be '
'disabled and only messages emitted by the porting '
'checker will be displayed'}),
), option_groups=self.option_groups, pylintrc=self._rcfile)
# register standard checkers
linter.load_default_plugins()
# load command line plugins
linter.load_plugin_modules(self._plugins)
# add some help section
linter.add_help_section('Environment variables', config.ENV_HELP, level=1)
# pylint: disable=bad-continuation
linter.add_help_section('Output',
'Using the default text output, the message format is : \n'
' \n'
' MESSAGE_TYPE: LINE_NUM:[OBJECT:] MESSAGE \n'
' \n'
'There are 5 kinds of message types : \n'
' * (C) convention, for programming standard violation \n'
' * (R) refactor, for bad code smell \n'
' * (W) warning, for python specific problems \n'
' * (E) error, for probable bugs in the code \n'
' * (F) fatal, if an error occurred which prevented pylint from doing further\n'
'processing.\n'
, level=1)
linter.add_help_section('Output status code',
'Pylint should exit with the following status code: \n'
' * 0 if everything went fine \n'
' * 1 if a fatal message was issued \n'
' * 2 if an error message was issued \n'
' * 4 if a warning message was issued \n'
' * 8 if a refactor message was issued \n'
' * 16 if a convention message was issued \n'
' * 32 on usage error \n'
' \n'
'status 1 to 16 will be bit-ORed so you can know which different categories have\n'
'been issued by analysing pylint output status code\n',
level=1)
# read configuration
linter.disable('I')
linter.enable('c-extension-no-member')
linter.read_config_file()
config_parser = linter.cfgfile_parser
# run init hook, if present, before loading plugins
if config_parser.has_option('MASTER', 'init-hook'):
cb_init_hook('init-hook',
utils._unquote(config_parser.get('MASTER',
'init-hook')))
# is there some additional plugins in the file configuration, in
if config_parser.has_option('MASTER', 'load-plugins'):
plugins = utils._splitstrip(
config_parser.get('MASTER', 'load-plugins'))
linter.load_plugin_modules(plugins)
# now we can load file config and command line, plugins (which can
# provide options) have been registered
linter.load_config_file()
if reporter:
# if a custom reporter is provided as argument, it may be overridden
# by file parameters, so re-set it here, but before command line
# parsing so it's still overrideable by command line option
linter.set_reporter(reporter)
try:
args = linter.load_command_line_configuration(args)
except SystemExit as exc:
if exc.code == 2: # bad options
exc.code = 32
raise
if not args:
print(linter.help())
sys.exit(32)
if linter.config.jobs < 0:
print("Jobs number (%d) should be greater than 0"
% linter.config.jobs, file=sys.stderr)
sys.exit(32)
if linter.config.jobs > 1 or linter.config.jobs == 0:
if multiprocessing is None:
print("Multiprocessing library is missing, "
"fallback to single process", file=sys.stderr)
linter.set_option("jobs", 1)
else:
if linter.config.jobs == 0:
linter.config.jobs = multiprocessing.cpu_count()
# insert current working directory to the python path to have a correct
# behaviour
with fix_import_path(args):
linter.check(args)
linter.generate_reports()
if exit:
sys.exit(self.linter.msg_status)
def cb_set_rcfile(self, name, value):
"""callback for option preprocessing (i.e. before option parsing)"""
self._rcfile = value
def cb_add_plugins(self, name, value):
"""callback for option preprocessing (i.e. before option parsing)"""
self._plugins.extend(utils._splitstrip(value))
def cb_error_mode(self, *args, **kwargs):
"""error mode:
* disable all but error messages
* disable the 'miscellaneous' checker which can be safely deactivated in
debug
* disable reports
* do not save execution information
"""
self.linter.error_mode()
def cb_generate_config(self, *args, **kwargs):
"""optik callback for sample config file generation"""
self.linter.generate_config(skipsections=('COMMANDS',))
sys.exit(0)
def cb_generate_manpage(self, *args, **kwargs):
"""optik callback for sample config file generation"""
from pylint import __pkginfo__
self.linter.generate_manpage(__pkginfo__)
sys.exit(0)
def cb_help_message(self, option, optname, value, parser):
"""optik callback for printing some help about a particular message"""
self.linter.msgs_store.help_message(utils._splitstrip(value))
sys.exit(0)
def cb_full_documentation(self, option, optname, value, parser):
"""optik callback for printing full documentation"""
self.linter.print_full_documentation()
sys.exit(0)
def cb_list_messages(self, option, optname, value, parser): # FIXME
"""optik callback for printing available messages"""
self.linter.msgs_store.list_messages()
sys.exit(0)
def cb_python3_porting_mode(self, *args, **kwargs):
"""Activate only the python3 porting checker."""
self.linter.python3_porting_mode()
def cb_list_confidence_levels(option, optname, value, parser):
for level in interfaces.CONFIDENCE_LEVELS:
print('%-18s: %s' % level)
sys.exit(0)
def cb_init_hook(optname, value):
"""exec arbitrary code to set sys.path for instance"""
exec(value) # pylint: disable=exec-used
if __name__ == '__main__':
Run(sys.argv[1:])
| 1 | 9,697 | I don't understand why this is here or what the intention behind it is. Can you elaborate on the reasoning behind this change? | PyCQA-pylint | py |
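The added hunk in this row re-reads the user's `disable` setting from the `[MESSAGES CONTROL]` section of the rc file and applies it again after the python3 'E' messages are re-enabled in error mode. A rough standalone sketch of that mechanism, assuming a plain `configparser`-backed rc file (pylint itself goes through `self.cfgfile_parser` and `self.global_set_option`, and `disable_cb` below is a hypothetical stand-in):

```python
# Sketch: after re-enabling error-level checks, re-apply the explicitly
# disabled message ids listed under [MESSAGES CONTROL] disable.
from configparser import ConfigParser

def reapply_disable(config_parser, disable_cb):
    """Call disable_cb for every message id listed under [MESSAGES CONTROL] disable."""
    if config_parser.has_option("MESSAGES CONTROL", "disable"):
        value = config_parser.get("MESSAGES CONTROL", "disable")
        # The option is a comma-separated list, like pylint's csv options.
        for msg_id in (part.strip() for part in value.split(",") if part.strip()):
            disable_cb(msg_id)

parser = ConfigParser()
parser.read_string("[MESSAGES CONTROL]\ndisable = print-statement, no-absolute-import\n")
reapply_disable(parser, lambda msg_id: print("disable", msg_id))
```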
@@ -25,13 +25,13 @@ package sqlplugin
import "database/sql"
type (
- // historyShard is the SQL persistence interface for history shards
- historyShard interface {
+ // HistoryShard is the SQL persistence interface for history shards
+ HistoryShard interface {
InsertIntoShards(rows *ShardsRow) (sql.Result, error)
UpdateShards(row *ShardsRow) (sql.Result, error)
SelectFromShards(filter *ShardsFilter) (*ShardsRow, error)
- ReadLockShards(filter *ShardsFilter) (int, error)
- WriteLockShards(filter *ShardsFilter) (int, error)
+ ReadLockShards(filter *ShardsFilter) (int64, error)
+ WriteLockShards(filter *ShardsFilter) (int64, error)
}
// historyEvent is the SQL persistence interface for history nodes and history trees | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package sqlplugin
import "database/sql"
type (
// historyShard is the SQL persistence interface for history shards
historyShard interface {
InsertIntoShards(rows *ShardsRow) (sql.Result, error)
UpdateShards(row *ShardsRow) (sql.Result, error)
SelectFromShards(filter *ShardsFilter) (*ShardsRow, error)
ReadLockShards(filter *ShardsFilter) (int, error)
WriteLockShards(filter *ShardsFilter) (int, error)
}
// historyEvent is the SQL persistence interface for history nodes and history trees
historyEvent interface {
InsertIntoHistoryNode(row *HistoryNodeRow) (sql.Result, error)
SelectFromHistoryNode(filter *HistoryNodeFilter) ([]HistoryNodeRow, error)
DeleteFromHistoryNode(filter *HistoryNodeFilter) (sql.Result, error)
InsertIntoHistoryTree(row *HistoryTreeRow) (sql.Result, error)
SelectFromHistoryTree(filter *HistoryTreeFilter) ([]HistoryTreeRow, error)
DeleteFromHistoryTree(filter *HistoryTreeFilter) (sql.Result, error)
}
// historyExecution is the SQL persistence interface for history nodes and history executions
historyExecution interface {
InsertIntoExecutions(row *ExecutionsRow) (sql.Result, error)
UpdateExecutions(row *ExecutionsRow) (sql.Result, error)
SelectFromExecutions(filter *ExecutionsFilter) (*ExecutionsRow, error)
DeleteFromExecutions(filter *ExecutionsFilter) (sql.Result, error)
ReadLockExecutions(filter *ExecutionsFilter) (int, error)
WriteLockExecutions(filter *ExecutionsFilter) (int, error)
LockCurrentExecutionsJoinExecutions(filter *CurrentExecutionsFilter) ([]CurrentExecutionsRow, error)
InsertIntoCurrentExecutions(row *CurrentExecutionsRow) (sql.Result, error)
UpdateCurrentExecutions(row *CurrentExecutionsRow) (sql.Result, error)
// SelectFromCurrentExecutions returns one or more rows from current_executions table
// Required params - {shardID, namespaceID, workflowID}
SelectFromCurrentExecutions(filter *CurrentExecutionsFilter) (*CurrentExecutionsRow, error)
// DeleteFromCurrentExecutions deletes a single row that matches the filter criteria
// If a row exist, that row will be deleted and this method will return success
// If there is no row matching the filter criteria, this method will still return success
// Callers can check the output of Result.RowsAffected() to see if a row was deleted or not
// Required params - {shardID, namespaceID, workflowID, runID}
DeleteFromCurrentExecutions(filter *CurrentExecutionsFilter) (sql.Result, error)
LockCurrentExecutions(filter *CurrentExecutionsFilter) (*CurrentExecutionsRow, error)
}
// historyExecutionBuffer is the SQL persistence interface for history nodes and history execution buffer events
historyExecutionBuffer interface {
InsertIntoBufferedEvents(rows []BufferedEventsRow) (sql.Result, error)
SelectFromBufferedEvents(filter *BufferedEventsFilter) ([]BufferedEventsRow, error)
DeleteFromBufferedEvents(filter *BufferedEventsFilter) (sql.Result, error)
}
// historyExecutionActivity is the SQL persistence interface for history nodes and history execution activities
historyExecutionActivity interface {
ReplaceIntoActivityInfoMaps(rows []ActivityInfoMapsRow) (sql.Result, error)
// SelectFromActivityInfoMaps returns one or more rows from activity_info_maps
// Required filter params - {shardID, namespaceID, workflowID, runID}
SelectFromActivityInfoMaps(filter *ActivityInfoMapsFilter) ([]ActivityInfoMapsRow, error)
// DeleteFromActivityInfoMaps deletes a row from activity_info_maps table
// Required filter params
// - single row delete - {shardID, namespaceID, workflowID, runID, scheduleID}
// - range delete - {shardID, namespaceID, workflowID, runID}
DeleteFromActivityInfoMaps(filter *ActivityInfoMapsFilter) (sql.Result, error)
}
// historyExecutionChildWorkflow is the SQL persistence interface for history nodes and history execution child workflows
historyExecutionChildWorkflow interface {
ReplaceIntoChildExecutionInfoMaps(rows []ChildExecutionInfoMapsRow) (sql.Result, error)
// SelectFromChildExecutionInfoMaps returns one or more rows from child_execution_info_maps table
// Required filter params - {shardID, namespaceID, workflowID, runID}
SelectFromChildExecutionInfoMaps(filter *ChildExecutionInfoMapsFilter) ([]ChildExecutionInfoMapsRow, error)
// DeleteFromChildExecutionInfoMaps deletes one or more rows from child_execution_info_maps
// Required filter params
// - single row - {shardID, namespaceID, workflowID, runID, initiatedID}
// - multiple rows - {shardID, namespaceID, workflowID, runID}
DeleteFromChildExecutionInfoMaps(filter *ChildExecutionInfoMapsFilter) (sql.Result, error)
}
// historyExecutionTimer is the SQL persistence interface for history nodes and history execution timers
historyExecutionTimer interface {
ReplaceIntoTimerInfoMaps(rows []TimerInfoMapsRow) (sql.Result, error)
// SelectFromTimerInfoMaps returns one or more rows from timer_info_maps table
// Required filter params - {shardID, namespaceID, workflowID, runID}
SelectFromTimerInfoMaps(filter *TimerInfoMapsFilter) ([]TimerInfoMapsRow, error)
// DeleteFromTimerInfoMaps deletes one or more rows from timer_info_maps
// Required filter params
// - single row - {shardID, namespaceID, workflowID, runID, timerID}
// - multiple rows - {shardID, namespaceID, workflowID, runID}
DeleteFromTimerInfoMaps(filter *TimerInfoMapsFilter) (sql.Result, error)
}
// historyExecutionRequestCancel is the SQL persistence interface for history nodes and history execution request cancels
historyExecutionRequestCancel interface {
ReplaceIntoRequestCancelInfoMaps(rows []RequestCancelInfoMapsRow) (sql.Result, error)
// SelectFromRequestCancelInfoMaps returns one or more rows from request_cancel_info_maps table
// Required filter params - {shardID, namespaceID, workflowID, runID}
SelectFromRequestCancelInfoMaps(filter *RequestCancelInfoMapsFilter) ([]RequestCancelInfoMapsRow, error)
// DeleteFromRequestCancelInfoMaps deletes one or more rows from request_cancel_info_maps
// Required filter params
// - single row - {shardID, namespaceID, workflowID, runID, initiatedID}
// - multiple rows - {shardID, namespaceID, workflowID, runID}
DeleteFromRequestCancelInfoMaps(filter *RequestCancelInfoMapsFilter) (sql.Result, error)
}
// historyExecutionSignal is the SQL persistence interface for history nodes and history execution signals
historyExecutionSignal interface {
ReplaceIntoSignalInfoMaps(rows []SignalInfoMapsRow) (sql.Result, error)
// SelectFromSignalInfoMaps returns one or more rows from signal_info_maps table
// Required filter params - {shardID, namespaceID, workflowID, runID}
SelectFromSignalInfoMaps(filter *SignalInfoMapsFilter) ([]SignalInfoMapsRow, error)
// DeleteFromSignalInfoMaps deletes one or more rows from signal_info_maps table
// Required filter params
// - single row - {shardID, namespaceID, workflowID, runID, initiatedID}
// - multiple rows - {shardID, namespaceID, workflowID, runID}
DeleteFromSignalInfoMaps(filter *SignalInfoMapsFilter) (sql.Result, error)
}
// historyExecutionSignalRequest is the SQL persistence interface for history nodes and history execution signal request
historyExecutionSignalRequest interface {
InsertIntoSignalsRequestedSets(rows []SignalsRequestedSetsRow) (sql.Result, error)
// SelectFromSignalsRequestedSets returns one or more rows from signals_requested_sets table
// Required filter params - {shardID, namespaceID, workflowID, runID}
SelectFromSignalsRequestedSets(filter *SignalsRequestedSetsFilter) ([]SignalsRequestedSetsRow, error)
// DeleteFromSignalsRequestedSets deletes one or more rows from signals_requested_sets
// Required filter params
// - single row - {shardID, namespaceID, workflowID, runID, signalID}
// - multiple rows - {shardID, namespaceID, workflowID, runID}
DeleteFromSignalsRequestedSets(filter *SignalsRequestedSetsFilter) (sql.Result, error)
}
// historyTransferTask is the SQL persistence interface for history nodes and history transfer tasks
historyTransferTask interface {
InsertIntoTransferTasks(rows []TransferTasksRow) (sql.Result, error)
// SelectFromTransferTasks returns rows that match filter criteria from transfer_tasks table.
// Required filter params - {shardID, minTaskID, maxTaskID}
SelectFromTransferTasks(filter *TransferTasksFilter) ([]TransferTasksRow, error)
// DeleteFromTransferTasks deletes one or more rows from transfer_tasks table.
// Filter params - shardID is required. If TaskID is not nil, a single row is deleted.
// When MinTaskID and MaxTaskID are not-nil, a range of rows are deleted.
DeleteFromTransferTasks(filter *TransferTasksFilter) (sql.Result, error)
}
// historyTimerTask is the SQL persistence interface for history nodes and history timer tasks
historyTimerTask interface {
InsertIntoTimerTasks(rows []TimerTasksRow) (sql.Result, error)
// SelectFromTimerTasks returns one or more rows from timer_tasks table
// Required filter Params - {shardID, taskID, minVisibilityTimestamp, maxVisibilityTimestamp, pageSize}
SelectFromTimerTasks(filter *TimerTasksFilter) ([]TimerTasksRow, error)
// DeleteFromTimerTasks deletes one or more rows from timer_tasks table
// Required filter Params:
// - to delete one row - {shardID, visibilityTimestamp, taskID}
// - to delete multiple rows - {shardID, minVisibilityTimestamp, maxVisibilityTimestamp}
DeleteFromTimerTasks(filter *TimerTasksFilter) (sql.Result, error)
}
// historyReplicationTask is the SQL persistence interface for history nodes and history replication tasks
historyReplicationTask interface {
InsertIntoReplicationTasks(rows []ReplicationTasksRow) (sql.Result, error)
// SelectFromReplicationTasks returns one or more rows from replication_tasks table
// Required filter params - {shardID, minTaskID, maxTaskID, pageSize}
SelectFromReplicationTasks(filter *ReplicationTasksFilter) ([]ReplicationTasksRow, error)
// DeleteFromReplicationTasks deletes a row from replication_tasks table
// Required filter params - {shardID, inclusiveEndTaskID}
DeleteFromReplicationTasks(filter *ReplicationTasksFilter) (sql.Result, error)
		// RangeDeleteFromReplicationTasks deletes multiple rows from replication_tasks table
// Required filter params - {shardID, inclusiveEndTaskID}
RangeDeleteFromReplicationTasks(filter *ReplicationTasksFilter) (sql.Result, error)
// InsertIntoReplicationTasksDLQ puts the replication task into DLQ
InsertIntoReplicationTasksDLQ(row *ReplicationTaskDLQRow) (sql.Result, error)
// SelectFromReplicationTasksDLQ returns one or more rows from replication_tasks_dlq table
// Required filter params - {sourceClusterName, shardID, minTaskID, pageSize}
SelectFromReplicationTasksDLQ(filter *ReplicationTasksDLQFilter) ([]ReplicationTasksRow, error)
// DeleteMessageFromReplicationTasksDLQ deletes one row from replication_tasks_dlq table
// Required filter params - {sourceClusterName, shardID, taskID}
DeleteMessageFromReplicationTasksDLQ(filter *ReplicationTasksDLQFilter) (sql.Result, error)
// RangeDeleteMessageFromReplicationTasksDLQ deletes one or more rows from replication_tasks_dlq table
// Required filter params - {sourceClusterName, shardID, taskID, inclusiveTaskID}
RangeDeleteMessageFromReplicationTasksDLQ(filter *ReplicationTasksDLQFilter) (sql.Result, error)
}
)
| 1 | 10,306 | I think @sergeybykov converted (wanted to convert) all `shardID` to `int32` from `int64`. Please check with him. | temporalio-temporal | go |
@@ -2289,7 +2289,8 @@ void ProtocolGame::sendCreatureHealth(const Creature* creature)
if (creature->isHealthHidden()) {
msg.addByte(0x00);
} else {
- msg.addByte(std::ceil((static_cast<double>(creature->getHealth()) / std::max<int32_t>(creature->getMaxHealth(), 1)) * 100));
+ int32_t maxHealth = std::max(creature->getMaxHealth(), 1);
+ msg.addByte(std::ceil((static_cast<double>(std::min(creature->getHealth(), maxHealth)) / maxHealth) * 100));
}
writeToOutputBuffer(msg);
} | 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2019 Mark Samman <mark.samman@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include <boost/range/adaptor/reversed.hpp>
#include "protocolgame.h"
#include "outputmessage.h"
#include "player.h"
#include "configmanager.h"
#include "actions.h"
#include "game.h"
#include "iologindata.h"
#include "iomarket.h"
#include "waitlist.h"
#include "ban.h"
#include "scheduler.h"
extern ConfigManager g_config;
extern Actions actions;
extern CreatureEvents* g_creatureEvents;
extern Chat* g_chat;
void ProtocolGame::release()
{
//dispatcher thread
if (player && player->client == shared_from_this()) {
player->client.reset();
player->decrementReferenceCounter();
player = nullptr;
}
OutputMessagePool::getInstance().removeProtocolFromAutosend(shared_from_this());
Protocol::release();
}
void ProtocolGame::login(const std::string& name, uint32_t accountId, OperatingSystem_t operatingSystem)
{
//dispatcher thread
Player* foundPlayer = g_game.getPlayerByName(name);
if (!foundPlayer || g_config.getBoolean(ConfigManager::ALLOW_CLONES)) {
player = new Player(getThis());
player->setName(name);
player->incrementReferenceCounter();
player->setID();
if (!IOLoginData::preloadPlayer(player, name)) {
disconnectClient("Your character could not be loaded.");
return;
}
if (IOBan::isPlayerNamelocked(player->getGUID())) {
disconnectClient("Your character has been namelocked.");
return;
}
if (g_game.getGameState() == GAME_STATE_CLOSING && !player->hasFlag(PlayerFlag_CanAlwaysLogin)) {
disconnectClient("The game is just going down.\nPlease try again later.");
return;
}
if (g_game.getGameState() == GAME_STATE_CLOSED && !player->hasFlag(PlayerFlag_CanAlwaysLogin)) {
disconnectClient("Server is currently closed.\nPlease try again later.");
return;
}
if (g_config.getBoolean(ConfigManager::ONE_PLAYER_ON_ACCOUNT) && player->getAccountType() < ACCOUNT_TYPE_GAMEMASTER && g_game.getPlayerByAccount(player->getAccount())) {
disconnectClient("You may only login with one character\nof your account at the same time.");
return;
}
if (!player->hasFlag(PlayerFlag_CannotBeBanned)) {
BanInfo banInfo;
if (IOBan::isAccountBanned(accountId, banInfo)) {
if (banInfo.reason.empty()) {
banInfo.reason = "(none)";
}
std::ostringstream ss;
if (banInfo.expiresAt > 0) {
ss << "Your account has been banned until " << formatDateShort(banInfo.expiresAt) << " by " << banInfo.bannedBy << ".\n\nReason specified:\n" << banInfo.reason;
} else {
ss << "Your account has been permanently banned by " << banInfo.bannedBy << ".\n\nReason specified:\n" << banInfo.reason;
}
disconnectClient(ss.str());
return;
}
}
std::size_t currentSlot = WaitingList::getInstance().clientLogin(player);
if (currentSlot > 0) {
uint8_t retryTime = WaitingList::getTime(currentSlot);
std::ostringstream ss;
ss << "Too many players online.\nYou are at place "
<< currentSlot << " on the waiting list.";
auto output = OutputMessagePool::getOutputMessage();
output->addByte(0x16);
output->addString(ss.str());
output->addByte(retryTime);
send(output);
disconnect();
return;
}
if (!IOLoginData::loadPlayerById(player, player->getGUID())) {
disconnectClient("Your character could not be loaded.");
return;
}
player->setOperatingSystem(operatingSystem);
if (!g_game.placeCreature(player, player->getLoginPosition())) {
if (!g_game.placeCreature(player, player->getTemplePosition(), false, true)) {
disconnectClient("Temple position is wrong. Contact the administrator.");
return;
}
}
if (operatingSystem >= CLIENTOS_OTCLIENT_LINUX) {
player->registerCreatureEvent("ExtendedOpcode");
}
player->lastIP = player->getIP();
player->lastLoginSaved = std::max<time_t>(time(nullptr), player->lastLoginSaved + 1);
acceptPackets = true;
} else {
if (eventConnect != 0 || !g_config.getBoolean(ConfigManager::REPLACE_KICK_ON_LOGIN)) {
//Already trying to connect
disconnectClient("You are already logged in.");
return;
}
if (foundPlayer->client) {
foundPlayer->disconnect();
foundPlayer->isConnecting = true;
eventConnect = g_scheduler.addEvent(createSchedulerTask(1000, std::bind(&ProtocolGame::connect, getThis(), foundPlayer->getID(), operatingSystem)));
} else {
connect(foundPlayer->getID(), operatingSystem);
}
}
OutputMessagePool::getInstance().addProtocolToAutosend(shared_from_this());
}
void ProtocolGame::connect(uint32_t playerId, OperatingSystem_t operatingSystem)
{
eventConnect = 0;
Player* foundPlayer = g_game.getPlayerByID(playerId);
if (!foundPlayer || foundPlayer->client) {
disconnectClient("You are already logged in.");
return;
}
if (isConnectionExpired()) {
//ProtocolGame::release() has been called at this point and the Connection object
//no longer exists, so we return to prevent leakage of the Player.
return;
}
player = foundPlayer;
player->incrementReferenceCounter();
g_chat->removeUserFromAllChannels(*player);
player->clearModalWindows();
player->setOperatingSystem(operatingSystem);
player->isConnecting = false;
player->client = getThis();
sendAddCreature(player, player->getPosition(), 0, false);
player->lastIP = player->getIP();
player->lastLoginSaved = std::max<time_t>(time(nullptr), player->lastLoginSaved + 1);
acceptPackets = true;
}
void ProtocolGame::logout(bool displayEffect, bool forced)
{
//dispatcher thread
if (!player) {
return;
}
if (!player->isRemoved()) {
if (!forced) {
if (!player->isAccessPlayer()) {
if (player->getTile()->hasFlag(TILESTATE_NOLOGOUT)) {
player->sendCancelMessage(RETURNVALUE_YOUCANNOTLOGOUTHERE);
return;
}
if (!player->getTile()->hasFlag(TILESTATE_PROTECTIONZONE) && player->hasCondition(CONDITION_INFIGHT)) {
player->sendCancelMessage(RETURNVALUE_YOUMAYNOTLOGOUTDURINGAFIGHT);
return;
}
}
//scripting event - onLogout
if (!g_creatureEvents->playerLogout(player)) {
//Let the script handle the error message
return;
}
}
if (displayEffect && player->getHealth() > 0) {
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
}
}
disconnect();
g_game.removeCreature(player);
}
void ProtocolGame::onRecvFirstMessage(NetworkMessage& msg)
{
if (g_game.getGameState() == GAME_STATE_SHUTDOWN) {
disconnect();
return;
}
OperatingSystem_t operatingSystem = static_cast<OperatingSystem_t>(msg.get<uint16_t>());
version = msg.get<uint16_t>();
msg.skipBytes(7); // U32 client version, U8 client type, U16 dat revision
if (!Protocol::RSA_decrypt(msg)) {
disconnect();
return;
}
xtea::key key;
key[0] = msg.get<uint32_t>();
key[1] = msg.get<uint32_t>();
key[2] = msg.get<uint32_t>();
key[3] = msg.get<uint32_t>();
enableXTEAEncryption();
setXTEAKey(std::move(key));
if (operatingSystem >= CLIENTOS_OTCLIENT_LINUX) {
NetworkMessage opcodeMessage;
opcodeMessage.addByte(0x32);
opcodeMessage.addByte(0x00);
opcodeMessage.add<uint16_t>(0x00);
writeToOutputBuffer(opcodeMessage);
}
msg.skipBytes(1); // gamemaster flag
std::string sessionKey = msg.getString();
auto sessionArgs = explodeString(sessionKey, "\n", 4);
if (sessionArgs.size() != 4) {
disconnect();
return;
}
std::string& accountName = sessionArgs[0];
std::string& password = sessionArgs[1];
std::string& token = sessionArgs[2];
uint32_t tokenTime = 0;
try {
tokenTime = std::stoul(sessionArgs[3]);
} catch (const std::invalid_argument&) {
disconnectClient("Malformed token packet.");
return;
} catch (const std::out_of_range&) {
disconnectClient("Token time is too long.");
return;
}
if (accountName.empty()) {
disconnectClient("You must enter your account name.");
return;
}
std::string characterName = msg.getString();
uint32_t timeStamp = msg.get<uint32_t>();
uint8_t randNumber = msg.getByte();
if (challengeTimestamp != timeStamp || challengeRandom != randNumber) {
disconnect();
return;
}
if (version < CLIENT_VERSION_MIN || version > CLIENT_VERSION_MAX) {
std::ostringstream ss;
ss << "Only clients with protocol " << CLIENT_VERSION_STR << " allowed!";
disconnectClient(ss.str());
return;
}
if (g_game.getGameState() == GAME_STATE_STARTUP) {
disconnectClient("Gameworld is starting up. Please wait.");
return;
}
if (g_game.getGameState() == GAME_STATE_MAINTAIN) {
disconnectClient("Gameworld is under maintenance. Please re-connect in a while.");
return;
}
BanInfo banInfo;
if (IOBan::isIpBanned(getIP(), banInfo)) {
if (banInfo.reason.empty()) {
banInfo.reason = "(none)";
}
std::ostringstream ss;
ss << "Your IP has been banned until " << formatDateShort(banInfo.expiresAt) << " by " << banInfo.bannedBy << ".\n\nReason specified:\n" << banInfo.reason;
disconnectClient(ss.str());
return;
}
uint32_t accountId = IOLoginData::gameworldAuthentication(accountName, password, characterName, token, tokenTime);
if (accountId == 0) {
disconnectClient("Account name or password is not correct.");
return;
}
g_dispatcher.addTask(createTask(std::bind(&ProtocolGame::login, getThis(), characterName, accountId, operatingSystem)));
}
void ProtocolGame::onConnect()
{
auto output = OutputMessagePool::getOutputMessage();
static std::random_device rd;
static std::ranlux24 generator(rd());
static std::uniform_int_distribution<uint16_t> randNumber(0x00, 0xFF);
// Skip checksum
output->skipBytes(sizeof(uint32_t));
// Packet length & type
output->add<uint16_t>(0x0006);
output->addByte(0x1F);
// Add timestamp & random number
challengeTimestamp = static_cast<uint32_t>(time(nullptr));
output->add<uint32_t>(challengeTimestamp);
challengeRandom = randNumber(generator);
output->addByte(challengeRandom);
// Go back and write checksum
output->skipBytes(-12);
output->add<uint32_t>(adlerChecksum(output->getOutputBuffer() + sizeof(uint32_t), 8));
send(output);
}
void ProtocolGame::disconnectClient(const std::string& message) const
{
auto output = OutputMessagePool::getOutputMessage();
output->addByte(0x14);
output->addString(message);
send(output);
disconnect();
}
void ProtocolGame::writeToOutputBuffer(const NetworkMessage& msg)
{
auto out = getOutputBuffer(msg.getLength());
out->append(msg);
}
void ProtocolGame::parsePacket(NetworkMessage& msg)
{
if (!acceptPackets || g_game.getGameState() == GAME_STATE_SHUTDOWN || msg.getLength() <= 0) {
return;
}
uint8_t recvbyte = msg.getByte();
if (!player) {
if (recvbyte == 0x0F) {
disconnect();
}
return;
}
	//a dead player cannot perform actions
if (player->isRemoved() || player->getHealth() <= 0) {
if (recvbyte == 0x0F) {
disconnect();
return;
}
if (recvbyte != 0x14) {
return;
}
}
switch (recvbyte) {
case 0x14: g_dispatcher.addTask(createTask(std::bind(&ProtocolGame::logout, getThis(), true, false))); break;
case 0x1D: addGameTask(&Game::playerReceivePingBack, player->getID()); break;
case 0x1E: addGameTask(&Game::playerReceivePing, player->getID()); break;
case 0x32: parseExtendedOpcode(msg); break; //otclient extended opcode
case 0x64: parseAutoWalk(msg); break;
case 0x65: addGameTask(&Game::playerMove, player->getID(), DIRECTION_NORTH); break;
case 0x66: addGameTask(&Game::playerMove, player->getID(), DIRECTION_EAST); break;
case 0x67: addGameTask(&Game::playerMove, player->getID(), DIRECTION_SOUTH); break;
case 0x68: addGameTask(&Game::playerMove, player->getID(), DIRECTION_WEST); break;
case 0x69: addGameTask(&Game::playerStopAutoWalk, player->getID()); break;
case 0x6A: addGameTask(&Game::playerMove, player->getID(), DIRECTION_NORTHEAST); break;
case 0x6B: addGameTask(&Game::playerMove, player->getID(), DIRECTION_SOUTHEAST); break;
case 0x6C: addGameTask(&Game::playerMove, player->getID(), DIRECTION_SOUTHWEST); break;
case 0x6D: addGameTask(&Game::playerMove, player->getID(), DIRECTION_NORTHWEST); break;
case 0x6F: addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerTurn, player->getID(), DIRECTION_NORTH); break;
case 0x70: addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerTurn, player->getID(), DIRECTION_EAST); break;
case 0x71: addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerTurn, player->getID(), DIRECTION_SOUTH); break;
case 0x72: addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerTurn, player->getID(), DIRECTION_WEST); break;
case 0x77: parseEquipObject(msg); break;
case 0x78: parseThrow(msg); break;
case 0x79: parseLookInShop(msg); break;
case 0x7A: parsePlayerPurchase(msg); break;
case 0x7B: parsePlayerSale(msg); break;
case 0x7C: addGameTask(&Game::playerCloseShop, player->getID()); break;
case 0x7D: parseRequestTrade(msg); break;
case 0x7E: parseLookInTrade(msg); break;
case 0x7F: addGameTask(&Game::playerAcceptTrade, player->getID()); break;
case 0x80: addGameTask(&Game::playerCloseTrade, player->getID()); break;
case 0x82: parseUseItem(msg); break;
case 0x83: parseUseItemEx(msg); break;
case 0x84: parseUseWithCreature(msg); break;
case 0x85: parseRotateItem(msg); break;
case 0x87: parseCloseContainer(msg); break;
case 0x88: parseUpArrowContainer(msg); break;
case 0x89: parseTextWindow(msg); break;
case 0x8A: parseHouseWindow(msg); break;
case 0x8B: parseWrapItem(msg); break;
case 0x8C: parseLookAt(msg); break;
case 0x8D: parseLookInBattleList(msg); break;
case 0x8E: /* join aggression */ break;
case 0x96: parseSay(msg); break;
case 0x97: addGameTask(&Game::playerRequestChannels, player->getID()); break;
case 0x98: parseOpenChannel(msg); break;
case 0x99: parseCloseChannel(msg); break;
case 0x9A: parseOpenPrivateChannel(msg); break;
case 0x9E: addGameTask(&Game::playerCloseNpcChannel, player->getID()); break;
case 0xA0: parseFightModes(msg); break;
case 0xA1: parseAttack(msg); break;
case 0xA2: parseFollow(msg); break;
case 0xA3: parseInviteToParty(msg); break;
case 0xA4: parseJoinParty(msg); break;
case 0xA5: parseRevokePartyInvite(msg); break;
case 0xA6: parsePassPartyLeadership(msg); break;
case 0xA7: addGameTask(&Game::playerLeaveParty, player->getID()); break;
case 0xA8: parseEnableSharedPartyExperience(msg); break;
case 0xAA: addGameTask(&Game::playerCreatePrivateChannel, player->getID()); break;
case 0xAB: parseChannelInvite(msg); break;
case 0xAC: parseChannelExclude(msg); break;
case 0xBE: addGameTask(&Game::playerCancelAttackAndFollow, player->getID()); break;
case 0xC9: /* update tile */ break;
case 0xCA: parseUpdateContainer(msg); break;
case 0xCB: parseBrowseField(msg); break;
case 0xCC: parseSeekInContainer(msg); break;
case 0xD2: addGameTask(&Game::playerRequestOutfit, player->getID()); break;
case 0xD3: parseSetOutfit(msg); break;
case 0xD4: parseToggleMount(msg); break;
case 0xDC: parseAddVip(msg); break;
case 0xDD: parseRemoveVip(msg); break;
case 0xDE: parseEditVip(msg); break;
case 0xE6: parseBugReport(msg); break;
case 0xE7: /* thank you */ break;
case 0xE8: parseDebugAssert(msg); break;
case 0xF0: addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerShowQuestLog, player->getID()); break;
case 0xF1: parseQuestLine(msg); break;
case 0xF2: parseRuleViolationReport(msg); break;
case 0xF3: /* get object info */ break;
case 0xF4: parseMarketLeave(); break;
case 0xF5: parseMarketBrowse(msg); break;
case 0xF6: parseMarketCreateOffer(msg); break;
case 0xF7: parseMarketCancelOffer(msg); break;
case 0xF8: parseMarketAcceptOffer(msg); break;
case 0xF9: parseModalWindowAnswer(msg); break;
default:
// std::cout << "Player: " << player->getName() << " sent an unknown packet header: 0x" << std::hex << static_cast<uint16_t>(recvbyte) << std::dec << "!" << std::endl;
break;
}
if (msg.isOverrun()) {
disconnect();
}
}
void ProtocolGame::GetTileDescription(const Tile* tile, NetworkMessage& msg)
{
msg.add<uint16_t>(0x00); //environmental effects
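	// A tile description can hold at most 10 entries; on the player's own tile the 10th slot is reserved so the player creature is always sent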
int32_t count;
Item* ground = tile->getGround();
if (ground) {
msg.addItem(ground);
count = 1;
} else {
count = 0;
}
const TileItemVector* items = tile->getItemList();
if (items) {
for (auto it = items->getBeginTopItem(), end = items->getEndTopItem(); it != end; ++it) {
msg.addItem(*it);
count++;
if (count == 9 && tile->getPosition() == player->getPosition()) {
break;
} else if (count == 10) {
return;
}
}
}
const CreatureVector* creatures = tile->getCreatures();
if (creatures) {
bool playerAdded = false;
for (const Creature* creature : boost::adaptors::reverse(*creatures)) {
if (!player->canSeeCreature(creature)) {
continue;
}
if (tile->getPosition() == player->getPosition() && count == 9 && !playerAdded) {
creature = player;
}
if (creature->getID() == player->getID()) {
playerAdded = true;
}
bool known;
uint32_t removedKnown;
checkCreatureAsKnown(creature->getID(), known, removedKnown);
AddCreature(msg, creature, known, removedKnown);
if (++count == 10) {
return;
}
}
}
if (items) {
for (auto it = items->getBeginDownItem(), end = items->getEndDownItem(); it != end; ++it) {
msg.addItem(*it);
if (++count == 10) {
return;
}
}
}
}
void ProtocolGame::GetMapDescription(int32_t x, int32_t y, int32_t z, int32_t width, int32_t height, NetworkMessage& msg)
{
int32_t skip = -1;
int32_t startz, endz, zstep;
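	// Above ground (z <= 7) the client is sent floors 7 down to 0; underground it is sent the two floors above and below the current one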
if (z > 7) {
startz = z - 2;
endz = std::min<int32_t>(MAP_MAX_LAYERS - 1, z + 2);
zstep = 1;
} else {
startz = 7;
endz = 0;
zstep = -1;
}
for (int32_t nz = startz; nz != endz + zstep; nz += zstep) {
GetFloorDescription(msg, x, y, nz, width, height, z - nz, skip);
}
if (skip >= 0) {
msg.addByte(skip);
msg.addByte(0xFF);
}
}
void ProtocolGame::GetFloorDescription(NetworkMessage& msg, int32_t x, int32_t y, int32_t z, int32_t width, int32_t height, int32_t offset, int32_t& skip)
{
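	// Consecutive empty tiles are run-length encoded: 'skip' counts them and each run is flushed as a {count, 0xFF} pair before the next described tile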
for (int32_t nx = 0; nx < width; nx++) {
for (int32_t ny = 0; ny < height; ny++) {
Tile* tile = g_game.map.getTile(x + nx + offset, y + ny + offset, z);
if (tile) {
if (skip >= 0) {
msg.addByte(skip);
msg.addByte(0xFF);
}
skip = 0;
GetTileDescription(tile, msg);
} else if (skip == 0xFE) {
msg.addByte(0xFF);
msg.addByte(0xFF);
skip = -1;
} else {
++skip;
}
}
}
}
void ProtocolGame::checkCreatureAsKnown(uint32_t id, bool& known, uint32_t& removedKnown)
{
auto result = knownCreatureSet.insert(id);
if (!result.second) {
known = true;
return;
}
known = false;
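	// The known-creature list is capped; once it grows too large, evict an entry the player can no longer see and report it as removedKnown so the client drops it too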
if (knownCreatureSet.size() > 1300) {
// Look for a creature to remove
for (auto it = knownCreatureSet.begin(), end = knownCreatureSet.end(); it != end; ++it) {
Creature* creature = g_game.getCreatureByID(*it);
if (!canSee(creature)) {
removedKnown = *it;
knownCreatureSet.erase(it);
return;
}
}
// Bad situation. Let's just remove anyone.
auto it = knownCreatureSet.begin();
if (*it == id) {
++it;
}
removedKnown = *it;
knownCreatureSet.erase(it);
} else {
removedKnown = 0;
}
}
bool ProtocolGame::canSee(const Creature* c) const
{
if (!c || !player || c->isRemoved()) {
return false;
}
if (!player->canSeeCreature(c)) {
return false;
}
return canSee(c->getPosition());
}
bool ProtocolGame::canSee(const Position& pos) const
{
return canSee(pos.x, pos.y, pos.z);
}
bool ProtocolGame::canSee(int32_t x, int32_t y, int32_t z) const
{
if (!player) {
return false;
}
const Position& myPos = player->getPosition();
if (myPos.z <= 7) {
//we are on ground level or above (7 -> 0)
//view is from 7 -> 0
if (z > 7) {
return false;
}
} else if (myPos.z >= 8) {
//we are underground (8 -> 15)
//view is +/- 2 from the floor we stand on
if (std::abs(myPos.getZ() - z) > 2) {
return false;
}
}
	//a negative offset means that the action took place on a lower floor than ourselves
int32_t offsetz = myPos.getZ() - z;
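	// the visible area is 18x14 tiles around the player, shifted by the floor offset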
if ((x >= myPos.getX() - 8 + offsetz) && (x <= myPos.getX() + 9 + offsetz) &&
(y >= myPos.getY() - 6 + offsetz) && (y <= myPos.getY() + 7 + offsetz)) {
return true;
}
return false;
}
// Parse methods
void ProtocolGame::parseChannelInvite(NetworkMessage& msg)
{
const std::string name = msg.getString();
addGameTask(&Game::playerChannelInvite, player->getID(), name);
}
void ProtocolGame::parseChannelExclude(NetworkMessage& msg)
{
const std::string name = msg.getString();
addGameTask(&Game::playerChannelExclude, player->getID(), name);
}
void ProtocolGame::parseOpenChannel(NetworkMessage& msg)
{
uint16_t channelId = msg.get<uint16_t>();
addGameTask(&Game::playerOpenChannel, player->getID(), channelId);
}
void ProtocolGame::parseCloseChannel(NetworkMessage& msg)
{
uint16_t channelId = msg.get<uint16_t>();
addGameTask(&Game::playerCloseChannel, player->getID(), channelId);
}
void ProtocolGame::parseOpenPrivateChannel(NetworkMessage& msg)
{
const std::string receiver = msg.getString();
addGameTask(&Game::playerOpenPrivateChannel, player->getID(), receiver);
}
void ProtocolGame::parseAutoWalk(NetworkMessage& msg)
{
uint8_t numdirs = msg.getByte();
if (numdirs == 0 || (msg.getBufferPosition() + numdirs) != (msg.getLength() + 8)) {
return;
}
msg.skipBytes(numdirs);
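	// the direction bytes were skipped above; read them back to front with getPreviousByte and push_front so the path ends up in walking order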
std::forward_list<Direction> path;
for (uint8_t i = 0; i < numdirs; ++i) {
uint8_t rawdir = msg.getPreviousByte();
switch (rawdir) {
case 1: path.push_front(DIRECTION_EAST); break;
case 2: path.push_front(DIRECTION_NORTHEAST); break;
case 3: path.push_front(DIRECTION_NORTH); break;
case 4: path.push_front(DIRECTION_NORTHWEST); break;
case 5: path.push_front(DIRECTION_WEST); break;
case 6: path.push_front(DIRECTION_SOUTHWEST); break;
case 7: path.push_front(DIRECTION_SOUTH); break;
case 8: path.push_front(DIRECTION_SOUTHEAST); break;
default: break;
}
}
if (path.empty()) {
return;
}
addGameTask(&Game::playerAutoWalk, player->getID(), path);
}
void ProtocolGame::parseSetOutfit(NetworkMessage& msg)
{
Outfit_t newOutfit;
newOutfit.lookType = msg.get<uint16_t>();
newOutfit.lookHead = msg.getByte();
newOutfit.lookBody = msg.getByte();
newOutfit.lookLegs = msg.getByte();
newOutfit.lookFeet = msg.getByte();
newOutfit.lookAddons = msg.getByte();
newOutfit.lookMount = msg.get<uint16_t>();
addGameTask(&Game::playerChangeOutfit, player->getID(), newOutfit);
}
void ProtocolGame::parseToggleMount(NetworkMessage& msg)
{
bool mount = msg.getByte() != 0;
addGameTask(&Game::playerToggleMount, player->getID(), mount);
}
void ProtocolGame::parseUseItem(NetworkMessage& msg)
{
Position pos = msg.getPosition();
uint16_t spriteId = msg.get<uint16_t>();
uint8_t stackpos = msg.getByte();
uint8_t index = msg.getByte();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerUseItem, player->getID(), pos, stackpos, index, spriteId);
}
void ProtocolGame::parseUseItemEx(NetworkMessage& msg)
{
Position fromPos = msg.getPosition();
uint16_t fromSpriteId = msg.get<uint16_t>();
uint8_t fromStackPos = msg.getByte();
Position toPos = msg.getPosition();
uint16_t toSpriteId = msg.get<uint16_t>();
uint8_t toStackPos = msg.getByte();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerUseItemEx, player->getID(), fromPos, fromStackPos, fromSpriteId, toPos, toStackPos, toSpriteId);
}
void ProtocolGame::parseUseWithCreature(NetworkMessage& msg)
{
Position fromPos = msg.getPosition();
uint16_t spriteId = msg.get<uint16_t>();
uint8_t fromStackPos = msg.getByte();
uint32_t creatureId = msg.get<uint32_t>();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerUseWithCreature, player->getID(), fromPos, fromStackPos, creatureId, spriteId);
}
void ProtocolGame::parseCloseContainer(NetworkMessage& msg)
{
uint8_t cid = msg.getByte();
addGameTask(&Game::playerCloseContainer, player->getID(), cid);
}
void ProtocolGame::parseUpArrowContainer(NetworkMessage& msg)
{
uint8_t cid = msg.getByte();
addGameTask(&Game::playerMoveUpContainer, player->getID(), cid);
}
void ProtocolGame::parseUpdateContainer(NetworkMessage& msg)
{
uint8_t cid = msg.getByte();
addGameTask(&Game::playerUpdateContainer, player->getID(), cid);
}
void ProtocolGame::parseThrow(NetworkMessage& msg)
{
Position fromPos = msg.getPosition();
uint16_t spriteId = msg.get<uint16_t>();
uint8_t fromStackpos = msg.getByte();
Position toPos = msg.getPosition();
uint8_t count = msg.getByte();
if (toPos != fromPos) {
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerMoveThing, player->getID(), fromPos, spriteId, fromStackpos, toPos, count);
}
}
void ProtocolGame::parseLookAt(NetworkMessage& msg)
{
Position pos = msg.getPosition();
msg.skipBytes(2); // spriteId
uint8_t stackpos = msg.getByte();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerLookAt, player->getID(), pos, stackpos);
}
void ProtocolGame::parseLookInBattleList(NetworkMessage& msg)
{
uint32_t creatureId = msg.get<uint32_t>();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerLookInBattleList, player->getID(), creatureId);
}
void ProtocolGame::parseSay(NetworkMessage& msg)
{
std::string receiver;
uint16_t channelId;
SpeakClasses type = static_cast<SpeakClasses>(msg.getByte());
switch (type) {
case TALKTYPE_PRIVATE_TO:
case TALKTYPE_PRIVATE_RED_TO:
receiver = msg.getString();
channelId = 0;
break;
case TALKTYPE_CHANNEL_Y:
case TALKTYPE_CHANNEL_R1:
channelId = msg.get<uint16_t>();
break;
default:
channelId = 0;
break;
}
const std::string text = msg.getString();
if (text.length() > 255) {
return;
}
addGameTask(&Game::playerSay, player->getID(), channelId, type, receiver, text);
}
void ProtocolGame::parseFightModes(NetworkMessage& msg)
{
uint8_t rawFightMode = msg.getByte(); // 1 - offensive, 2 - balanced, 3 - defensive
	uint8_t rawChaseMode = msg.getByte(); // 0 - stand while fighting, 1 - chase opponent
uint8_t rawSecureMode = msg.getByte(); // 0 - can't attack unmarked, 1 - can attack unmarked
// uint8_t rawPvpMode = msg.getByte(); // pvp mode introduced in 10.0
fightMode_t fightMode;
if (rawFightMode == 1) {
fightMode = FIGHTMODE_ATTACK;
} else if (rawFightMode == 2) {
fightMode = FIGHTMODE_BALANCED;
} else {
fightMode = FIGHTMODE_DEFENSE;
}
addGameTask(&Game::playerSetFightModes, player->getID(), fightMode, rawChaseMode != 0, rawSecureMode != 0);
}
void ProtocolGame::parseAttack(NetworkMessage& msg)
{
uint32_t creatureId = msg.get<uint32_t>();
// msg.get<uint32_t>(); creatureId (same as above)
addGameTask(&Game::playerSetAttackedCreature, player->getID(), creatureId);
}
void ProtocolGame::parseFollow(NetworkMessage& msg)
{
uint32_t creatureId = msg.get<uint32_t>();
// msg.get<uint32_t>(); creatureId (same as above)
addGameTask(&Game::playerFollowCreature, player->getID(), creatureId);
}
void ProtocolGame::parseEquipObject(NetworkMessage& msg)
{
uint16_t spriteId = msg.get<uint16_t>();
// msg.get<uint8_t>();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerEquipItem, player->getID(), spriteId);
}
void ProtocolGame::parseTextWindow(NetworkMessage& msg)
{
uint32_t windowTextId = msg.get<uint32_t>();
const std::string newText = msg.getString();
addGameTask(&Game::playerWriteItem, player->getID(), windowTextId, newText);
}
void ProtocolGame::parseHouseWindow(NetworkMessage& msg)
{
uint8_t doorId = msg.getByte();
uint32_t id = msg.get<uint32_t>();
const std::string text = msg.getString();
addGameTask(&Game::playerUpdateHouseWindow, player->getID(), doorId, id, text);
}
void ProtocolGame::parseWrapItem(NetworkMessage& msg)
{
Position pos = msg.getPosition();
uint16_t spriteId = msg.get<uint16_t>();
uint8_t stackpos = msg.getByte();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerWrapItem, player->getID(), pos, stackpos, spriteId);
}
void ProtocolGame::parseLookInShop(NetworkMessage& msg)
{
uint16_t id = msg.get<uint16_t>();
uint8_t count = msg.getByte();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerLookInShop, player->getID(), id, count);
}
void ProtocolGame::parsePlayerPurchase(NetworkMessage& msg)
{
uint16_t id = msg.get<uint16_t>();
uint8_t count = msg.getByte();
uint8_t amount = msg.getByte();
bool ignoreCap = msg.getByte() != 0;
bool inBackpacks = msg.getByte() != 0;
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerPurchaseItem, player->getID(), id, count, amount, ignoreCap, inBackpacks);
}
void ProtocolGame::parsePlayerSale(NetworkMessage& msg)
{
uint16_t id = msg.get<uint16_t>();
uint8_t count = msg.getByte();
uint8_t amount = msg.getByte();
bool ignoreEquipped = msg.getByte() != 0;
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerSellItem, player->getID(), id, count, amount, ignoreEquipped);
}
void ProtocolGame::parseRequestTrade(NetworkMessage& msg)
{
Position pos = msg.getPosition();
uint16_t spriteId = msg.get<uint16_t>();
uint8_t stackpos = msg.getByte();
uint32_t playerId = msg.get<uint32_t>();
addGameTask(&Game::playerRequestTrade, player->getID(), pos, stackpos, playerId, spriteId);
}
void ProtocolGame::parseLookInTrade(NetworkMessage& msg)
{
bool counterOffer = (msg.getByte() == 0x01);
uint8_t index = msg.getByte();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerLookInTrade, player->getID(), counterOffer, index);
}
void ProtocolGame::parseAddVip(NetworkMessage& msg)
{
const std::string name = msg.getString();
addGameTask(&Game::playerRequestAddVip, player->getID(), name);
}
void ProtocolGame::parseRemoveVip(NetworkMessage& msg)
{
uint32_t guid = msg.get<uint32_t>();
addGameTask(&Game::playerRequestRemoveVip, player->getID(), guid);
}
void ProtocolGame::parseEditVip(NetworkMessage& msg)
{
uint32_t guid = msg.get<uint32_t>();
const std::string description = msg.getString();
uint32_t icon = std::min<uint32_t>(10, msg.get<uint32_t>()); // 10 is max icon in 9.63
bool notify = msg.getByte() != 0;
addGameTask(&Game::playerRequestEditVip, player->getID(), guid, description, icon, notify);
}
void ProtocolGame::parseRotateItem(NetworkMessage& msg)
{
Position pos = msg.getPosition();
uint16_t spriteId = msg.get<uint16_t>();
uint8_t stackpos = msg.getByte();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerRotateItem, player->getID(), pos, stackpos, spriteId);
}
void ProtocolGame::parseRuleViolationReport(NetworkMessage& msg)
{
uint8_t reportType = msg.getByte();
uint8_t reportReason = msg.getByte();
const std::string& targetName = msg.getString();
const std::string& comment = msg.getString();
std::string translation;
if (reportType == REPORT_TYPE_NAME) {
translation = msg.getString();
} else if (reportType == REPORT_TYPE_STATEMENT) {
translation = msg.getString();
		msg.get<uint32_t>(); // statement id, used to get whatever the player has said; we don't log that.
}
addGameTask(&Game::playerReportRuleViolation, player->getID(), targetName, reportType, reportReason, comment, translation);
}
void ProtocolGame::parseBugReport(NetworkMessage& msg)
{
uint8_t category = msg.getByte();
std::string message = msg.getString();
Position position;
if (category == BUG_CATEGORY_MAP) {
position = msg.getPosition();
}
addGameTask(&Game::playerReportBug, player->getID(), message, position, category);
}
void ProtocolGame::parseDebugAssert(NetworkMessage& msg)
{
if (debugAssertSent) {
return;
}
debugAssertSent = true;
std::string assertLine = msg.getString();
std::string date = msg.getString();
std::string description = msg.getString();
std::string comment = msg.getString();
addGameTask(&Game::playerDebugAssert, player->getID(), assertLine, date, description, comment);
}
void ProtocolGame::parseInviteToParty(NetworkMessage& msg)
{
uint32_t targetId = msg.get<uint32_t>();
addGameTask(&Game::playerInviteToParty, player->getID(), targetId);
}
void ProtocolGame::parseJoinParty(NetworkMessage& msg)
{
uint32_t targetId = msg.get<uint32_t>();
addGameTask(&Game::playerJoinParty, player->getID(), targetId);
}
void ProtocolGame::parseRevokePartyInvite(NetworkMessage& msg)
{
uint32_t targetId = msg.get<uint32_t>();
addGameTask(&Game::playerRevokePartyInvitation, player->getID(), targetId);
}
void ProtocolGame::parsePassPartyLeadership(NetworkMessage& msg)
{
uint32_t targetId = msg.get<uint32_t>();
addGameTask(&Game::playerPassPartyLeadership, player->getID(), targetId);
}
void ProtocolGame::parseEnableSharedPartyExperience(NetworkMessage& msg)
{
bool sharedExpActive = msg.getByte() == 1;
addGameTask(&Game::playerEnableSharedPartyExperience, player->getID(), sharedExpActive);
}
void ProtocolGame::parseQuestLine(NetworkMessage& msg)
{
uint16_t questId = msg.get<uint16_t>();
addGameTask(&Game::playerShowQuestLine, player->getID(), questId);
}
void ProtocolGame::parseMarketLeave()
{
addGameTask(&Game::playerLeaveMarket, player->getID());
}
void ProtocolGame::parseMarketBrowse(NetworkMessage& msg)
{
uint16_t browseId = msg.get<uint16_t>();
if (browseId == MARKETREQUEST_OWN_OFFERS) {
addGameTask(&Game::playerBrowseMarketOwnOffers, player->getID());
} else if (browseId == MARKETREQUEST_OWN_HISTORY) {
addGameTask(&Game::playerBrowseMarketOwnHistory, player->getID());
} else {
addGameTask(&Game::playerBrowseMarket, player->getID(), browseId);
}
}
void ProtocolGame::parseMarketCreateOffer(NetworkMessage& msg)
{
uint8_t type = msg.getByte();
uint16_t spriteId = msg.get<uint16_t>();
uint16_t amount = msg.get<uint16_t>();
uint32_t price = msg.get<uint32_t>();
bool anonymous = (msg.getByte() != 0);
addGameTask(&Game::playerCreateMarketOffer, player->getID(), type, spriteId, amount, price, anonymous);
}
void ProtocolGame::parseMarketCancelOffer(NetworkMessage& msg)
{
uint32_t timestamp = msg.get<uint32_t>();
uint16_t counter = msg.get<uint16_t>();
addGameTask(&Game::playerCancelMarketOffer, player->getID(), timestamp, counter);
}
void ProtocolGame::parseMarketAcceptOffer(NetworkMessage& msg)
{
uint32_t timestamp = msg.get<uint32_t>();
uint16_t counter = msg.get<uint16_t>();
uint16_t amount = msg.get<uint16_t>();
addGameTask(&Game::playerAcceptMarketOffer, player->getID(), timestamp, counter, amount);
}
void ProtocolGame::parseModalWindowAnswer(NetworkMessage& msg)
{
uint32_t id = msg.get<uint32_t>();
uint8_t button = msg.getByte();
uint8_t choice = msg.getByte();
addGameTask(&Game::playerAnswerModalWindow, player->getID(), id, button, choice);
}
void ProtocolGame::parseBrowseField(NetworkMessage& msg)
{
const Position& pos = msg.getPosition();
addGameTask(&Game::playerBrowseField, player->getID(), pos);
}
void ProtocolGame::parseSeekInContainer(NetworkMessage& msg)
{
uint8_t containerId = msg.getByte();
uint16_t index = msg.get<uint16_t>();
addGameTask(&Game::playerSeekInContainer, player->getID(), containerId, index);
}
// Send methods
void ProtocolGame::sendOpenPrivateChannel(const std::string& receiver)
{
NetworkMessage msg;
msg.addByte(0xAD);
msg.addString(receiver);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendChannelEvent(uint16_t channelId, const std::string& playerName, ChannelEvent_t channelEvent)
{
NetworkMessage msg;
msg.addByte(0xF3);
msg.add<uint16_t>(channelId);
msg.addString(playerName);
msg.addByte(channelEvent);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureOutfit(const Creature* creature, const Outfit_t& outfit)
{
if (!canSee(creature)) {
return;
}
NetworkMessage msg;
msg.addByte(0x8E);
msg.add<uint32_t>(creature->getID());
AddOutfit(msg, outfit);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureLight(const Creature* creature)
{
if (!canSee(creature)) {
return;
}
NetworkMessage msg;
AddCreatureLight(msg, creature);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendWorldLight(LightInfo lightInfo)
{
NetworkMessage msg;
AddWorldLight(msg, lightInfo);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureWalkthrough(const Creature* creature, bool walkthrough)
{
if (!canSee(creature)) {
return;
}
NetworkMessage msg;
msg.addByte(0x92);
msg.add<uint32_t>(creature->getID());
msg.addByte(walkthrough ? 0x00 : 0x01);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureShield(const Creature* creature)
{
if (!canSee(creature)) {
return;
}
NetworkMessage msg;
msg.addByte(0x91);
msg.add<uint32_t>(creature->getID());
msg.addByte(player->getPartyShield(creature->getPlayer()));
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureSkull(const Creature* creature)
{
if (g_game.getWorldType() != WORLD_TYPE_PVP) {
return;
}
if (!canSee(creature)) {
return;
}
NetworkMessage msg;
msg.addByte(0x90);
msg.add<uint32_t>(creature->getID());
msg.addByte(player->getSkullClient(creature));
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureType(uint32_t creatureId, uint8_t creatureType)
{
NetworkMessage msg;
msg.addByte(0x95);
msg.add<uint32_t>(creatureId);
msg.addByte(creatureType);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureHelpers(uint32_t creatureId, uint16_t helpers)
{
NetworkMessage msg;
msg.addByte(0x94);
msg.add<uint32_t>(creatureId);
msg.add<uint16_t>(helpers);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureSquare(const Creature* creature, SquareColor_t color)
{
if (!canSee(creature)) {
return;
}
NetworkMessage msg;
msg.addByte(0x93);
msg.add<uint32_t>(creature->getID());
msg.addByte(0x01);
msg.addByte(color);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendTutorial(uint8_t tutorialId)
{
NetworkMessage msg;
msg.addByte(0xDC);
msg.addByte(tutorialId);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendAddMarker(const Position& pos, uint8_t markType, const std::string& desc)
{
NetworkMessage msg;
msg.addByte(0xDD);
msg.addPosition(pos);
msg.addByte(markType);
msg.addString(desc);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendReLoginWindow(uint8_t unfairFightReduction)
{
NetworkMessage msg;
msg.addByte(0x28);
msg.addByte(0x00);
msg.addByte(unfairFightReduction);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendStats()
{
NetworkMessage msg;
AddPlayerStats(msg);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendBasicData()
{
NetworkMessage msg;
msg.addByte(0x9F);
if (player->isPremium()) {
msg.addByte(1);
msg.add<uint32_t>(time(nullptr) + (player->premiumDays * 86400));
} else {
msg.addByte(0);
msg.add<uint32_t>(0);
}
msg.addByte(player->getVocation()->getClientId());
msg.add<uint16_t>(0xFF); // number of known spells
for (uint8_t spellId = 0x00; spellId < 0xFF; spellId++) {
msg.addByte(spellId);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendTextMessage(const TextMessage& message)
{
NetworkMessage msg;
msg.addByte(0xB4);
msg.addByte(message.type);
switch (message.type) {
case MESSAGE_DAMAGE_DEALT:
case MESSAGE_DAMAGE_RECEIVED:
case MESSAGE_DAMAGE_OTHERS: {
msg.addPosition(message.position);
msg.add<uint32_t>(message.primary.value);
msg.addByte(message.primary.color);
msg.add<uint32_t>(message.secondary.value);
msg.addByte(message.secondary.color);
break;
}
case MESSAGE_HEALED:
case MESSAGE_HEALED_OTHERS:
case MESSAGE_EXPERIENCE:
case MESSAGE_EXPERIENCE_OTHERS: {
msg.addPosition(message.position);
msg.add<uint32_t>(message.primary.value);
msg.addByte(message.primary.color);
break;
}
case MESSAGE_GUILD:
case MESSAGE_PARTY_MANAGEMENT:
case MESSAGE_PARTY:
msg.add<uint16_t>(message.channelId);
break;
default: {
break;
}
}
msg.addString(message.text);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendClosePrivate(uint16_t channelId)
{
NetworkMessage msg;
msg.addByte(0xB3);
msg.add<uint16_t>(channelId);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatePrivateChannel(uint16_t channelId, const std::string& channelName)
{
NetworkMessage msg;
msg.addByte(0xB2);
msg.add<uint16_t>(channelId);
msg.addString(channelName);
msg.add<uint16_t>(0x01);
msg.addString(player->getName());
msg.add<uint16_t>(0x00);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendChannelsDialog()
{
NetworkMessage msg;
msg.addByte(0xAB);
const ChannelList& list = g_chat->getChannelList(*player);
msg.addByte(list.size());
for (ChatChannel* channel : list) {
msg.add<uint16_t>(channel->getId());
msg.addString(channel->getName());
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendChannel(uint16_t channelId, const std::string& channelName, const UsersMap* channelUsers, const InvitedMap* invitedUsers)
{
NetworkMessage msg;
msg.addByte(0xAC);
msg.add<uint16_t>(channelId);
msg.addString(channelName);
if (channelUsers) {
msg.add<uint16_t>(channelUsers->size());
for (const auto& it : *channelUsers) {
msg.addString(it.second->getName());
}
} else {
msg.add<uint16_t>(0x00);
}
if (invitedUsers) {
msg.add<uint16_t>(invitedUsers->size());
for (const auto& it : *invitedUsers) {
msg.addString(it.second->getName());
}
} else {
msg.add<uint16_t>(0x00);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendChannelMessage(const std::string& author, const std::string& text, SpeakClasses type, uint16_t channel)
{
NetworkMessage msg;
msg.addByte(0xAA);
msg.add<uint32_t>(0x00);
msg.addString(author);
msg.add<uint16_t>(0x00);
msg.addByte(type);
msg.add<uint16_t>(channel);
msg.addString(text);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendIcons(uint16_t icons)
{
NetworkMessage msg;
msg.addByte(0xA2);
msg.add<uint16_t>(icons);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendContainer(uint8_t cid, const Container* container, bool hasParent, uint16_t firstIndex)
{
NetworkMessage msg;
msg.addByte(0x6E);
msg.addByte(cid);
if (container->getID() == ITEM_BROWSEFIELD) {
msg.addItem(ITEM_BAG, 1);
msg.addString("Browse Field");
} else {
msg.addItem(container);
msg.addString(container->getName());
}
msg.addByte(container->capacity());
msg.addByte(hasParent ? 0x01 : 0x00);
msg.addByte(container->isUnlocked() ? 0x01 : 0x00); // Drag and drop
msg.addByte(container->hasPagination() ? 0x01 : 0x00); // Pagination
uint32_t containerSize = container->size();
msg.add<uint16_t>(containerSize);
msg.add<uint16_t>(firstIndex);
if (firstIndex < containerSize) {
uint8_t itemsToSend = std::min<uint32_t>(std::min<uint32_t>(container->capacity(), containerSize - firstIndex), std::numeric_limits<uint8_t>::max());
msg.addByte(itemsToSend);
for (auto it = container->getItemList().begin() + firstIndex, end = it + itemsToSend; it != end; ++it) {
msg.addItem(*it);
}
} else {
msg.addByte(0x00);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendShop(Npc* npc, const ShopInfoList& itemList)
{
NetworkMessage msg;
msg.addByte(0x7A);
msg.addString(npc->getName());
uint16_t itemsToSend = std::min<size_t>(itemList.size(), std::numeric_limits<uint16_t>::max());
msg.add<uint16_t>(itemsToSend);
uint16_t i = 0;
for (auto it = itemList.begin(); i < itemsToSend; ++it, ++i) {
AddShopItem(msg, *it);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCloseShop()
{
NetworkMessage msg;
msg.addByte(0x7C);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendSaleItemList(const std::list<ShopInfo>& shop)
{
NetworkMessage msg;
msg.addByte(0x7B);
msg.add<uint64_t>(player->getMoney() + player->getBankBalance());
std::map<uint16_t, uint32_t> saleMap;
if (shop.size() <= 5) {
// For very small shops it's not worth it to create the complete map
for (const ShopInfo& shopInfo : shop) {
if (shopInfo.sellPrice == 0) {
continue;
}
int8_t subtype = -1;
const ItemType& itemType = Item::items[shopInfo.itemId];
if (itemType.hasSubType() && !itemType.stackable) {
subtype = (shopInfo.subType == 0 ? -1 : shopInfo.subType);
}
uint32_t count = player->getItemTypeCount(shopInfo.itemId, subtype);
if (count > 0) {
saleMap[shopInfo.itemId] = count;
}
}
} else {
// Large shop, it's better to get a cached map of all item counts and use it
// We need a temporary map since the finished map should only contain items
// available in the shop
std::map<uint32_t, uint32_t> tempSaleMap;
player->getAllItemTypeCount(tempSaleMap);
// We must still check manually for the special items that require subtype matches
// (That is, fluids such as potions etc., actually these items are very few since
// health potions now use their own ID)
for (const ShopInfo& shopInfo : shop) {
if (shopInfo.sellPrice == 0) {
continue;
}
int8_t subtype = -1;
const ItemType& itemType = Item::items[shopInfo.itemId];
if (itemType.hasSubType() && !itemType.stackable) {
subtype = (shopInfo.subType == 0 ? -1 : shopInfo.subType);
}
if (subtype != -1) {
uint32_t count;
if (!itemType.isFluidContainer() && !itemType.isSplash()) {
count = player->getItemTypeCount(shopInfo.itemId, subtype); // This shop item requires extra checks
} else {
count = subtype;
}
if (count > 0) {
saleMap[shopInfo.itemId] = count;
}
} else {
std::map<uint32_t, uint32_t>::const_iterator findIt = tempSaleMap.find(shopInfo.itemId);
if (findIt != tempSaleMap.end() && findIt->second > 0) {
saleMap[shopInfo.itemId] = findIt->second;
}
}
}
}
uint8_t itemsToSend = std::min<size_t>(saleMap.size(), std::numeric_limits<uint8_t>::max());
msg.addByte(itemsToSend);
uint8_t i = 0;
for (std::map<uint16_t, uint32_t>::const_iterator it = saleMap.begin(); i < itemsToSend; ++it, ++i) {
msg.addItemId(it->first);
msg.addByte(std::min<uint32_t>(it->second, std::numeric_limits<uint8_t>::max()));
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMarketEnter(uint32_t depotId)
{
NetworkMessage msg;
msg.addByte(0xF6);
msg.add<uint64_t>(player->getBankBalance());
msg.addByte(std::min<uint32_t>(IOMarket::getPlayerOfferCount(player->getGUID()), std::numeric_limits<uint8_t>::max()));
DepotChest* depotChest = player->getDepotChest(depotId, false);
if (!depotChest) {
msg.add<uint16_t>(0x00);
writeToOutputBuffer(msg);
return;
}
player->setInMarket(true);
std::map<uint16_t, uint32_t> depotItems;
std::forward_list<Container*> containerList { depotChest, player->getInbox() };
do {
Container* container = containerList.front();
containerList.pop_front();
for (Item* item : container->getItemList()) {
Container* c = item->getContainer();
if (c && !c->empty()) {
containerList.push_front(c);
continue;
}
const ItemType& itemType = Item::items[item->getID()];
if (itemType.wareId == 0) {
continue;
}
if (c && (!itemType.isContainer() || c->capacity() != itemType.maxItems)) {
continue;
}
if (!item->hasMarketAttributes()) {
continue;
}
depotItems[itemType.wareId] += Item::countByType(item, -1);
}
} while (!containerList.empty());
uint16_t itemsToSend = std::min<size_t>(depotItems.size(), std::numeric_limits<uint16_t>::max());
msg.add<uint16_t>(itemsToSend);
uint16_t i = 0;
for (std::map<uint16_t, uint32_t>::const_iterator it = depotItems.begin(); i < itemsToSend; ++it, ++i) {
msg.add<uint16_t>(it->first);
msg.add<uint16_t>(std::min<uint32_t>(0xFFFF, it->second));
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMarketLeave()
{
NetworkMessage msg;
msg.addByte(0xF7);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMarketBrowseItem(uint16_t itemId, const MarketOfferList& buyOffers, const MarketOfferList& sellOffers)
{
NetworkMessage msg;
msg.addByte(0xF9);
msg.addItemId(itemId);
msg.add<uint32_t>(buyOffers.size());
for (const MarketOffer& offer : buyOffers) {
msg.add<uint32_t>(offer.timestamp);
msg.add<uint16_t>(offer.counter);
msg.add<uint16_t>(offer.amount);
msg.add<uint32_t>(offer.price);
msg.addString(offer.playerName);
}
msg.add<uint32_t>(sellOffers.size());
for (const MarketOffer& offer : sellOffers) {
msg.add<uint32_t>(offer.timestamp);
msg.add<uint16_t>(offer.counter);
msg.add<uint16_t>(offer.amount);
msg.add<uint32_t>(offer.price);
msg.addString(offer.playerName);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMarketAcceptOffer(const MarketOfferEx& offer)
{
NetworkMessage msg;
msg.addByte(0xF9);
msg.addItemId(offer.itemId);
if (offer.type == MARKETACTION_BUY) {
msg.add<uint32_t>(0x01);
msg.add<uint32_t>(offer.timestamp);
msg.add<uint16_t>(offer.counter);
msg.add<uint16_t>(offer.amount);
msg.add<uint32_t>(offer.price);
msg.addString(offer.playerName);
msg.add<uint32_t>(0x00);
} else {
msg.add<uint32_t>(0x00);
msg.add<uint32_t>(0x01);
msg.add<uint32_t>(offer.timestamp);
msg.add<uint16_t>(offer.counter);
msg.add<uint16_t>(offer.amount);
msg.add<uint32_t>(offer.price);
msg.addString(offer.playerName);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMarketBrowseOwnOffers(const MarketOfferList& buyOffers, const MarketOfferList& sellOffers)
{
NetworkMessage msg;
msg.addByte(0xF9);
msg.add<uint16_t>(MARKETREQUEST_OWN_OFFERS);
msg.add<uint32_t>(buyOffers.size());
for (const MarketOffer& offer : buyOffers) {
msg.add<uint32_t>(offer.timestamp);
msg.add<uint16_t>(offer.counter);
msg.addItemId(offer.itemId);
msg.add<uint16_t>(offer.amount);
msg.add<uint32_t>(offer.price);
}
msg.add<uint32_t>(sellOffers.size());
for (const MarketOffer& offer : sellOffers) {
msg.add<uint32_t>(offer.timestamp);
msg.add<uint16_t>(offer.counter);
msg.addItemId(offer.itemId);
msg.add<uint16_t>(offer.amount);
msg.add<uint32_t>(offer.price);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMarketCancelOffer(const MarketOfferEx& offer)
{
NetworkMessage msg;
msg.addByte(0xF9);
msg.add<uint16_t>(MARKETREQUEST_OWN_OFFERS);
if (offer.type == MARKETACTION_BUY) {
msg.add<uint32_t>(0x01);
msg.add<uint32_t>(offer.timestamp);
msg.add<uint16_t>(offer.counter);
msg.addItemId(offer.itemId);
msg.add<uint16_t>(offer.amount);
msg.add<uint32_t>(offer.price);
msg.add<uint32_t>(0x00);
} else {
msg.add<uint32_t>(0x00);
msg.add<uint32_t>(0x01);
msg.add<uint32_t>(offer.timestamp);
msg.add<uint16_t>(offer.counter);
msg.addItemId(offer.itemId);
msg.add<uint16_t>(offer.amount);
msg.add<uint32_t>(offer.price);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMarketBrowseOwnHistory(const HistoryMarketOfferList& buyOffers, const HistoryMarketOfferList& sellOffers)
{
uint32_t i = 0;
std::map<uint32_t, uint16_t> counterMap;
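	// each history list is capped at 810 entries, but one side may borrow whatever capacity the other side leaves unused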
uint32_t buyOffersToSend = std::min<uint32_t>(buyOffers.size(), 810 + std::max<int32_t>(0, 810 - sellOffers.size()));
uint32_t sellOffersToSend = std::min<uint32_t>(sellOffers.size(), 810 + std::max<int32_t>(0, 810 - buyOffers.size()));
NetworkMessage msg;
msg.addByte(0xF9);
msg.add<uint16_t>(MARKETREQUEST_OWN_HISTORY);
msg.add<uint32_t>(buyOffersToSend);
for (auto it = buyOffers.begin(); i < buyOffersToSend; ++it, ++i) {
msg.add<uint32_t>(it->timestamp);
msg.add<uint16_t>(counterMap[it->timestamp]++);
msg.addItemId(it->itemId);
msg.add<uint16_t>(it->amount);
msg.add<uint32_t>(it->price);
msg.addByte(it->state);
}
counterMap.clear();
i = 0;
msg.add<uint32_t>(sellOffersToSend);
for (auto it = sellOffers.begin(); i < sellOffersToSend; ++it, ++i) {
msg.add<uint32_t>(it->timestamp);
msg.add<uint16_t>(counterMap[it->timestamp]++);
msg.addItemId(it->itemId);
msg.add<uint16_t>(it->amount);
msg.add<uint32_t>(it->price);
msg.addByte(it->state);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMarketDetail(uint16_t itemId)
{
NetworkMessage msg;
msg.addByte(0xF8);
msg.addItemId(itemId);
const ItemType& it = Item::items[itemId];
if (it.armor != 0) {
msg.addString(std::to_string(it.armor));
} else {
msg.add<uint16_t>(0x00);
}
if (it.attack != 0) {
// TODO: chance to hit, range
// example:
// "attack +x, chance to hit +y%, z fields"
if (it.abilities && it.abilities->elementType != COMBAT_NONE && it.abilities->elementDamage != 0) {
std::ostringstream ss;
ss << it.attack << " physical +" << it.abilities->elementDamage << ' ' << getCombatName(it.abilities->elementType);
msg.addString(ss.str());
} else {
msg.addString(std::to_string(it.attack));
}
} else {
msg.add<uint16_t>(0x00);
}
if (it.isContainer()) {
msg.addString(std::to_string(it.maxItems));
} else {
msg.add<uint16_t>(0x00);
}
if (it.defense != 0) {
if (it.extraDefense != 0) {
std::ostringstream ss;
ss << it.defense << ' ' << std::showpos << it.extraDefense << std::noshowpos;
msg.addString(ss.str());
} else {
msg.addString(std::to_string(it.defense));
}
} else {
msg.add<uint16_t>(0x00);
}
if (!it.description.empty()) {
const std::string& descr = it.description;
if (descr.back() == '.') {
msg.addString(std::string(descr, 0, descr.length() - 1));
} else {
msg.addString(descr);
}
} else {
msg.add<uint16_t>(0x00);
}
if (it.decayTime != 0) {
std::ostringstream ss;
ss << it.decayTime << " seconds";
msg.addString(ss.str());
} else {
msg.add<uint16_t>(0x00);
}
if (it.abilities) {
std::ostringstream ss;
bool separator = false;
for (size_t i = 0; i < COMBAT_COUNT; ++i) {
if (it.abilities->absorbPercent[i] == 0) {
continue;
}
if (separator) {
ss << ", ";
} else {
separator = true;
}
ss << getCombatName(indexToCombatType(i)) << ' ' << std::showpos << it.abilities->absorbPercent[i] << std::noshowpos << '%';
}
msg.addString(ss.str());
} else {
msg.add<uint16_t>(0x00);
}
if (it.minReqLevel != 0) {
msg.addString(std::to_string(it.minReqLevel));
} else {
msg.add<uint16_t>(0x00);
}
if (it.minReqMagicLevel != 0) {
msg.addString(std::to_string(it.minReqMagicLevel));
} else {
msg.add<uint16_t>(0x00);
}
msg.addString(it.vocationString);
msg.addString(it.runeSpellName);
if (it.abilities) {
std::ostringstream ss;
bool separator = false;
for (uint8_t i = SKILL_FIRST; i <= SKILL_LAST; i++) {
if (!it.abilities->skills[i]) {
continue;
}
if (separator) {
ss << ", ";
} else {
separator = true;
}
ss << getSkillName(i) << ' ' << std::showpos << it.abilities->skills[i] << std::noshowpos;
}
if (it.abilities->stats[STAT_MAGICPOINTS] != 0) {
if (separator) {
ss << ", ";
} else {
separator = true;
}
ss << "magic level " << std::showpos << it.abilities->stats[STAT_MAGICPOINTS] << std::noshowpos;
}
if (it.abilities->speed != 0) {
if (separator) {
ss << ", ";
}
ss << "speed " << std::showpos << (it.abilities->speed >> 1) << std::noshowpos;
}
msg.addString(ss.str());
} else {
msg.add<uint16_t>(0x00);
}
if (it.charges != 0) {
msg.addString(std::to_string(it.charges));
} else {
msg.add<uint16_t>(0x00);
}
std::string weaponName = getWeaponName(it.weaponType);
if (it.slotPosition & SLOTP_TWO_HAND) {
if (!weaponName.empty()) {
weaponName += ", two-handed";
} else {
weaponName = "two-handed";
}
}
msg.addString(weaponName);
if (it.weight != 0) {
std::ostringstream ss;
if (it.weight < 10) {
ss << "0.0" << it.weight;
} else if (it.weight < 100) {
ss << "0." << it.weight;
} else {
std::string weightString = std::to_string(it.weight);
weightString.insert(weightString.end() - 2, '.');
ss << weightString;
}
ss << " oz";
msg.addString(ss.str());
} else {
msg.add<uint16_t>(0x00);
}
MarketStatistics* statistics = IOMarket::getInstance().getPurchaseStatistics(itemId);
if (statistics) {
msg.addByte(0x01);
msg.add<uint32_t>(statistics->numTransactions);
msg.add<uint32_t>(std::min<uint64_t>(std::numeric_limits<uint32_t>::max(), statistics->totalPrice));
msg.add<uint32_t>(statistics->highestPrice);
msg.add<uint32_t>(statistics->lowestPrice);
} else {
msg.addByte(0x00);
}
statistics = IOMarket::getInstance().getSaleStatistics(itemId);
if (statistics) {
msg.addByte(0x01);
msg.add<uint32_t>(statistics->numTransactions);
msg.add<uint32_t>(std::min<uint64_t>(std::numeric_limits<uint32_t>::max(), statistics->totalPrice));
msg.add<uint32_t>(statistics->highestPrice);
msg.add<uint32_t>(statistics->lowestPrice);
} else {
msg.addByte(0x00);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendQuestLog()
{
NetworkMessage msg;
msg.addByte(0xF0);
msg.add<uint16_t>(g_game.quests.getQuestsCount(player));
for (const Quest& quest : g_game.quests.getQuests()) {
if (quest.isStarted(player)) {
msg.add<uint16_t>(quest.getID());
msg.addString(quest.getName());
msg.addByte(quest.isCompleted(player));
}
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendQuestLine(const Quest* quest)
{
NetworkMessage msg;
msg.addByte(0xF1);
msg.add<uint16_t>(quest->getID());
msg.addByte(quest->getMissionsCount(player));
for (const Mission& mission : quest->getMissions()) {
if (mission.isStarted(player)) {
msg.addString(mission.getName(player));
msg.addString(mission.getDescription(player));
}
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendTradeItemRequest(const std::string& traderName, const Item* item, bool ack)
{
NetworkMessage msg;
if (ack) {
msg.addByte(0x7D);
} else {
msg.addByte(0x7E);
}
msg.addString(traderName);
if (const Container* tradeContainer = item->getContainer()) {
std::list<const Container*> listContainer {tradeContainer};
std::list<const Item*> itemList {tradeContainer};
while (!listContainer.empty()) {
const Container* container = listContainer.front();
listContainer.pop_front();
for (Item* containerItem : container->getItemList()) {
Container* tmpContainer = containerItem->getContainer();
if (tmpContainer) {
listContainer.push_back(tmpContainer);
}
itemList.push_back(containerItem);
}
}
msg.addByte(itemList.size());
for (const Item* listItem : itemList) {
msg.addItem(listItem);
}
} else {
msg.addByte(0x01);
msg.addItem(item);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCloseTrade()
{
NetworkMessage msg;
msg.addByte(0x7F);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCloseContainer(uint8_t cid)
{
NetworkMessage msg;
msg.addByte(0x6F);
msg.addByte(cid);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureTurn(const Creature* creature, uint32_t stackPos)
{
if (!canSee(creature)) {
return;
}
NetworkMessage msg;
msg.addByte(0x6B);
msg.addPosition(creature->getPosition());
msg.addByte(stackPos);
msg.add<uint16_t>(0x63);
msg.add<uint32_t>(creature->getID());
msg.addByte(creature->getDirection());
msg.addByte(player->canWalkthroughEx(creature) ? 0x00 : 0x01);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureSay(const Creature* creature, SpeakClasses type, const std::string& text, const Position* pos/* = nullptr*/)
{
NetworkMessage msg;
msg.addByte(0xAA);
static uint32_t statementId = 0;
msg.add<uint32_t>(++statementId);
msg.addString(creature->getName());
//Add level only for players
if (const Player* speaker = creature->getPlayer()) {
msg.add<uint16_t>(speaker->getLevel());
} else {
msg.add<uint16_t>(0x00);
}
msg.addByte(type);
if (pos) {
msg.addPosition(*pos);
} else {
msg.addPosition(creature->getPosition());
}
msg.addString(text);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendToChannel(const Creature* creature, SpeakClasses type, const std::string& text, uint16_t channelId)
{
NetworkMessage msg;
msg.addByte(0xAA);
static uint32_t statementId = 0;
msg.add<uint32_t>(++statementId);
if (!creature) {
msg.add<uint32_t>(0x00);
} else if (type == TALKTYPE_CHANNEL_R2) {
msg.add<uint32_t>(0x00);
type = TALKTYPE_CHANNEL_R1;
} else {
msg.addString(creature->getName());
//Add level only for players
if (const Player* speaker = creature->getPlayer()) {
msg.add<uint16_t>(speaker->getLevel());
} else {
msg.add<uint16_t>(0x00);
}
}
msg.addByte(type);
msg.add<uint16_t>(channelId);
msg.addString(text);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendPrivateMessage(const Player* speaker, SpeakClasses type, const std::string& text)
{
NetworkMessage msg;
msg.addByte(0xAA);
static uint32_t statementId = 0;
msg.add<uint32_t>(++statementId);
if (speaker) {
msg.addString(speaker->getName());
msg.add<uint16_t>(speaker->getLevel());
} else {
msg.add<uint32_t>(0x00);
}
msg.addByte(type);
msg.addString(text);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCancelTarget()
{
NetworkMessage msg;
msg.addByte(0xA3);
msg.add<uint32_t>(0x00);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendChangeSpeed(const Creature* creature, uint32_t speed)
{
NetworkMessage msg;
msg.addByte(0x8F);
msg.add<uint32_t>(creature->getID());
msg.add<uint16_t>(creature->getBaseSpeed() / 2);
msg.add<uint16_t>(speed / 2);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCancelWalk()
{
NetworkMessage msg;
msg.addByte(0xB5);
msg.addByte(player->getDirection());
writeToOutputBuffer(msg);
}
void ProtocolGame::sendSkills()
{
NetworkMessage msg;
AddPlayerSkills(msg);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendPing()
{
NetworkMessage msg;
msg.addByte(0x1D);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendPingBack()
{
NetworkMessage msg;
msg.addByte(0x1E);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendDistanceShoot(const Position& from, const Position& to, uint8_t type)
{
NetworkMessage msg;
msg.addByte(0x85);
msg.addPosition(from);
msg.addPosition(to);
msg.addByte(type);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMagicEffect(const Position& pos, uint8_t type)
{
if (!canSee(pos)) {
return;
}
NetworkMessage msg;
msg.addByte(0x83);
msg.addPosition(pos);
msg.addByte(type);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureHealth(const Creature* creature)
{
NetworkMessage msg;
msg.addByte(0x8C);
msg.add<uint32_t>(creature->getID());
if (creature->isHealthHidden()) {
msg.addByte(0x00);
} else {
msg.addByte(std::ceil((static_cast<double>(creature->getHealth()) / std::max<int32_t>(creature->getMaxHealth(), 1)) * 100));
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendFYIBox(const std::string& message)
{
NetworkMessage msg;
msg.addByte(0x15);
msg.addString(message);
writeToOutputBuffer(msg);
}
//tile
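// sends the full map description around the player; the visible area is 18x14 tiles
// with the player offset (8, 6) from the top-left corner of that area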
void ProtocolGame::sendMapDescription(const Position& pos)
{
NetworkMessage msg;
msg.addByte(0x64);
msg.addPosition(player->getPosition());
GetMapDescription(pos.x - 8, pos.y - 6, pos.z, 18, 14, msg);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendAddTileItem(const Position& pos, uint32_t stackpos, const Item* item)
{
if (!canSee(pos)) {
return;
}
NetworkMessage msg;
msg.addByte(0x6A);
msg.addPosition(pos);
msg.addByte(stackpos);
msg.addItem(item);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendUpdateTileItem(const Position& pos, uint32_t stackpos, const Item* item)
{
if (!canSee(pos)) {
return;
}
NetworkMessage msg;
msg.addByte(0x6B);
msg.addPosition(pos);
msg.addByte(stackpos);
msg.addItem(item);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendRemoveTileThing(const Position& pos, uint32_t stackpos)
{
if (!canSee(pos)) {
return;
}
NetworkMessage msg;
RemoveTileThing(msg, pos, stackpos);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendUpdateTile(const Tile* tile, const Position& pos)
{
if (!canSee(pos)) {
return;
}
NetworkMessage msg;
msg.addByte(0x69);
msg.addPosition(pos);
if (tile) {
GetTileDescription(tile, msg);
msg.addByte(0x00);
msg.addByte(0xFF);
} else {
msg.addByte(0x01);
msg.addByte(0xFF);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendPendingStateEntered()
{
NetworkMessage msg;
msg.addByte(0x0A);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendEnterWorld()
{
NetworkMessage msg;
msg.addByte(0x0F);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendFightModes()
{
NetworkMessage msg;
msg.addByte(0xA7);
msg.addByte(player->fightMode);
msg.addByte(player->chaseMode);
msg.addByte(player->secureMode);
msg.addByte(PVP_MODE_DOVE);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendAddCreature(const Creature* creature, const Position& pos, int32_t stackpos, bool isLogin)
{
if (!canSee(pos)) {
return;
}
if (creature != player) {
if (stackpos != -1) {
NetworkMessage msg;
msg.addByte(0x6A);
msg.addPosition(pos);
msg.addByte(stackpos);
bool known;
uint32_t removedKnown;
checkCreatureAsKnown(creature->getID(), known, removedKnown);
AddCreature(msg, creature, known, removedKnown);
writeToOutputBuffer(msg);
}
if (isLogin) {
sendMagicEffect(pos, CONST_ME_TELEPORT);
}
return;
}
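	// first login packet (0x17): player id, beat duration, the client-side speed formula constants and a few capability flags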
NetworkMessage msg;
msg.addByte(0x17);
msg.add<uint32_t>(player->getID());
msg.add<uint16_t>(0x32); // beat duration (50)
msg.addDouble(Creature::speedA, 3);
msg.addDouble(Creature::speedB, 3);
msg.addDouble(Creature::speedC, 3);
// can report bugs?
if (player->getAccountType() >= ACCOUNT_TYPE_TUTOR) {
msg.addByte(0x01);
} else {
msg.addByte(0x00);
}
msg.addByte(0x00); // can change pvp framing option
msg.addByte(0x00); // expert mode button enabled
msg.add<uint16_t>(0x00); // URL (string) to ingame store images
msg.add<uint16_t>(25); // premium coin package size
writeToOutputBuffer(msg);
sendPendingStateEntered();
sendEnterWorld();
sendMapDescription(pos);
if (isLogin) {
sendMagicEffect(pos, CONST_ME_TELEPORT);
}
for (int i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; ++i) {
sendInventoryItem(static_cast<slots_t>(i), player->getInventoryItem(static_cast<slots_t>(i)));
}
sendStats();
sendSkills();
//gameworld light-settings
sendWorldLight(g_game.getWorldLightInfo());
//player light level
sendCreatureLight(creature);
sendVIPEntries();
sendBasicData();
player->sendIcons();
}
void ProtocolGame::sendMoveCreature(const Creature* creature, const Position& newPos, int32_t newStackPos, const Position& oldPos, int32_t oldStackPos, bool teleport)
{
if (creature == player) {
if (oldStackPos >= 10) {
sendMapDescription(newPos);
} else if (teleport) {
NetworkMessage msg;
RemoveTileThing(msg, oldPos, oldStackPos);
writeToOutputBuffer(msg);
sendMapDescription(newPos);
} else {
NetworkMessage msg;
if (oldPos.z == 7 && newPos.z >= 8) {
RemoveTileThing(msg, oldPos, oldStackPos);
} else {
msg.addByte(0x6D);
msg.addPosition(oldPos);
msg.addByte(oldStackPos);
msg.addPosition(newPos);
}
if (newPos.z > oldPos.z) {
MoveDownCreature(msg, creature, newPos, oldPos);
} else if (newPos.z < oldPos.z) {
MoveUpCreature(msg, creature, newPos, oldPos);
}
if (oldPos.y > newPos.y) { // north, for old x
msg.addByte(0x65);
GetMapDescription(oldPos.x - 8, newPos.y - 6, newPos.z, 18, 1, msg);
} else if (oldPos.y < newPos.y) { // south, for old x
msg.addByte(0x67);
GetMapDescription(oldPos.x - 8, newPos.y + 7, newPos.z, 18, 1, msg);
}
if (oldPos.x < newPos.x) { // east, [with new y]
msg.addByte(0x66);
GetMapDescription(newPos.x + 9, newPos.y - 6, newPos.z, 1, 14, msg);
} else if (oldPos.x > newPos.x) { // west, [with new y]
msg.addByte(0x68);
GetMapDescription(newPos.x - 8, newPos.y - 6, newPos.z, 1, 14, msg);
}
writeToOutputBuffer(msg);
}
} else if (canSee(oldPos) && canSee(creature->getPosition())) {
if (teleport || (oldPos.z == 7 && newPos.z >= 8) || oldStackPos >= 10) {
sendRemoveTileThing(oldPos, oldStackPos);
sendAddCreature(creature, newPos, newStackPos, false);
} else {
NetworkMessage msg;
msg.addByte(0x6D);
msg.addPosition(oldPos);
msg.addByte(oldStackPos);
msg.addPosition(creature->getPosition());
writeToOutputBuffer(msg);
}
} else if (canSee(oldPos)) {
sendRemoveTileThing(oldPos, oldStackPos);
} else if (canSee(creature->getPosition())) {
sendAddCreature(creature, newPos, newStackPos, false);
}
}
void ProtocolGame::sendInventoryItem(slots_t slot, const Item* item)
{
NetworkMessage msg;
if (item) {
msg.addByte(0x78);
msg.addByte(slot);
msg.addItem(item);
} else {
msg.addByte(0x79);
msg.addByte(slot);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendItems()
{
NetworkMessage msg;
msg.addByte(0xF5);
const std::vector<uint16_t>& inventory = Item::items.getInventory();
msg.add<uint16_t>(inventory.size() + 11);
for (uint16_t i = 1; i <= 11; i++) {
msg.add<uint16_t>(i);
msg.addByte(0); //always 0
msg.add<uint16_t>(1); // always 1
}
for (auto clientId : inventory) {
msg.add<uint16_t>(clientId);
msg.addByte(0); //always 0
msg.add<uint16_t>(1);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendAddContainerItem(uint8_t cid, uint16_t slot, const Item* item)
{
NetworkMessage msg;
msg.addByte(0x70);
msg.addByte(cid);
msg.add<uint16_t>(slot);
msg.addItem(item);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendUpdateContainerItem(uint8_t cid, uint16_t slot, const Item* item)
{
NetworkMessage msg;
msg.addByte(0x71);
msg.addByte(cid);
msg.add<uint16_t>(slot);
msg.addItem(item);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendRemoveContainerItem(uint8_t cid, uint16_t slot, const Item* lastItem)
{
NetworkMessage msg;
msg.addByte(0x72);
msg.addByte(cid);
msg.add<uint16_t>(slot);
if (lastItem) {
msg.addItem(lastItem);
} else {
msg.add<uint16_t>(0x00);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendTextWindow(uint32_t windowTextId, Item* item, uint16_t maxlen, bool canWrite)
{
NetworkMessage msg;
msg.addByte(0x96);
msg.add<uint32_t>(windowTextId);
msg.addItem(item);
if (canWrite) {
msg.add<uint16_t>(maxlen);
msg.addString(item->getText());
} else {
const std::string& text = item->getText();
msg.add<uint16_t>(text.size());
msg.addString(text);
}
const std::string& writer = item->getWriter();
if (!writer.empty()) {
msg.addString(writer);
} else {
msg.add<uint16_t>(0x00);
}
time_t writtenDate = item->getDate();
if (writtenDate != 0) {
msg.addString(formatDateShort(writtenDate));
} else {
msg.add<uint16_t>(0x00);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendTextWindow(uint32_t windowTextId, uint32_t itemId, const std::string& text)
{
NetworkMessage msg;
msg.addByte(0x96);
msg.add<uint32_t>(windowTextId);
msg.addItem(itemId, 1);
msg.add<uint16_t>(text.size());
msg.addString(text);
msg.add<uint16_t>(0x00);
msg.add<uint16_t>(0x00);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendHouseWindow(uint32_t windowTextId, const std::string& text)
{
NetworkMessage msg;
msg.addByte(0x97);
msg.addByte(0x00);
msg.add<uint32_t>(windowTextId);
msg.addString(text);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendOutfitWindow()
{
NetworkMessage msg;
msg.addByte(0xC8);
Outfit_t currentOutfit = player->getDefaultOutfit();
Mount* currentMount = g_game.mounts.getMountByID(player->getCurrentMount());
if (currentMount) {
currentOutfit.lookMount = currentMount->clientId;
}
AddOutfit(msg, currentOutfit);
std::vector<ProtocolOutfit> protocolOutfits;
if (player->isAccessPlayer()) {
static const std::string gamemasterOutfitName = "Gamemaster";
protocolOutfits.emplace_back(gamemasterOutfitName, 75, 0);
}
const auto& outfits = Outfits::getInstance().getOutfits(player->getSex());
protocolOutfits.reserve(outfits.size());
for (const Outfit& outfit : outfits) {
uint8_t addons;
if (!player->getOutfitAddons(outfit, addons)) {
continue;
}
protocolOutfits.emplace_back(outfit.name, outfit.lookType, addons);
if (protocolOutfits.size() == 100) { // Game client doesn't allow more than 100 outfits
break;
}
}
msg.addByte(protocolOutfits.size());
for (const ProtocolOutfit& outfit : protocolOutfits) {
msg.add<uint16_t>(outfit.lookType);
msg.addString(outfit.name);
msg.addByte(outfit.addons);
}
std::vector<const Mount*> mounts;
for (const Mount& mount : g_game.mounts.getMounts()) {
if (player->hasMount(&mount)) {
mounts.push_back(&mount);
}
}
msg.addByte(mounts.size());
for (const Mount* mount : mounts) {
msg.add<uint16_t>(mount->clientId);
msg.addString(mount->name);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendUpdatedVIPStatus(uint32_t guid, VipStatus_t newStatus)
{
NetworkMessage msg;
msg.addByte(0xD3);
msg.add<uint32_t>(guid);
msg.addByte(newStatus);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendVIP(uint32_t guid, const std::string& name, const std::string& description, uint32_t icon, bool notify, VipStatus_t status)
{
NetworkMessage msg;
msg.addByte(0xD2);
msg.add<uint32_t>(guid);
msg.addString(name);
msg.addString(description);
msg.add<uint32_t>(std::min<uint32_t>(10, icon));
msg.addByte(notify ? 0x01 : 0x00);
msg.addByte(status);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendVIPEntries()
{
const std::forward_list<VIPEntry>& vipEntries = IOLoginData::getVIPEntries(player->getAccount());
for (const VIPEntry& entry : vipEntries) {
VipStatus_t vipStatus = VIPSTATUS_ONLINE;
Player* vipPlayer = g_game.getPlayerByGUID(entry.guid);
if (!vipPlayer || vipPlayer->isInGhostMode() || player->isAccessPlayer()) {
vipStatus = VIPSTATUS_OFFLINE;
}
sendVIP(entry.guid, entry.name, entry.description, entry.icon, entry.notify, vipStatus);
}
}
void ProtocolGame::sendSpellCooldown(uint8_t spellId, uint32_t time)
{
NetworkMessage msg;
msg.addByte(0xA4);
msg.addByte(spellId);
msg.add<uint32_t>(time);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendSpellGroupCooldown(SpellGroup_t groupId, uint32_t time)
{
NetworkMessage msg;
msg.addByte(0xA5);
msg.addByte(groupId);
msg.add<uint32_t>(time);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendModalWindow(const ModalWindow& modalWindow)
{
NetworkMessage msg;
msg.addByte(0xFA);
msg.add<uint32_t>(modalWindow.id);
msg.addString(modalWindow.title);
msg.addString(modalWindow.message);
msg.addByte(modalWindow.buttons.size());
for (const auto& it : modalWindow.buttons) {
msg.addString(it.first);
msg.addByte(it.second);
}
msg.addByte(modalWindow.choices.size());
for (const auto& it : modalWindow.choices) {
msg.addString(it.first);
msg.addByte(it.second);
}
msg.addByte(modalWindow.defaultEscapeButton);
msg.addByte(modalWindow.defaultEnterButton);
msg.addByte(modalWindow.priority ? 0x01 : 0x00);
writeToOutputBuffer(msg);
}
////////////// Add common messages
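// known creatures are referenced by id only (0x62); unknown creatures get a full
// description (0x61) and replace the cached creature with id 'remove' on the client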
void ProtocolGame::AddCreature(NetworkMessage& msg, const Creature* creature, bool known, uint32_t remove)
{
CreatureType_t creatureType = creature->getType();
const Player* otherPlayer = creature->getPlayer();
if (known) {
msg.add<uint16_t>(0x62);
msg.add<uint32_t>(creature->getID());
} else {
msg.add<uint16_t>(0x61);
msg.add<uint32_t>(remove);
msg.add<uint32_t>(creature->getID());
msg.addByte(creatureType);
msg.addString(creature->getName());
}
if (creature->isHealthHidden()) {
msg.addByte(0x00);
} else {
msg.addByte(std::ceil((static_cast<double>(creature->getHealth()) / std::max<int32_t>(creature->getMaxHealth(), 1)) * 100));
}
msg.addByte(creature->getDirection());
if (!creature->isInGhostMode() && !creature->isInvisible()) {
AddOutfit(msg, creature->getCurrentOutfit());
} else {
static Outfit_t outfit;
AddOutfit(msg, outfit);
}
LightInfo lightInfo = creature->getCreatureLight();
msg.addByte(player->isAccessPlayer() ? 0xFF : lightInfo.level);
msg.addByte(lightInfo.color);
msg.add<uint16_t>(creature->getStepSpeed() / 2);
msg.addByte(player->getSkullClient(creature));
msg.addByte(player->getPartyShield(otherPlayer));
if (!known) {
msg.addByte(player->getGuildEmblem(otherPlayer));
}
if (creatureType == CREATURETYPE_MONSTER) {
const Creature* master = creature->getMaster();
if (master) {
const Player* masterPlayer = master->getPlayer();
if (masterPlayer) {
if (masterPlayer == player) {
creatureType = CREATURETYPE_SUMMON_OWN;
} else {
creatureType = CREATURETYPE_SUMMON_OTHERS;
}
}
}
}
msg.addByte(creatureType); // Type (for summons)
msg.addByte(creature->getSpeechBubble());
msg.addByte(0xFF); // MARK_UNMARKED
if (otherPlayer) {
msg.add<uint16_t>(otherPlayer->getHelpers());
} else {
msg.add<uint16_t>(0x00);
}
msg.addByte(player->canWalkthroughEx(creature) ? 0x00 : 0x01);
}
void ProtocolGame::AddPlayerStats(NetworkMessage& msg)
{
msg.addByte(0xA0);
msg.add<uint16_t>(std::min<int32_t>(player->getHealth(), std::numeric_limits<uint16_t>::max()));
msg.add<uint16_t>(std::min<int32_t>(player->getMaxHealth(), std::numeric_limits<uint16_t>::max()));
msg.add<uint32_t>(player->getFreeCapacity());
msg.add<uint32_t>(player->getCapacity());
msg.add<uint64_t>(player->getExperience());
msg.add<uint16_t>(player->getLevel());
msg.addByte(player->getLevelPercent());
msg.add<uint16_t>(100); // base xp gain rate
msg.add<uint16_t>(0); // xp voucher
msg.add<uint16_t>(0); // low level bonus
msg.add<uint16_t>(0); // xp boost
msg.add<uint16_t>(100); // stamina multiplier (100 = x1.0)
msg.add<uint16_t>(std::min<int32_t>(player->getMana(), std::numeric_limits<uint16_t>::max()));
msg.add<uint16_t>(std::min<int32_t>(player->getMaxMana(), std::numeric_limits<uint16_t>::max()));
msg.addByte(std::min<uint32_t>(player->getMagicLevel(), std::numeric_limits<uint8_t>::max()));
msg.addByte(std::min<uint32_t>(player->getBaseMagicLevel(), std::numeric_limits<uint8_t>::max()));
msg.addByte(player->getMagicLevelPercent());
msg.addByte(player->getSoul());
msg.add<uint16_t>(player->getStaminaMinutes());
msg.add<uint16_t>(player->getBaseSpeed() / 2);
Condition* condition = player->getCondition(CONDITION_REGENERATION);
msg.add<uint16_t>(condition ? condition->getTicks() / 1000 : 0x00);
msg.add<uint16_t>(player->getOfflineTrainingTime() / 60 / 1000);
msg.add<uint16_t>(0); // xp boost time (seconds)
msg.addByte(0); // enables exp boost in the store
}
void ProtocolGame::AddPlayerSkills(NetworkMessage& msg)
{
msg.addByte(0xA1);
for (uint8_t i = SKILL_FIRST; i <= SKILL_LAST; ++i) {
msg.add<uint16_t>(std::min<int32_t>(player->getSkillLevel(i), std::numeric_limits<uint16_t>::max()));
msg.add<uint16_t>(player->getBaseSkill(i));
msg.addByte(player->getSkillPercent(i));
}
for (uint8_t i = SPECIALSKILL_FIRST; i <= SPECIALSKILL_LAST; ++i) {
msg.add<uint16_t>(std::min<int32_t>(100, player->varSpecialSkills[i]));
msg.add<uint16_t>(0);
}
}
void ProtocolGame::AddOutfit(NetworkMessage& msg, const Outfit_t& outfit)
{
msg.add<uint16_t>(outfit.lookType);
if (outfit.lookType != 0) {
msg.addByte(outfit.lookHead);
msg.addByte(outfit.lookBody);
msg.addByte(outfit.lookLegs);
msg.addByte(outfit.lookFeet);
msg.addByte(outfit.lookAddons);
} else {
msg.addItemId(outfit.lookTypeEx);
}
msg.add<uint16_t>(outfit.lookMount);
}
void ProtocolGame::AddWorldLight(NetworkMessage& msg, LightInfo lightInfo)
{
msg.addByte(0x82);
msg.addByte((player->isAccessPlayer() ? 0xFF : lightInfo.level));
msg.addByte(lightInfo.color);
}
void ProtocolGame::AddCreatureLight(NetworkMessage& msg, const Creature* creature)
{
LightInfo lightInfo = creature->getCreatureLight();
msg.addByte(0x8D);
msg.add<uint32_t>(creature->getID());
msg.addByte((player->isAccessPlayer() ? 0xFF : lightInfo.level));
msg.addByte(lightInfo.color);
}
//tile
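// the client only keeps track of the first 10 things per tile, so a thing at stackpos >= 10 cannot be removed by reference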
void ProtocolGame::RemoveTileThing(NetworkMessage& msg, const Position& pos, uint32_t stackpos)
{
if (stackpos >= 10) {
return;
}
msg.addByte(0x6C);
msg.addPosition(pos);
msg.addByte(stackpos);
}
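// floor changes shift the client's perspective by one tile per floor, so extra map strips are sent afterwards to resynchronize the view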
void ProtocolGame::MoveUpCreature(NetworkMessage& msg, const Creature* creature, const Position& newPos, const Position& oldPos)
{
if (creature != player) {
return;
}
//floor change up
msg.addByte(0xBE);
//going to surface
if (newPos.z == 7) {
int32_t skip = -1;
GetFloorDescription(msg, oldPos.x - 8, oldPos.y - 6, 5, 18, 14, 3, skip); //(floor 7 and 6 already set)
GetFloorDescription(msg, oldPos.x - 8, oldPos.y - 6, 4, 18, 14, 4, skip);
GetFloorDescription(msg, oldPos.x - 8, oldPos.y - 6, 3, 18, 14, 5, skip);
GetFloorDescription(msg, oldPos.x - 8, oldPos.y - 6, 2, 18, 14, 6, skip);
GetFloorDescription(msg, oldPos.x - 8, oldPos.y - 6, 1, 18, 14, 7, skip);
GetFloorDescription(msg, oldPos.x - 8, oldPos.y - 6, 0, 18, 14, 8, skip);
if (skip >= 0) {
msg.addByte(skip);
msg.addByte(0xFF);
}
}
//underground, going one floor up (still underground)
else if (newPos.z > 7) {
int32_t skip = -1;
GetFloorDescription(msg, oldPos.x - 8, oldPos.y - 6, oldPos.getZ() - 3, 18, 14, 3, skip);
if (skip >= 0) {
msg.addByte(skip);
msg.addByte(0xFF);
}
}
	//moving up a floor makes us out of sync
//west
msg.addByte(0x68);
GetMapDescription(oldPos.x - 8, oldPos.y - 5, newPos.z, 1, 14, msg);
//north
msg.addByte(0x65);
GetMapDescription(oldPos.x - 8, oldPos.y - 6, newPos.z, 18, 1, msg);
}
void ProtocolGame::MoveDownCreature(NetworkMessage& msg, const Creature* creature, const Position& newPos, const Position& oldPos)
{
if (creature != player) {
return;
}
//floor change down
msg.addByte(0xBF);
//going from surface to underground
if (newPos.z == 8) {
int32_t skip = -1;
GetFloorDescription(msg, oldPos.x - 8, oldPos.y - 6, newPos.z, 18, 14, -1, skip);
GetFloorDescription(msg, oldPos.x - 8, oldPos.y - 6, newPos.z + 1, 18, 14, -2, skip);
GetFloorDescription(msg, oldPos.x - 8, oldPos.y - 6, newPos.z + 2, 18, 14, -3, skip);
if (skip >= 0) {
msg.addByte(skip);
msg.addByte(0xFF);
}
}
//going further down
else if (newPos.z > oldPos.z && newPos.z > 8 && newPos.z < 14) {
int32_t skip = -1;
GetFloorDescription(msg, oldPos.x - 8, oldPos.y - 6, newPos.z + 2, 18, 14, -3, skip);
if (skip >= 0) {
msg.addByte(skip);
msg.addByte(0xFF);
}
}
//moving down a floor makes us out of sync
//east
msg.addByte(0x66);
GetMapDescription(oldPos.x + 9, oldPos.y - 7, newPos.z, 1, 14, msg);
//south
msg.addByte(0x67);
GetMapDescription(oldPos.x - 8, oldPos.y + 7, newPos.z, 18, 1, msg);
}
void ProtocolGame::AddShopItem(NetworkMessage& msg, const ShopInfo& item)
{
const ItemType& it = Item::items[item.itemId];
msg.add<uint16_t>(it.clientId);
if (it.isSplash() || it.isFluidContainer()) {
msg.addByte(serverFluidToClient(item.subType));
} else {
msg.addByte(0x00);
}
msg.addString(item.realName);
msg.add<uint32_t>(it.weight);
msg.add<uint32_t>(item.buyPrice);
msg.add<uint32_t>(item.sellPrice);
}
void ProtocolGame::parseExtendedOpcode(NetworkMessage& msg)
{
uint8_t opcode = msg.getByte();
const std::string& buffer = msg.getString();
// process additional opcodes via lua script event
addGameTask(&Game::parsePlayerExtendedOpcode, player->getID(), opcode, buffer);
}
| 1 | 17,471 | This will only fix the client side (so it shows correctly in-game); the server is still going to hold invalid values, I guess? | otland-forgottenserver | cpp |
@@ -287,6 +287,18 @@ class Dataset(Element):
class to each underlying element.
"""
if isinstance(data, DynamicMap):
+ class_name = cls.__name__
+ repr_kdims = 'kdims=%r' % kdims if kdims else None
+ repr_vdims = 'vdims=%r' % vdims if vdims else None
+ repr_kwargs = (', '.join('%s=%r' % (k,v) for k,v in kwargs.items())
+ if kwargs else None)
+ extras = ', '.join([el for el in [repr_kdims, repr_vdims, repr_kwargs]
+ if el is not None])
+ extras = ', ' + extras if extras else ''
+ apply_args= 'hv.{class_name}{extras}'.format(class_name=class_name,
+ extras=extras)
+ msg = "Cannot construct a {class_name} from the supplied object of type DynamicMap. Implicitly creating a DynamicMap of {class_name} objects, but instead please explicitly call .apply({apply_args}) on the supplied DynamicMap."
+ cls.warning(cls, msg.format(class_name=class_name, apply_args=apply_args))
return data.apply(cls, per_element=True, kdims=kdims, vdims=vdims, **kwargs)
else:
return super(Dataset, cls).__new__(cls) | 1 | from __future__ import absolute_import
try:
import itertools.izip as zip
except ImportError:
pass
import types
import copy
import numpy as np
import param
from param.parameterized import add_metaclass, ParameterizedMetaclass
from .. import util
from ..accessors import Redim
from ..dimension import (
Dimension, Dimensioned, LabelledData, dimension_name, process_dimensions
)
from ..element import Element
from ..ndmapping import OrderedDict, MultiDimensionalMapping
from ..spaces import HoloMap, DynamicMap
from .interface import Interface, iloc, ndloc
from .array import ArrayInterface
from .dictionary import DictInterface
from .grid import GridInterface
from .multipath import MultiInterface # noqa (API import)
from .image import ImageInterface # noqa (API import)
from .spatialpandas import SpatialPandasInterface # noqa (API import)
default_datatype = 'dictionary'
datatypes = ['dictionary', 'grid', 'spatialpandas']
try:
import pandas as pd # noqa (Availability import)
from .pandas import PandasInterface
default_datatype = 'dataframe'
datatypes = ['dataframe', 'dictionary', 'spatialpandas', 'grid']
DFColumns = PandasInterface
except ImportError:
pd = None
except Exception as e:
pd = None
param.main.param.warning('Pandas interface failed to import with '
'following error: %s' % e)
try:
from .xarray import XArrayInterface # noqa (Conditional API import)
datatypes.append('xarray')
except ImportError:
pass
try:
from .dask import DaskInterface # noqa (Conditional API import)
datatypes.append('dask')
except ImportError:
pass
try:
from .cudf import cuDFInterface # noqa (Conditional API import)
datatypes.append('cuDF')
except ImportError:
pass
if 'array' not in datatypes:
datatypes.append('array')
if 'multitabular' not in datatypes:
datatypes.append('multitabular')
def concat(datasets, datatype=None):
"""Concatenates collection of datasets along NdMapping dimensions.
Concatenates multiple datasets wrapped in an NdMapping type along
all of its dimensions. Before concatenation all datasets are cast
to the same datatype, which may be explicitly defined or
implicitly derived from the first datatype that is
encountered. For columnar data concatenation adds the columns for
the dimensions being concatenated along and then concatenates all
the old and new columns. For gridded data a new axis is created
for each dimension being concatenated along and then
hierarchically concatenates along each dimension.
Args:
datasets: NdMapping of Datasets to concatenate
datatype: Datatype to cast data to before concatenation
Returns:
Concatenated dataset
"""
return Interface.concatenate(datasets, datatype)
class DataConversion(object):
"""
DataConversion is a very simple container object which can be
given an existing Dataset Element and provides methods to convert
the Dataset into most other Element types.
"""
def __init__(self, element):
self._element = element
def __call__(self, new_type, kdims=None, vdims=None, groupby=None,
sort=False, **kwargs):
"""
Generic conversion method for Dataset based Element
types. Supply the Dataset Element type to convert to and
optionally the key dimensions (kdims), value dimensions
        (vdims) and the dimensions to group over. Converted Columns
can be automatically sorted via the sort option and kwargs can
be passed through.
"""
element_params = new_type.param.objects()
kdim_param = element_params['kdims']
vdim_param = element_params['vdims']
if isinstance(kdim_param.bounds[1], int):
ndim = min([kdim_param.bounds[1], len(kdim_param.default)])
else:
ndim = None
nvdim = vdim_param.bounds[1] if isinstance(vdim_param.bounds[1], int) else None
if kdims is None:
kd_filter = groupby or []
if not isinstance(kd_filter, list):
kd_filter = [groupby]
kdims = [kd for kd in self._element.kdims if kd not in kd_filter][:ndim]
elif kdims and not isinstance(kdims, list): kdims = [kdims]
if vdims is None:
vdims = [d for d in self._element.vdims if d not in kdims][:nvdim]
if vdims and not isinstance(vdims, list): vdims = [vdims]
# Checks Element type supports dimensionality
type_name = new_type.__name__
for dim_type, dims in (('kdims', kdims), ('vdims', vdims)):
min_d, max_d = element_params[dim_type].bounds
if ((min_d is not None and len(dims) < min_d) or
(max_d is not None and len(dims) > max_d)):
raise ValueError("%s %s must be between length %s and %s." %
(type_name, dim_type, min_d, max_d))
if groupby is None:
groupby = [d for d in self._element.kdims if d not in kdims+vdims]
elif groupby and not isinstance(groupby, list):
groupby = [groupby]
if self._element.interface.gridded:
dropped_kdims = [kd for kd in self._element.kdims if kd not in groupby+kdims]
if dropped_kdims:
selected = self._element.reindex(groupby+kdims, vdims)
else:
selected = self._element
else:
if pd and issubclass(self._element.interface, PandasInterface):
ds_dims = self._element.dimensions()
ds_kdims = [self._element.get_dimension(d) if d in ds_dims else d
for d in groupby+kdims]
ds_vdims = [self._element.get_dimension(d) if d in ds_dims else d
for d in vdims]
selected = self._element.clone(kdims=ds_kdims, vdims=ds_vdims)
else:
selected = self._element.reindex(groupby+kdims, vdims)
params = {'kdims': [selected.get_dimension(kd, strict=True) for kd in kdims],
'vdims': [selected.get_dimension(vd, strict=True) for vd in vdims],
'label': selected.label}
if selected.group != selected.param.objects('existing')['group'].default:
params['group'] = selected.group
params.update(kwargs)
if len(kdims) == selected.ndims or not groupby:
# Propagate dataset
params['dataset'] = self._element.dataset
params['pipeline'] = self._element._pipeline
element = new_type(selected, **params)
return element.sort() if sort else element
group = selected.groupby(groupby, container_type=HoloMap,
group_type=new_type, **params)
if sort:
return group.map(lambda x: x.sort(), [new_type])
else:
return group
class PipelineMeta(ParameterizedMetaclass):
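    # Metaclass which wraps all public Dataset methods so that the operations
    # applied to an element are recorded on its _pipeline and can be replayed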
# Public methods that should not be wrapped
blacklist = ['__init__', 'clone']
def __new__(mcs, classname, bases, classdict):
for method_name in classdict:
method_fn = classdict[method_name]
if method_name in mcs.blacklist or method_name.startswith('_'):
continue
elif isinstance(method_fn, types.FunctionType):
classdict[method_name] = mcs.pipelined(method_fn, method_name)
inst = type.__new__(mcs, classname, bases, classdict)
return inst
@staticmethod
def pipelined(method_fn, method_name):
def pipelined_fn(*args, **kwargs):
from ...operation.element import method as method_op
inst = args[0]
inst_pipeline = copy.copy(getattr(inst, '_pipeline', None))
in_method = inst._in_method
if not in_method:
inst._in_method = True
try:
result = method_fn(*args, **kwargs)
op = method_op.instance(
input_type=type(inst),
method_name=method_name,
args=list(args[1:]),
kwargs=kwargs,
)
if not in_method:
if isinstance(result, Dataset):
result._pipeline = inst_pipeline.instance(
operations=inst_pipeline.operations + [op],
output_type=type(result),
)
elif isinstance(result, MultiDimensionalMapping):
for key, element in result.items():
if isinstance(element, Dataset):
getitem_op = method_op.instance(
input_type=type(result),
method_name='__getitem__',
args=[key]
)
element._pipeline = inst_pipeline.instance(
operations=inst_pipeline.operations + [
op, getitem_op
],
output_type=type(result),
)
finally:
if not in_method:
inst._in_method = False
return result
pipelined_fn.__doc__ = method_fn.__doc__
return pipelined_fn
@add_metaclass(PipelineMeta)
class Dataset(Element):
"""
Dataset provides a general baseclass for Element types that
contain structured data and supports a range of data formats.
The Dataset class supports various methods offering a consistent
way of working with the stored data regardless of the storage
format used. These operations include indexing, selection and
various ways of aggregating or collapsing the data with a supplied
function.
"""
datatype = param.List(datatypes, doc="""
A priority list of the data types to be used for storage
on the .data attribute. If the input supplied to the element
constructor cannot be put into the requested format, the next
format listed will be used until a suitable format is found (or
the data fails to be understood).""")
group = param.String(default='Dataset', constant=True)
# In the 1D case the interfaces should not automatically add x-values
# to supplied data
_auto_indexable_1d = False
# Define a class used to transform Datasets into other Element types
_conversion_interface = DataConversion
# Whether the key dimensions are specified as bins
_binned = False
_vdim_reductions = {}
_kdim_reductions = {}
def __new__(cls, data=None, kdims=None, vdims=None, **kwargs):
"""
Allows casting a DynamicMap to an Element class like hv.Curve, by applying the
class to each underlying element.
"""
if isinstance(data, DynamicMap):
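            # apply the constructor to every frame of the DynamicMap so each
            # element is cast to this type rather than wrapping the map itself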
return data.apply(cls, per_element=True, kdims=kdims, vdims=vdims, **kwargs)
else:
return super(Dataset, cls).__new__(cls)
def __init__(self, data, kdims=None, vdims=None, **kwargs):
from ...operation.element import (
chain as chain_op, factory
)
self._in_method = False
input_data = data
dataset_provided = 'dataset' in kwargs
input_dataset = kwargs.pop('dataset', None)
input_pipeline = kwargs.pop('pipeline', None)
input_transforms = kwargs.pop('transforms', None)
if isinstance(data, Element):
pvals = util.get_param_values(data)
kwargs.update([(l, pvals[l]) for l in ['group', 'label']
if l in pvals and l not in kwargs])
if isinstance(data, Dataset):
if not dataset_provided and data._dataset is not None:
input_dataset = data._dataset
if input_pipeline is None:
input_pipeline = data.pipeline
if input_transforms is None:
input_transforms = data._transforms
kwargs.update(process_dimensions(kdims, vdims))
kdims, vdims = kwargs.get('kdims'), kwargs.get('vdims')
validate_vdims = kwargs.pop('_validate_vdims', True)
initialized = Interface.initialize(type(self), data, kdims, vdims,
datatype=kwargs.get('datatype'))
(data, self.interface, dims, extra_kws) = initialized
super(Dataset, self).__init__(data, **dict(kwargs, **dict(dims, **extra_kws)))
self.interface.validate(self, validate_vdims)
self.redim = Redim(self, mode='dataset')
# Handle _pipeline property
if input_pipeline is None:
input_pipeline = chain_op.instance()
init_op = factory.instance(
output_type=type(self),
args=[],
kwargs=kwargs,
)
self._pipeline = input_pipeline.instance(
operations=input_pipeline.operations + [init_op],
output_type=type(self),
)
self._transforms = input_transforms or []
# Handle initializing the dataset property.
self._dataset = None
if input_dataset is not None:
self._dataset = input_dataset.clone(dataset=None, pipeline=None)
elif isinstance(input_data, Dataset) and not dataset_provided:
self._dataset = input_data._dataset
elif type(self) is Dataset:
self._dataset = self
@property
def dataset(self):
"""
The Dataset that this object was created from
"""
if self._dataset is None:
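            # no dataset was supplied at construction time, so rebuild one
            # from this element's own data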
datatype = list(util.unique_iterator(self.datatype+Dataset.datatype))
dataset = Dataset(self, _validate_vdims=False, datatype=datatype)
if hasattr(self, '_binned'):
dataset._binned = self._binned
return dataset
else:
return self._dataset
@property
def pipeline(self):
"""
Chain operation that evaluates the sequence of operations that was
used to create this object, starting with the Dataset stored in
        the dataset property
"""
return self._pipeline
def closest(self, coords=[], **kwargs):
"""Snaps coordinate(s) to closest coordinate in Dataset
Args:
coords: List of coordinates expressed as tuples
**kwargs: Coordinates defined as keyword pairs
Returns:
List of tuples of the snapped coordinates
Raises:
NotImplementedError: Raised if snapping is not supported
"""
if self.ndims > 1:
raise NotImplementedError("Closest method currently only "
"implemented for 1D Elements")
if kwargs:
if len(kwargs) > 1:
raise NotImplementedError("Closest method currently only "
"supports 1D indexes")
samples = list(kwargs.values())[0]
coords = samples if isinstance(samples, list) else [samples]
xs = self.dimension_values(0)
if xs.dtype.kind in 'SO':
raise NotImplementedError("Closest only supported for numeric types")
idxs = [np.argmin(np.abs(xs-coord)) for coord in coords]
return [xs[idx] for idx in idxs]
def sort(self, by=None, reverse=False):
"""
Sorts the data by the values along the supplied dimensions.
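        For example ds.sort('y', reverse=True) orders the rows by the values
        of the 'y' dimension in descending order.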
Args:
by: Dimension(s) to sort by
reverse (bool, optional): Reverse sort order
Returns:
Sorted Dataset
"""
if by is None:
by = self.kdims
elif not isinstance(by, list):
by = [by]
sorted_columns = self.interface.sort(self, by, reverse)
return self.clone(sorted_columns)
def range(self, dim, data_range=True, dimension_range=True):
"""Return the lower and upper bounds of values along dimension.
Args:
dimension: The dimension to compute the range on.
data_range (bool): Compute range from data values
dimension_range (bool): Include Dimension ranges
Whether to include Dimension range and soft_range
in range calculation
Returns:
Tuple containing the lower and upper bound
"""
dim = self.get_dimension(dim)
if dim is None or (not data_range and not dimension_range):
return (None, None)
elif all(util.isfinite(v) for v in dim.range) and dimension_range:
return dim.range
elif dim in self.dimensions() and data_range and bool(self):
lower, upper = self.interface.range(self, dim)
else:
lower, upper = (np.NaN, np.NaN)
if not dimension_range:
return lower, upper
return util.dimension_range(lower, upper, dim.range, dim.soft_range)
def add_dimension(self, dimension, dim_pos, dim_val, vdim=False, **kwargs):
"""Adds a dimension and its values to the Dataset
Requires the dimension name or object, the desired position in
the key dimensions and a key value scalar or array of values,
matching the length or shape of the Dataset.
Args:
dimension: Dimension or dimension spec to add
dim_pos (int): Integer index to insert dimension at
dim_val (scalar or ndarray): Dimension value(s) to add
            vdim: Whether to insert the new dimension as a value dimension
**kwargs: Keyword arguments passed to the cloned element
Returns:
Cloned object containing the new dimension
"""
if isinstance(dimension, (util.basestring, tuple)):
dimension = Dimension(dimension)
if dimension.name in self.kdims:
raise Exception('{dim} dimension already defined'.format(dim=dimension.name))
if vdim:
dims = self.vdims[:]
dims.insert(dim_pos, dimension)
dimensions = dict(vdims=dims)
dim_pos += self.ndims
else:
dims = self.kdims[:]
dims.insert(dim_pos, dimension)
dimensions = dict(kdims=dims)
if issubclass(self.interface, ArrayInterface) and np.asarray(dim_val).dtype != self.data.dtype:
element = self.clone(datatype=[default_datatype])
data = element.interface.add_dimension(element, dimension, dim_pos, dim_val, vdim)
else:
data = self.interface.add_dimension(self, dimension, dim_pos, dim_val, vdim)
return self.clone(data, **dimensions)
def select(self, selection_expr=None, selection_specs=None, **selection):
"""Applies selection by dimension name
Applies a selection along the dimensions of the object using
keyword arguments. The selection may be narrowed to certain
objects using selection_specs. For container objects the
selection will be applied to all children as well.
Selections may select a specific value, slice or set of values:
* value: Scalar values will select rows along with an exact
match, e.g.:
ds.select(x=3)
* slice: Slices may be declared as tuples of the upper and
lower bound, e.g.:
ds.select(x=(0, 3))
* values: A list of values may be selected using a list or
set, e.g.:
ds.select(x=[0, 1, 2])
* predicate expression: A holoviews.dim expression, e.g.:
from holoviews import dim
ds.select(selection_expr=dim('x') % 2 == 0)
Args:
selection_expr: holoviews.dim predicate expression
specifying selection.
selection_specs: List of specs to match on
A list of types, functions, or type[.group][.label]
strings specifying which objects to apply the
selection on.
**selection: Dictionary declaring selections by dimension
Selections can be scalar values, tuple ranges, lists
of discrete values and boolean arrays
Returns:
            Returns a Dimensioned object containing the selected data
or a scalar if a single value was selected
"""
from ...util.transform import dim
if selection_expr is not None and not isinstance(selection_expr, dim):
raise ValueError("""\
The first positional argument to the Dataset.select method is expected to be a
holoviews.util.transform.dim expression. Use the selection_specs keyword
argument to specify a selection specification""")
if selection_specs is not None and not isinstance(selection_specs, (list, tuple)):
selection_specs = [selection_specs]
selection = {dim_name: sel for dim_name, sel in selection.items()
if dim_name in self.dimensions()+['selection_mask']}
if (selection_specs and not any(self.matches(sp) for sp in selection_specs)
or (not selection and not selection_expr)):
return self
# Handle selection dim expression
if selection_expr is not None:
mask = selection_expr.apply(self, compute=False, keep_index=True)
dataset = self[mask]
else:
dataset = self
# Handle selection kwargs
if selection:
data = dataset.interface.select(dataset, **selection)
else:
data = dataset.data
if np.isscalar(data):
return data
else:
return self.clone(data)
def reindex(self, kdims=None, vdims=None):
"""Reindexes Dataset dropping static or supplied kdims
Creates a new object with a reordered or reduced set of key
        dimensions. By default drops all non-varying key dimensions.
Args:
            kdims (optional): New list of key dimensions
vdims (optional): New list of value dimensions
Returns:
Reindexed object
"""
gridded = self.interface.gridded
scalars = []
if gridded:
coords = [(d, self.interface.coords(self, d.name)) for d in self.kdims]
scalars = [d for d, vs in coords if len(vs) == 1]
if kdims is None:
# If no key dimensions are defined and interface is gridded
# drop all scalar key dimensions
key_dims = [d for d in self.kdims if (not vdims or d not in vdims)
and not d in scalars]
elif not isinstance(kdims, list):
key_dims = [self.get_dimension(kdims, strict=True)]
else:
key_dims = [self.get_dimension(k, strict=True) for k in kdims]
dropped = [d for d in self.kdims if not d in key_dims and not d in scalars]
new_type = None
if vdims is None:
val_dims = [d for d in self.vdims if not kdims or d not in kdims]
else:
val_dims = [self.get_dimension(v, strict=True) for v in vdims]
new_type = self._vdim_reductions.get(len(val_dims), type(self))
data = self.interface.reindex(self, key_dims, val_dims)
datatype = self.datatype
if gridded and dropped:
interfaces = self.interface.interfaces
datatype = [dt for dt in datatype if not
getattr(interfaces.get(dt, None), 'gridded', True)]
return self.clone(data, kdims=key_dims, vdims=val_dims,
new_type=new_type, datatype=datatype)
def __getitem__(self, slices):
"""
Allows slicing and selecting values in the Dataset object.
Supports multiple indexing modes:
(1) Slicing and indexing along the values of each dimension
in the columns object using either scalars, slices or
sets of values.
(2) Supplying the name of a dimension as the first argument
will return the values along that dimension as a numpy
array.
(3) Slicing of all key dimensions and selecting a single
value dimension by name.
(4) A boolean array index matching the length of the Dataset
object.
"""
slices = util.process_ellipses(self, slices, vdim_selection=True)
if getattr(getattr(slices, 'dtype', None), 'kind', None) == 'b':
if not len(slices) == len(self):
raise IndexError("Boolean index must match length of sliced object")
return self.clone(self.select(selection_mask=slices))
elif slices in [(), Ellipsis]:
return self
if not isinstance(slices, tuple): slices = (slices,)
value_select = None
if len(slices) == 1 and slices[0] in self.dimensions():
return self.dimension_values(slices[0])
elif len(slices) == self.ndims+1 and slices[self.ndims] in self.dimensions():
selection = dict(zip(self.dimensions('key', label=True), slices))
value_select = slices[self.ndims]
elif len(slices) == self.ndims+1 and isinstance(slices[self.ndims],
(Dimension,str)):
raise IndexError("%r is not an available value dimension" % slices[self.ndims])
else:
selection = dict(zip(self.dimensions(label=True), slices))
data = self.select(**selection)
if value_select:
if data.shape[0] == 1:
return data[value_select][0]
else:
return data.reindex(vdims=[value_select])
return data
def sample(self, samples=[], bounds=None, closest=True, **kwargs):
"""Samples values at supplied coordinates.
Allows sampling of element with a list of coordinates matching
the key dimensions, returning a new object containing just the
selected samples. Supports multiple signatures:
Sampling with a list of coordinates, e.g.:
ds.sample([(0, 0), (0.1, 0.2), ...])
Sampling a range or grid of coordinates, e.g.:
1D: ds.sample(3)
2D: ds.sample((3, 3))
Sampling by keyword, e.g.:
ds.sample(x=0)
Args:
samples: List of nd-coordinates to sample
bounds: Bounds of the region to sample
Defined as two-tuple for 1D sampling and four-tuple
for 2D sampling.
closest: Whether to snap to closest coordinates
**kwargs: Coordinates specified as keyword pairs
Keywords of dimensions and scalar coordinates
Returns:
Element containing the sampled coordinates
"""
if kwargs and samples != []:
raise Exception('Supply explicit list of samples or kwargs, not both.')
elif kwargs:
sample = [slice(None) for _ in range(self.ndims)]
for dim, val in kwargs.items():
sample[self.get_dimension_index(dim)] = val
samples = [tuple(sample)]
elif isinstance(samples, tuple) or util.isscalar(samples):
if self.ndims == 1:
xlim = self.range(0)
lower, upper = (xlim[0], xlim[1]) if bounds is None else bounds
edges = np.linspace(lower, upper, samples+1)
linsamples = [(l+u)/2.0 for l,u in zip(edges[:-1], edges[1:])]
elif self.ndims == 2:
(rows, cols) = samples
if bounds:
(l,b,r,t) = bounds
else:
l, r = self.range(0)
b, t = self.range(1)
xedges = np.linspace(l, r, cols+1)
yedges = np.linspace(b, t, rows+1)
xsamples = [(lx+ux)/2.0 for lx,ux in zip(xedges[:-1], xedges[1:])]
ysamples = [(ly+uy)/2.0 for ly,uy in zip(yedges[:-1], yedges[1:])]
Y,X = np.meshgrid(ysamples, xsamples)
linsamples = list(zip(X.flat, Y.flat))
else:
raise NotImplementedError("Regular sampling not implemented "
"for elements with more than two dimensions.")
samples = list(util.unique_iterator(self.closest(linsamples)))
# Note: Special handling sampling of gridded 2D data as Curve
# may be replaced with more general handling
# see https://github.com/ioam/holoviews/issues/1173
from ...element import Table, Curve
datatype = ['dataframe', 'dictionary', 'dask']
if len(samples) == 1:
sel = {kd.name: s for kd, s in zip(self.kdims, samples[0])}
dims = [kd for kd, v in sel.items() if not np.isscalar(v)]
selection = self.select(**sel)
# If a 1D cross-section of 2D space return Curve
if self.interface.gridded and self.ndims == 2 and len(dims) == 1:
new_type = Curve
kdims = [self.get_dimension(kd) for kd in dims]
else:
new_type = Table
kdims = self.kdims
if np.isscalar(selection):
selection = [samples[0]+(selection,)]
else:
reindexed = selection.clone(new_type=Dataset, datatype=datatype).reindex(kdims)
selection = tuple(reindexed.columns(kdims+self.vdims).values())
datatype = list(util.unique_iterator(self.datatype+['dataframe', 'dict']))
return self.clone(selection, kdims=kdims, new_type=new_type,
datatype=datatype)
lens = set(len(util.wrap_tuple(s)) for s in samples)
if len(lens) > 1:
raise IndexError('Sample coordinates must all be of the same length.')
if closest:
try:
samples = self.closest(samples)
except NotImplementedError:
pass
samples = [util.wrap_tuple(s) for s in samples]
sampled = self.interface.sample(self, samples)
return self.clone(sampled, new_type=Table, datatype=datatype)
def reduce(self, dimensions=[], function=None, spreadfn=None, **reductions):
"""Applies reduction along the specified dimension(s).
Allows reducing the values along one or more key dimension
with the supplied function. Supports two signatures:
Reducing with a list of dimensions, e.g.:
ds.reduce(['x'], np.mean)
Defining a reduction using keywords, e.g.:
ds.reduce(x=np.mean)
Args:
dimensions: Dimension(s) to apply reduction on
Defaults to all key dimensions
function: Reduction operation to apply, e.g. numpy.mean
spreadfn: Secondary reduction to compute value spread
Useful for computing a confidence interval, spread, or
standard deviation.
**reductions: Keyword argument defining reduction
Allows reduction to be defined as keyword pair of
dimension and function
Returns:
The Dataset after reductions have been applied.
"""
if any(dim in self.vdims for dim in dimensions):
raise Exception("Reduce cannot be applied to value dimensions")
function, dims = self._reduce_map(dimensions, function, reductions)
dims = [d for d in self.kdims if d not in dims]
return self.aggregate(dims, function, spreadfn)
def aggregate(self, dimensions=None, function=None, spreadfn=None, **kwargs):
"""Aggregates data on the supplied dimensions.
Aggregates over the supplied key dimensions with the defined
function or dim_transform specified as a tuple of the transformed
dimension name and dim transform.
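        For example ds.aggregate('x', np.mean) averages the value dimensions
        over each unique value of 'x'.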
Args:
dimensions: Dimension(s) to aggregate on
Default to all key dimensions
function: Aggregation function or transform to apply
Supports both simple functions and dimension transforms
spreadfn: Secondary reduction to compute value spread
Useful for computing a confidence interval, spread, or
standard deviation.
**kwargs: Keyword arguments either passed to the aggregation function
or to create new names for the transformed variables
Returns:
Returns the aggregated Dataset
"""
from ...util.transform import dim
if dimensions is None: dimensions = self.kdims
elif not isinstance(dimensions, list): dimensions = [dimensions]
if isinstance(function, tuple) or any(isinstance(v, dim) for v in kwargs.values()):
dataset = self.clone(new_type=Dataset)
if dimensions:
dataset = dataset.groupby(dimensions)
args = () if function is None else (function,)
transformed = dataset.apply.transform(*args, drop=True, **kwargs)
if not isinstance(transformed, Dataset):
transformed = transformed.collapse()
return transformed.clone(new_type=type(self))
# Handle functions
kdims = [self.get_dimension(d, strict=True) for d in dimensions]
if not len(self):
if spreadfn:
spread_name = spreadfn.__name__
vdims = [d for vd in self.vdims for d in [vd, vd.clone('_'.join([vd.name, spread_name]))]]
else:
vdims = self.vdims
return self.clone([], kdims=kdims, vdims=vdims)
vdims = self.vdims
aggregated, dropped = self.interface.aggregate(self, kdims, function, **kwargs)
aggregated = self.interface.unpack_scalar(self, aggregated)
vdims = [vd for vd in vdims if vd not in dropped]
ndims = len(dimensions)
min_d, max_d = self.param.objects('existing')['kdims'].bounds
generic_type = (min_d is not None and ndims < min_d) or (max_d is not None and ndims > max_d)
if spreadfn:
error, _ = self.interface.aggregate(self, dimensions, spreadfn)
spread_name = spreadfn.__name__
ndims = len(vdims)
error = self.clone(error, kdims=kdims, new_type=Dataset)
combined = self.clone(aggregated, kdims=kdims, new_type=Dataset)
for i, d in enumerate(vdims):
dim = d.clone('_'.join([d.name, spread_name]))
dvals = error.dimension_values(d, flat=False)
combined = combined.add_dimension(dim, ndims+i, dvals, True)
return combined.clone(new_type=Dataset if generic_type else type(self))
if np.isscalar(aggregated):
return aggregated
else:
try:
# Should be checking the dimensions declared on the element are compatible
return self.clone(aggregated, kdims=kdims, vdims=vdims)
except:
datatype = self.param.objects('existing')['datatype'].default
return self.clone(aggregated, kdims=kdims, vdims=vdims,
new_type=Dataset if generic_type else None,
datatype=datatype)
def groupby(self, dimensions=[], container_type=HoloMap, group_type=None,
dynamic=False, **kwargs):
"""Groups object by one or more dimensions
Applies groupby operation over the specified dimensions
returning an object of type container_type (expected to be
dictionary-like) containing the groups.
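        For example ds.groupby('x') returns a HoloMap indexed by the unique
        values along 'x', with one group per value.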
Args:
dimensions: Dimension(s) to group by
container_type: Type to cast group container to
group_type: Type to cast each group to
dynamic: Whether to return a DynamicMap
**kwargs: Keyword arguments to pass to each group
Returns:
Returns object of supplied container_type containing the
groups. If dynamic=True returns a DynamicMap instead.
"""
if not isinstance(dimensions, list): dimensions = [dimensions]
if not len(dimensions): dimensions = self.dimensions('key', True)
if group_type is None: group_type = type(self)
dimensions = [self.get_dimension(d, strict=True) for d in dimensions]
dim_names = [d.name for d in dimensions]
if dynamic:
group_dims = [kd for kd in self.kdims if kd not in dimensions]
kdims = [self.get_dimension(d) for d in kwargs.pop('kdims', group_dims)]
drop_dim = len(group_dims) != len(kdims)
group_kwargs = dict(util.get_param_values(self), kdims=kdims)
group_kwargs.update(kwargs)
def load_subset(*args):
constraint = dict(zip(dim_names, args))
group = self.select(**constraint)
if np.isscalar(group):
return group_type(([group],), group=self.group,
label=self.label, vdims=self.vdims)
data = group.reindex(kdims)
if drop_dim and self.interface.gridded:
data = data.columns()
return group_type(data, **group_kwargs)
dynamic_dims = [d.clone(values=list(self.interface.values(self, d.name, False)))
for d in dimensions]
return DynamicMap(load_subset, kdims=dynamic_dims)
return self.interface.groupby(self, dim_names, container_type,
group_type, **kwargs)
def transform(self, *args, **kwargs):
"""Transforms the Dataset according to a dimension transform.
Transforms may be supplied as tuples consisting of the
dimension(s) and the dim transform to apply or keyword
arguments mapping from dimension(s) to dim transforms. If the
arg or kwarg declares multiple dimensions the dim transform
should return a tuple of values for each.
A transform may override an existing dimension or add a new
one in which case it will be added as an additional value
dimension.
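        For example ds.transform(y2=dim('y')*2) adds a new 'y2' value
        dimension holding twice the 'y' values.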
Args:
args: Specify the output arguments and transforms as a
tuple of dimension specs and dim transforms
drop (bool): Whether to drop all variables not part of the transform
keep_index (bool): Whether to keep indexes
Whether to apply transform on datastructure with
index, e.g. pandas.Series or xarray.DataArray,
(important for dask datastructures where index may
be required to align datasets).
kwargs: Specify new dimensions in the form new_dim=dim_transform
Returns:
Transformed dataset with new dimensions
"""
drop = kwargs.pop('drop', False)
keep_index = kwargs.pop('keep_index', True)
transforms = OrderedDict()
for s, transform in list(args)+list(kwargs.items()):
transforms[util.wrap_tuple(s)] = transform
new_data = OrderedDict()
for signature, transform in transforms.items():
applied = transform.apply(
self, compute=False, keep_index=keep_index
)
if len(signature) == 1:
new_data[signature[0]] = applied
else:
for s, vals in zip(signature, applied):
new_data[s] = vals
new_dims = []
for d in new_data:
if self.get_dimension(d) is None:
new_dims.append(d)
ds = self
if ds.interface.datatype in ('image', 'array'):
ds = ds.clone(datatype=[dt for dt in ds.datatype if dt != ds.interface.datatype])
if drop:
kdims = [ds.get_dimension(d) for d in new_data if d in ds.kdims]
vdims = [ds.get_dimension(d) or d for d in new_data if d not in ds.kdims]
data = OrderedDict([(dimension_name(d), values) for d, values in new_data.items()])
return ds.clone(data, kdims=kdims, vdims=vdims)
else:
new_data = OrderedDict([(dimension_name(d), values) for d, values in new_data.items()])
data = ds.interface.assign(ds, new_data)
data, drop = data if isinstance(data, tuple) else (data, [])
kdims = [kd for kd in self.kdims if kd.name not in drop]
return ds.clone(data, kdims=kdims, vdims=ds.vdims+new_dims)
def __len__(self):
"Number of values in the Dataset."
return self.interface.length(self)
def __nonzero__(self):
"Whether the Dataset contains any values"
return self.interface.nonzero(self)
__bool__ = __nonzero__
@property
def shape(self):
"Returns the shape of the data."
return self.interface.shape(self)
def dimension_values(self, dimension, expanded=True, flat=True):
"""Return the values along the requested dimension.
Args:
dimension: The dimension to return values for
expanded (bool, optional): Whether to expand values
Whether to return the expanded values, behavior depends
on the type of data:
* Columnar: If false returns unique values
* Geometry: If false returns scalar values per geometry
* Gridded: If false returns 1D coordinates
flat (bool, optional): Whether to flatten array
Returns:
NumPy array of values along the requested dimension
"""
dim = self.get_dimension(dimension, strict=True)
return self.interface.values(self, dim, expanded, flat)
def get_dimension_type(self, dim):
"""Get the type of the requested dimension.
Type is determined by Dimension.type attribute or common
type of the dimension values, otherwise None.
Args:
dimension: Dimension to look up by name or by index
Returns:
Declared type of values along the dimension
"""
dim_obj = self.get_dimension(dim)
if dim_obj and dim_obj.type is not None:
return dim_obj.type
return self.interface.dimension_type(self, dim_obj)
def dframe(self, dimensions=None, multi_index=False):
"""Convert dimension values to DataFrame.
Returns a pandas dataframe of columns along each dimension,
either completely flat or indexed by key dimensions.
Args:
dimensions: Dimensions to return as columns
multi_index: Convert key dimensions to (multi-)index
Returns:
DataFrame of columns corresponding to each dimension
"""
if dimensions is None:
dimensions = [d.name for d in self.dimensions()]
else:
dimensions = [self.get_dimension(d, strict=True).name for d in dimensions]
df = self.interface.dframe(self, dimensions)
if multi_index:
df = df.set_index([d for d in dimensions if d in self.kdims])
return df
def columns(self, dimensions=None):
"""Convert dimension values to a dictionary.
Returns a dictionary of column arrays along each dimension
of the element.
Args:
dimensions: Dimensions to return as columns
Returns:
Dictionary of arrays for each dimension
"""
if dimensions is None:
dimensions = self.dimensions()
else:
dimensions = [self.get_dimension(d, strict=True) for d in dimensions]
return OrderedDict([(d.name, self.dimension_values(d)) for d in dimensions])
@property
def to(self):
"Returns the conversion interface with methods to convert Dataset"
return self._conversion_interface(self)
def clone(self, data=None, shared_data=True, new_type=None, link=True,
*args, **overrides):
"""Clones the object, overriding data and parameters.
Args:
data: New data replacing the existing data
shared_data (bool, optional): Whether to use existing data
new_type (optional): Type to cast object to
link (bool, optional): Whether clone should be linked
Determines whether Streams and Links attached to
original object will be inherited.
*args: Additional arguments to pass to constructor
**overrides: New keyword arguments to pass to constructor
Returns:
Cloned object
"""
if 'datatype' not in overrides:
datatypes = [self.interface.datatype] + self.datatype
overrides['datatype'] = list(util.unique_iterator(datatypes))
if data is None:
overrides['_validate_vdims'] = False
# Allows datatype conversions
if shared_data:
data = self
if link:
overrides['plot_id'] = self._plot_id
if 'dataset' not in overrides:
overrides['dataset'] = self.dataset
if 'pipeline' not in overrides:
overrides['pipeline'] = self._pipeline
elif self._in_method:
if 'dataset' not in overrides:
overrides['dataset'] = self.dataset
new_dataset = super(Dataset, self).clone(
data, shared_data, new_type, *args, **overrides
)
return new_dataset
# Overrides of superclass methods that are needed so that PipelineMeta
# will find them to wrap with pipeline support
def options(self, *args, **kwargs):
return super(Dataset, self).options(*args, **kwargs)
options.__doc__ = Dimensioned.options.__doc__
def map(self, *args, **kwargs):
return super(Dataset, self).map(*args, **kwargs)
map.__doc__ = LabelledData.map.__doc__
def relabel(self, *args, **kwargs):
return super(Dataset, self).relabel(*args, **kwargs)
relabel.__doc__ = LabelledData.relabel.__doc__
@property
def iloc(self):
"""Returns iloc indexer with support for columnar indexing.
Returns an iloc object providing a convenient interface to
slice and index into the Dataset using row and column indices.
Allow selection by integer index, slice and list of integer
indices and boolean arrays.
Examples:
* Index the first row and column:
dataset.iloc[0, 0]
* Select rows 1 and 2 with a slice:
dataset.iloc[1:3, :]
* Select with a list of integer coordinates:
dataset.iloc[[0, 2, 3]]
"""
return iloc(self)
@property
def ndloc(self):
"""Returns ndloc indexer with support for gridded indexing.
Returns an ndloc object providing nd-array like indexing for
gridded datasets. Follows NumPy array indexing conventions,
allowing for indexing, slicing and selecting a list of indices
on multi-dimensional arrays using integer indices. The order
of array indices is inverted relative to the Dataset key
dimensions, e.g. an Image with key dimensions 'x' and 'y' can
be indexed with ``image.ndloc[iy, ix]``, where ``iy`` and
``ix`` are integer indices along the y and x dimensions.
Examples:
* Index value in 2D array:
dataset.ndloc[3, 1]
* Slice along y-axis of 2D array:
dataset.ndloc[2:5, :]
* Vectorized (non-orthogonal) indexing along x- and y-axes:
dataset.ndloc[[1, 2, 3], [0, 2, 3]]
"""
return ndloc(self)
# Aliases for pickle backward compatibility
Columns = Dataset
ArrayColumns = ArrayInterface
DictColumns = DictInterface
GridColumns = GridInterface
| 1 | 23,508 | Shouldn't this be `cls.param.warning`? | holoviz-holoviews | py |
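A hedged illustration of the reviewer's point in the row above (an editorial sketch, not holoviews code; the class `MyDataset` and the message text are made up, and it assumes the `param` package that holoviews builds on): `cls.param.warning` is simply the class-level counterpart of `self.param.warning`, usable where no instance is in scope.

import param

class MyDataset(param.Parameterized):
    @classmethod
    def check_fallback(cls):
        # No instance is available inside a classmethod, so the warning is
        # emitted through the class-level .param accessor.
        cls.param.warning("falling back to default datatypes")

    def check_instance(self):
        # Same message, emitted through the instance's .param accessor.
        self.param.warning("falling back to default datatypes")

MyDataset.check_fallback()
MyDataset().check_instance()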
@@ -54,7 +54,6 @@ module Selenium
it 'does not set the chrome.detach capability by default' do
Driver.new(http_client: http)
- expect(caps['goog:chromeOptions']).to eq({})
expect(caps['chrome.detach']).to be nil
end
| 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
require File.expand_path('../../spec_helper', __FILE__)
module Selenium
module WebDriver
module Chrome
describe Driver do
let(:resp) { {'sessionId' => 'foo', 'value' => Remote::Capabilities.chrome.as_json} }
let(:service) { instance_double(Service, start: true, uri: 'http://example.com') }
let(:caps) { Remote::Capabilities.new }
let(:http) { instance_double(Remote::Http::Default, call: resp).as_null_object }
before do
allow(Remote::Capabilities).to receive(:chrome).and_return(caps)
allow(Service).to receive(:binary_path).and_return('/foo')
allow(Service).to receive(:new).and_return(service)
end
it 'sets the args capability' do
Driver.new(http_client: http, args: %w[--foo=bar])
expect(caps['goog:chromeOptions'][:args]).to eq(%w[--foo=bar])
end
it 'sets the args capability from switches' do
Driver.new(http_client: http, switches: %w[--foo=bar])
expect(caps['goog:chromeOptions'][:args]).to eq(%w[--foo=bar])
end
        it 'sets the proxy capability' do
proxy = Proxy.new(http: 'localhost:1234')
Driver.new(http_client: http, proxy: proxy)
expect(caps[:proxy]).to eq(proxy)
end
it 'does not set the chrome.detach capability by default' do
Driver.new(http_client: http)
expect(caps['goog:chromeOptions']).to eq({})
expect(caps['chrome.detach']).to be nil
end
it 'sets the prefs capability' do
Driver.new(http_client: http, prefs: {foo: 'bar'})
expect(caps['goog:chromeOptions'][:prefs]).to eq(foo: 'bar')
end
it 'lets the user override chrome.detach' do
Driver.new(http_client: http, detach: true)
expect(caps['goog:chromeOptions'][:detach]).to be true
end
it 'raises an ArgumentError if args is not an Array' do
expect { Driver.new(args: '--foo=bar') }.to raise_error(ArgumentError)
end
it 'uses the given profile' do
profile = Profile.new
profile['some_pref'] = true
profile.add_extension(__FILE__)
Driver.new(http_client: http, profile: profile)
profile_data = profile.as_json
expect(caps['goog:chromeOptions'][:args].first).to include(profile_data[:directory])
expect(caps['goog:chromeOptions'][:extensions]).to eq(profile_data[:extensions])
end
it 'takes desired capabilities' do
custom_caps = Remote::Capabilities.new
custom_caps[:chrome_options] = {'foo' => 'bar'}
expect(http).to receive(:call) do |_, _, payload|
expect(payload[:desiredCapabilities][:chrome_options]).to include('foo' => 'bar')
resp
end
Driver.new(http_client: http, desired_capabilities: custom_caps)
end
        it 'lets direct arguments take precedence over capabilities' do
custom_caps = Remote::Capabilities.new
custom_caps['goog:chromeOptions'] = {'args' => %w[foo bar]}
expect(http).to receive(:call) do |_, _, payload|
expect(payload[:desiredCapabilities]['goog:chromeOptions'][:args]).to eq(['baz'])
resp
end
Driver.new(http_client: http, desired_capabilities: custom_caps, args: %w[baz])
end
it 'handshakes protocol' do
expect(Remote::Bridge).to receive(:handshake)
Driver.new(http_client: http)
end
end
end # Chrome
end # WebDriver
end # Selenium
| 1 | 15,563 | This spec can be modified to make it stronger (check that fetching this key doesn't work and therefore returns `nil`) | SeleniumHQ-selenium | rb |
@@ -107,9 +107,14 @@ class ServerConnectionMixin:
"""
address = self.server_conn.address
if address:
+ forbidden_hosts = ["localhost", "127.0.0.1", "::1"]
+
+ if self.config.options.listen_host:
+ forbidden_hosts.append(self.config.options.listen_host)
+
self_connect = (
address[1] == self.config.options.listen_port and
- address[0] in ("localhost", "127.0.0.1", "::1")
+ address[0] in forbidden_hosts
)
if self_connect:
raise exceptions.ProtocolException( | 1 | from mitmproxy import exceptions
from mitmproxy import connections
from mitmproxy import controller # noqa
from mitmproxy.proxy import config # noqa
class _LayerCodeCompletion:
"""
Dummy class that provides type hinting in PyCharm, which simplifies development a lot.
"""
def __init__(self, **mixin_args): # pragma: no cover
super().__init__(**mixin_args)
if True:
return
self.config: config.ProxyConfig = None
self.client_conn: connections.ClientConnection = None
self.server_conn: connections.ServerConnection = None
self.channel: controller.Channel = None
self.ctx = None
"""@type: mitmproxy.proxy.protocol.Layer"""
class Layer(_LayerCodeCompletion):
"""
Base class for all layers. All other protocol layers should inherit from this class.
"""
def __init__(self, ctx, **mixin_args):
"""
Each layer usually passes itself to its child layers as a context. Properties of the
context are transparently mapped to the layer, so that the following works:
.. code-block:: python
root_layer = Layer(None)
root_layer.client_conn = 42
sub_layer = Layer(root_layer)
print(sub_layer.client_conn) # 42
The root layer is passed a :py:class:`mitmproxy.proxy.RootContext` object,
which provides access to :py:attr:`.client_conn <mitmproxy.proxy.RootContext.client_conn>`,
:py:attr:`.next_layer <mitmproxy.proxy.RootContext.next_layer>` and other basic attributes.
Args:
ctx: The (read-only) parent layer / context.
"""
self.ctx = ctx
"""
The parent layer.
:type: :py:class:`Layer`
"""
super().__init__(**mixin_args)
def __call__(self):
"""Logic of the layer.
Returns:
Once the protocol has finished without exceptions.
Raises:
~mitmproxy.exceptions.ProtocolException: if an exception occurs. No other exceptions must be raised.
"""
raise NotImplementedError()
def __getattr__(self, name):
"""
Attributes not present on the current layer are looked up on the context.
"""
return getattr(self.ctx, name)
class ServerConnectionMixin:
"""
Mixin that provides a layer with the capabilities to manage a server connection.
The server address can be passed in the constructor or set by calling :py:meth:`set_server`.
Subclasses are responsible for calling :py:meth:`disconnect` before returning.
Recommended Usage:
.. code-block:: python
class MyLayer(Layer, ServerConnectionMixin):
def __call__(self):
try:
# Do something.
finally:
if self.server_conn.connected():
self.disconnect()
"""
def __init__(self, server_address=None):
super().__init__()
self.server_conn = self.__make_server_conn(server_address)
self.__check_self_connect()
def __check_self_connect(self):
"""
We try to protect the proxy from _accidentally_ connecting to itself,
e.g. because of a failed transparent lookup or an invalid configuration.
"""
address = self.server_conn.address
if address:
self_connect = (
address[1] == self.config.options.listen_port and
address[0] in ("localhost", "127.0.0.1", "::1")
)
if self_connect:
raise exceptions.ProtocolException(
"Invalid server address: {}\r\n"
"The proxy shall not connect to itself.".format(repr(address))
)
def __make_server_conn(self, server_address):
if self.config.options.spoof_source_address and self.config.options.upstream_bind_address == '':
return connections.ServerConnection(
server_address, (self.ctx.client_conn.address[0], 0), True)
else:
return connections.ServerConnection(
server_address, (self.config.options.upstream_bind_address, 0),
self.config.options.spoof_source_address
)
def set_server(self, address):
"""
Sets a new server address. If there is an existing connection, it will be closed.
"""
if self.server_conn.connected():
self.disconnect()
self.log("Set new server address: {}:{}".format(address[0], address[1]), "debug")
self.server_conn.address = address
self.__check_self_connect()
def disconnect(self):
"""
Deletes (and closes) an existing server connection.
Must not be called if there is no existing connection.
"""
self.log("serverdisconnect", "debug", [repr(self.server_conn.address)])
address = self.server_conn.address
self.server_conn.finish()
self.server_conn.close()
self.channel.tell("serverdisconnect", self.server_conn)
self.server_conn = self.__make_server_conn(address)
def connect(self):
"""
Establishes a server connection.
Must not be called if there is an existing connection.
Raises:
~mitmproxy.exceptions.ProtocolException: if the connection could not be established.
"""
if not self.server_conn.address:
raise exceptions.ProtocolException("Cannot connect to server, no server address given.")
try:
self.server_conn.connect()
self.log("serverconnect", "debug", [repr(self.server_conn.address)])
self.channel.ask("serverconnect", self.server_conn)
except exceptions.TcpException as e:
raise exceptions.ProtocolException(
"Server connection to {} failed: {}".format(
repr(self.server_conn.address), str(e)
)
)
| 1 | 15,153 | Could we not just always include `self.config.options.listen_host`? If that is empty, the `address[0]` check should also never be true. | mitmproxy-mitmproxy | py |
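A hedged sketch of the suggestion in the row above (a standalone illustration, not mitmproxy code; the helper name `is_self_connect` is made up): unconditionally including `listen_host` is safe because an empty string never equals a resolved peer address, so the extra `if` guard in the patch can be dropped.

def is_self_connect(address, listen_port, listen_host):
    # An empty listen_host never matches address[0], so no guard is needed.
    forbidden_hosts = ["localhost", "127.0.0.1", "::1", listen_host]
    return address[1] == listen_port and address[0] in forbidden_hosts

# Example: proxy listening on port 8080 with an empty listen_host option.
assert is_self_connect(("127.0.0.1", 8080), 8080, "")
assert not is_self_connect(("example.com", 443), 8080, "")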
@@ -102,8 +102,8 @@ public class DataFilesTable extends BaseMetadataTable {
}
@Override
- protected long targetSplitSize(TableOperations ops) {
- return ops.current().propertyAsLong(
+ public long targetSplitSize() {
+ return tableOps().current().propertyAsLong(
TableProperties.METADATA_SPLIT_SIZE, TableProperties.METADATA_SPLIT_SIZE_DEFAULT);
}
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.expressions.ResidualEvaluator;
import org.apache.iceberg.io.CloseableIterable;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.Sets;
import org.apache.iceberg.types.TypeUtil;
/**
* A {@link Table} implementation that exposes a table's data files as rows.
*/
public class DataFilesTable extends BaseMetadataTable {
private final TableOperations ops;
private final Table table;
private final String name;
DataFilesTable(TableOperations ops, Table table) {
this(ops, table, table.name() + ".files");
}
DataFilesTable(TableOperations ops, Table table, String name) {
this.ops = ops;
this.table = table;
this.name = name;
}
@Override
Table table() {
return table;
}
@Override
public String name() {
return name;
}
@Override
public TableScan newScan() {
return new FilesTableScan(ops, table, schema());
}
@Override
public Schema schema() {
Schema schema = new Schema(DataFile.getType(table.spec().partitionType()).fields());
if (table.spec().fields().size() < 1) {
// avoid returning an empty struct, which is not always supported. instead, drop the partition field
return TypeUtil.selectNot(schema, Sets.newHashSet(DataFile.PARTITION_ID));
} else {
return schema;
}
}
@Override
String metadataLocation() {
return ops.current().metadataFileLocation();
}
@Override
MetadataTableType metadataTableType() {
return MetadataTableType.FILES;
}
public static class FilesTableScan extends BaseTableScan {
private final Schema fileSchema;
FilesTableScan(TableOperations ops, Table table, Schema fileSchema) {
super(ops, table, fileSchema);
this.fileSchema = fileSchema;
}
private FilesTableScan(TableOperations ops, Table table, Schema schema, Schema fileSchema,
TableScanContext context) {
super(ops, table, schema, context);
this.fileSchema = fileSchema;
}
@Override
protected TableScan newRefinedScan(TableOperations ops, Table table, Schema schema, TableScanContext context) {
return new FilesTableScan(ops, table, schema, fileSchema, context);
}
@Override
protected long targetSplitSize(TableOperations ops) {
return ops.current().propertyAsLong(
TableProperties.METADATA_SPLIT_SIZE, TableProperties.METADATA_SPLIT_SIZE_DEFAULT);
}
@Override
protected CloseableIterable<FileScanTask> planFiles(
TableOperations ops, Snapshot snapshot, Expression rowFilter,
boolean ignoreResiduals, boolean caseSensitive, boolean colStats) {
CloseableIterable<ManifestFile> manifests = CloseableIterable.withNoopClose(snapshot.dataManifests());
String schemaString = SchemaParser.toJson(schema());
String specString = PartitionSpecParser.toJson(PartitionSpec.unpartitioned());
Expression filter = ignoreResiduals ? Expressions.alwaysTrue() : rowFilter;
ResidualEvaluator residuals = ResidualEvaluator.unpartitioned(filter);
// Data tasks produce the table schema, not the projection schema and projection is done by processing engines.
// This data task needs to use the table schema, which may not include a partition schema to avoid having an
// empty struct in the schema for unpartitioned tables. Some engines, like Spark, can't handle empty structs in
// all cases.
return CloseableIterable.transform(manifests, manifest ->
new ManifestReadTask(ops.io(), manifest, fileSchema, schemaString, specString, residuals));
}
}
static class ManifestReadTask extends BaseFileScanTask implements DataTask {
private final FileIO io;
private final ManifestFile manifest;
private final Schema schema;
ManifestReadTask(FileIO io, ManifestFile manifest, Schema schema, String schemaString,
String specString, ResidualEvaluator residuals) {
super(DataFiles.fromManifest(manifest), null, schemaString, specString, residuals);
this.io = io;
this.manifest = manifest;
this.schema = schema;
}
@Override
public CloseableIterable<StructLike> rows() {
return CloseableIterable.transform(
ManifestFiles.read(manifest, io).project(schema),
file -> (GenericDataFile) file);
}
@Override
public Iterable<FileScanTask> split(long splitSize) {
return ImmutableList.of(this); // don't split
}
}
}
| 1 | 34,216 | Why not just access `ops` directly like before? | apache-iceberg | java |
@@ -389,17 +389,13 @@ void Creature::onRemoveTileItem(const Tile* tile, const Position& pos, const Ite
}
}
-void Creature::onCreatureAppear(Creature* creature, bool isLogin)
+void Creature::onCreatureAppear(Creature* creature, bool)
{
if (creature == this) {
if (useCacheMap()) {
isMapLoaded = true;
updateMapCache();
}
-
- if (isLogin) {
- setLastPosition(getPosition());
- }
} else if (isMapLoaded) {
if (creature->getPosition().z == getPosition().z) {
updateTileCache(creature->getTile(), creature->getPosition()); | 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2017 Mark Samman <mark.samman@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include "creature.h"
#include "game.h"
#include "monster.h"
#include "configmanager.h"
#include "scheduler.h"
double Creature::speedA = 857.36;
double Creature::speedB = 261.29;
double Creature::speedC = -4795.01;
extern Game g_game;
extern ConfigManager g_config;
extern CreatureEvents* g_creatureEvents;
Creature::Creature()
{
onIdleStatus();
}
Creature::~Creature()
{
for (Creature* summon : summons) {
summon->setAttackedCreature(nullptr);
summon->setMaster(nullptr);
}
for (Condition* condition : conditions) {
condition->endCondition(this);
delete condition;
}
}
bool Creature::canSee(const Position& myPos, const Position& pos, int32_t viewRangeX, int32_t viewRangeY)
{
if (myPos.z <= 7) {
//we are on ground level or above (7 -> 0)
//view is from 7 -> 0
if (pos.z > 7) {
return false;
}
} else if (myPos.z >= 8) {
//we are underground (8 -> 15)
//view is +/- 2 from the floor we stand on
if (Position::getDistanceZ(myPos, pos) > 2) {
return false;
}
}
const int_fast32_t offsetz = myPos.getZ() - pos.getZ();
return (pos.getX() >= myPos.getX() - viewRangeX + offsetz) && (pos.getX() <= myPos.getX() + viewRangeX + offsetz)
&& (pos.getY() >= myPos.getY() - viewRangeY + offsetz) && (pos.getY() <= myPos.getY() + viewRangeY + offsetz);
}
bool Creature::canSee(const Position& pos) const
{
return canSee(getPosition(), pos, Map::maxViewportX, Map::maxViewportY);
}
bool Creature::canSeeCreature(const Creature* creature) const
{
if (!canSeeInvisibility() && creature->isInvisible()) {
return false;
}
return true;
}
void Creature::setSkull(Skulls_t newSkull)
{
skull = newSkull;
g_game.updateCreatureSkull(this);
}
int64_t Creature::getTimeSinceLastMove() const
{
if (lastStep) {
return OTSYS_TIME() - lastStep;
}
return std::numeric_limits<int64_t>::max();
}
int32_t Creature::getWalkDelay(Direction dir) const
{
if (lastStep == 0) {
return 0;
}
int64_t ct = OTSYS_TIME();
int64_t stepDuration = getStepDuration(dir);
return stepDuration - (ct - lastStep);
}
int32_t Creature::getWalkDelay() const
{
//Used for auto-walking
if (lastStep == 0) {
return 0;
}
int64_t ct = OTSYS_TIME();
int64_t stepDuration = getStepDuration() * lastStepCost;
return stepDuration - (ct - lastStep);
}
void Creature::onThink(uint32_t interval)
{
if (!isMapLoaded && useCacheMap()) {
isMapLoaded = true;
updateMapCache();
}
if (followCreature && master != followCreature && !canSeeCreature(followCreature)) {
onCreatureDisappear(followCreature, false);
}
if (attackedCreature && master != attackedCreature && !canSeeCreature(attackedCreature)) {
onCreatureDisappear(attackedCreature, false);
}
blockTicks += interval;
if (blockTicks >= 1000) {
blockCount = std::min<uint32_t>(blockCount + 1, 2);
blockTicks = 0;
}
if (followCreature) {
walkUpdateTicks += interval;
if (forceUpdateFollowPath || walkUpdateTicks >= 2000) {
walkUpdateTicks = 0;
forceUpdateFollowPath = false;
isUpdatingPath = true;
}
}
if (isUpdatingPath) {
isUpdatingPath = false;
goToFollowCreature();
}
//scripting event - onThink
const CreatureEventList& thinkEvents = getCreatureEvents(CREATURE_EVENT_THINK);
for (CreatureEvent* thinkEvent : thinkEvents) {
thinkEvent->executeOnThink(this, interval);
}
}
void Creature::onAttacking(uint32_t interval)
{
if (!attackedCreature) {
return;
}
onAttacked();
attackedCreature->onAttacked();
if (g_game.isSightClear(getPosition(), attackedCreature->getPosition(), true)) {
doAttacking(interval);
}
}
void Creature::onIdleStatus()
{
if (getHealth() > 0) {
damageMap.clear();
lastHitCreatureId = 0;
}
}
void Creature::onWalk()
{
if (getWalkDelay() <= 0) {
Direction dir;
uint32_t flags = FLAG_IGNOREFIELDDAMAGE;
if (getNextStep(dir, flags)) {
ReturnValue ret = g_game.internalMoveCreature(this, dir, flags);
if (ret != RETURNVALUE_NOERROR) {
if (Player* player = getPlayer()) {
player->sendCancelMessage(ret);
player->sendCancelWalk();
}
forceUpdateFollowPath = true;
}
} else {
if (listWalkDir.empty()) {
onWalkComplete();
}
stopEventWalk();
}
}
if (cancelNextWalk) {
listWalkDir.clear();
onWalkAborted();
cancelNextWalk = false;
}
if (eventWalk != 0) {
eventWalk = 0;
addEventWalk();
}
}
void Creature::onWalk(Direction& dir)
{
if (hasCondition(CONDITION_DRUNK)) {
uint32_t r = uniform_random(0, 20);
if (r <= DIRECTION_DIAGONAL_MASK) {
if (r < DIRECTION_DIAGONAL_MASK) {
dir = static_cast<Direction>(r);
}
g_game.internalCreatureSay(this, TALKTYPE_MONSTER_SAY, "Hicks!", false);
}
}
}
bool Creature::getNextStep(Direction& dir, uint32_t&)
{
if (listWalkDir.empty()) {
return false;
}
dir = listWalkDir.front();
listWalkDir.pop_front();
onWalk(dir);
return true;
}
void Creature::startAutoWalk(const std::forward_list<Direction>& listDir)
{
listWalkDir = listDir;
size_t size = 0;
for (auto it = listDir.begin(); it != listDir.end() && size <= 1; ++it) {
size++;
}
addEventWalk(size == 1);
}
void Creature::addEventWalk(bool firstStep)
{
cancelNextWalk = false;
if (getStepSpeed() <= 0) {
return;
}
if (eventWalk != 0) {
return;
}
int64_t ticks = getEventStepTicks(firstStep);
if (ticks <= 0) {
return;
}
// Take first step right away, but still queue the next
if (ticks == 1) {
g_game.checkCreatureWalk(getID());
}
eventWalk = g_scheduler.addEvent(createSchedulerTask(ticks, std::bind(&Game::checkCreatureWalk, &g_game, getID())));
}
void Creature::stopEventWalk()
{
if (eventWalk != 0) {
g_scheduler.stopEvent(eventWalk);
eventWalk = 0;
}
}
void Creature::updateMapCache()
{
Tile* tile;
const Position& myPos = getPosition();
Position pos(0, 0, myPos.z);
for (int32_t y = -maxWalkCacheHeight; y <= maxWalkCacheHeight; ++y) {
for (int32_t x = -maxWalkCacheWidth; x <= maxWalkCacheWidth; ++x) {
pos.x = myPos.getX() + x;
pos.y = myPos.getY() + y;
tile = g_game.map.getTile(pos);
updateTileCache(tile, pos);
}
}
}
void Creature::updateTileCache(const Tile* tile, int32_t dx, int32_t dy)
{
if (std::abs(dx) <= maxWalkCacheWidth && std::abs(dy) <= maxWalkCacheHeight) {
localMapCache[maxWalkCacheHeight + dy][maxWalkCacheWidth + dx] = tile && tile->queryAdd(0, *this, 1, FLAG_PATHFINDING | FLAG_IGNOREFIELDDAMAGE) == RETURNVALUE_NOERROR;
}
}
void Creature::updateTileCache(const Tile* tile, const Position& pos)
{
const Position& myPos = getPosition();
if (pos.z == myPos.z) {
int32_t dx = Position::getOffsetX(pos, myPos);
int32_t dy = Position::getOffsetY(pos, myPos);
updateTileCache(tile, dx, dy);
}
}
int32_t Creature::getWalkCache(const Position& pos) const
{
if (!useCacheMap()) {
return 2;
}
const Position& myPos = getPosition();
if (myPos.z != pos.z) {
return 0;
}
if (pos == myPos) {
return 1;
}
int32_t dx = Position::getOffsetX(pos, myPos);
if (std::abs(dx) <= maxWalkCacheWidth) {
int32_t dy = Position::getOffsetY(pos, myPos);
if (std::abs(dy) <= maxWalkCacheHeight) {
if (localMapCache[maxWalkCacheHeight + dy][maxWalkCacheWidth + dx]) {
return 1;
} else {
return 0;
}
}
}
//out of range
return 2;
}
void Creature::onAddTileItem(const Tile* tile, const Position& pos)
{
if (isMapLoaded && pos.z == getPosition().z) {
updateTileCache(tile, pos);
}
}
void Creature::onUpdateTileItem(const Tile* tile, const Position& pos, const Item*,
const ItemType& oldType, const Item*, const ItemType& newType)
{
if (!isMapLoaded) {
return;
}
if (oldType.blockSolid || oldType.blockPathFind || newType.blockPathFind || newType.blockSolid) {
if (pos.z == getPosition().z) {
updateTileCache(tile, pos);
}
}
}
void Creature::onRemoveTileItem(const Tile* tile, const Position& pos, const ItemType& iType, const Item*)
{
if (!isMapLoaded) {
return;
}
if (iType.blockSolid || iType.blockPathFind || iType.isGroundTile()) {
if (pos.z == getPosition().z) {
updateTileCache(tile, pos);
}
}
}
void Creature::onCreatureAppear(Creature* creature, bool isLogin)
{
if (creature == this) {
if (useCacheMap()) {
isMapLoaded = true;
updateMapCache();
}
if (isLogin) {
setLastPosition(getPosition());
}
} else if (isMapLoaded) {
if (creature->getPosition().z == getPosition().z) {
updateTileCache(creature->getTile(), creature->getPosition());
}
}
}
void Creature::onRemoveCreature(Creature* creature, bool)
{
onCreatureDisappear(creature, true);
if (creature == this) {
if (master && !master->isRemoved()) {
setMaster(nullptr);
}
} else if (isMapLoaded) {
if (creature->getPosition().z == getPosition().z) {
updateTileCache(creature->getTile(), creature->getPosition());
}
}
}
void Creature::onCreatureDisappear(const Creature* creature, bool isLogout)
{
if (attackedCreature == creature) {
setAttackedCreature(nullptr);
onAttackedCreatureDisappear(isLogout);
}
if (followCreature == creature) {
setFollowCreature(nullptr);
onFollowCreatureDisappear(isLogout);
}
}
void Creature::onChangeZone(ZoneType_t zone)
{
if (attackedCreature && zone == ZONE_PROTECTION) {
onCreatureDisappear(attackedCreature, false);
}
}
void Creature::onAttackedCreatureChangeZone(ZoneType_t zone)
{
if (zone == ZONE_PROTECTION) {
onCreatureDisappear(attackedCreature, false);
}
}
void Creature::onCreatureMove(Creature* creature, const Tile* newTile, const Position& newPos,
const Tile* oldTile, const Position& oldPos, bool teleport)
{
if (creature == this) {
lastStep = OTSYS_TIME();
lastStepCost = 1;
if (!teleport) {
if (oldPos.z != newPos.z) {
//floor change extra cost
lastStepCost = 2;
} else if (Position::getDistanceX(newPos, oldPos) >= 1 && Position::getDistanceY(newPos, oldPos) >= 1) {
//diagonal extra cost
lastStepCost = 3;
}
} else {
stopEventWalk();
}
if (!summons.empty()) {
//check if any of our summons is out of range (+/- 2 floors or 30 tiles away)
std::forward_list<Creature*> despawnList;
for (Creature* summon : summons) {
const Position& pos = summon->getPosition();
if (Position::getDistanceZ(newPos, pos) > 2 || (std::max<int32_t>(Position::getDistanceX(newPos, pos), Position::getDistanceY(newPos, pos)) > 30)) {
despawnList.push_front(summon);
}
}
for (Creature* despawnCreature : despawnList) {
g_game.removeCreature(despawnCreature, true);
}
}
if (newTile->getZone() != oldTile->getZone()) {
onChangeZone(getZone());
}
//update map cache
if (isMapLoaded) {
if (teleport || oldPos.z != newPos.z) {
updateMapCache();
} else {
const Position& myPos = getPosition();
if (oldPos.y > newPos.y) { //north
//shift y south
for (int32_t y = mapWalkHeight - 1; --y >= 0;) {
memcpy(localMapCache[y + 1], localMapCache[y], sizeof(localMapCache[y]));
}
//update 0
for (int32_t x = -maxWalkCacheWidth; x <= maxWalkCacheWidth; ++x) {
Tile* cacheTile = g_game.map.getTile(myPos.getX() + x, myPos.getY() - maxWalkCacheHeight, myPos.z);
updateTileCache(cacheTile, x, -maxWalkCacheHeight);
}
} else if (oldPos.y < newPos.y) { // south
//shift y north
for (int32_t y = 0; y <= mapWalkHeight - 2; ++y) {
memcpy(localMapCache[y], localMapCache[y + 1], sizeof(localMapCache[y]));
}
//update mapWalkHeight - 1
for (int32_t x = -maxWalkCacheWidth; x <= maxWalkCacheWidth; ++x) {
Tile* cacheTile = g_game.map.getTile(myPos.getX() + x, myPos.getY() + maxWalkCacheHeight, myPos.z);
updateTileCache(cacheTile, x, maxWalkCacheHeight);
}
}
if (oldPos.x < newPos.x) { // east
				//shift x west
int32_t starty = 0;
int32_t endy = mapWalkHeight - 1;
int32_t dy = Position::getDistanceY(oldPos, newPos);
if (dy < 0) {
endy += dy;
} else if (dy > 0) {
starty = dy;
}
for (int32_t y = starty; y <= endy; ++y) {
for (int32_t x = 0; x <= mapWalkWidth - 2; ++x) {
localMapCache[y][x] = localMapCache[y][x + 1];
}
}
//update mapWalkWidth - 1
for (int32_t y = -maxWalkCacheHeight; y <= maxWalkCacheHeight; ++y) {
Tile* cacheTile = g_game.map.getTile(myPos.x + maxWalkCacheWidth, myPos.y + y, myPos.z);
updateTileCache(cacheTile, maxWalkCacheWidth, y);
}
} else if (oldPos.x > newPos.x) { // west
				//shift x east
int32_t starty = 0;
int32_t endy = mapWalkHeight - 1;
int32_t dy = Position::getDistanceY(oldPos, newPos);
if (dy < 0) {
endy += dy;
} else if (dy > 0) {
starty = dy;
}
for (int32_t y = starty; y <= endy; ++y) {
for (int32_t x = mapWalkWidth - 1; --x >= 0;) {
localMapCache[y][x + 1] = localMapCache[y][x];
}
}
//update 0
for (int32_t y = -maxWalkCacheHeight; y <= maxWalkCacheHeight; ++y) {
Tile* cacheTile = g_game.map.getTile(myPos.x - maxWalkCacheWidth, myPos.y + y, myPos.z);
updateTileCache(cacheTile, -maxWalkCacheWidth, y);
}
}
updateTileCache(oldTile, oldPos);
}
}
} else {
if (isMapLoaded) {
const Position& myPos = getPosition();
if (newPos.z == myPos.z) {
updateTileCache(newTile, newPos);
}
if (oldPos.z == myPos.z) {
updateTileCache(oldTile, oldPos);
}
}
}
if (creature == followCreature || (creature == this && followCreature)) {
if (hasFollowPath) {
isUpdatingPath = true;
}
if (newPos.z != oldPos.z || !canSee(followCreature->getPosition())) {
onCreatureDisappear(followCreature, false);
}
}
if (creature == attackedCreature || (creature == this && attackedCreature)) {
if (newPos.z != oldPos.z || !canSee(attackedCreature->getPosition())) {
onCreatureDisappear(attackedCreature, false);
} else {
if (hasExtraSwing()) {
//our target is moving lets see if we can get in hit
g_dispatcher.addTask(createTask(std::bind(&Game::checkCreatureAttack, &g_game, getID())));
}
if (newTile->getZone() != oldTile->getZone()) {
onAttackedCreatureChangeZone(attackedCreature->getZone());
}
}
}
}
void Creature::onDeath()
{
bool lastHitUnjustified = false;
bool mostDamageUnjustified = false;
Creature* lastHitCreature = g_game.getCreatureByID(lastHitCreatureId);
Creature* lastHitCreatureMaster;
if (lastHitCreature) {
lastHitUnjustified = lastHitCreature->onKilledCreature(this);
lastHitCreatureMaster = lastHitCreature->getMaster();
} else {
lastHitCreatureMaster = nullptr;
}
Creature* mostDamageCreature = nullptr;
const int64_t timeNow = OTSYS_TIME();
const uint32_t inFightTicks = g_config.getNumber(ConfigManager::PZ_LOCKED);
int32_t mostDamage = 0;
std::map<Creature*, uint64_t> experienceMap;
for (const auto& it : damageMap) {
if (Creature* attacker = g_game.getCreatureByID(it.first)) {
CountBlock_t cb = it.second;
if ((cb.total > mostDamage && (timeNow - cb.ticks <= inFightTicks))) {
mostDamage = cb.total;
mostDamageCreature = attacker;
}
if (attacker != this) {
uint64_t gainExp = getGainedExperience(attacker);
if (Player* attackerPlayer = attacker->getPlayer()) {
attackerPlayer->removeAttacked(getPlayer());
Party* party = attackerPlayer->getParty();
if (party && party->getLeader() && party->isSharedExperienceActive() && party->isSharedExperienceEnabled()) {
attacker = party->getLeader();
}
}
auto tmpIt = experienceMap.find(attacker);
if (tmpIt == experienceMap.end()) {
experienceMap[attacker] = gainExp;
} else {
tmpIt->second += gainExp;
}
}
}
}
for (const auto& it : experienceMap) {
it.first->onGainExperience(it.second, this);
}
if (mostDamageCreature) {
if (mostDamageCreature != lastHitCreature && mostDamageCreature != lastHitCreatureMaster) {
Creature* mostDamageCreatureMaster = mostDamageCreature->getMaster();
if (lastHitCreature != mostDamageCreatureMaster && (lastHitCreatureMaster == nullptr || mostDamageCreatureMaster != lastHitCreatureMaster)) {
mostDamageUnjustified = mostDamageCreature->onKilledCreature(this, false);
}
}
}
bool droppedCorpse = dropCorpse(lastHitCreature, mostDamageCreature, lastHitUnjustified, mostDamageUnjustified);
death(lastHitCreature);
if (master) {
setMaster(nullptr);
}
if (droppedCorpse) {
g_game.removeCreature(this, false);
}
}
bool Creature::dropCorpse(Creature* lastHitCreature, Creature* mostDamageCreature, bool lastHitUnjustified, bool mostDamageUnjustified)
{
if (!lootDrop && getMonster()) {
if (master) {
//scripting event - onDeath
const CreatureEventList& deathEvents = getCreatureEvents(CREATURE_EVENT_DEATH);
for (CreatureEvent* deathEvent : deathEvents) {
deathEvent->executeOnDeath(this, nullptr, lastHitCreature, mostDamageCreature, lastHitUnjustified, mostDamageUnjustified);
}
}
g_game.addMagicEffect(getPosition(), CONST_ME_POFF);
} else {
Item* splash;
switch (getRace()) {
case RACE_VENOM:
splash = Item::CreateItem(ITEM_FULLSPLASH, FLUID_GREEN);
break;
case RACE_BLOOD:
splash = Item::CreateItem(ITEM_FULLSPLASH, FLUID_BLOOD);
break;
default:
splash = nullptr;
break;
}
Tile* tile = getTile();
if (splash) {
g_game.internalAddItem(tile, splash, INDEX_WHEREEVER, FLAG_NOLIMIT);
g_game.startDecay(splash);
}
Item* corpse = getCorpse(lastHitCreature, mostDamageCreature);
if (corpse) {
g_game.internalAddItem(tile, corpse, INDEX_WHEREEVER, FLAG_NOLIMIT);
g_game.startDecay(corpse);
}
//scripting event - onDeath
for (CreatureEvent* deathEvent : getCreatureEvents(CREATURE_EVENT_DEATH)) {
deathEvent->executeOnDeath(this, corpse, lastHitCreature, mostDamageCreature, lastHitUnjustified, mostDamageUnjustified);
}
if (corpse) {
dropLoot(corpse->getContainer(), lastHitCreature);
}
}
return true;
}
bool Creature::hasBeenAttacked(uint32_t attackerId)
{
auto it = damageMap.find(attackerId);
if (it == damageMap.end()) {
return false;
}
return (OTSYS_TIME() - it->second.ticks) <= g_config.getNumber(ConfigManager::PZ_LOCKED);
}
Item* Creature::getCorpse(Creature*, Creature*)
{
return Item::CreateItem(getLookCorpse());
}
void Creature::changeHealth(int32_t healthChange, bool sendHealthChange/* = true*/)
{
int32_t oldHealth = health;
if (healthChange > 0) {
health += std::min<int32_t>(healthChange, getMaxHealth() - health);
} else {
health = std::max<int32_t>(0, health + healthChange);
}
if (sendHealthChange && oldHealth != health) {
g_game.addCreatureHealth(this);
}
}
void Creature::changeMana(int32_t manaChange)
{
if (manaChange > 0) {
mana += std::min<int32_t>(manaChange, getMaxMana() - mana);
} else {
mana = std::max<int32_t>(0, mana + manaChange);
}
}
void Creature::gainHealth(Creature* healer, int32_t healthGain)
{
changeHealth(healthGain);
if (healer) {
healer->onTargetCreatureGainHealth(this, healthGain);
}
}
void Creature::drainHealth(Creature* attacker, int32_t damage)
{
changeHealth(-damage, false);
if (attacker) {
attacker->onAttackedCreatureDrainHealth(this, damage);
}
}
void Creature::drainMana(Creature* attacker, int32_t manaLoss)
{
onAttacked();
changeMana(-manaLoss);
if (attacker) {
addDamagePoints(attacker, manaLoss);
}
}
BlockType_t Creature::blockHit(Creature* attacker, CombatType_t combatType, int32_t& damage,
bool checkDefense /* = false */, bool checkArmor /* = false */, bool /* field = false */)
{
BlockType_t blockType = BLOCK_NONE;
if (isImmune(combatType)) {
damage = 0;
blockType = BLOCK_IMMUNITY;
} else if (checkDefense || checkArmor) {
bool hasDefense = false;
if (blockCount > 0) {
--blockCount;
hasDefense = true;
}
if (checkDefense && hasDefense) {
int32_t defense = getDefense();
damage -= uniform_random(defense / 2, defense);
if (damage <= 0) {
damage = 0;
blockType = BLOCK_DEFENSE;
checkArmor = false;
}
}
if (checkArmor) {
int32_t armor = getArmor();
if (armor > 3) {
damage -= uniform_random(armor / 2, armor - (armor % 2 + 1));
} else if (armor > 0) {
--damage;
}
if (damage <= 0) {
damage = 0;
blockType = BLOCK_ARMOR;
}
}
if (hasDefense && blockType != BLOCK_NONE) {
onBlockHit();
}
}
if (attacker) {
attacker->onAttackedCreature(this);
attacker->onAttackedCreatureBlockHit(blockType);
}
onAttacked();
return blockType;
}
bool Creature::setAttackedCreature(Creature* creature)
{
if (creature) {
const Position& creaturePos = creature->getPosition();
if (creaturePos.z != getPosition().z || !canSee(creaturePos)) {
attackedCreature = nullptr;
return false;
}
attackedCreature = creature;
onAttackedCreature(attackedCreature);
attackedCreature->onAttacked();
} else {
attackedCreature = nullptr;
}
for (Creature* summon : summons) {
summon->setAttackedCreature(creature);
}
return true;
}
void Creature::getPathSearchParams(const Creature*, FindPathParams& fpp) const
{
fpp.fullPathSearch = !hasFollowPath;
fpp.clearSight = true;
fpp.maxSearchDist = 12;
fpp.minTargetDist = 1;
fpp.maxTargetDist = 1;
}
void Creature::goToFollowCreature()
{
if (followCreature) {
FindPathParams fpp;
getPathSearchParams(followCreature, fpp);
Monster* monster = getMonster();
if (monster && !monster->getMaster() && (monster->isFleeing() || fpp.maxTargetDist > 1)) {
Direction dir = DIRECTION_NONE;
if (monster->isFleeing()) {
monster->getDistanceStep(followCreature->getPosition(), dir, true);
} else { //maxTargetDist > 1
if (!monster->getDistanceStep(followCreature->getPosition(), dir)) {
// if we can't get anything then let the A* calculate
listWalkDir.clear();
if (getPathTo(followCreature->getPosition(), listWalkDir, fpp)) {
hasFollowPath = true;
startAutoWalk(listWalkDir);
} else {
hasFollowPath = false;
}
return;
}
}
if (dir != DIRECTION_NONE) {
listWalkDir.clear();
listWalkDir.push_front(dir);
hasFollowPath = true;
startAutoWalk(listWalkDir);
}
} else {
listWalkDir.clear();
if (getPathTo(followCreature->getPosition(), listWalkDir, fpp)) {
hasFollowPath = true;
startAutoWalk(listWalkDir);
} else {
hasFollowPath = false;
}
}
}
onFollowCreatureComplete(followCreature);
}
bool Creature::setFollowCreature(Creature* creature)
{
if (creature) {
if (followCreature == creature) {
return true;
}
const Position& creaturePos = creature->getPosition();
if (creaturePos.z != getPosition().z || !canSee(creaturePos)) {
followCreature = nullptr;
return false;
}
if (!listWalkDir.empty()) {
listWalkDir.clear();
onWalkAborted();
}
hasFollowPath = false;
forceUpdateFollowPath = false;
followCreature = creature;
isUpdatingPath = true;
} else {
isUpdatingPath = false;
followCreature = nullptr;
}
onFollowCreature(creature);
return true;
}
double Creature::getDamageRatio(Creature* attacker) const
{
uint32_t totalDamage = 0;
uint32_t attackerDamage = 0;
for (const auto& it : damageMap) {
const CountBlock_t& cb = it.second;
totalDamage += cb.total;
if (it.first == attacker->getID()) {
attackerDamage += cb.total;
}
}
if (totalDamage == 0) {
return 0;
}
return (static_cast<double>(attackerDamage) / totalDamage);
}
uint64_t Creature::getGainedExperience(Creature* attacker) const
{
return std::floor(getDamageRatio(attacker) * getLostExperience());
}
void Creature::addDamagePoints(Creature* attacker, int32_t damagePoints)
{
if (damagePoints <= 0) {
return;
}
uint32_t attackerId = attacker->id;
auto it = damageMap.find(attackerId);
if (it == damageMap.end()) {
CountBlock_t cb;
cb.ticks = OTSYS_TIME();
cb.total = damagePoints;
damageMap[attackerId] = cb;
} else {
it->second.total += damagePoints;
it->second.ticks = OTSYS_TIME();
}
lastHitCreatureId = attackerId;
}
void Creature::onAddCondition(ConditionType_t type)
{
if (type == CONDITION_PARALYZE && hasCondition(CONDITION_HASTE)) {
removeCondition(CONDITION_HASTE);
} else if (type == CONDITION_HASTE && hasCondition(CONDITION_PARALYZE)) {
removeCondition(CONDITION_PARALYZE);
}
}
void Creature::onAddCombatCondition(ConditionType_t)
{
//
}
void Creature::onEndCondition(ConditionType_t)
{
//
}
void Creature::onTickCondition(ConditionType_t type, bool& bRemove)
{
const MagicField* field = getTile()->getFieldItem();
if (!field) {
return;
}
switch (type) {
case CONDITION_FIRE:
bRemove = (field->getCombatType() != COMBAT_FIREDAMAGE);
break;
case CONDITION_ENERGY:
bRemove = (field->getCombatType() != COMBAT_ENERGYDAMAGE);
break;
case CONDITION_POISON:
bRemove = (field->getCombatType() != COMBAT_EARTHDAMAGE);
break;
case CONDITION_FREEZING:
bRemove = (field->getCombatType() != COMBAT_ICEDAMAGE);
break;
case CONDITION_DAZZLED:
bRemove = (field->getCombatType() != COMBAT_HOLYDAMAGE);
break;
case CONDITION_CURSED:
bRemove = (field->getCombatType() != COMBAT_DEATHDAMAGE);
break;
case CONDITION_DROWN:
bRemove = (field->getCombatType() != COMBAT_DROWNDAMAGE);
break;
case CONDITION_BLEEDING:
bRemove = (field->getCombatType() != COMBAT_PHYSICALDAMAGE);
break;
default:
break;
}
}
void Creature::onCombatRemoveCondition(Condition* condition)
{
removeCondition(condition);
}
void Creature::onAttacked()
{
//
}
void Creature::onAttackedCreatureDrainHealth(Creature* target, int32_t points)
{
target->addDamagePoints(this, points);
}
bool Creature::onKilledCreature(Creature* target, bool)
{
if (master) {
master->onKilledCreature(target);
}
//scripting event - onKill
const CreatureEventList& killEvents = getCreatureEvents(CREATURE_EVENT_KILL);
for (CreatureEvent* killEvent : killEvents) {
killEvent->executeOnKill(this, target);
}
return false;
}
void Creature::onGainExperience(uint64_t gainExp, Creature* target)
{
if (gainExp == 0 || !master) {
return;
}
gainExp /= 2;
master->onGainExperience(gainExp, target);
SpectatorHashSet spectators;
g_game.map.getSpectators(spectators, position, false, true);
if (spectators.empty()) {
return;
}
TextMessage message(MESSAGE_EXPERIENCE_OTHERS, ucfirst(getNameDescription()) + " gained " + std::to_string(gainExp) + (gainExp != 1 ? " experience points." : " experience point."));
message.position = position;
message.primary.color = TEXTCOLOR_WHITE_EXP;
message.primary.value = gainExp;
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendTextMessage(message);
}
}
bool Creature::setMaster(Creature* newMaster) {
if (!newMaster && !master) {
return false;
}
if (newMaster) {
incrementReferenceCounter();
newMaster->summons.push_back(this);
}
if (master) {
auto summon = std::find(master->summons.begin(), master->summons.end(), this);
if (summon != master->summons.end()) {
decrementReferenceCounter();
master->summons.erase(summon);
}
}
master = newMaster;
return true;
}
bool Creature::addCondition(Condition* condition, bool force/* = false*/)
{
if (condition == nullptr) {
return false;
}
if (!force && condition->getType() == CONDITION_HASTE && hasCondition(CONDITION_PARALYZE)) {
int64_t walkDelay = getWalkDelay();
if (walkDelay > 0) {
g_scheduler.addEvent(createSchedulerTask(walkDelay, std::bind(&Game::forceAddCondition, &g_game, getID(), condition)));
return false;
}
}
Condition* prevCond = getCondition(condition->getType(), condition->getId(), condition->getSubId());
if (prevCond) {
prevCond->addCondition(this, condition);
delete condition;
return true;
}
if (condition->startCondition(this)) {
conditions.push_back(condition);
onAddCondition(condition->getType());
return true;
}
delete condition;
return false;
}
bool Creature::addCombatCondition(Condition* condition)
{
//Caution: condition variable could be deleted after the call to addCondition
ConditionType_t type = condition->getType();
if (!addCondition(condition)) {
return false;
}
onAddCombatCondition(type);
return true;
}
void Creature::removeCondition(ConditionType_t type, bool force/* = false*/)
{
auto it = conditions.begin(), end = conditions.end();
while (it != end) {
Condition* condition = *it;
if (condition->getType() != type) {
++it;
continue;
}
if (!force && type == CONDITION_PARALYZE) {
int64_t walkDelay = getWalkDelay();
if (walkDelay > 0) {
g_scheduler.addEvent(createSchedulerTask(walkDelay, std::bind(&Game::forceRemoveCondition, &g_game, getID(), type)));
return;
}
}
it = conditions.erase(it);
condition->endCondition(this);
delete condition;
onEndCondition(type);
}
}
void Creature::removeCondition(ConditionType_t type, ConditionId_t conditionId, bool force/* = false*/)
{
auto it = conditions.begin(), end = conditions.end();
while (it != end) {
Condition* condition = *it;
if (condition->getType() != type || condition->getId() != conditionId) {
++it;
continue;
}
if (!force && type == CONDITION_PARALYZE) {
int64_t walkDelay = getWalkDelay();
if (walkDelay > 0) {
g_scheduler.addEvent(createSchedulerTask(walkDelay, std::bind(&Game::forceRemoveCondition, &g_game, getID(), type)));
return;
}
}
it = conditions.erase(it);
condition->endCondition(this);
delete condition;
onEndCondition(type);
}
}
void Creature::removeCombatCondition(ConditionType_t type)
{
std::vector<Condition*> removeConditions;
for (Condition* condition : conditions) {
if (condition->getType() == type) {
removeConditions.push_back(condition);
}
}
for (Condition* condition : removeConditions) {
onCombatRemoveCondition(condition);
}
}
void Creature::removeCondition(Condition* condition, bool force/* = false*/)
{
auto it = std::find(conditions.begin(), conditions.end(), condition);
if (it == conditions.end()) {
return;
}
if (!force && condition->getType() == CONDITION_PARALYZE) {
int64_t walkDelay = getWalkDelay();
if (walkDelay > 0) {
g_scheduler.addEvent(createSchedulerTask(walkDelay, std::bind(&Game::forceRemoveCondition, &g_game, getID(), condition->getType())));
return;
}
}
conditions.erase(it);
condition->endCondition(this);
onEndCondition(condition->getType());
delete condition;
}
Condition* Creature::getCondition(ConditionType_t type) const
{
for (Condition* condition : conditions) {
if (condition->getType() == type) {
return condition;
}
}
return nullptr;
}
Condition* Creature::getCondition(ConditionType_t type, ConditionId_t conditionId, uint32_t subId/* = 0*/) const
{
for (Condition* condition : conditions) {
if (condition->getType() == type && condition->getId() == conditionId && condition->getSubId() == subId) {
return condition;
}
}
return nullptr;
}
void Creature::executeConditions(uint32_t interval)
{
auto it = conditions.begin(), end = conditions.end();
while (it != end) {
Condition* condition = *it;
if (!condition->executeCondition(this, interval)) {
ConditionType_t type = condition->getType();
it = conditions.erase(it);
condition->endCondition(this);
delete condition;
onEndCondition(type);
} else {
++it;
}
}
}
bool Creature::hasCondition(ConditionType_t type, uint32_t subId/* = 0*/) const
{
if (isSuppress(type)) {
return false;
}
int64_t timeNow = OTSYS_TIME();
for (Condition* condition : conditions) {
if (condition->getType() != type || condition->getSubId() != subId) {
continue;
}
if (condition->getEndTime() >= timeNow) {
return true;
}
}
return false;
}
bool Creature::isImmune(CombatType_t type) const
{
return hasBitSet(static_cast<uint32_t>(type), getDamageImmunities());
}
bool Creature::isImmune(ConditionType_t type) const
{
return hasBitSet(static_cast<uint32_t>(type), getConditionImmunities());
}
bool Creature::isSuppress(ConditionType_t type) const
{
return hasBitSet(static_cast<uint32_t>(type), getConditionSuppressions());
}
int64_t Creature::getStepDuration(Direction dir) const
{
int64_t stepDuration = getStepDuration();
if ((dir & DIRECTION_DIAGONAL_MASK) != 0) {
stepDuration *= 3;
}
return stepDuration;
}
int64_t Creature::getStepDuration() const
{
if (isRemoved()) {
return 0;
}
uint32_t calculatedStepSpeed;
uint32_t groundSpeed;
int32_t stepSpeed = getStepSpeed();
if (stepSpeed > -Creature::speedB) {
calculatedStepSpeed = floor((Creature::speedA * log((stepSpeed / 2) + Creature::speedB) + Creature::speedC) + 0.5);
if (calculatedStepSpeed <= 0) {
calculatedStepSpeed = 1;
}
} else {
calculatedStepSpeed = 1;
}
Item* ground = tile->getGround();
if (ground) {
groundSpeed = Item::items[ground->getID()].speed;
if (groundSpeed == 0) {
groundSpeed = 150;
}
} else {
groundSpeed = 150;
}
double duration = std::floor(1000 * groundSpeed / calculatedStepSpeed);
int64_t stepDuration = std::ceil(duration / 50) * 50;
const Monster* monster = getMonster();
if (monster && monster->isTargetNearby() && !monster->isFleeing() && !monster->getMaster()) {
stepDuration *= 2;
}
return stepDuration;
}
int64_t Creature::getEventStepTicks(bool onlyDelay) const
{
int64_t ret = getWalkDelay();
if (ret <= 0) {
int64_t stepDuration = getStepDuration();
if (onlyDelay && stepDuration > 0) {
ret = 1;
} else {
ret = stepDuration * lastStepCost;
}
}
return ret;
}
void Creature::getCreatureLight(LightInfo& light) const
{
light = internalLight;
}
void Creature::setNormalCreatureLight()
{
internalLight.level = 0;
internalLight.color = 0;
}
bool Creature::registerCreatureEvent(const std::string& name)
{
CreatureEvent* event = g_creatureEvents->getEventByName(name);
if (!event) {
return false;
}
CreatureEventType_t type = event->getEventType();
if (hasEventRegistered(type)) {
for (CreatureEvent* creatureEvent : eventsList) {
if (creatureEvent == event) {
return false;
}
}
} else {
scriptEventsBitField |= static_cast<uint32_t>(1) << type;
}
eventsList.push_back(event);
return true;
}
bool Creature::unregisterCreatureEvent(const std::string& name)
{
CreatureEvent* event = g_creatureEvents->getEventByName(name);
if (!event) {
return false;
}
CreatureEventType_t type = event->getEventType();
if (!hasEventRegistered(type)) {
return false;
}
bool resetTypeBit = true;
auto it = eventsList.begin(), end = eventsList.end();
while (it != end) {
CreatureEvent* curEvent = *it;
if (curEvent == event) {
it = eventsList.erase(it);
continue;
}
if (curEvent->getEventType() == type) {
resetTypeBit = false;
}
++it;
}
if (resetTypeBit) {
scriptEventsBitField &= ~(static_cast<uint32_t>(1) << type);
}
return true;
}
CreatureEventList Creature::getCreatureEvents(CreatureEventType_t type)
{
CreatureEventList tmpEventList;
if (!hasEventRegistered(type)) {
return tmpEventList;
}
for (CreatureEvent* creatureEvent : eventsList) {
if (creatureEvent->getEventType() == type) {
tmpEventList.push_back(creatureEvent);
}
}
return tmpEventList;
}
bool FrozenPathingConditionCall::isInRange(const Position& startPos, const Position& testPos,
const FindPathParams& fpp) const
{
if (fpp.fullPathSearch) {
if (testPos.x > targetPos.x + fpp.maxTargetDist) {
return false;
}
if (testPos.x < targetPos.x - fpp.maxTargetDist) {
return false;
}
if (testPos.y > targetPos.y + fpp.maxTargetDist) {
return false;
}
if (testPos.y < targetPos.y - fpp.maxTargetDist) {
return false;
}
} else {
int_fast32_t dx = Position::getOffsetX(startPos, targetPos);
int32_t dxMax = (dx >= 0 ? fpp.maxTargetDist : 0);
if (testPos.x > targetPos.x + dxMax) {
return false;
}
int32_t dxMin = (dx <= 0 ? fpp.maxTargetDist : 0);
if (testPos.x < targetPos.x - dxMin) {
return false;
}
int_fast32_t dy = Position::getOffsetY(startPos, targetPos);
int32_t dyMax = (dy >= 0 ? fpp.maxTargetDist : 0);
if (testPos.y > targetPos.y + dyMax) {
return false;
}
int32_t dyMin = (dy <= 0 ? fpp.maxTargetDist : 0);
if (testPos.y < targetPos.y - dyMin) {
return false;
}
}
return true;
}
bool FrozenPathingConditionCall::operator()(const Position& startPos, const Position& testPos,
const FindPathParams& fpp, int32_t& bestMatchDist) const
{
if (!isInRange(startPos, testPos, fpp)) {
return false;
}
if (fpp.clearSight && !g_game.isSightClear(testPos, targetPos, true)) {
return false;
}
int32_t testDist = std::max<int32_t>(Position::getDistanceX(targetPos, testPos), Position::getDistanceY(targetPos, testPos));
if (fpp.maxTargetDist == 1) {
if (testDist < fpp.minTargetDist || testDist > fpp.maxTargetDist) {
return false;
}
return true;
} else if (testDist <= fpp.maxTargetDist) {
if (testDist < fpp.minTargetDist) {
return false;
}
if (testDist == fpp.maxTargetDist) {
bestMatchDist = 0;
return true;
} else if (testDist > bestMatchDist) {
//not quite what we want, but the best so far
bestMatchDist = testDist;
return true;
}
}
return false;
}
bool Creature::isInvisible() const
{
return std::find_if(conditions.begin(), conditions.end(), [] (const Condition* condition) {
return condition->getType() == CONDITION_INVISIBLE;
}) != conditions.end();
}
bool Creature::getPathTo(const Position& targetPos, std::forward_list<Direction>& dirList, const FindPathParams& fpp) const
{
return g_game.map.getPathMatching(*this, dirList, FrozenPathingConditionCall(targetPos), fpp);
}
bool Creature::getPathTo(const Position& targetPos, std::forward_list<Direction>& dirList, int32_t minTargetDist, int32_t maxTargetDist, bool fullPathSearch /*= true*/, bool clearSight /*= true*/, int32_t maxSearchDist /*= 0*/) const
{
FindPathParams fpp;
fpp.fullPathSearch = fullPathSearch;
fpp.maxSearchDist = maxSearchDist;
fpp.clearSight = clearSight;
fpp.minTargetDist = minTargetDist;
fpp.maxTargetDist = maxTargetDist;
return getPathTo(targetPos, dirList, fpp);
}
| 1 | 13,785 | Is there even a need to keep the bool? | otland-forgottenserver | cpp |
@@ -42,7 +42,7 @@ class HotgymRegressionTest(unittest.TestCase):
def testHotgymRegression(self):
experimentDir = os.path.join(
- os.path.dirname(__file__).partition("tests/integration/nupic/opf")[0],
+ os.path.dirname(__file__).partition(os.path.normpath("tests/integration/nupic/opf"))[0],
"examples", "opf", "experiments", "multistep", "hotgym")
resultsDir = os.path.join(experimentDir, "inference") | 1 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Regression test that checks for differences in hotgym results.
If the prediction results differ then the test fails and must be
explicitly updated to match the new results.
"""
import collections
import csv
import os
import pkg_resources
import shutil
import unittest
from nupic.frameworks.opf import experiment_runner
class HotgymRegressionTest(unittest.TestCase):
"""Hotgym regression test to validate that predictions don't change."""
def testHotgymRegression(self):
experimentDir = os.path.join(
os.path.dirname(__file__).partition("tests/integration/nupic/opf")[0],
"examples", "opf", "experiments", "multistep", "hotgym")
resultsDir = os.path.join(experimentDir, "inference")
savedModelsDir = os.path.join(experimentDir, "savedmodels")
try:
_model = experiment_runner.runExperiment([experimentDir])
resultsPath = os.path.join(
resultsDir, "DefaultTask.TemporalMultiStep.predictionLog.csv")
with open(resultsPath) as f:
reader = csv.reader(f)
headers = reader.next()
self.assertEqual(headers[14],
"multiStepBestPredictions:multiStep:errorMetric='aae':"
"steps=1:window=1000:field=consumption")
lastRow = collections.deque(reader, 1)[0]
# Changes that affect prediction results will cause this test to fail.
# If the change is understood and reviewers agree that there has not been a
# regression then this value can be updated to reflect the new result.
self.assertAlmostEqual(float(lastRow[14]), 5.92657292088)
finally:
shutil.rmtree(resultsDir, ignore_errors=True)
shutil.rmtree(savedModelsDir, ignore_errors=True)
if __name__ == "__main__":
unittest.main()
| 1 | 19,975 | wrap to stay under 80 characters | numenta-nupic | py |
@@ -67,6 +67,7 @@ shared_ptr<Layer<Dtype> > GetConvolutionLayer(
#endif
} else {
LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
+ throw; // Avoids missing return warning
}
}
| 1 | // Make sure we include Python.h before any system header
// to avoid _POSIX_C_SOURCE redefinition
#ifdef WITH_PYTHON_LAYER
#include <boost/python.hpp>
#endif
#include <string>
#include "caffe/layer.hpp"
#include "caffe/layer_factory.hpp"
#include "caffe/layers/conv_layer.hpp"
#include "caffe/layers/lrn_layer.hpp"
#include "caffe/layers/pooling_layer.hpp"
#include "caffe/layers/relu_layer.hpp"
#include "caffe/layers/sigmoid_layer.hpp"
#include "caffe/layers/softmax_layer.hpp"
#include "caffe/layers/tanh_layer.hpp"
#include "caffe/proto/caffe.pb.h"
#ifdef USE_CUDNN
#include "caffe/layers/cudnn_conv_layer.hpp"
#include "caffe/layers/cudnn_lcn_layer.hpp"
#include "caffe/layers/cudnn_lrn_layer.hpp"
#include "caffe/layers/cudnn_pooling_layer.hpp"
#include "caffe/layers/cudnn_relu_layer.hpp"
#include "caffe/layers/cudnn_sigmoid_layer.hpp"
#include "caffe/layers/cudnn_softmax_layer.hpp"
#include "caffe/layers/cudnn_tanh_layer.hpp"
#endif
#ifdef WITH_PYTHON_LAYER
#include "caffe/layers/python_layer.hpp"
#endif
namespace caffe {
// Get convolution layer according to engine.
template <typename Dtype>
shared_ptr<Layer<Dtype> > GetConvolutionLayer(
const LayerParameter& param) {
ConvolutionParameter conv_param = param.convolution_param();
ConvolutionParameter_Engine engine = conv_param.engine();
#ifdef USE_CUDNN
bool use_dilation = false;
for (int i = 0; i < conv_param.dilation_size(); ++i) {
if (conv_param.dilation(i) > 1) {
use_dilation = true;
}
}
#endif
if (engine == ConvolutionParameter_Engine_DEFAULT) {
engine = ConvolutionParameter_Engine_CAFFE;
#ifdef USE_CUDNN
if (!use_dilation) {
engine = ConvolutionParameter_Engine_CUDNN;
}
#endif
}
if (engine == ConvolutionParameter_Engine_CAFFE) {
return shared_ptr<Layer<Dtype> >(new ConvolutionLayer<Dtype>(param));
#ifdef USE_CUDNN
} else if (engine == ConvolutionParameter_Engine_CUDNN) {
if (use_dilation) {
LOG(FATAL) << "CuDNN doesn't support the dilated convolution at Layer "
<< param.name();
}
return shared_ptr<Layer<Dtype> >(new CuDNNConvolutionLayer<Dtype>(param));
#endif
} else {
LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
}
}
REGISTER_LAYER_CREATOR(Convolution, GetConvolutionLayer);
// Get pooling layer according to engine.
template <typename Dtype>
shared_ptr<Layer<Dtype> > GetPoolingLayer(const LayerParameter& param) {
PoolingParameter_Engine engine = param.pooling_param().engine();
if (engine == PoolingParameter_Engine_DEFAULT) {
engine = PoolingParameter_Engine_CAFFE;
#ifdef USE_CUDNN
engine = PoolingParameter_Engine_CUDNN;
#endif
}
if (engine == PoolingParameter_Engine_CAFFE) {
return shared_ptr<Layer<Dtype> >(new PoolingLayer<Dtype>(param));
#ifdef USE_CUDNN
} else if (engine == PoolingParameter_Engine_CUDNN) {
if (param.top_size() > 1) {
LOG(INFO) << "cuDNN does not support multiple tops. "
<< "Using Caffe's own pooling layer.";
return shared_ptr<Layer<Dtype> >(new PoolingLayer<Dtype>(param));
}
// CuDNN assumes layers are not being modified in place, thus
// breaking our index tracking for updates in some cases in Caffe.
// Until there is a workaround in Caffe (index management) or
// cuDNN, use Caffe layer to max pooling, or don't use in place
// layers after max pooling layers
if (param.pooling_param().pool() == PoolingParameter_PoolMethod_MAX) {
return shared_ptr<Layer<Dtype> >(new PoolingLayer<Dtype>(param));
} else {
return shared_ptr<Layer<Dtype> >(new CuDNNPoolingLayer<Dtype>(param));
}
#endif
} else {
LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
}
}
REGISTER_LAYER_CREATOR(Pooling, GetPoolingLayer);
// Get LRN layer according to engine
template <typename Dtype>
shared_ptr<Layer<Dtype> > GetLRNLayer(const LayerParameter& param) {
LRNParameter_Engine engine = param.lrn_param().engine();
if (engine == LRNParameter_Engine_DEFAULT) {
#ifdef USE_CUDNN
engine = LRNParameter_Engine_CUDNN;
#else
engine = LRNParameter_Engine_CAFFE;
#endif
}
if (engine == LRNParameter_Engine_CAFFE) {
return shared_ptr<Layer<Dtype> >(new LRNLayer<Dtype>(param));
#ifdef USE_CUDNN
} else if (engine == LRNParameter_Engine_CUDNN) {
LRNParameter lrn_param = param.lrn_param();
    if (lrn_param.norm_region() == LRNParameter_NormRegion_WITHIN_CHANNEL) {
return shared_ptr<Layer<Dtype> >(new CuDNNLCNLayer<Dtype>(param));
} else {
// local size is too big to be handled through cuDNN
if (param.lrn_param().local_size() > CUDNN_LRN_MAX_N) {
return shared_ptr<Layer<Dtype> >(new LRNLayer<Dtype>(param));
} else {
return shared_ptr<Layer<Dtype> >(new CuDNNLRNLayer<Dtype>(param));
}
}
#endif
} else {
LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
}
}
REGISTER_LAYER_CREATOR(LRN, GetLRNLayer);
// Get relu layer according to engine.
template <typename Dtype>
shared_ptr<Layer<Dtype> > GetReLULayer(const LayerParameter& param) {
ReLUParameter_Engine engine = param.relu_param().engine();
if (engine == ReLUParameter_Engine_DEFAULT) {
engine = ReLUParameter_Engine_CAFFE;
#ifdef USE_CUDNN
engine = ReLUParameter_Engine_CUDNN;
#endif
}
if (engine == ReLUParameter_Engine_CAFFE) {
return shared_ptr<Layer<Dtype> >(new ReLULayer<Dtype>(param));
#ifdef USE_CUDNN
} else if (engine == ReLUParameter_Engine_CUDNN) {
return shared_ptr<Layer<Dtype> >(new CuDNNReLULayer<Dtype>(param));
#endif
} else {
LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
}
}
REGISTER_LAYER_CREATOR(ReLU, GetReLULayer);
// Get sigmoid layer according to engine.
template <typename Dtype>
shared_ptr<Layer<Dtype> > GetSigmoidLayer(const LayerParameter& param) {
SigmoidParameter_Engine engine = param.sigmoid_param().engine();
if (engine == SigmoidParameter_Engine_DEFAULT) {
engine = SigmoidParameter_Engine_CAFFE;
#ifdef USE_CUDNN
engine = SigmoidParameter_Engine_CUDNN;
#endif
}
if (engine == SigmoidParameter_Engine_CAFFE) {
return shared_ptr<Layer<Dtype> >(new SigmoidLayer<Dtype>(param));
#ifdef USE_CUDNN
} else if (engine == SigmoidParameter_Engine_CUDNN) {
return shared_ptr<Layer<Dtype> >(new CuDNNSigmoidLayer<Dtype>(param));
#endif
} else {
LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
}
}
REGISTER_LAYER_CREATOR(Sigmoid, GetSigmoidLayer);
// Get softmax layer according to engine.
template <typename Dtype>
shared_ptr<Layer<Dtype> > GetSoftmaxLayer(const LayerParameter& param) {
SoftmaxParameter_Engine engine = param.softmax_param().engine();
if (engine == SoftmaxParameter_Engine_DEFAULT) {
engine = SoftmaxParameter_Engine_CAFFE;
#ifdef USE_CUDNN
engine = SoftmaxParameter_Engine_CUDNN;
#endif
}
if (engine == SoftmaxParameter_Engine_CAFFE) {
return shared_ptr<Layer<Dtype> >(new SoftmaxLayer<Dtype>(param));
#ifdef USE_CUDNN
} else if (engine == SoftmaxParameter_Engine_CUDNN) {
return shared_ptr<Layer<Dtype> >(new CuDNNSoftmaxLayer<Dtype>(param));
#endif
} else {
LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
}
}
REGISTER_LAYER_CREATOR(Softmax, GetSoftmaxLayer);
// Get tanh layer according to engine.
template <typename Dtype>
shared_ptr<Layer<Dtype> > GetTanHLayer(const LayerParameter& param) {
TanHParameter_Engine engine = param.tanh_param().engine();
if (engine == TanHParameter_Engine_DEFAULT) {
engine = TanHParameter_Engine_CAFFE;
#ifdef USE_CUDNN
engine = TanHParameter_Engine_CUDNN;
#endif
}
if (engine == TanHParameter_Engine_CAFFE) {
return shared_ptr<Layer<Dtype> >(new TanHLayer<Dtype>(param));
#ifdef USE_CUDNN
} else if (engine == TanHParameter_Engine_CUDNN) {
return shared_ptr<Layer<Dtype> >(new CuDNNTanHLayer<Dtype>(param));
#endif
} else {
LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
}
}
REGISTER_LAYER_CREATOR(TanH, GetTanHLayer);
#ifdef WITH_PYTHON_LAYER
template <typename Dtype>
shared_ptr<Layer<Dtype> > GetPythonLayer(const LayerParameter& param) {
Py_Initialize();
try {
bp::object module = bp::import(param.python_param().module().c_str());
bp::object layer = module.attr(param.python_param().layer().c_str())(param);
return bp::extract<shared_ptr<PythonLayer<Dtype> > >(layer)();
} catch (bp::error_already_set) {
PyErr_Print();
throw;
}
}
REGISTER_LAYER_CREATOR(Python, GetPythonLayer);
#endif
// Layers that use their constructor as their default creator should be
// registered in their corresponding cpp files. Do not register them here.
} // namespace caffe
| 1 | 37,671 | `return NULL` is more clear than `throw` at least in my reading, and consistent with #3362. | BVLC-caffe | cpp |
@@ -87,8 +87,6 @@ class LocustIOExecutor(ScenarioExecutor, WidgetProvider, FileLister, HavingInsta
wrapper = os.path.join(get_full_path(__file__, step_up=2), "resources", "locustio-taurus-wrapper.py")
- self.env.add_path({"PYTHONPATH": self.engine.artifacts_dir})
- self.env.add_path({"PYTHONPATH": os.getcwd()})
self.env.set({"LOCUST_DURATION": dehumanize_time(load.duration)})
self.log_file = self.engine.create_artifact("locust", ".log") | 1 | """
Module holds all stuff regarding Locust tool usage
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import math
import os
import sys
import time
from collections import OrderedDict, Counter
from subprocess import STDOUT, CalledProcessError
from bzt import ToolError, TaurusConfigError
from bzt.engine import ScenarioExecutor, FileLister, Scenario, HavingInstallableTools, SelfDiagnosable
from bzt.modules.aggregator import ConsolidatingAggregator, ResultsProvider, DataPoint, KPISet
from bzt.modules.console import WidgetProvider, ExecutorWidget
from bzt.modules.jmeter import JTLReader
from bzt.requests_model import HTTPRequest
from bzt.six import iteritems, communicate
from bzt.utils import get_full_path, ensure_is_dict, PythonGenerator, FileReader, shell_exec
from bzt.utils import shutdown_process, RequiredTool, dehumanize_time
class LocustIOExecutor(ScenarioExecutor, WidgetProvider, FileLister, HavingInstallableTools, SelfDiagnosable):
def __init__(self):
super(LocustIOExecutor, self).__init__()
self.process = None
self.__out = None
self.is_master = False
self.expected_slaves = 0
self.scenario = None
self.script = None
self.log_file = None
def prepare(self):
self.install_required_tools()
self.scenario = self.get_scenario()
self.__setup_script()
self.engine.existing_artifact(self.script)
self.is_master = self.execution.get("master", self.is_master)
if self.is_master:
count_error = TaurusConfigError("Slaves count required when starting in master mode")
self.expected_slaves = int(self.execution.get("slaves", count_error))
slaves_ldjson = self.engine.create_artifact("locust-slaves", ".ldjson")
self.reader = SlavesReader(slaves_ldjson, self.expected_slaves, self.log)
self.env.set({"SLAVES_LDJSON": slaves_ldjson})
else:
kpi_jtl = self.engine.create_artifact("kpi", ".jtl")
self.reader = JTLReader(kpi_jtl, self.log)
self.env.set({"JTL": kpi_jtl})
if isinstance(self.engine.aggregator, ConsolidatingAggregator):
self.engine.aggregator.add_underling(self.reader)
def install_required_tools(self):
tool = LocustIO(self.log)
if not tool.check_if_installed():
tool.install()
def startup(self):
self.start_time = time.time()
load = self.get_load()
concurrency = load.concurrency or 1
if self.is_master:
concurrency = math.ceil(concurrency / float(self.expected_slaves))
if load.ramp_up:
hatch = concurrency / float(load.ramp_up)
else:
hatch = concurrency
wrapper = os.path.join(get_full_path(__file__, step_up=2), "resources", "locustio-taurus-wrapper.py")
self.env.add_path({"PYTHONPATH": self.engine.artifacts_dir})
self.env.add_path({"PYTHONPATH": os.getcwd()})
self.env.set({"LOCUST_DURATION": dehumanize_time(load.duration)})
self.log_file = self.engine.create_artifact("locust", ".log")
args = [sys.executable, wrapper, '-f', self.script]
args += ['--logfile=%s' % self.log_file]
args += ["--no-web", "--only-summary", ]
args += ["--clients=%d" % concurrency, "--hatch-rate=%f" % hatch]
if load.iterations:
num_requests = load.iterations * concurrency
args.append("--num-request=%d" % num_requests)
self.env.set({"LOCUST_NUMREQUESTS": num_requests})
if self.is_master:
args.extend(["--master", '--expect-slaves=%s' % self.expected_slaves])
host = self.get_scenario().get("default-address")
if host:
args.append('--host=%s' % host)
self.__out = open(self.engine.create_artifact("locust", ".out"), 'w')
self.process = self.execute(args, stderr=STDOUT, stdout=self.__out)
def get_widget(self):
"""
Add progress widget to console screen sidebar
:rtype: ExecutorWidget
"""
if not self.widget:
label = "%s" % self
self.widget = ExecutorWidget(self, "Locust.io: " + label.split('/')[1])
return self.widget
def check(self):
# TODO: when we're in master mode and get no results and exceeded duration - shut down then
retcode = self.process.poll()
if retcode is not None:
if retcode != 0:
self.log.warning("Locust exited with non-zero code: %s", retcode)
return True
return False
def resource_files(self):
script = self.get_script_path()
if script:
return [script]
else:
return []
def __tests_from_requests(self):
filename = self.engine.create_artifact("generated_locust", ".py")
locust_test = LocustIOScriptBuilder(self.scenario, self.log)
locust_test.build_source_code()
locust_test.save(filename)
return filename
def __setup_script(self):
self.script = self.get_script_path()
if not self.script:
if "requests" in self.scenario:
self.script = self.__tests_from_requests()
else:
msg = "There must be a script file or requests for its generation "
msg += "to run Locust (%s)" % self.execution.get('scenario')
raise TaurusConfigError(msg)
def shutdown(self):
try:
shutdown_process(self.process, self.log)
finally:
if self.__out:
self.__out.close()
def has_results(self):
master_results = self.is_master and self.reader.cumulative
local_results = not self.is_master and self.reader and self.reader.buffer
if master_results or local_results:
return True
else:
return False
def get_error_diagnostics(self):
diagnostics = []
if self.__out is not None:
with open(self.__out.name) as fds:
contents = fds.read().strip()
if contents.strip():
diagnostics.append("Locust STDOUT:\n" + contents)
if self.log_file is not None and os.path.exists(self.log_file):
with open(self.log_file) as fds:
contents = fds.read().strip()
if contents.strip():
diagnostics.append("Locust log:\n" + contents)
return diagnostics
class LocustIO(RequiredTool):
def __init__(self, parent_logger):
super(LocustIO, self).__init__("LocustIO", "locust")
self.log = parent_logger.getChild(self.__class__.__name__)
def check_if_installed(self):
self.log.debug('Checking LocustIO: %s' % self.tool_path)
try:
stdout, stderr = communicate(shell_exec([self.tool_path, '--version']))
self.log.debug("Locustio check stdout/stderr: %s, %s", stdout, stderr)
except (CalledProcessError, OSError, AttributeError):
return False
return True
def install(self):
msg = "Unable to locate locustio package. Please install it like this: pip install locustio"
raise ToolError(msg)
class SlavesReader(ResultsProvider):
def __init__(self, filename, num_slaves, parent_logger):
"""
:type filename: str
:type num_slaves: int
:type parent_logger: logging.Logger
"""
super(SlavesReader, self).__init__()
self.log = parent_logger.getChild(self.__class__.__name__)
self.join_buffer = {}
self.num_slaves = num_slaves
self.file = FileReader(filename=filename, parent_logger=self.log)
self.read_buffer = ""
def _calculate_datapoints(self, final_pass=False):
read = self.file.get_bytes(size=1024 * 1024, last_pass=final_pass)
if not read or not read.strip():
return
self.read_buffer += read
while "\n" in self.read_buffer:
_line = self.read_buffer[:self.read_buffer.index("\n") + 1]
self.read_buffer = self.read_buffer[len(_line):]
self.fill_join_buffer(json.loads(_line))
max_full_ts = self.get_max_full_ts()
if max_full_ts is not None:
for point in self.merge_datapoints(max_full_ts):
yield point
def merge_datapoints(self, max_full_ts):
for key in sorted(self.join_buffer.keys(), key=int):
if int(key) <= max_full_ts:
sec_data = self.join_buffer.pop(key)
self.log.debug("Processing complete second: %s", key)
point = DataPoint(int(key))
for sid, item in iteritems(sec_data):
point.merge_point(self.point_from_locust(key, sid, item))
point.recalculate()
yield point
def get_max_full_ts(self):
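        # A second is "full" once every expected slave has reported stats for it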
max_full_ts = None
for key in sorted(self.join_buffer.keys(), key=int):
            if len(self.join_buffer[key]) >= self.num_slaves:
max_full_ts = int(key)
return max_full_ts
def fill_join_buffer(self, data):
self.log.debug("Got slave data: %s", data)
for stats_item in data['stats']:
for timestamp in stats_item['num_reqs_per_sec'].keys():
if timestamp not in self.join_buffer:
self.join_buffer[timestamp] = {}
self.join_buffer[timestamp][data['client_id']] = data
@staticmethod
def point_from_locust(timestamp, sid, data):
"""
:type timestamp: str
:type sid: str
:type data: dict
:rtype: DataPoint
"""
point = DataPoint(int(timestamp))
point[DataPoint.SOURCE_ID] = sid
overall = KPISet()
for item in data['stats']:
if timestamp not in item['num_reqs_per_sec']:
continue
kpiset = KPISet()
kpiset[KPISet.SAMPLE_COUNT] = item['num_reqs_per_sec'][timestamp]
kpiset[KPISet.CONCURRENCY] = data['user_count']
kpiset[KPISet.BYTE_COUNT] = item['total_content_length']
if item['num_requests']:
avg_rt = (item['total_response_time'] / 1000.0) / item['num_requests']
kpiset.sum_rt = item['num_reqs_per_sec'][timestamp] * avg_rt
for err in data['errors'].values():
if err['name'] == item['name']:
new_err = KPISet.error_item_skel(err['error'], None, err['occurences'], KPISet.ERRTYPE_ERROR,
Counter(), None)
KPISet.inc_list(kpiset[KPISet.ERRORS], ("msg", err['error']), new_err)
kpiset[KPISet.FAILURES] += err['occurences']
kpiset[KPISet.SUCCESSES] = kpiset[KPISet.SAMPLE_COUNT] - kpiset[KPISet.FAILURES]
point[DataPoint.CURRENT][item['name']] = kpiset
overall.merge_kpis(kpiset)
point[DataPoint.CURRENT][''] = overall
point.recalculate()
return point
class LocustIOScriptBuilder(PythonGenerator):
IMPORTS = """
from gevent import sleep
from re import findall, compile
from locust import HttpLocust, TaskSet, task
"""
def build_source_code(self):
self.log.debug("Generating Python script for LocustIO")
header_comment = self.gen_comment("This script was generated by Taurus", indent=0)
scenario_class = self.gen_class_definition("UserBehaviour", ["TaskSet"])
swarm_class = self.gen_class_definition("GeneratedSwarm", ["HttpLocust"])
imports = self.add_imports()
self.root.append(header_comment)
self.root.append(imports)
self.root.append(scenario_class)
self.root.append(swarm_class)
swarm_class.append(self.gen_statement('task_set = UserBehaviour', indent=self.INDENT_STEP))
default_address = self.scenario.get("default-address", "")
swarm_class.append(self.gen_statement('host = "%s"' % default_address, indent=self.INDENT_STEP))
swarm_class.append(self.gen_statement('min_wait = %s' % 0, indent=self.INDENT_STEP))
swarm_class.append(self.gen_statement('max_wait = %s' % 0, indent=self.INDENT_STEP))
swarm_class.append(self.gen_new_line())
scenario_class.append(self.gen_decorator_statement('task(1)'))
scenario_class.append(self.__gen_task())
scenario_class.append(self.gen_new_line())
def __gen_task(self):
task = self.gen_method_definition("generated_task", ['self'])
think_time = dehumanize_time(self.scenario.get("think-time"))
global_headers = self.scenario.get_headers()
if not self.scenario.get("keepalive", True):
global_headers['Connection'] = 'close'
for req in self.scenario.get_requests():
if not isinstance(req, HTTPRequest):
msg = "Locust script generator doesn't support '%s' blocks, skipping"
self.log.warning(msg, req.NAME)
continue
method = req.method.lower()
            if method not in ('get', 'delete', 'head', 'options', 'patch', 'put', 'post'):
raise TaurusConfigError("Wrong Locust request type: %s" % method)
timeout = req.priority_option('timeout', default='30s')
self.__gen_check(method, req, task, dehumanize_time(timeout), global_headers)
if req.think_time:
task.append(self.gen_statement("sleep(%s)" % dehumanize_time(req.think_time)))
else:
if think_time:
task.append(self.gen_statement("sleep(%s)" % think_time))
task.append(self.gen_new_line())
return task
@staticmethod
def __get_params_line(req, timeout, headers):
param_dict = {'url': '"%s"' % req.url, 'timeout': timeout}
if req.body:
if isinstance(req.body, dict):
param_dict['data'] = json.dumps(req.body)
else:
param_dict['data'] = '"%s"' % req.body
if headers:
param_dict['headers'] = json.dumps(headers)
keys = (list(param_dict.keys()))
keys.sort()
return ', '.join(['%s=%s' % (key, param_dict[key]) for key in keys])
def __gen_check(self, method, req, task, timeout, global_headers):
assertions = req.config.get("assert", [])
first_assert = True
if assertions:
statement = 'with self.client.%s(%s, catch_response=True) as response:'
else:
statement = "self.client.%s(%s)"
headers = OrderedDict()
if global_headers:
sorted_headers = OrderedDict(sorted(global_headers.items(), key=lambda t: t[0]))
headers.update(sorted_headers)
if req.headers:
headers.update(req.headers)
task.append(self.gen_statement(statement % (method, self.__get_params_line(req, timeout, headers))))
for idx, assertion in enumerate(assertions):
assertion = ensure_is_dict(assertions, idx, "contains")
if not isinstance(assertion['contains'], list):
assertion['contains'] = [assertion['contains']]
self.__gen_assertion(task, assertion, first_assert)
first_assert = False
if assertions:
task.append(self.gen_statement('else:', indent=12))
task.append(self.gen_statement('response.success()', indent=16))
def __gen_assertion(self, task, assertion, is_first):
subject = assertion.get("subject", Scenario.FIELD_BODY)
values = [str(_assert) for _assert in assertion['contains']]
if subject == 'body':
content = 'response.content'
elif subject == 'http-code':
content = 'str(response.status_code)'
else:
raise TaurusConfigError('Wrong subject for Locust assertion: %s' % subject)
if assertion.get('not', False):
attr_not = ''
func_name = 'any'
else:
attr_not = ' not'
func_name = 'all'
if assertion.get("regexp", True):
expression = 'findall(compile(str(val)), %(content)s)' % {'content': content}
else:
expression = 'str(val) in %s' % content
statement = 'if%(not)s %(func)s(%(expression)s for val in %(values)s):'
statement = statement % {'not': attr_not, 'func': func_name, 'expression': expression, 'values': values}
if not is_first:
statement = 'el' + statement
task.append(self.gen_statement(statement, indent=12))
statement = 'response.failure("%(values)s%(not)s found in %(subject)s")'
statement = statement % {'values': values, 'not': attr_not, 'subject': subject}
task.append(self.gen_statement(statement, indent=16))
| 1 | 15,067 | Both of these are important. Without it, locust won't find my libraries in the cloud. | Blazemeter-taurus | py |
@@ -87,7 +87,7 @@ func (o *listSvcOpts) Execute() error {
return fmt.Errorf("get application: %w", err)
}
- svcs, err := o.store.ListServices(o.appName)
+ svcs, err := o.store.ListWorkloads(o.appName)
if err != nil {
return err
} | 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package cli
import (
"encoding/json"
"fmt"
"io"
"math"
"os"
"strings"
"text/tabwriter"
"github.com/aws/copilot-cli/internal/pkg/config"
"github.com/aws/copilot-cli/internal/pkg/term/prompt"
"github.com/aws/copilot-cli/internal/pkg/term/selector"
"github.com/aws/copilot-cli/internal/pkg/workspace"
"github.com/spf13/cobra"
)
const (
svcListAppNamePrompt = "Which application's services would you like to list?"
svcListAppNameHelpPrompt = "An application groups all of your services together."
// Display settings.
minCellWidth = 20 // minimum number of characters in a table's cell.
tabWidth = 4 // number of characters in between columns.
cellPaddingWidth = 2 // number of padding characters added by default to a cell.
paddingChar = ' ' // character in between columns.
noAdditionalFormatting = 0
)
type listSvcVars struct {
appName string
shouldOutputJSON bool
shouldShowLocalServices bool
}
type listSvcOpts struct {
listSvcVars
// Interfaces to dependencies.
store store
ws wsSvcReader
w io.Writer
sel appSelector
}
func newListSvcOpts(vars listSvcVars) (*listSvcOpts, error) {
store, err := config.NewStore()
if err != nil {
return nil, err
}
ws, err := workspace.New()
if err != nil {
return nil, err
}
return &listSvcOpts{
listSvcVars: vars,
store: store,
ws: ws,
w: os.Stdout,
sel: selector.NewSelect(prompt.New(), store),
}, nil
}
// Ask asks for fields that are required but not passed in.
func (o *listSvcOpts) Ask() error {
if o.appName != "" {
return nil
}
name, err := o.sel.Application(svcListAppNamePrompt, svcListAppNameHelpPrompt)
if err != nil {
return fmt.Errorf("select application name: %w", err)
}
o.appName = name
return nil
}
// Execute lists the services through the prompt.
func (o *listSvcOpts) Execute() error {
// Ensure the application actually exists before we try to list its services.
if _, err := o.store.GetApplication(o.appName); err != nil {
return fmt.Errorf("get application: %w", err)
}
svcs, err := o.store.ListServices(o.appName)
if err != nil {
return err
}
if o.shouldShowLocalServices {
localNames, err := o.ws.ServiceNames()
if err != nil {
return fmt.Errorf("get local services names: %w", err)
}
svcs = filterSvcsByName(svcs, localNames)
}
var out string
if o.shouldOutputJSON {
data, err := o.jsonOutput(svcs)
if err != nil {
return err
}
out = data
fmt.Fprint(o.w, out)
} else {
o.humanOutput(svcs)
}
return nil
}
func (o *listSvcOpts) humanOutput(svcs []*config.Service) {
writer := tabwriter.NewWriter(o.w, minCellWidth, tabWidth, cellPaddingWidth, paddingChar, noAdditionalFormatting)
fmt.Fprintf(writer, "%s\t%s\n", "Name", "Type")
nameLengthMax := len("Name")
typeLengthMax := len("Type")
for _, svc := range svcs {
nameLengthMax = int(math.Max(float64(nameLengthMax), float64(len(svc.Name))))
typeLengthMax = int(math.Max(float64(typeLengthMax), float64(len(svc.Type))))
}
fmt.Fprintf(writer, "%s\t%s\n", strings.Repeat("-", nameLengthMax), strings.Repeat("-", typeLengthMax))
for _, svc := range svcs {
fmt.Fprintf(writer, "%s\t%s\n", svc.Name, svc.Type)
}
writer.Flush()
}
func (o *listSvcOpts) jsonOutput(svcs []*config.Service) (string, error) {
type out struct {
Services []*config.Service `json:"services"`
}
b, err := json.Marshal(out{Services: svcs})
if err != nil {
return "", fmt.Errorf("marshal services: %w", err)
}
return fmt.Sprintf("%s\n", b), nil
}
func filterSvcsByName(svcs []*config.Service, wantedNames []string) []*config.Service {
isWanted := make(map[string]bool)
for _, name := range wantedNames {
isWanted[name] = true
}
var filtered []*config.Service
for _, svc := range svcs {
if _, ok := isWanted[svc.Name]; !ok {
continue
}
filtered = append(filtered, svc)
}
return filtered
}
// buildSvcListCmd builds the command for listing services in an application.
func buildSvcListCmd() *cobra.Command {
vars := listSvcVars{}
cmd := &cobra.Command{
Use: "ls",
Short: "Lists all the services in an application.",
Example: `
Lists all the services for the "myapp" application.
/code $ copilot svc ls --app myapp`,
RunE: runCmdE(func(cmd *cobra.Command, args []string) error {
opts, err := newListSvcOpts(vars)
if err != nil {
return err
}
if err := opts.Ask(); err != nil {
return err
}
return opts.Execute()
}),
}
cmd.Flags().StringVarP(&vars.appName, appFlag, appFlagShort, tryReadingAppName(), appFlagDescription)
cmd.Flags().BoolVar(&vars.shouldOutputJSON, jsonFlag, false, jsonFlagDescription)
cmd.Flags().BoolVar(&vars.shouldShowLocalServices, localFlag, false, localSvcFlagDescription)
return cmd
}
| 1 | 15,127 | This should remain as `ListServices` | aws-copilot-cli | go |
@@ -0,0 +1,2 @@
+package v2
+ | 1 | 1 | 20,490 | If no tests, remove this file. | kubeedge-kubeedge | go |
|
@@ -116,7 +116,7 @@ cp bin/protoc /usr/local/bin/protoc
if SHOULD_INVENTORY_GROUPS:
GROUPS_DOMAIN_SUPER_ADMIN_EMAIL = context.properties[
'groups-domain-super-admin-email']
- GROUPS_SERVICE_ACCOUNT_KEY_FILE = context.properties[
+ GSUITE_SERVICE_ACCOUNT_KEY_FILE = context.properties[
'groups-service-account-key-file']
# TODO: remove this in a future version | 1 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a GCE instance template for Forseti Security."""
def GenerateConfig(context):
"""Generate configuration."""
USE_BRANCH = context.properties.get('branch-name')
ORGANIZATION_ID = context.properties['organization-id']
if USE_BRANCH:
DOWNLOAD_FORSETI = """
git clone {}.git --branch {} --single-branch forseti-security
cd forseti-security
""".format(
context.properties['src-path'],
context.properties['branch-name'])
else:
DOWNLOAD_FORSETI = """
wget -qO- {}/archive/v{}.tar.gz | tar xvz
cd forseti-security-{}
""".format(
context.properties['src-path'],
context.properties['release-version'],
context.properties['release-version'])
CLOUDSQL_CONN_STRING = '{}:{}:{}'.format(
context.env['project'],
'$(ref.cloudsql-instance.region)',
'$(ref.cloudsql-instance.name)')
SCANNER_BUCKET = context.properties['scanner-bucket']
DATABASE_NAME = context.properties['database-name']
SHOULD_INVENTORY_GROUPS = bool(context.properties['inventory-groups'])
SERVICE_ACCOUNT_SCOPES = context.properties['service-account-scopes']
inventory_command = (
'/usr/local/bin/forseti_inventory --db_name {} '
.format(
DATABASE_NAME,
)
)
scanner_command = '/usr/local/bin/forseti_scanner --rules {} --output_path {} --db_name {} '.format(
'gs://{}/rules/rules.yaml'.format(SCANNER_BUCKET),
'gs://{}/scanner_violations'.format(SCANNER_BUCKET),
DATABASE_NAME,
)
if USE_BRANCH:
inventory_command = (inventory_command + ' --config_path {} '
.format('$USER_HOME/config/inventory_conf.yaml')
)
# TODO: temporary hack; remove --engine_name flag when we run scanner
# totally in batch with the other rule engines
scanner_command = (scanner_command + ' --engine_name {} '
.format('IamRulesEngine')
)
# TODO: remove this little hack when we update the release...
NEW_FORSETI_CONFIG = """
# Copy the default inventory config to a more permanent directory
mkdir -p $USER_HOME/config
cp samples/inventory/inventory_conf.yaml $USER_HOME/config/inventory_conf.yaml
# Build protos separately.
python build_protos.py --clean
"""
OLD_BUILD_PROTOS = ''
else:
inventory_command = (
inventory_command + ' --organization_id {} '
.format(ORGANIZATION_ID)
)
scanner_command = (
scanner_command + ' --organization_id {} '
.format(ORGANIZATION_ID)
)
NEW_FORSETI_CONFIG = ''
OLD_BUILD_PROTOS = """
# Install protoc
wget https://github.com/google/protobuf/releases/download/v3.3.0/protoc-3.3.0-linux-x86_64.zip
unzip protoc-3.3.0-linux-x86_64.zip
cp bin/protoc /usr/local/bin/protoc
"""
# Extend the commands, based on whether email is required.
SENDGRID_API_KEY = context.properties.get('sendgrid-api-key')
EMAIL_SENDER = context.properties.get('email-sender')
EMAIL_RECIPIENT = context.properties.get('email-recipient')
if EMAIL_RECIPIENT is not None:
email_flags = '--sendgrid_api_key {} --email_sender {} --email_recipient {}'.format(
SENDGRID_API_KEY,
EMAIL_SENDER,
EMAIL_RECIPIENT,
)
inventory_command = inventory_command + email_flags
scanner_command = scanner_command + email_flags
# Extend the commands, based on whether inventory-groups is set.
if SHOULD_INVENTORY_GROUPS:
GROUPS_DOMAIN_SUPER_ADMIN_EMAIL = context.properties[
'groups-domain-super-admin-email']
GROUPS_SERVICE_ACCOUNT_KEY_FILE = context.properties[
'groups-service-account-key-file']
# TODO: remove this in a future version
OLD_SHOULD_INV_GROUPS_FLAG = '--inventory_groups'
if USE_BRANCH:
OLD_SHOULD_INV_GROUPS_FLAG = ''
inventory_groups_flags = (
' {} '
'--domain_super_admin_email {} '
'--groups_service_account_key_file {} '
.format(
OLD_SHOULD_INV_GROUPS_FLAG,
GROUPS_DOMAIN_SUPER_ADMIN_EMAIL,
GROUPS_SERVICE_ACCOUNT_KEY_FILE,
)
)
inventory_command = inventory_command + inventory_groups_flags
resources = []
resources.append({
'name': '{}-vm'.format(context.env['deployment']),
'type': 'compute.v1.instance',
'properties': {
'zone': context.properties['zone'],
'machineType': (
'https://www.googleapis.com/compute/v1/projects/{}'
'/zones/{}/machineTypes/{}'.format(
context.env['project'], context.properties['zone'],
context.properties['instance-type'])),
'disks': [{
'deviceName': 'boot',
'type': 'PERSISTENT',
'boot': True,
'autoDelete': True,
'initializeParams': {
'sourceImage': (
'https://www.googleapis.com/compute/v1'
'/projects/{}/global/images/family/{}'.format(
context.properties['image-project'],
context.properties['image-family']
)
)
}
}],
'networkInterfaces': [{
'network': (
'https://www.googleapis.com/compute/v1/'
'projects/{}/global/networks/default'.format(
context.env['project'])),
'accessConfigs': [{
'name': 'External NAT',
'type': 'ONE_TO_ONE_NAT'
}]
}],
'serviceAccounts': [{
'email': context.properties['service-account'],
'scopes': SERVICE_ACCOUNT_SCOPES,
}],
'metadata': {
'items': [{
'key': 'startup-script',
'value': """#!/bin/bash
exec > /tmp/deployment.log
exec 2>&1
# Ubuntu update
sudo apt-get update -y
sudo apt-get upgrade -y
# Forseti setup
sudo apt-get install -y git unzip
# Forseti dependencies
sudo apt-get install -y libmysqlclient-dev python-pip python-dev
USER_HOME=/home/ubuntu
# Install fluentd if necessary
FLUENTD=$(ls /usr/sbin/google-fluentd)
if [ -z "$FLUENTD" ]; then
cd $USER_HOME
curl -sSO https://dl.google.com/cloudagents/install-logging-agent.sh
bash install-logging-agent.sh
fi
# Check whether Cloud SQL proxy is installed
CLOUD_SQL_PROXY=$(ls $USER_HOME/cloud_sql_proxy)
if [ -z "$CLOUD_SQL_PROXY" ]; then
cd $USER_HOME
wget https://dl.google.com/cloudsql/cloud_sql_proxy.{}
mv cloud_sql_proxy.{} cloud_sql_proxy
chmod +x cloud_sql_proxy
fi
$USER_HOME/cloud_sql_proxy -instances={}=tcp:{} &
# Check if rules.yaml exists
RULES_FILE=$(gsutil ls gs://{}/rules/rules.yaml)
if [ $? -eq 1 ]; then
cd $USER_HOME
read -d '' RULES_YAML << EOF
rules:
- name: sample whitelist
mode: whitelist
resource:
- type: organization
applies_to: self_and_children
resource_ids:
- {}
inherit_from_parents: true
bindings:
- role: roles/*
members:
- serviceAccount:*@*.gserviceaccount.com
EOF
echo "$RULES_YAML" > $USER_HOME/rules.yaml
gsutil cp $USER_HOME/rules.yaml gs://{}/rules/rules.yaml
fi
# Install Forseti Security
cd $USER_HOME
rm -rf forseti-*
rm -rf run_forseti.sh
pip install --upgrade pip
pip install --upgrade setuptools
pip install grpcio grpcio-tools
{}
# Download Forseti src; see DOWNLOAD_FORSETI
{}
# Prevent namespace clash
pip uninstall --yes protobuf
{}
python setup.py install
# Create the startup run script
read -d '' RUN_FORSETI << EOF
#!/bin/bash
# inventory command
{}
# scanner command
{}
EOF
echo "$RUN_FORSETI" > $USER_HOME/run_forseti.sh
chmod +x $USER_HOME/run_forseti.sh
/bin/sh $USER_HOME/run_forseti.sh
(echo "0 * * * * $USER_HOME/run_forseti.sh") | crontab -
""".format(
# cloud_sql_proxy
context.properties['cloudsqlproxy-os-arch'],
context.properties['cloudsqlproxy-os-arch'],
CLOUDSQL_CONN_STRING,
context.properties['db-port'],
# rules.yaml
SCANNER_BUCKET,
ORGANIZATION_ID,
SCANNER_BUCKET,
# old style build protobufs
OLD_BUILD_PROTOS,
# install forseti
DOWNLOAD_FORSETI,
# copy Forseti config file
NEW_FORSETI_CONFIG,
# run_forseti.sh
# - forseti_inventory
inventory_command,
# - forseti_scanner
scanner_command,
)
}]
}
}
})
return {'resources': resources}
| 1 | 26,618 | This will require changes to the docs. I suggest searching the gh-pages branch for the previous variable name. | forseti-security-forseti-security | py |
@@ -249,7 +249,7 @@ class RemoteHandler(logging.Handler):
def __init__(self):
#Load nvdaHelperRemote.dll but with an altered search path so it can pick up other dlls in lib
- path=os.path.abspath(os.path.join(u"lib",buildVersion.version,u"nvdaHelperRemote.dll"))
+ path = os.path.join(globalVars.appDir, "lib", buildVersion.version, "nvdaHelperRemote.dll")
h=ctypes.windll.kernel32.LoadLibraryExW(path,0,LOAD_WITH_ALTERED_SEARCH_PATH)
if not h:
raise OSError("Could not load %s"%path) | 1 | # A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2007-2020 NV Access Limited, Rui Batista, Joseph Lee, Leonard de Ruijter, Babbage B.V.,
# Accessolutions, Julien Cochuyt
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
"""Utilities and classes to manage logging in NVDA"""
import os
import ctypes
import sys
import warnings
from encodings import utf_8
import logging
import inspect
import winsound
import traceback
from types import MethodType, FunctionType
import globalVars
import buildVersion
from typing import Optional
ERROR_INVALID_WINDOW_HANDLE = 1400
ERROR_TIMEOUT = 1460
RPC_S_SERVER_UNAVAILABLE = 1722
RPC_S_CALL_FAILED_DNE = 1727
EPT_S_NOT_REGISTERED = 1753
E_ACCESSDENIED = -2147024891
CO_E_OBJNOTCONNECTED = -2147220995
EVENT_E_ALL_SUBSCRIBERS_FAILED = -2147220991
RPC_E_CALL_REJECTED = -2147418111
RPC_E_DISCONNECTED = -2147417848
LOAD_WITH_ALTERED_SEARCH_PATH=0x8
def isPathExternalToNVDA(path):
""" Checks if the given path is external to NVDA (I.e. not pointing to built-in code). """
if path[0] != "<" and os.path.isabs(path) and not path.startswith(sys.path[0] + "\\"):
# This module is external because:
# the code comes from a file (fn doesn't begin with "<");
# it has an absolute file path (code bundled in binary builds reports relative paths); and
# it is not part of NVDA's Python code (not beneath sys.path[0]).
return True
return False
def getCodePath(f):
"""Using a frame object, gets its module path (relative to the current directory).[className.[funcName]]
@param f: the frame object to use
@type f: frame
@returns: the dotted module.class.attribute path
@rtype: string
"""
fn=f.f_code.co_filename
if isPathExternalToNVDA(fn):
path="external:"
else:
path=""
try:
path+=f.f_globals["__name__"]
except KeyError:
path+=fn
funcName=f.f_code.co_name
if funcName.startswith('<'):
funcName=""
className=""
#Code borrowed from http://mail.python.org/pipermail/python-list/2000-January/020141.html
if f.f_code.co_argcount:
f_locals = f.f_locals
arg0 = f_locals[f.f_code.co_varnames[0]]
if f.f_code.co_flags & inspect.CO_NEWLOCALS:
			# Fetching Frame.f_locals causes a function frame's locals to be cached on the frame forever.
# If an Exception is currently stored as a local variable on that frame,
# A reference cycle will be created, holding the frame and all its variables.
# Therefore clear f_locals manually.
f_locals.clear()
del f_locals
# #6122: Check if this function is a member of its first argument's class (and specifically which base class if any)
# Rather than an instance member of its first argument.
# This stops infinite recursions if fetching data descriptors,
# And better reflects the actual source code definition.
topCls=arg0 if isinstance(arg0,type) else type(arg0)
# find the deepest class this function's name is reachable as a method from
if hasattr(topCls,funcName):
for cls in topCls.__mro__:
member=cls.__dict__.get(funcName)
if not member:
continue
memberType=type(member)
if memberType is FunctionType and member.__code__ is f.f_code:
# the function was found as a standard method
className=cls.__name__
elif memberType is classmethod and type(member.__func__) is FunctionType and member.__func__.__code__ is f.f_code:
# function was found as a class method
className=cls.__name__
elif memberType is property:
if type(member.fget) is FunctionType and member.fget.__code__ is f.f_code:
# The function was found as a property getter
className=cls.__name__
elif type(member.fset) is FunctionType and member.fset.__code__ is f.f_code:
# the function was found as a property setter
className=cls.__name__
if className:
break
return ".".join(x for x in (path,className,funcName) if x)
# Function to strip the base path of our code from traceback text to improve readability.
if getattr(sys, "frozen", None):
# We're running a py2exe build.
stripBasePathFromTracebackText = lambda text: text
else:
BASE_PATH = os.path.split(__file__)[0] + os.sep
TB_BASE_PATH_PREFIX = ' File "'
TB_BASE_PATH_MATCH = TB_BASE_PATH_PREFIX + BASE_PATH
def stripBasePathFromTracebackText(text):
return text.replace(TB_BASE_PATH_MATCH, TB_BASE_PATH_PREFIX)
class Logger(logging.Logger):
# Import standard levels for convenience.
from logging import DEBUG, INFO, WARNING, WARN, ERROR, CRITICAL
# Our custom levels.
IO = 12
DEBUGWARNING = 15
OFF = 100
#: The start position of a fragment of the log file as marked with
#: L{markFragmentStart} for later retrieval using L{getFragment}.
#: @type: C{long}
fragmentStart = None
def _log(self, level, msg, args, exc_info=None, extra=None, codepath=None, activateLogViewer=False, stack_info=None):
if not extra:
extra={}
if not codepath or stack_info is True:
f=inspect.currentframe().f_back.f_back
if not codepath:
codepath=getCodePath(f)
extra["codepath"] = codepath
if not globalVars.appArgs or globalVars.appArgs.secure:
# The log might expose sensitive information and the Save As dialog in the Log Viewer is a security risk.
activateLogViewer = False
if activateLogViewer:
# Import logViewer here, as we don't want to import GUI code when this module is imported.
from gui import logViewer
logViewer.activate()
# Move to the end of the current log text. The new text will be written at this position.
# This means that the user will be positioned at the start of the new log text.
# This is why we activate the log viewer before writing to the log.
logViewer.logViewer.outputCtrl.SetInsertionPointEnd()
if stack_info:
if stack_info is True:
stack_info = traceback.extract_stack(f)
msg += ("\nStack trace:\n"
+ stripBasePathFromTracebackText("".join(traceback.format_list(stack_info)).rstrip()))
res = super()._log(level, msg, args, exc_info, extra)
if activateLogViewer:
# Make the log text we just wrote appear in the log viewer.
logViewer.logViewer.refresh()
return res
def debugWarning(self, msg, *args, **kwargs):
"""Log 'msg % args' with severity 'DEBUGWARNING'.
"""
if not self.isEnabledFor(self.DEBUGWARNING):
return
self._log(log.DEBUGWARNING, msg, args, **kwargs)
def io(self, msg, *args, **kwargs):
"""Log 'msg % args' with severity 'IO'.
"""
if not self.isEnabledFor(self.IO):
return
self._log(log.IO, msg, args, **kwargs)
def exception(self, msg="", exc_info=True, **kwargs):
"""Log an exception at an appropriate level.
Normally, it will be logged at level "ERROR".
However, certain exceptions which aren't considered errors (or aren't errors that we can fix) are expected and will therefore be logged at a lower level.
"""
import comtypes
from core import CallCancelled, RPC_E_CALL_CANCELED
if exc_info is True:
exc_info = sys.exc_info()
exc = exc_info[1]
if (
(isinstance(exc, WindowsError) and exc.winerror in (ERROR_INVALID_WINDOW_HANDLE, ERROR_TIMEOUT, RPC_S_SERVER_UNAVAILABLE, RPC_S_CALL_FAILED_DNE, EPT_S_NOT_REGISTERED, RPC_E_CALL_CANCELED))
or (isinstance(exc, comtypes.COMError) and (exc.hresult in (E_ACCESSDENIED, CO_E_OBJNOTCONNECTED, EVENT_E_ALL_SUBSCRIBERS_FAILED, RPC_E_CALL_REJECTED, RPC_E_CALL_CANCELED, RPC_E_DISCONNECTED) or exc.hresult & 0xFFFF == RPC_S_SERVER_UNAVAILABLE))
or isinstance(exc, CallCancelled)
):
level = self.DEBUGWARNING
else:
level = self.ERROR
if not self.isEnabledFor(level):
return
self._log(level, msg, (), exc_info=exc_info, **kwargs)
def markFragmentStart(self):
"""Mark the current end of the log file as the start position of a
fragment to be later retrieved by L{getFragment}.
@returns: Whether a log file is in use and a position could be marked
@rtype: bool
"""
if (
not globalVars.appArgs
or globalVars.appArgs.secure
or not globalVars.appArgs.logFileName
or not isinstance(logHandler, FileHandler)
):
return False
with open(globalVars.appArgs.logFileName, "r", encoding="UTF-8") as f:
# _io.TextIOWrapper.seek: whence=2 -- end of stream
f.seek(0, 2)
self.fragmentStart = f.tell()
return True
def getFragment(self):
"""Retrieve a fragment of the log starting from the position marked using
L{markFragmentStart}.
If L{fragmentStart} does not point to the current end of the log file, it
is reset to C{None} after reading the fragment.
@returns: The text of the fragment, or C{None} if L{fragmentStart} is None.
@rtype: str
"""
if (
self.fragmentStart is None
or not globalVars.appArgs
or globalVars.appArgs.secure
or not globalVars.appArgs.logFileName
or not isinstance(logHandler, FileHandler)
):
return None
with open(globalVars.appArgs.logFileName, "r", encoding="UTF-8") as f:
f.seek(self.fragmentStart)
fragment = f.read()
if fragment:
self.fragmentStart = None
return fragment
class RemoteHandler(logging.Handler):
def __init__(self):
#Load nvdaHelperRemote.dll but with an altered search path so it can pick up other dlls in lib
path=os.path.abspath(os.path.join(u"lib",buildVersion.version,u"nvdaHelperRemote.dll"))
h=ctypes.windll.kernel32.LoadLibraryExW(path,0,LOAD_WITH_ALTERED_SEARCH_PATH)
if not h:
raise OSError("Could not load %s"%path)
self._remoteLib=ctypes.WinDLL("nvdaHelperRemote",handle=h)
logging.Handler.__init__(self)
def emit(self, record):
msg = self.format(record)
try:
self._remoteLib.nvdaControllerInternal_logMessage(record.levelno, ctypes.windll.kernel32.GetCurrentProcessId(), msg)
except WindowsError:
pass
class FileHandler(logging.FileHandler):
def handle(self,record):
# Only play the error sound if this is a test version.
shouldPlayErrorSound = buildVersion.isTestVersion
if record.levelno>=logging.CRITICAL:
try:
winsound.PlaySound("SystemHand",winsound.SND_ALIAS)
except:
pass
elif record.levelno>=logging.ERROR and shouldPlayErrorSound:
import nvwave
try:
nvwave.playWaveFile("waves\\error.wav")
except:
pass
return super().handle(record)
class Formatter(logging.Formatter):
default_time_format = "%H:%M:%S"
default_msec_format = "%s.%03d"
def formatException(self, ex):
return stripBasePathFromTracebackText(super(Formatter, self).formatException(ex))
class StreamRedirector(object):
"""Redirects an output stream to a logger.
"""
def __init__(self, name, logger, level):
"""Constructor.
@param name: The name of the stream to be used in the log output.
@param logger: The logger to which to log.
@type logger: L{Logger}
@param level: The level at which to log.
@type level: int
"""
self.name = name
self.logger = logger
self.level = level
def write(self, text):
text = text.rstrip()
if not text:
return
self.logger.log(self.level, text, codepath=self.name)
def flush(self):
pass
def redirectStdout(logger):
"""Redirect stdout and stderr to a given logger.
@param logger: The logger to which to redirect.
@type logger: L{Logger}
"""
sys.stdout = StreamRedirector("stdout", logger, logging.WARNING)
sys.stderr = StreamRedirector("stderr", logger, logging.ERROR)
# Register our logging class as the class for all loggers.
logging.setLoggerClass(Logger)
#: The singleton logger instance.
log: Logger = logging.getLogger("nvda")
#: The singleton log handler instance.
logHandler: Optional[logging.Handler] = None
def _getDefaultLogFilePath():
if getattr(sys, "frozen", None):
import tempfile
return os.path.join(tempfile.gettempdir(), "nvda.log")
else:
return ".\\nvda.log"
def _excepthook(*exc_info):
log.exception(exc_info=exc_info, codepath="unhandled exception")
def _showwarning(message, category, filename, lineno, file=None, line=None):
log.debugWarning(warnings.formatwarning(message, category, filename, lineno, line).rstrip(), codepath="Python warning")
def initialize(shouldDoRemoteLogging=False):
"""Initialize logging.
This must be called before any logging can occur.
@precondition: The command line arguments have been parsed into L{globalVars.appArgs}.
@var shouldDoRemoteLogging: True if all logging should go to the real NVDA via rpc (for slave)
@type shouldDoRemoteLogging: bool
"""
global log, logHandler
logging.addLevelName(Logger.DEBUGWARNING, "DEBUGWARNING")
logging.addLevelName(Logger.IO, "IO")
logging.addLevelName(Logger.OFF, "OFF")
if not shouldDoRemoteLogging:
# This produces log entries such as the following:
# IO - inputCore.InputManager.executeGesture (09:17:40.724) - Thread-5 (13576):
# Input: kb(desktop):v
logFormatter = Formatter(
fmt="{levelname!s} - {codepath!s} ({asctime}) - {threadName} ({thread}):\n{message}",
style="{"
)
if (globalVars.appArgs.secure or globalVars.appArgs.noLogging) and (not globalVars.appArgs.debugLogging and globalVars.appArgs.logLevel == 0):
# Don't log in secure mode.
# #8516: also if logging is completely turned off.
logHandler = logging.NullHandler()
# There's no point in logging anything at all, since it'll go nowhere.
log.root.setLevel(Logger.OFF)
else:
if not globalVars.appArgs.logFileName:
globalVars.appArgs.logFileName = _getDefaultLogFilePath()
# Keep a backup of the previous log file so we can access it even if NVDA crashes or restarts.
oldLogFileName = os.path.join(os.path.dirname(globalVars.appArgs.logFileName), "nvda-old.log")
try:
# We must remove the old log file first as os.rename does replace it.
if os.path.exists(oldLogFileName):
os.unlink(oldLogFileName)
os.rename(globalVars.appArgs.logFileName, oldLogFileName)
except (IOError, WindowsError):
pass # Probably log does not exist, don't care.
try:
logHandler = FileHandler(globalVars.appArgs.logFileName, mode="w", encoding="utf-8")
except IOError:
				# if log cannot be opened, we use NullHandler to avoid logging while preserving logger behaviour
# and set log filename to None to inform logViewer about it
globalVars.appArgs.logFileName = None
logHandler = logging.NullHandler()
log.error("Faile to open log file, redirecting to standard output")
logLevel = globalVars.appArgs.logLevel
if globalVars.appArgs.debugLogging:
logLevel = Logger.DEBUG
elif logLevel <= 0:
logLevel = Logger.INFO
log.setLevel(logLevel)
log.root.setLevel(max(logLevel, logging.WARN))
else:
logHandler = RemoteHandler()
logFormatter = Formatter(
fmt="{codepath!s}:\n{message}",
style="{"
)
logHandler.setFormatter(logFormatter)
log.root.addHandler(logHandler)
redirectStdout(log)
sys.excepthook = _excepthook
warnings.showwarning = _showwarning
warnings.simplefilter("default", DeprecationWarning)
def isLogLevelForced() -> bool:
"""Check if the log level was overridden either from the command line or because of secure mode.
"""
return (
globalVars.appArgs.secure
or globalVars.appArgs.debugLogging
or globalVars.appArgs.logLevel != 0
or globalVars.appArgs.noLogging
)
def setLogLevelFromConfig():
"""Set the log level based on the current configuration.
"""
if isLogLevelForced():
return
import config
levelName=config.conf["general"]["loggingLevel"]
# logging.getLevelName can give you a level number if given a name.
level = logging.getLevelName(levelName)
# The lone exception to level higher than INFO is "OFF" (100).
# Setting a log level to something other than options found in the GUI is unsupported.
if level not in (log.DEBUG, log.IO, log.DEBUGWARNING, log.INFO, log.OFF):
log.warning("invalid setting for logging level: %s" % levelName)
level = log.INFO
config.conf["general"]["loggingLevel"] = logging.getLevelName(log.INFO)
log.setLevel(level)
log.root.setLevel(max(level, logging.WARN))
 | 1 | 30,804 | `RemoteHandler` is used in nvda_slave and slave does not have access to `globalVars` currently. Perhaps `globalVars.appDir` should be set to `None` in `globalVars` by default and set to the NVDA directory when they're imported for the first time. | nvaccess-nvda | py
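A rough Python sketch of the arrangement the reviewer describes (hypothetical; `appDir` and the import-time initialisation are assumptions, not code from this patch):

# globalVars.py: default appDir to None so nvda_slave can import the module safely.
appDir = None

# In the main NVDA process, the first importer fills it in:
import os
import globalVars

if globalVars.appDir is None:
    globalVars.appDir = os.path.abspath(os.path.dirname(__file__))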
@@ -12,10 +12,11 @@ type MetricsConfig struct {
}
type FileConfig struct {
- Prometheus *PrometheusConfig `hcl:"Prometheus"`
- DogStatsd []DogStatsdConfig `hcl:"DogStatsd"`
- Statsd []StatsdConfig `hcl:"Statsd"`
- M3 []M3Config `hcl:"M3"`
+ Prometheus *PrometheusConfig `hcl:"Prometheus"`
+ DogStatsd []DogStatsdConfig `hcl:"DogStatsd"`
+ Statsd []StatsdConfig `hcl:"Statsd"`
+ M3 []M3Config `hcl:"M3"`
+ DisableInMem bool `hcl:"DisableInMem"`
}
type DogStatsdConfig struct { | 1 | package telemetry
import (
"github.com/sirupsen/logrus"
)
type MetricsConfig struct {
FileConfig FileConfig
Logger logrus.FieldLogger
ServiceName string
Sinks []Sink
}
type FileConfig struct {
Prometheus *PrometheusConfig `hcl:"Prometheus"`
DogStatsd []DogStatsdConfig `hcl:"DogStatsd"`
Statsd []StatsdConfig `hcl:"Statsd"`
M3 []M3Config `hcl:"M3"`
}
type DogStatsdConfig struct {
Address string `hcl:"address"`
}
type PrometheusConfig struct {
Host string `hcl:"host"`
Port int `hcl:"port"`
}
type StatsdConfig struct {
Address string `hcl:"address"`
}
type M3Config struct {
Address string `hcl:"address"`
Env string `hcl:"env"`
}
| 1 | 12,312 | In case there is more configuration to be added to the "inmem" metrics later (however unlikely that is), we may want to play it safe and create an `InMemConfig` struct and include the disabled flag there. This also gives the config symmetry across the plugins. | spiffe-spire | go |
@@ -35,6 +35,16 @@ describe ProposalSearch do
end
context Gsa18f::Procurement do
+ around(:each) do |example|
+ ENV['GSA18F_APPROVER_EMAIL'] = 'test_approver@some-dot-gov.gov'
+ ENV['GSA18F_PURCHASER_EMAIL'] = 'test_purchaser@some-dot-gov.gov'
+ example.run
+
+ ENV['GSA18F_APPROVER_EMAIL'] = nil
+ ENV['GSA18F_PURCHASER_EMAIL'] = nil
+ end
+
+
[:product_name_and_description, :justification, :additional_info].each do |attr_name|
it "returns the Proposal when searching by the ##{attr_name}" do
procurement = FactoryGirl.create(:gsa18f_procurement, attr_name => 'foo') | 1 | describe ProposalSearch do
describe '#execute' do
it "returns an empty list for no Proposals" do
results = ProposalSearch.new.execute('')
expect(results).to eq([])
end
it "returns the Proposal when searching by ID" do
proposal = FactoryGirl.create(:proposal)
results = ProposalSearch.new.execute(proposal.id.to_s)
expect(results).to eq([proposal])
end
it "can operate on an a relation" do
proposal = FactoryGirl.create(:proposal)
relation = Proposal.where(id: proposal.id + 1)
results = ProposalSearch.new(relation).execute(proposal.id.to_s)
expect(results).to eq([])
end
it "returns an empty list for no matches" do
FactoryGirl.create(:proposal)
results = ProposalSearch.new.execute('asgsfgsfdbsd')
expect(results).to eq([])
end
context Ncr::WorkOrder do
[:project_title, :description, :vendor].each do |attr_name|
it "returns the Proposal when searching by the ##{attr_name}" do
work_order = FactoryGirl.create(:ncr_work_order, attr_name => 'foo')
results = ProposalSearch.new.execute('foo')
expect(results).to eq([work_order.proposal])
end
end
end
context Gsa18f::Procurement do
[:product_name_and_description, :justification, :additional_info].each do |attr_name|
it "returns the Proposal when searching by the ##{attr_name}" do
procurement = FactoryGirl.create(:gsa18f_procurement, attr_name => 'foo')
results = ProposalSearch.new.execute('foo')
expect(results).to eq([procurement.proposal])
end
end
end
it "returns the Proposals by rank" do
prop1 = FactoryGirl.create(:proposal, id: 12)
work_order = FactoryGirl.create(:ncr_work_order, project_title: "12 rolly chairs for 1600 Penn Ave")
prop2 = work_order.proposal
prop3 = FactoryGirl.create(:proposal, id: 1600)
searcher = ProposalSearch.new
expect(searcher.execute('12')).to eq([prop1, prop2])
expect(searcher.execute('1600')).to eq([prop3, prop2])
expect(searcher.execute('12 rolly')).to eq([prop2])
end
end
end
| 1 | 13,213 | I'll need to pull this out of here since we're sharing this in other tests. Same for `procurement_spec.rb` | 18F-C2 | rb |
@@ -6,7 +6,7 @@ class ObservationsController < ApplicationController
def create
obs = @proposal.add_observer(observer_email, current_user, params[:observation][:reason])
- flash[:success] = "#{obs.user.full_name} has been added as an observer"
+ prep_create_response_msg(obs)
redirect_to proposal_path(@proposal)
end
| 1 | class ObservationsController < ApplicationController
before_action :authenticate_user!
before_action :find_proposal
before_action -> { authorize self.observation_for_auth }
rescue_from Pundit::NotAuthorizedError, with: :auth_errors
def create
obs = @proposal.add_observer(observer_email, current_user, params[:observation][:reason])
flash[:success] = "#{obs.user.full_name} has been added as an observer"
redirect_to proposal_path(@proposal)
end
def destroy
self.observation.destroy
flash[:success] = "Deleted Observation"
redirect_to proposal_path(self.observation.proposal_id)
end
protected
def find_proposal
@proposal ||= Proposal.find(params[:proposal_id])
end
def observation_for_auth
if params[:action] == 'create'
Observation.new(proposal: @proposal)
else
self.observation
end
end
def observation
@cached_observation ||= Observation.find(params[:id])
end
def observer_email
params.permit(observation: { user: [:email_address] })
.require(:observation).require(:user).require(:email_address)
end
def auth_errors(exception)
render 'communicarts/authorization_error', status: 403,
locals: { msg: "You are not allowed to add observers to that proposal. #{exception.message}" }
end
end
| 1 | 15,327 | what about `observer` as a var name here (and below) rather than `obs` ? -- would be clearer, imo! | 18F-C2 | rb |
@@ -110,7 +110,7 @@ class ErrorHandler(object):
value = message
try:
message = message['message']
- except TypeError:
+ except KeyError:
message = None
else:
message = value.get('message', None) | 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.common.exceptions import (ElementNotInteractableException,
ElementNotSelectableException,
ElementNotVisibleException,
ErrorInResponseException,
InvalidElementStateException,
InvalidSelectorException,
ImeNotAvailableException,
ImeActivationFailedException,
MoveTargetOutOfBoundsException,
NoSuchElementException,
NoSuchFrameException,
NoSuchWindowException,
NoAlertPresentException,
StaleElementReferenceException,
TimeoutException,
UnexpectedAlertPresentException,
WebDriverException)
try:
basestring
except NameError: # Python 3.x
basestring = str
class ErrorCode(object):
"""
Error codes defined in the WebDriver wire protocol.
"""
# Keep in sync with org.openqa.selenium.remote.ErrorCodes and errorcodes.h
SUCCESS = 0
NO_SUCH_ELEMENT = [7, 'no such element']
NO_SUCH_FRAME = [8, 'no such frame']
UNKNOWN_COMMAND = [9, 'unknown command']
STALE_ELEMENT_REFERENCE = [10, 'stale element reference']
ELEMENT_NOT_VISIBLE = [11, 'element not visible']
INVALID_ELEMENT_STATE = [12, 'invalid element state']
UNKNOWN_ERROR = [13, 'unknown error']
ELEMENT_NOT_INTERACTABLE = ["element not interactable"]
ELEMENT_IS_NOT_SELECTABLE = [15, 'element not selectable']
JAVASCRIPT_ERROR = [17, 'javascript error']
XPATH_LOOKUP_ERROR = [19, 'invalid selector']
TIMEOUT = [21, 'timeout']
NO_SUCH_WINDOW = [23, 'no such window']
INVALID_COOKIE_DOMAIN = [24, 'invalid cookie domain']
UNABLE_TO_SET_COOKIE = [25, 'unable to set cookie']
UNEXPECTED_ALERT_OPEN = [26, 'unexpected alert open']
NO_ALERT_OPEN = [27, 'no such alert']
SCRIPT_TIMEOUT = [28, 'script timeout']
INVALID_ELEMENT_COORDINATES = [29, 'invalid element coordinates']
IME_NOT_AVAILABLE = [30, 'ime not available']
IME_ENGINE_ACTIVATION_FAILED = [31, 'ime engine activation failed']
INVALID_SELECTOR = [32, 'invalid selector']
MOVE_TARGET_OUT_OF_BOUNDS = [34, 'move target out of bounds']
INVALID_XPATH_SELECTOR = [51, 'invalid selector']
INVALID_XPATH_SELECTOR_RETURN_TYPER = [52, 'invalid selector']
METHOD_NOT_ALLOWED = [405, 'unsupported operation']
class ErrorHandler(object):
"""
Handles errors returned by the WebDriver server.
"""
def check_response(self, response):
"""
Checks that a JSON response from the WebDriver does not have an error.
:Args:
- response - The JSON response from the WebDriver server as a dictionary
object.
:Raises: If the response contains an error message.
"""
status = response.get('status', None)
if status is None or status == ErrorCode.SUCCESS:
return
value = None
message = response.get("message", "")
screen = response.get("screen", "")
stacktrace = None
if isinstance(status, int):
value_json = response.get('value', None)
if value_json and isinstance(value_json, basestring):
import json
try:
value = json.loads(value_json)
if len(value.keys()) == 1:
value = value['value']
status = value.get('error', None)
if status is None:
status = value["status"]
message = value["value"]
if not isinstance(message, basestring):
value = message
try:
message = message['message']
except TypeError:
message = None
else:
message = value.get('message', None)
except ValueError:
pass
exception_class = ErrorInResponseException
if status in ErrorCode.NO_SUCH_ELEMENT:
exception_class = NoSuchElementException
elif status in ErrorCode.NO_SUCH_FRAME:
exception_class = NoSuchFrameException
elif status in ErrorCode.NO_SUCH_WINDOW:
exception_class = NoSuchWindowException
elif status in ErrorCode.STALE_ELEMENT_REFERENCE:
exception_class = StaleElementReferenceException
elif status in ErrorCode.ELEMENT_NOT_VISIBLE:
exception_class = ElementNotVisibleException
elif status in ErrorCode.INVALID_ELEMENT_STATE:
exception_class = InvalidElementStateException
elif status in ErrorCode.INVALID_SELECTOR \
or status in ErrorCode.INVALID_XPATH_SELECTOR \
or status in ErrorCode.INVALID_XPATH_SELECTOR_RETURN_TYPER:
exception_class = InvalidSelectorException
elif status in ErrorCode.ELEMENT_IS_NOT_SELECTABLE:
exception_class = ElementNotSelectableException
elif status in ErrorCode.ELEMENT_NOT_INTERACTABLE:
exception_class = ElementNotInteractableException
elif status in ErrorCode.INVALID_COOKIE_DOMAIN:
exception_class = WebDriverException
elif status in ErrorCode.UNABLE_TO_SET_COOKIE:
exception_class = WebDriverException
elif status in ErrorCode.TIMEOUT:
exception_class = TimeoutException
elif status in ErrorCode.SCRIPT_TIMEOUT:
exception_class = TimeoutException
elif status in ErrorCode.UNKNOWN_ERROR:
exception_class = WebDriverException
elif status in ErrorCode.UNEXPECTED_ALERT_OPEN:
exception_class = UnexpectedAlertPresentException
elif status in ErrorCode.NO_ALERT_OPEN:
exception_class = NoAlertPresentException
elif status in ErrorCode.IME_NOT_AVAILABLE:
exception_class = ImeNotAvailableException
elif status in ErrorCode.IME_ENGINE_ACTIVATION_FAILED:
exception_class = ImeActivationFailedException
elif status in ErrorCode.MOVE_TARGET_OUT_OF_BOUNDS:
exception_class = MoveTargetOutOfBoundsException
else:
exception_class = WebDriverException
if value == '' or value is None:
value = response['value']
if isinstance(value, basestring):
if exception_class == ErrorInResponseException:
raise exception_class(response, value)
raise exception_class(value)
if message == "" and 'message' in value:
message = value['message']
screen = None
if 'screen' in value:
screen = value['screen']
stacktrace = None
if 'stackTrace' in value and value['stackTrace']:
stacktrace = []
try:
for frame in value['stackTrace']:
line = self._value_or_default(frame, 'lineNumber', '')
file = self._value_or_default(frame, 'fileName', '<anonymous>')
if line:
file = "%s:%s" % (file, line)
meth = self._value_or_default(frame, 'methodName', '<anonymous>')
if 'className' in frame:
meth = "%s.%s" % (frame['className'], meth)
msg = " at %s (%s)"
msg = msg % (meth, file)
stacktrace.append(msg)
except TypeError:
pass
if exception_class == ErrorInResponseException:
raise exception_class(response, message)
elif exception_class == UnexpectedAlertPresentException and 'alert' in value:
raise exception_class(message, screen, stacktrace, value['alert'].get('text'))
raise exception_class(message, screen, stacktrace)
def _value_or_default(self, obj, key, default):
return obj[key] if key in obj else default
| 1 | 14,479 | could you change this to instead of being a `try.. except` be `message = message.get('message')` | SeleniumHQ-selenium | js |
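For illustration, a sketch of the simplification the reviewer asks for, applied to the block touched by the patch above (not part of the submitted change):

if not isinstance(message, basestring):
    value = message
    # dict.get returns None when the key is missing, so no try/except is needed
    message = message.get('message')
else:
    message = value.get('message', None)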
@@ -1,4 +1,5 @@
-// Copyright (c) Microsoft. All rights reserved.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace Microsoft.VisualStudio.TestPlatform.CommunicationUtilities
{
using System; | 1 | // Copyright (c) Microsoft. All rights reserved.
namespace Microsoft.VisualStudio.TestPlatform.CommunicationUtilities
{
using System;
using System.IO;
using System.Net;
using System.Net.Sockets;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.VisualStudio.TestPlatform.CommunicationUtilities.Interfaces;
using Microsoft.VisualStudio.TestPlatform.CommunicationUtilities.ObjectModel;
using Microsoft.VisualStudio.TestPlatform.ObjectModel;
/// <summary>
/// Facilitates communication using sockets
/// </summary>
public class SocketCommunicationManager : ICommunicationManager
{
/// <summary>
/// TCP Listener to host TCP channel and listen
/// </summary>
private TcpListener tcpListener;
/// <summary>
/// TCP Client that can connect to a TCP listener
/// </summary>
private TcpClient tcpClient;
/// <summary>
/// Binary Writer to write to channel stream
/// </summary>
private BinaryWriter binaryWriter;
/// <summary>
/// Binary reader to read from channel stream
/// </summary>
private BinaryReader binaryReader;
/// <summary>
/// Serializer for the data objects
/// </summary>
private IDataSerializer dataSerializer;
/// <summary>
/// Event used to maintain client connection state
/// </summary>
private ManualResetEvent clientConnectedEvent = new ManualResetEvent(false);
/// <summary>
/// Event used to maintain client connection state
/// </summary>
private ManualResetEvent clientConnectionAcceptedEvent = new ManualResetEvent(false);
/// <summary>
/// Sync object for sending messages
/// SendMessage over socket channel is NOT thread-safe
/// </summary>
private object sendSyncObject = new object();
/// <summary>
/// Stream to use read timeout
/// </summary>
private NetworkStream stream;
private Socket socket;
/// <summary>
/// The server stream read timeout constant (in microseconds).
/// </summary>
private const int StreamReadTimeout = 1000 * 1000;
/// <summary>
/// Initializes a new instance of the <see cref="SocketCommunicationManager"/> class.
/// </summary>
public SocketCommunicationManager() : this(JsonDataSerializer.Instance)
{
}
internal SocketCommunicationManager(IDataSerializer dataSerializer)
{
this.dataSerializer = dataSerializer;
}
#region ServerMethods
/// <summary>
/// Host TCP Socket Server and start listening
/// </summary>
/// <returns></returns>
public int HostServer()
{
var endpoint = new IPEndPoint(IPAddress.Loopback, 0);
this.tcpListener = new TcpListener(endpoint);
this.tcpListener.Start();
var portNumber = ((IPEndPoint)this.tcpListener.LocalEndpoint).Port;
EqtTrace.Info("Listening on port : {0}", portNumber);
return portNumber;
}
/// <summary>
/// Accepts client async
/// </summary>
public async Task AcceptClientAsync()
{
if (this.tcpListener != null)
{
this.clientConnectedEvent.Reset();
var client = await this.tcpListener.AcceptTcpClientAsync();
this.socket = client.Client;
this.stream = client.GetStream();
this.binaryReader = new BinaryReader(this.stream);
this.binaryWriter = new BinaryWriter(this.stream);
this.clientConnectedEvent.Set();
EqtTrace.Info("Accepted Client request and set the flag");
}
}
/// <summary>
/// Waits for Client Connection
/// </summary>
/// <param name="clientConnectionTimeout">Time to Wait for the connection</param>
/// <returns>True if Client is connected, false otherwise</returns>
public bool WaitForClientConnection(int clientConnectionTimeout)
{
return this.clientConnectedEvent.WaitOne(clientConnectionTimeout);
}
/// <summary>
/// Stop Listener
/// </summary>
public void StopServer()
{
this.tcpListener?.Stop();
this.tcpListener = null;
this.binaryReader?.Dispose();
this.binaryWriter?.Dispose();
}
#endregion
#region ClientMethods
/// <summary>
/// Connects to server async
/// </summary>
public async Task SetupClientAsync(int portNumber)
{
this.clientConnectionAcceptedEvent.Reset();
EqtTrace.Info("Trying to connect to server on port : {0}", portNumber);
this.tcpClient = new TcpClient();
this.socket = this.tcpClient.Client;
await this.tcpClient.ConnectAsync(IPAddress.Loopback, portNumber);
this.stream = this.tcpClient.GetStream();
this.binaryReader = new BinaryReader(this.stream);
this.binaryWriter = new BinaryWriter(this.stream);
this.clientConnectionAcceptedEvent.Set();
EqtTrace.Info("Connected to the server successfully ");
}
/// <summary>
/// Waits for server to be connected
/// Whoever creating the client and trying to connect to a server
/// should use this method to wait for connection to be established with server
/// </summary>
/// <param name="connectionTimeout">Time to wait for the connection</param>
/// <returns>True, if Server got a connection from client</returns>
public bool WaitForServerConnection(int connectionTimeout)
{
return this.clientConnectionAcceptedEvent.WaitOne(connectionTimeout);
}
/// <summary>
/// Stop Listener
/// </summary>
public void StopClient()
{
this.tcpClient?.Dispose();
this.tcpClient = null;
this.binaryReader?.Dispose();
this.binaryWriter?.Dispose();
}
#endregion
/// <summary>
/// Writes message to the binary writer.
/// </summary>
/// <param name="messageType">Type of Message to be sent, for instance TestSessionStart</param>
public void SendMessage(string messageType)
{
var serializedObject = this.dataSerializer.SerializeMessage(messageType);
this.WriteAndFlushToChannel(serializedObject);
}
/// <summary>
/// Reads message from the binary reader
/// </summary>
/// <returns>Returns message read from the binary reader</returns>
public Message ReceiveMessage()
{
var rawMessage = this.ReceiveRawMessage();
return this.dataSerializer.DeserializeMessage(rawMessage);
}
/// <summary>
/// Writes message to the binary writer with payload
/// </summary>
/// <param name="messageType">Type of Message to be sent, for instance TestSessionStart</param>
/// <param name="payload">payload to be sent</param>
public void SendMessage(string messageType, object payload)
{
var rawMessage = this.dataSerializer.SerializePayload(messageType, payload);
this.WriteAndFlushToChannel(rawMessage);
}
/// <summary>
/// The send hand shake message.
/// </summary>
public void SendHandShakeMessage()
{
this.SendMessage(MessageType.SessionStart);
}
/// <summary>
/// Reads message from the binary reader
/// </summary>
/// <returns> Raw message string </returns>
public string ReceiveRawMessage()
{
return this.binaryReader.ReadString();
}
/// <summary>
/// Send serialized raw message
/// </summary>
/// <param name="rawMessage">serialized message</param>
public void SendRawMessage(string rawMessage)
{
this.WriteAndFlushToChannel(rawMessage);
}
/// <summary>
/// Deserializes the Message into actual TestPlatform objects
/// </summary>
/// <typeparam name="T"> The type of object to deserialize to. </typeparam>
/// <param name="message"> Message object </param>
/// <returns> TestPlatform object </returns>
public T DeserializePayload<T>(Message message)
{
return this.dataSerializer.DeserializePayload<T>(message);
}
/// <summary>
/// Reads message from the binary reader using read timeout
/// </summary>
/// <param name="cancellationToken">
/// The cancellation Token.
/// </param>
/// <returns>
/// Returns message read from the binary reader
/// </returns>
public async Task<Message> ReceiveMessageAsync(CancellationToken cancellationToken)
{
var rawMessage = await this.ReceiveRawMessageAsync(cancellationToken);
if (!string.IsNullOrEmpty(rawMessage))
{
return this.dataSerializer.DeserializeMessage(rawMessage);
}
return null;
}
/// <summary>
/// Reads message from the binary reader using read timeout
/// </summary>
/// <param name="cancellationToken">
/// The cancellation Token.
/// </param>
/// <returns>
/// Raw message string
/// </returns>
public async Task<string> ReceiveRawMessageAsync(CancellationToken cancellationToken)
{
var str = await Task.Run(() => this.TryReceiveRawMessage(cancellationToken));
return str;
}
private string TryReceiveRawMessage(CancellationToken cancellationToken)
{
string str = null;
bool success = false;
// Set read timeout to avoid blocking receive raw message
while (!cancellationToken.IsCancellationRequested && !success)
{
try
{
if (this.socket.Poll(StreamReadTimeout, SelectMode.SelectRead))
{
str = this.ReceiveRawMessage();
success = true;
}
}
catch (IOException ioException)
{
var socketException = ioException.InnerException as SocketException;
if (socketException != null
&& socketException.SocketErrorCode == SocketError.TimedOut)
{
EqtTrace.Info(
"SocketCommunicationManager ReceiveMessage: failed to receive message because read timeout {0}",
ioException);
}
else
{
EqtTrace.Error(
"SocketCommunicationManager ReceiveMessage: failed to receive message {0}",
ioException);
break;
}
}
catch (Exception exception)
{
EqtTrace.Error(
"SocketCommunicationManager ReceiveMessage: failed to receive message {0}",
exception);
break;
}
}
return str;
}
/// <summary>
/// Writes the data on socket and flushes the buffer
/// </summary>
/// <param name="rawMessage">message to write</param>
private void WriteAndFlushToChannel(string rawMessage)
{
// Writing Message on binarywriter is not Thread-Safe
// Need to sync one by one to avoid buffer corruption
lock (this.sendSyncObject)
{
this.binaryWriter?.Write(rawMessage);
this.binaryWriter?.Flush();
}
}
}
}
 | 1 | 11,395 | Add space between License and namespace | microsoft-vstest | .cs
@@ -0,0 +1,15 @@
+class ProfileController < ApplicationController
+ def show
+ end
+
+ def update
+ first_name = params[:first_name]
+ last_name = params[:last_name]
+ user = current_user
+ user.first_name = first_name
+ user.last_name = last_name
+ user.save!
+ flash[:success] = "Your profile is updated!"
+ redirect_to :me
+ end
+end | 1 | 1 | 15,461 | should we have a `before_filter` for auth here? | 18F-C2 | rb |
|
@@ -575,6 +575,17 @@ func (r *AWSMachineReconciler) reconcileLBAttachment(machineScope *scope.Machine
// In order to prevent sending request to a "not-ready" control plane machines, it is required to remove the machine
// from the ELB as soon as the machine gets deleted or when the machine is in a not running state.
if !machineScope.AWSMachine.DeletionTimestamp.IsZero() || !machineScope.InstanceIsRunning() {
+ registered, err := elbsvc.InstanceIsRegisteredWithAPIServerELB(i)
+ if err != nil {
+ r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedDetachControlPlaneELB",
+ "Failed to deregister control plane instance %q from load balancer: failed to determine registration status: %v", i.ID, err)
+ return errors.Wrapf(err, "could not deregister control plane instance %q from load balancer - error determining registration status", i.ID)
+ }
+ if !registered {
+ // Already deregistered - nothing more to do
+ return nil
+ }
+
if err := elbsvc.DeregisterInstanceFromAPIServerELB(i); err != nil {
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedDetachControlPlaneELB",
"Failed to deregister control plane instance %q from load balancer: %v", i.ID, err) | 1 | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"github.com/go-logr/logr"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/tools/record"
"k8s.io/utils/pointer"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
"sigs.k8s.io/cluster-api/controllers/noderefutil"
capierrors "sigs.k8s.io/cluster-api/errors"
"sigs.k8s.io/cluster-api/util"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/source"
infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/elb"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/secretsmanager"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/userdata"
)
// AWSMachineReconciler reconciles a AwsMachine object
type AWSMachineReconciler struct {
client.Client
Log logr.Logger
Recorder record.EventRecorder
ec2ServiceFactory func(*scope.ClusterScope) services.EC2MachineInterface
secretsManagerServiceFactory func(*scope.ClusterScope) services.SecretsManagerInterface
}
func (r *AWSMachineReconciler) getEC2Service(scope *scope.ClusterScope) services.EC2MachineInterface {
if r.ec2ServiceFactory != nil {
return r.ec2ServiceFactory(scope)
}
return ec2.NewService(scope)
}
func (r *AWSMachineReconciler) getSecretsManagerService(scope *scope.ClusterScope) services.SecretsManagerInterface {
if r.secretsManagerServiceFactory != nil {
return r.secretsManagerServiceFactory(scope)
}
return secretsmanager.NewService(scope)
}
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachines,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachines/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=secrets;,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch
func (r *AWSMachineReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) {
ctx := context.TODO()
logger := r.Log.WithValues("namespace", req.Namespace, "awsMachine", req.Name)
// Fetch the AWSMachine instance.
awsMachine := &infrav1.AWSMachine{}
err := r.Get(ctx, req.NamespacedName, awsMachine)
if err != nil {
if apierrors.IsNotFound(err) {
return ctrl.Result{}, nil
}
return ctrl.Result{}, err
}
// Fetch the Machine.
machine, err := util.GetOwnerMachine(ctx, r.Client, awsMachine.ObjectMeta)
if err != nil {
return ctrl.Result{}, err
}
if machine == nil {
logger.Info("Machine Controller has not yet set OwnerRef")
return ctrl.Result{}, nil
}
logger = logger.WithValues("machine", machine.Name)
// Fetch the Cluster.
cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta)
if err != nil {
logger.Info("Machine is missing cluster label or cluster does not exist")
return ctrl.Result{}, nil
}
if util.IsPaused(cluster, awsMachine) {
logger.Info("AWSMachine or linked Cluster is marked as paused. Won't reconcile")
return ctrl.Result{}, nil
}
logger = logger.WithValues("cluster", cluster.Name)
awsCluster := &infrav1.AWSCluster{}
awsClusterName := client.ObjectKey{
Namespace: awsMachine.Namespace,
Name: cluster.Spec.InfrastructureRef.Name,
}
if err := r.Client.Get(ctx, awsClusterName, awsCluster); err != nil {
logger.Info("AWSCluster is not available yet")
return ctrl.Result{}, nil
}
logger = logger.WithValues("awsCluster", awsCluster.Name)
// Create the cluster scope
clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
Client: r.Client,
Logger: logger,
Cluster: cluster,
AWSCluster: awsCluster,
})
if err != nil {
return ctrl.Result{}, err
}
// Create the machine scope
machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{
Logger: logger,
Client: r.Client,
Cluster: cluster,
Machine: machine,
AWSCluster: awsCluster,
AWSMachine: awsMachine,
})
if err != nil {
return ctrl.Result{}, errors.Errorf("failed to create scope: %+v", err)
}
// Always close the scope when exiting this function so we can persist any AWSMachine changes.
defer func() {
if err := machineScope.Close(); err != nil && reterr == nil {
reterr = err
}
}()
// Handle deleted machines
if !awsMachine.ObjectMeta.DeletionTimestamp.IsZero() {
return r.reconcileDelete(machineScope, clusterScope)
}
// Handle non-deleted machines
return r.reconcileNormal(ctx, machineScope, clusterScope)
}
func (r *AWSMachineReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error {
controller, err := ctrl.NewControllerManagedBy(mgr).
WithOptions(options).
For(&infrav1.AWSMachine{}).
Watches(
&source.Kind{Type: &clusterv1.Machine{}},
&handler.EnqueueRequestsFromMapFunc{
ToRequests: util.MachineToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("AWSMachine")),
},
).
Watches(
&source.Kind{Type: &infrav1.AWSCluster{}},
&handler.EnqueueRequestsFromMapFunc{ToRequests: handler.ToRequestsFunc(r.AWSClusterToAWSMachines)},
).
WithEventFilter(pausedPredicates(r.Log)).
Build(r)
if err != nil {
return err
}
return controller.Watch(
&source.Kind{Type: &clusterv1.Cluster{}},
&handler.EnqueueRequestsFromMapFunc{
ToRequests: handler.ToRequestsFunc(r.requeueAWSMachinesForUnpausedCluster),
},
predicate.Funcs{
UpdateFunc: func(e event.UpdateEvent) bool {
oldCluster := e.ObjectOld.(*clusterv1.Cluster)
newCluster := e.ObjectNew.(*clusterv1.Cluster)
log := r.Log.WithValues("predicate", "updateEvent", "namespace", newCluster.Namespace, "cluster", newCluster.Name)
switch {
// never return true for a paused Cluster
case newCluster.Spec.Paused:
log.V(4).Info("Cluster is paused, will not attempt to map associated AWSMachine.")
return false
// return true if Cluster.Status.InfrastructureReady has changed from false to true
case !oldCluster.Status.InfrastructureReady && newCluster.Status.InfrastructureReady:
log.V(4).Info("Cluster InfrastructureReady became ready, will attempt to map associated AWSMachine.")
return true
// return true if Cluster.Spec.Paused has changed from true to false
case oldCluster.Spec.Paused && !newCluster.Spec.Paused:
log.V(4).Info("Cluster was unpaused, will attempt to map associated AWSMachine.")
return true
// otherwise, return false
default:
log.V(4).Info("Cluster did not match expected conditions, will not attempt to map associated AWSMachine.")
return false
}
},
CreateFunc: func(e event.CreateEvent) bool {
cluster := e.Object.(*clusterv1.Cluster)
log := r.Log.WithValues("predicateEvent", "create", "namespace", cluster.Namespace, "cluster", cluster.Name)
// Only need to trigger a reconcile if the Cluster.Spec.Paused is false and
// Cluster.Status.InfrastructureReady is true
if !cluster.Spec.Paused && cluster.Status.InfrastructureReady {
log.V(4).Info("Cluster is not paused and has infrastructure ready, will attempt to map associated AWSMachine.")
return true
}
log.V(4).Info("Cluster did not match expected conditions, will not attempt to map associated AWSMachine.")
return false
},
DeleteFunc: func(e event.DeleteEvent) bool {
log := r.Log.WithValues("predicateEvent", "delete", "namespace", e.Meta.GetNamespace(), "cluster", e.Meta.GetName())
log.V(4).Info("Cluster did not match expected conditions, will not attempt to map associated AWSMachine.")
return false
},
GenericFunc: func(e event.GenericEvent) bool {
log := r.Log.WithValues("predicateEvent", "generic", "namespace", e.Meta.GetNamespace(), "cluster", e.Meta.GetName())
log.V(4).Info("Cluster did not match expected conditions, will not attempt to map associated AWSMachine.")
return false
},
},
)
}
func (r *AWSMachineReconciler) reconcileDelete(machineScope *scope.MachineScope, clusterScope *scope.ClusterScope) (ctrl.Result, error) {
machineScope.Info("Handling deleted AWSMachine")
ec2Service := r.getEC2Service(clusterScope)
secretSvc := r.getSecretsManagerService(clusterScope)
if err := r.deleteEncryptedBootstrapDataSecret(machineScope, secretSvc); err != nil {
return ctrl.Result{}, err
}
instance, err := r.findInstance(machineScope, ec2Service)
if err != nil {
return ctrl.Result{}, err
}
if instance == nil {
// The machine was never created or was deleted by some other entity
// One way to reach this state:
// 1. Scale deployment to 0
// 2. Rename EC2 machine, and delete ProviderID from spec of both Machine
// and AWSMachine
// 3. Issue a delete
// 4. Scale controller deployment to 1
machineScope.V(2).Info("Unable to locate EC2 instance by ID or tags")
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "NoInstanceFound", "Unable to find matching EC2 instance")
controllerutil.RemoveFinalizer(machineScope.AWSMachine, infrav1.MachineFinalizer)
return ctrl.Result{}, nil
}
machineScope.V(3).Info("EC2 instance found matching deleted AWSMachine", "instance-id", instance.ID)
if err := r.reconcileLBAttachment(machineScope, clusterScope, instance); err != nil {
// We are tolerating AccessDenied error, so this won't block for users with older version of IAM;
// all the other errors are blocking.
if !elb.IsAccessDenied(err) {
return ctrl.Result{}, errors.Errorf("failed to reconcile LB attachment: %+v", err)
}
}
// Check the instance state. If it's already shutting down or terminated,
// do nothing. Otherwise attempt to delete it.
// This decision is based on the ec2-instance-lifecycle graph at
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html
switch instance.State {
case infrav1.InstanceStateShuttingDown, infrav1.InstanceStateTerminated:
machineScope.Info("EC2 instance is shutting down or already terminated", "instance-id", instance.ID)
default:
machineScope.Info("Terminating EC2 instance", "instance-id", instance.ID)
if err := ec2Service.TerminateInstanceAndWait(instance.ID); err != nil {
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedTerminate", "Failed to terminate instance %q: %v", instance.ID, err)
return ctrl.Result{}, errors.Wrap(err, "failed to terminate instance")
}
// If the AWSMachine specifies Network Interfaces, detach the cluster's core Security Groups from them as part of deletion.
if len(machineScope.AWSMachine.Spec.NetworkInterfaces) > 0 {
core, err := ec2Service.GetCoreSecurityGroups(machineScope)
if err != nil {
return ctrl.Result{}, errors.Wrap(err, "failed to get core security groups to detach from instance's network interfaces")
}
machineScope.V(3).Info(
"Detaching security groups from provided network interface",
"groups", core,
"instanceID", instance.ID,
)
for _, id := range machineScope.AWSMachine.Spec.NetworkInterfaces {
if err := ec2Service.DetachSecurityGroupsFromNetworkInterface(core, id); err != nil {
return ctrl.Result{}, errors.Wrap(err, "failed to detach security groups from instance's network interfaces")
}
}
}
machineScope.Info("EC2 instance successfully terminated", "instance-id", instance.ID)
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, "SuccessfulTerminate", "Terminated instance %q", instance.ID)
}
// Instance is deleted so remove the finalizer.
controllerutil.RemoveFinalizer(machineScope.AWSMachine, infrav1.MachineFinalizer)
return ctrl.Result{}, nil
}
// findInstance queries the EC2 apis and retrieves the instance if it exists, returns nil otherwise.
func (r *AWSMachineReconciler) findInstance(scope *scope.MachineScope, ec2svc services.EC2MachineInterface) (*infrav1.Instance, error) {
// Parse the ProviderID.
pid, err := noderefutil.NewProviderID(scope.GetProviderID())
if err != nil && err != noderefutil.ErrEmptyProviderID {
return nil, errors.Wrapf(err, "failed to parse Spec.ProviderID")
}
// If the ProviderID is populated, describe the instance using the ID.
if err == nil {
instance, err := ec2svc.InstanceIfExists(pointer.StringPtr(pid.ID()))
if err != nil {
return nil, errors.Wrapf(err, "failed to query AWSMachine instance")
}
return instance, nil
}
// If the ProviderID is empty, try to query the instance using tags.
instance, err := ec2svc.GetRunningInstanceByTags(scope)
if err != nil {
return nil, errors.Wrapf(err, "failed to query AWSMachine instance by tags")
}
return instance, nil
}
func (r *AWSMachineReconciler) reconcileNormal(_ context.Context, machineScope *scope.MachineScope, clusterScope *scope.ClusterScope) (ctrl.Result, error) {
machineScope.Info("Reconciling AWSMachine")
secretSvc := r.getSecretsManagerService(clusterScope)
// If the AWSMachine is in an error state, return early.
if machineScope.HasFailed() {
machineScope.Info("Error state detected, skipping reconciliation")
// If we are in a failed state, delete the secret regardless of instance state
if err := r.deleteEncryptedBootstrapDataSecret(machineScope, secretSvc); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
// If the AWSMachine doesn't have our finalizer, add it.
controllerutil.AddFinalizer(machineScope.AWSMachine, infrav1.MachineFinalizer)
// Register the finalizer immediately to avoid orphaning AWS resources on delete
if err := machineScope.PatchObject(); err != nil {
return ctrl.Result{}, err
}
if !machineScope.Cluster.Status.InfrastructureReady {
machineScope.Info("Cluster infrastructure is not ready yet")
return ctrl.Result{}, nil
}
// Make sure bootstrap data is available and populated.
if machineScope.Machine.Spec.Bootstrap.DataSecretName == nil {
machineScope.Info("Bootstrap data secret reference is not yet available")
return ctrl.Result{}, nil
}
ec2svc := r.getEC2Service(clusterScope)
// Get or create the instance.
instance, err := r.getOrCreate(machineScope, ec2svc, secretSvc)
if err != nil {
return ctrl.Result{}, err
}
	// Set a failure message if we couldn't find the instance.
if instance == nil {
machineScope.Info("EC2 instance cannot be found")
machineScope.SetFailureReason(capierrors.UpdateMachineError)
machineScope.SetFailureMessage(errors.New("EC2 instance cannot be found"))
return ctrl.Result{}, nil
}
// Make sure Spec.ProviderID is always set.
machineScope.SetProviderID(instance.ID, instance.AvailabilityZone)
// See https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html
existingInstanceState := machineScope.GetInstanceState()
machineScope.SetInstanceState(instance.State)
// Proceed to reconcile the AWSMachine state.
if existingInstanceState == nil || *existingInstanceState != instance.State {
machineScope.Info("EC2 instance state changed", "state", instance.State, "instance-id", *machineScope.GetInstanceID())
}
// TODO(vincepri): Remove this annotation when clusterctl is no longer relevant.
machineScope.SetAnnotation("cluster-api-provider-aws", "true")
switch instance.State {
case infrav1.InstanceStatePending, infrav1.InstanceStateStopping, infrav1.InstanceStateStopped:
machineScope.SetNotReady()
case infrav1.InstanceStateRunning:
machineScope.SetReady()
case infrav1.InstanceStateShuttingDown, infrav1.InstanceStateTerminated:
machineScope.SetNotReady()
machineScope.Info("Unexpected EC2 instance termination", "state", instance.State, "instance-id", *machineScope.GetInstanceID())
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "InstanceUnexpectedTermination", "Unexpected EC2 instance termination")
default:
machineScope.SetNotReady()
machineScope.Info("EC2 instance state is undefined", "state", instance.State, "instance-id", *machineScope.GetInstanceID())
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "InstanceUnhandledState", "EC2 instance state is undefined")
machineScope.SetFailureReason(capierrors.UpdateMachineError)
machineScope.SetFailureMessage(errors.Errorf("EC2 instance state %q is undefined", instance.State))
}
// reconcile the deletion of the bootstrap data secret now that we have updated instance state
if err := r.deleteEncryptedBootstrapDataSecret(machineScope, secretSvc); err != nil {
return ctrl.Result{}, err
}
if instance.State == infrav1.InstanceStateTerminated {
machineScope.SetFailureReason(capierrors.UpdateMachineError)
machineScope.SetFailureMessage(errors.Errorf("EC2 instance state %q is unexpected", instance.State))
}
// tasks that can take place during all known instance states
if machineScope.InstanceIsInKnownState() {
_, err = r.ensureTags(ec2svc, machineScope.AWSMachine, machineScope.GetInstanceID(), machineScope.AdditionalTags())
if err != nil {
return ctrl.Result{}, errors.Errorf("failed to ensure tags: %+v", err)
}
if err := r.reconcileLBAttachment(machineScope, clusterScope, instance); err != nil {
return ctrl.Result{}, errors.Errorf("failed to reconcile LB attachment: %+v", err)
}
}
// tasks that can only take place during operational instance states
if machineScope.InstanceIsOperational() {
machineScope.SetAddresses(instance.Addresses)
existingSecurityGroups, err := ec2svc.GetInstanceSecurityGroups(*machineScope.GetInstanceID())
if err != nil {
return ctrl.Result{}, err
}
// Ensure that the security groups are correct.
_, err = r.ensureSecurityGroups(ec2svc, machineScope, machineScope.AWSMachine.Spec.AdditionalSecurityGroups, existingSecurityGroups)
if err != nil {
return ctrl.Result{}, errors.Errorf("failed to apply security groups: %+v", err)
}
}
return ctrl.Result{}, nil
}
func (r *AWSMachineReconciler) deleteEncryptedBootstrapDataSecret(machineScope *scope.MachineScope, secretSvc services.SecretsManagerInterface) error {
// do nothing if there isn't a secret
if machineScope.GetSecretPrefix() == "" {
return nil
}
if machineScope.GetSecretCount() == 0 {
return errors.New("secretPrefix present, but secretCount is not set")
}
// Do nothing if the AWSMachine is not in a failed state, and is operational from an EC2 perspective, but does not have a node reference
if !machineScope.HasFailed() && machineScope.InstanceIsOperational() && machineScope.Machine.Status.NodeRef == nil && !machineScope.AWSMachineIsDeleted() {
return nil
}
machineScope.Info("Deleting unneeded entry from AWS Secrets Manager", "secretPrefix", machineScope.GetSecretPrefix())
if err := secretSvc.Delete(machineScope); err != nil {
machineScope.Info("Unable to delete entries from AWS Secrets Manager containing encrypted userdata", "secretPrefix", machineScope.GetSecretPrefix())
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedDeleteEncryptedBootstrapDataSecrets", "AWS Secret Manager entries containing userdata not deleted")
return err
}
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, "SuccessfulDeleteEncryptedBootstrapDataSecrets", "AWS Secret Manager entries containing userdata deleted")
machineScope.DeleteSecretPrefix()
machineScope.SetSecretCount(0)
return nil
}
func (r *AWSMachineReconciler) getOrCreate(scope *scope.MachineScope, ec2svc services.EC2MachineInterface, secretSvc services.SecretsManagerInterface) (*infrav1.Instance, error) {
instance, err := r.findInstance(scope, ec2svc)
if err != nil {
return nil, err
}
// If we find an instance, return it
if instance != nil {
return instance, nil
}
// Otherwise create a new instance
scope.Info("Creating EC2 instance")
userData, err := scope.GetRawBootstrapData()
if err != nil {
r.Recorder.Eventf(scope.AWSMachine, corev1.EventTypeWarning, "FailedGetBootstrapData", err.Error())
return nil, err
}
if scope.UseSecretsManager() {
compressedUserData, err := userdata.GzipBytes(userData)
if err != nil {
return nil, err
}
prefix, chunks, serviceErr := secretSvc.Create(scope, compressedUserData)
// Only persist the AWS Secrets Manager entries if there is at least one
if chunks > 0 {
scope.SetSecretPrefix(prefix)
scope.SetSecretCount(chunks)
}
// Register the Secret ARN immediately to avoid orphaning whatever AWS resources have been created
if err := scope.PatchObject(); err != nil {
return nil, err
}
if serviceErr != nil {
r.Recorder.Eventf(scope.AWSMachine, corev1.EventTypeWarning, "FailedCreateAWSSecretsManagerSecrets", serviceErr.Error())
scope.Error(serviceErr, "Failed to create AWS Secret entry", "secretPrefix", prefix)
return nil, serviceErr
}
encryptedCloudInit, err := secretsmanager.GenerateCloudInitMIMEDocument(scope.GetSecretPrefix(), scope.GetSecretCount(), scope.AWSCluster.Spec.Region)
if err != nil {
r.Recorder.Eventf(scope.AWSMachine, corev1.EventTypeWarning, "FailedGenerateAWSSecretsManagerCloudInit", err.Error())
return nil, err
}
userData = encryptedCloudInit
}
instance, err = ec2svc.CreateInstance(scope, userData)
if err != nil {
return nil, errors.Wrapf(err, "failed to create AWSMachine instance")
}
return instance, nil
}
func (r *AWSMachineReconciler) reconcileLBAttachment(machineScope *scope.MachineScope, clusterScope *scope.ClusterScope, i *infrav1.Instance) error {
if !machineScope.IsControlPlane() {
return nil
}
elbsvc := elb.NewService(clusterScope)
// In order to prevent sending request to a "not-ready" control plane machines, it is required to remove the machine
// from the ELB as soon as the machine gets deleted or when the machine is in a not running state.
if !machineScope.AWSMachine.DeletionTimestamp.IsZero() || !machineScope.InstanceIsRunning() {
if err := elbsvc.DeregisterInstanceFromAPIServerELB(i); err != nil {
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedDetachControlPlaneELB",
"Failed to deregister control plane instance %q from load balancer: %v", i.ID, err)
return errors.Wrapf(err, "could not deregister control plane instance %q from load balancer", i.ID)
}
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, "SuccessfulDetachControlPlaneELB",
"Control plane instance %q is de-registered from load balancer", i.ID)
return nil
}
if err := elbsvc.RegisterInstanceWithAPIServerELB(i); err != nil {
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedAttachControlPlaneELB",
"Failed to register control plane instance %q with load balancer: %v", i.ID, err)
return errors.Wrapf(err, "could not register control plane instance %q with load balancer", i.ID)
}
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, "SuccessfulAttachControlPlaneELB",
"Control plane instance %q is registered with load balancer", i.ID)
return nil
}
// AWSClusterToAWSMachines is a handler.ToRequestsFunc to be used to enqueue requests for reconciliation
// of AWSMachines.
func (r *AWSMachineReconciler) AWSClusterToAWSMachines(o handler.MapObject) []ctrl.Request {
c := o.Object.(*infrav1.AWSCluster)
log := r.Log.WithValues("objectMapper", "awsClusterToAWSMachine", "namespace", c.Namespace, "awsCluster", c.Name)
// Don't handle deleted AWSClusters
if !c.ObjectMeta.DeletionTimestamp.IsZero() {
log.V(4).Info("AWSCluster has a deletion timestamp, skipping mapping.")
return nil
}
cluster, err := util.GetOwnerCluster(context.TODO(), r.Client, c.ObjectMeta)
switch {
case apierrors.IsNotFound(err) || cluster == nil:
log.V(4).Info("Cluster for AWSCluster not found, skipping mapping.")
return nil
case err != nil:
log.Error(err, "Failed to get owning cluster, skipping mapping.")
return nil
}
return r.requestsForCluster(log, cluster.Namespace, cluster.Name)
}
func (r *AWSMachineReconciler) requeueAWSMachinesForUnpausedCluster(o handler.MapObject) []ctrl.Request {
c := o.Object.(*clusterv1.Cluster)
log := r.Log.WithValues("objectMapper", "clusterToAWSMachine", "namespace", c.Namespace, "cluster", c.Name)
// Don't handle deleted clusters
if !c.ObjectMeta.DeletionTimestamp.IsZero() {
log.V(4).Info("Cluster has a deletion timestamp, skipping mapping.")
return nil
}
return r.requestsForCluster(log, c.Namespace, c.Name)
}
func (r *AWSMachineReconciler) requestsForCluster(log logr.Logger, namespace, name string) []ctrl.Request {
labels := map[string]string{clusterv1.ClusterLabelName: name}
machineList := &clusterv1.MachineList{}
if err := r.Client.List(context.TODO(), machineList, client.InNamespace(namespace), client.MatchingLabels(labels)); err != nil {
log.Error(err, "Failed to get owned Machines, skipping mapping.")
return nil
}
result := make([]ctrl.Request, 0, len(machineList.Items))
for _, m := range machineList.Items {
log.WithValues("machine", m.Name)
if m.Spec.InfrastructureRef.GroupVersionKind().Kind != "AWSMachine" {
log.V(4).Info("Machine has an InfrastructureRef for a different type, will not add to reconciliation request.")
continue
}
if m.Spec.InfrastructureRef.Name == "" {
log.V(4).Info("Machine has an InfrastructureRef with an empty name, will not add to reconciliation request.")
continue
}
log.WithValues("awsMachine", m.Spec.InfrastructureRef.Name)
log.V(4).Info("Adding AWSMachine to reconciliation request.")
result = append(result, ctrl.Request{NamespacedName: client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.InfrastructureRef.Name}})
}
return result
}
| 1 | 15,282 | @vincepri I know we discussed moving this up and only calling it once, but I didn't (quickly) see an easy way to generate the right event based on the appropriate action (attach vs detach). Happy to move it around if you have suggestions. | kubernetes-sigs-cluster-api-provider-aws | go |
@@ -0,0 +1,15 @@
+class TsearchHooks
+ def after_save(record)
+ record.class.where(id: record).update_all(
+ "vector = #{ vector(record) }, popularity_factor = #{ record.searchable_factor }")
+ end
+
+ def vector(record)
+ weighted_sql = []
+ record.searchable_vector.each do |weight, attr_value|
+ attr_value.gsub!(/['?\\:]/, ' ')
+ weighted_sql << "setweight(to_tsvector(coalesce('#{attr_value}')), '#{weight.upcase}')"
+ end
+ weighted_sql.join(' ||')
+ end
+end | 1 | 1 | 6,808 | Can we use record.update here? | blackducksoftware-ohloh-ui | rb |
|
@@ -243,7 +243,7 @@ func (rw *responseWriter) Close() error {
retErr = appendError(retErr, fmt.Errorf("SetApplicationError() failed: %v", err))
}
}
- retErr = appendError(retErr, writeHeaders(rw.format, rw.headers, rw.response.Arg2Writer))
+ retErr = appendError(retErr, writeHeaders(rw.format, rw.headers.Items(), rw.response.Arg2Writer))
// Arg3Writer must be opened and closed regardless of if there is data
// However, if there is a system error, we do not want to do this | 1 | // Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package tchannel
import (
"context"
"fmt"
"time"
"github.com/opentracing/opentracing-go"
"github.com/uber/tchannel-go"
"go.uber.org/multierr"
"go.uber.org/yarpc/api/transport"
"go.uber.org/yarpc/internal/bufferpool"
"go.uber.org/yarpc/pkg/errors"
"go.uber.org/yarpc/yarpcerrors"
ncontext "golang.org/x/net/context"
)
// inboundCall provides an interface similar tchannel.InboundCall.
//
// We use it instead of *tchannel.InboundCall because tchannel.InboundCall is
// not an interface, so we have little control over its behavior in tests.
type inboundCall interface {
ServiceName() string
CallerName() string
MethodString() string
ShardKey() string
RoutingKey() string
RoutingDelegate() string
Format() tchannel.Format
Arg2Reader() (tchannel.ArgReader, error)
Arg3Reader() (tchannel.ArgReader, error)
Response() inboundCallResponse
}
// inboundCallResponse provides an interface similar to
// tchannel.InboundCallResponse.
//
// Its purpose is the same as inboundCall: Make it easier to test functions
// that consume InboundCallResponse without having control of
// InboundCallResponse's behavior.
type inboundCallResponse interface {
Arg2Writer() (tchannel.ArgWriter, error)
Arg3Writer() (tchannel.ArgWriter, error)
SendSystemError(err error) error
SetApplicationError() error
}
// tchannelCall wraps a TChannel InboundCall into an inboundCall.
//
// We need to do this so that we can change the return type of call.Response()
// to match inboundCall's Response().
type tchannelCall struct{ *tchannel.InboundCall }
func (c tchannelCall) Response() inboundCallResponse {
return c.InboundCall.Response()
}
// handler wraps a transport.UnaryHandler into a TChannel Handler.
type handler struct {
existing map[string]tchannel.Handler
router transport.Router
tracer opentracing.Tracer
}
func (h handler) Handle(ctx ncontext.Context, call *tchannel.InboundCall) {
h.handle(ctx, tchannelCall{call})
}
func (h handler) handle(ctx context.Context, call inboundCall) {
// you MUST close the responseWriter no matter what unless you have a tchannel.SystemError
responseWriter := newResponseWriter(call.Response(), call.Format())
err := h.callHandler(ctx, call, responseWriter)
if err != nil && !responseWriter.isApplicationError {
// TODO: log error
_ = call.Response().SendSystemError(getSystemError(err))
return
}
if err != nil && responseWriter.isApplicationError {
// we have an error, so we're going to propagate it as a yarpc error,
// regardless of whether or not it is a system error.
status := yarpcerrors.FromError(errors.WrapHandlerError(err, call.ServiceName(), call.MethodString()))
// TODO: what to do with error? we could have a whole complicated scheme to
// return a SystemError here, might want to do that
text, _ := status.Code().MarshalText()
responseWriter.addHeader(ErrorCodeHeaderKey, string(text))
if status.Name() != "" {
responseWriter.addHeader(ErrorNameHeaderKey, status.Name())
}
if status.Message() != "" {
responseWriter.addHeader(ErrorMessageHeaderKey, status.Message())
}
}
if err := responseWriter.Close(); err != nil {
// TODO: log error
_ = call.Response().SendSystemError(getSystemError(err))
}
}
func (h handler) callHandler(ctx context.Context, call inboundCall, responseWriter *responseWriter) error {
start := time.Now()
_, ok := ctx.Deadline()
if !ok {
return tchannel.ErrTimeoutRequired
}
treq := &transport.Request{
Caller: call.CallerName(),
Service: call.ServiceName(),
Encoding: transport.Encoding(call.Format()),
Procedure: call.MethodString(),
ShardKey: call.ShardKey(),
RoutingKey: call.RoutingKey(),
RoutingDelegate: call.RoutingDelegate(),
}
ctx, headers, err := readRequestHeaders(ctx, call.Format(), call.Arg2Reader)
if err != nil {
return errors.RequestHeadersDecodeError(treq, err)
}
treq.Headers = headers
if tcall, ok := call.(tchannelCall); ok {
tracer := h.tracer
ctx = tchannel.ExtractInboundSpan(ctx, tcall.InboundCall, headers.Items(), tracer)
}
body, err := call.Arg3Reader()
if err != nil {
return err
}
defer body.Close()
treq.Body = body
if err := transport.ValidateRequest(treq); err != nil {
return err
}
spec, err := h.router.Choose(ctx, treq)
if err != nil {
if yarpcerrors.FromError(err).Code() != yarpcerrors.CodeUnimplemented {
return err
}
if tcall, ok := call.(tchannelCall); !ok {
if m, ok := h.existing[call.MethodString()]; ok {
m.Handle(ctx, tcall.InboundCall)
return nil
}
}
return err
}
if err := transport.ValidateRequestContext(ctx); err != nil {
return err
}
switch spec.Type() {
case transport.Unary:
return transport.DispatchUnaryHandler(ctx, spec.Unary(), start, treq, responseWriter)
default:
return yarpcerrors.Newf(yarpcerrors.CodeUnimplemented, "transport tchannel does not handle %s handlers", spec.Type().String())
}
}
type responseWriter struct {
failedWith error
format tchannel.Format
headers transport.Headers
buffer *bufferpool.Buffer
response inboundCallResponse
isApplicationError bool
}
func newResponseWriter(response inboundCallResponse, format tchannel.Format) *responseWriter {
return &responseWriter{
response: response,
format: format,
}
}
func (rw *responseWriter) AddHeaders(h transport.Headers) {
for k, v := range h.Items() {
// TODO: is this considered a breaking change?
if isReservedHeaderKey(k) {
rw.failedWith = appendError(rw.failedWith, fmt.Errorf("cannot use reserved header key: %s", k))
return
}
rw.addHeader(k, v)
}
}
func (rw *responseWriter) addHeader(key string, value string) {
rw.headers = rw.headers.With(key, value)
}
func (rw *responseWriter) SetApplicationError() {
rw.isApplicationError = true
}
func (rw *responseWriter) Write(s []byte) (int, error) {
if rw.failedWith != nil {
return 0, rw.failedWith
}
if rw.buffer == nil {
rw.buffer = bufferpool.Get()
}
n, err := rw.buffer.Write(s)
if err != nil {
rw.failedWith = appendError(rw.failedWith, err)
}
return n, err
}
func (rw *responseWriter) Close() error {
retErr := rw.failedWith
if rw.isApplicationError {
if err := rw.response.SetApplicationError(); err != nil {
retErr = appendError(retErr, fmt.Errorf("SetApplicationError() failed: %v", err))
}
}
retErr = appendError(retErr, writeHeaders(rw.format, rw.headers, rw.response.Arg2Writer))
// Arg3Writer must be opened and closed regardless of if there is data
// However, if there is a system error, we do not want to do this
bodyWriter, err := rw.response.Arg3Writer()
if err != nil {
return appendError(retErr, err)
}
defer func() { retErr = appendError(retErr, bodyWriter.Close()) }()
if rw.buffer != nil {
defer bufferpool.Put(rw.buffer)
if _, err := rw.buffer.WriteTo(bodyWriter); err != nil {
return appendError(retErr, err)
}
}
return retErr
}
func getSystemError(err error) error {
if _, ok := err.(tchannel.SystemError); ok {
return err
}
if !yarpcerrors.IsStatus(err) {
return tchannel.NewSystemError(tchannel.ErrCodeUnexpected, err.Error())
}
status := yarpcerrors.FromError(err)
tchannelCode, ok := _codeToTChannelCode[status.Code()]
if !ok {
tchannelCode = tchannel.ErrCodeUnexpected
}
return tchannel.NewSystemError(tchannelCode, status.Message())
}
func appendError(left error, right error) error {
if _, ok := left.(tchannel.SystemError); ok {
return left
}
if _, ok := right.(tchannel.SystemError); ok {
return right
}
return multierr.Append(left, right)
}
| 1 | 16,189 | Shouldn't the exact case option matter here? | yarpc-yarpc-go | go |
@@ -184,7 +184,7 @@ def Derived(parent_cls):
n = luigi.IntParameter()
# ...
- class MyTask(luigi.uti.Derived(AnotherTask)):
+ class MyTask(luigi.util.Derived(AnotherTask)):
def requires(self):
return self.parent_obj
def run(self): | 1 | # Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import warnings
import logging
import datetime
from luigi import parameter
import task
import functools
logger = logging.getLogger('luigi-interface')
def common_params(task_instance, task_cls):
"""Grab all the values in task_instance that are found in task_cls"""
assert isinstance(task_cls, task.Register), "task_cls must be an uninstantiated Task"
task_instance_param_names = dict(task_instance.get_params()).keys()
task_cls_param_names = dict(task_cls.get_params()).keys()
common_param_names = list(set.intersection(set(task_instance_param_names),set(task_cls_param_names)))
common_param_vals = [(key,dict(task_cls.get_params())[key]) for key in common_param_names]
common_kwargs = dict([(key,task_instance.param_kwargs[key]) for key in common_param_names])
vals = dict(task_instance.get_param_values(common_param_vals, [], common_kwargs))
return vals
def task_wraps(P):
# In order to make the behavior of a wrapper class nicer, we set the name of the
# new class to the wrapped class, and copy over the docstring and module as well.
# This makes it possible to pickle the wrapped class etc.
# Btw, this is a slight abuse of functools.wraps. It's meant to be used only for
# functions, but it works for classes too, if you pass updated=[]
return functools.wraps(P, updated=[])
class inherits(object):
'''Task inheritance.
Usage::
class AnotherTask(luigi.Task):
n = luigi.IntParameter()
# ...
@inherits(AnotherTask):
class MyTask(luigi.Task):
def requires(self):
return self.clone_parent()
def run(self):
print self.n # this will be defined
# ...
'''
def __init__(self, task_to_inherit):
super(inherits, self).__init__()
self.task_to_inherit = task_to_inherit
def __call__(self, task_that_inherits):
this_param_names = dict(task_that_inherits.get_nonglobal_params()).keys()
for param_name, param_obj in self.task_to_inherit.get_params():
if not hasattr(task_that_inherits, param_name):
setattr(task_that_inherits, param_name, param_obj)
# Modify task_that_inherits by subclassing it and adding methods
@task_wraps(task_that_inherits)
class Wrapped(task_that_inherits):
def clone_parent(_self, **args):
return _self.clone(cls=self.task_to_inherit, **args)
return Wrapped
class requires(object):
''' Same as @inherits, but also auto-defines the requires method
'''
def __init__(self, task_to_require):
super(requires, self).__init__()
self.inherit_decorator = inherits(task_to_require)
def __call__(self, task_that_requires):
task_that_requires = self.inherit_decorator(task_that_requires)
# Modify task_that_requres by subclassing it and adding methods
@task_wraps(task_that_requires)
class Wrapped(task_that_requires):
def requires(_self):
return _self.clone_parent()
return Wrapped
class copies(object):
''' Auto-copies a task
Usage::
@copies(MyTask):
class CopyOfMyTask(luigi.Task):
def output(self):
return LocalTarget(self.date.strftime('/var/xyz/report-%Y-%m-%d'))
'''
def __init__(self, task_to_copy):
super(copies, self).__init__()
self.requires_decorator = requires(task_to_copy)
def __call__(self, task_that_copies):
task_that_copies = self.requires_decorator(task_that_copies)
# Modify task_that_copies by subclassing it and adding methods
@task_wraps(task_that_copies)
class Wrapped(task_that_copies):
def run(_self):
i, o = _self.input(), _self.output()
f = o.open('w') # TODO: assert that i, o are Target objects and not complex datastructures
for line in i.open('r'):
f.write(line)
f.close()
return Wrapped
def delegates(task_that_delegates):
''' Lets a task call methods on subtask(s).
The way this works is that the subtask is run as a part of the task, but the task itself doesn't have
to care about the requirements of the subtasks. The subtask doesn't exist from the scheduler's point
of view, and its dependencies are instead required by the main task.
Example::
class PowersOfN(luigi.Task):
n = luigi.IntParameter()
def f(self, x): return x ** self.n
@delegates
class T(luigi.Task):
def subtasks(self): return PowersOfN(5)
def run(self): print self.subtasks().f(42)
'''
if not hasattr(task_that_delegates, 'subtasks'):
# This method can (optionally) define a couple of delegate tasks that
# will be accessible as interfaces, meaning that the task can access
# those tasks and run methods defined on them, etc
raise AttributeError('%s needs to implement the method "subtasks"' % task_that_delegates)
@task_wraps(task_that_delegates)
class Wrapped(task_that_delegates):
def deps(self):
# Overrides method in base class
return task.flatten(self.requires()) + task.flatten([t.deps() for t in task.flatten(self.subtasks())])
def run(self):
for t in task.flatten(self.subtasks()):
t.run()
task_that_delegates.run(self)
return Wrapped
def Derived(parent_cls):
''' This is a class factory function. It returns a new class with same parameters as
the parent class, sets the internal value self.parent_obj to an instance of it, and
lets you override the rest of it. Useful if you have a class that's an immediate result
of a previous class and you don't want to reimplement everything. Also useful if you
want to wrap a class (see wrap_test.py for an example).
Note 1: The derived class does not inherit from the parent class
Note 2: You can add more parameters in the derived class
Usage::
class AnotherTask(luigi.Task):
n = luigi.IntParameter()
# ...
class MyTask(luigi.uti.Derived(AnotherTask)):
def requires(self):
return self.parent_obj
def run(self):
print self.n # this will be defined
# ...
'''
class DerivedCls(task.Task):
def __init__(self, *args, **kwargs):
param_values = {}
for k, v in self.get_param_values(self.get_nonglobal_params(), args, kwargs):
param_values[k] = v
# Figure out which params the parent need (it's always a subset)
parent_param_values = {}
for k, v in parent_cls.get_nonglobal_params():
parent_param_values[k] = param_values[k]
self.parent_obj = parent_cls(**parent_param_values)
super(DerivedCls, self).__init__(*args, **kwargs)
warnings.warn('Derived is deprecated, please use the @inherits decorator instead', DeprecationWarning)
# Copy parent's params to child
for param_name, param_obj in parent_cls.get_params():
setattr(DerivedCls, param_name, param_obj)
return DerivedCls
def Copy(parent_cls):
''' Creates a new Task that copies the old task.
Usage::
class CopyOfMyTask(Copy(MyTask)):
def output(self):
return LocalTarget(self.date.strftime('/var/xyz/report-%Y-%m-%d'))
'''
class CopyCls(Derived(parent_cls)):
def requires(self):
return self.parent_obj
output = NotImplemented
def run(self):
i, o = self.input(), self.output()
f = o.open('w') # TODO: assert that i, o are Target objects and not complex datastructures
for line in i.open('r'):
f.write(line)
f.close()
warnings.warn('Copy is deprecated, please use the @copies decorator instead', DeprecationWarning)
return CopyCls
class CompositionTask(task.Task):
# Experimental support for composition task. This is useful if you have two tasks where
# X has a dependency on Y and X wants to invoke methods on Y. The problem with a normal
# requires() style dependency is that if X and Y are run in different processes then
# X can not access Y. To solve this, you can let X own a reference to an Y and have it
# run it as a part of its own run method.
def __init__(self, *args, **kwargs):
warnings.warn('CompositionTask is deprecated, please use the @delegates decorator instead', DeprecationWarning)
super(CompositionTask, self).__init__(*args, **kwargs)
def subtasks(self):
# This method can (optionally) define a couple of delegate tasks that
# will be accessible as interfaces, meaning that the task can access
# those tasks and run methods defined on them, etc
return [] # default impl
def deps(self):
# Overrides method in base class
return task.flatten(self.requires()) + task.flatten([t.deps() for t in task.flatten(self.subtasks())])
def run_subtasks(self):
for t in task.flatten(self.subtasks()):
t.run()
# Note that your run method must also initialize subtasks
# def run(self):
# self.run_subtasks()
# ...
def deprecate_kwarg(old_name, new_name, kw_value):
""" Rename keyword arguments, but keep backwards compatibility.
Usage:
>>> @deprecate_kwarg('old', 'new', 'defval')
... def some_func(old='defval'):
... print(old)
...
>>> some_func(new='yay')
yay
>>> some_func(old='yaay')
yaay
>>> some_func()
defval
"""
def real_decorator(function):
def new_function(*args, **kwargs):
value = kw_value
if old_name in kwargs:
warnings.warn('Keyword argument {0} is deprecated, use {1}'
.format(old_name, new_name))
value = kwargs[old_name]
if new_name in kwargs:
value = kwargs[new_name]
del kwargs[new_name]
kwargs[old_name] = value
return function(*args, **kwargs)
return new_function
return real_decorator
def previous(task):
"""Return a previous Task of the same family.
By default checks if this task family only has one non-global parameter and if
it is a DateParameter, DateHourParameter or DateIntervalParameter in which case
it returns with the time decremented by 1 (hour, day or interval)
"""
params = task.get_nonglobal_params()
previous_params = {}
previous_date_params = {}
for param_name, param_obj in params:
param_value = getattr(task, param_name)
if isinstance(param_obj, parameter.DateParameter):
previous_date_params[param_name] = param_value - datetime.timedelta(days=1)
elif isinstance(param_obj, parameter.DateHourParameter):
previous_date_params[param_name] = param_value - datetime.timedelta(hours=1)
elif isinstance(param_obj, parameter.DateIntervalParameter):
previous_date_params[param_name] = param_value.prev()
else:
previous_params[param_name] = param_value
previous_params.update(previous_date_params)
if len(previous_date_params) == 0:
raise NotImplementedError("No task parameter - can't determine previous task")
elif len(previous_date_params) > 1:
raise NotImplementedError("Too many date-related task parameters - can't determine previous task")
else:
return task.clone(**previous_params)
def get_previous_completed(task, max_steps=10):
prev = task
for i in xrange(max_steps):
prev = previous(prev)
logger.debug("Checking if %s is complete" % prev.task_id)
if prev.complete():
return prev
return None
| 1 | 10,072 | Errr, why haven't we noticed this until now? | spotify-luigi | py |
@@ -456,7 +456,11 @@ public final class Lucene50PostingsFormat extends PostingsFormat {
@Override
public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
PostingsWriterBase postingsWriter = new Lucene50PostingsWriter(state);
- state.segmentInfo.putAttribute(MODE_KEY, fstLoadMode.name());
+ final String previous = state.segmentInfo.putAttribute(MODE_KEY, fstLoadMode.name());
+ if (previous != null && previous.equals(fstLoadMode.name()) == false) {
+ throw new IllegalStateException("found existing value for " + MODE_KEY + " for segment: " + state.segmentInfo.name +
+ "old=" + previous + ", new=" + fstLoadMode.name());
+ }
boolean success = false;
try {
FieldsConsumer ret = new BlockTreeTermsWriter(state, | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.codecs.lucene50;
import java.io.IOException;
import org.apache.lucene.codecs.BlockTermState;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.FieldsConsumer;
import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.MultiLevelSkipListWriter;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.PostingsReaderBase;
import org.apache.lucene.codecs.PostingsWriterBase;
import org.apache.lucene.codecs.blocktree.BlockTreeTermsReader;
import org.apache.lucene.codecs.blocktree.BlockTreeTermsWriter;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.index.SegmentWriteState;
import org.apache.lucene.index.TermState;
import org.apache.lucene.store.DataOutput;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.packed.PackedInts;
/**
* Lucene 5.0 postings format, which encodes postings in packed integer blocks
* for fast decode.
*
* <p>
* Basic idea:
* <ul>
* <li>
* <b>Packed Blocks and VInt Blocks</b>:
* <p>In packed blocks, integers are encoded with the same bit width ({@link PackedInts packed format}):
* the block size (i.e. number of integers inside block) is fixed (currently 128). Additionally blocks
* that are all the same value are encoded in an optimized way.</p>
* <p>In VInt blocks, integers are encoded as {@link DataOutput#writeVInt VInt}:
* the block size is variable.</p>
* </li>
*
* <li>
* <b>Block structure</b>:
* <p>When the postings are long enough, Lucene50PostingsFormat will try to encode most integer data
* as a packed block.</p>
* <p>Take a term with 259 documents as an example, the first 256 document ids are encoded as two packed
* blocks, while the remaining 3 are encoded as one VInt block. </p>
* <p>Different kinds of data are always encoded separately into different packed blocks, but may
* possibly be interleaved into the same VInt block. </p>
* <p>This strategy is applied to pairs:
* <document number, frequency>,
* <position, payload length>,
* <position, offset start, offset length>, and
* <position, payload length, offsetstart, offset length>.</p>
* </li>
*
* <li>
* <b>Skipdata settings</b>:
* <p>The structure of skip table is quite similar to previous version of Lucene. Skip interval is the
* same as block size, and each skip entry points to the beginning of each block. However, for
* the first block, skip data is omitted.</p>
* </li>
*
* <li>
* <b>Positions, Payloads, and Offsets</b>:
* <p>A position is an integer indicating where the term occurs within one document.
* A payload is a blob of metadata associated with current position.
* An offset is a pair of integers indicating the tokenized start/end offsets for given term
* in current position: it is essentially a specialized payload. </p>
* <p>When payloads and offsets are not omitted, numPositions==numPayloads==numOffsets (assuming a
* null payload contributes one count). As mentioned in block structure, it is possible to encode
* these three either combined or separately.
* <p>In all cases, payloads and offsets are stored together. When encoded as a packed block,
* position data is separated out as .pos, while payloads and offsets are encoded in .pay (payload
* metadata will also be stored directly in .pay). When encoded as VInt blocks, all these three are
* stored interleaved into the .pos (so is payload metadata).</p>
* <p>With this strategy, the majority of payload and offset data will be outside .pos file.
* So for queries that require only position data, running on a full index with payloads and offsets,
* this reduces disk pre-fetches.</p>
* </li>
* </ul>
*
* <p>
* Files and detailed format:
* <ul>
* <li><tt>.tim</tt>: <a href="#Termdictionary">Term Dictionary</a></li>
* <li><tt>.tip</tt>: <a href="#Termindex">Term Index</a></li>
* <li><tt>.doc</tt>: <a href="#Frequencies">Frequencies and Skip Data</a></li>
* <li><tt>.pos</tt>: <a href="#Positions">Positions</a></li>
* <li><tt>.pay</tt>: <a href="#Payloads">Payloads and Offsets</a></li>
* </ul>
*
* <a name="Termdictionary"></a>
* <dl>
* <dd>
* <b>Term Dictionary</b>
*
* <p>The .tim file contains the list of terms in each
* field along with per-term statistics (such as docfreq)
* and pointers to the frequencies, positions, payload and
* skip data in the .doc, .pos, and .pay files.
* See {@link BlockTreeTermsWriter} for more details on the format.
*
* <p>NOTE: The term dictionary can plug into different postings implementations:
* the postings writer/reader are actually responsible for encoding
* and decoding the PostingsHeader and TermMetadata sections described here:
*
* <ul>
* <li>PostingsHeader --> Header, PackedBlockSize</li>
* <li>TermMetadata --> (DocFPDelta|SingletonDocID), PosFPDelta?, PosVIntBlockFPDelta?, PayFPDelta?,
* SkipFPDelta?</li>
* <li>Header, --> {@link CodecUtil#writeIndexHeader IndexHeader}</li>
* <li>PackedBlockSize, SingletonDocID --> {@link DataOutput#writeVInt VInt}</li>
* <li>DocFPDelta, PosFPDelta, PayFPDelta, PosVIntBlockFPDelta, SkipFPDelta --> {@link DataOutput#writeVLong VLong}</li>
* <li>Footer --> {@link CodecUtil#writeFooter CodecFooter}</li>
* </ul>
* <p>Notes:
* <ul>
* <li>Header is a {@link CodecUtil#writeIndexHeader IndexHeader} storing the version information
* for the postings.</li>
* <li>PackedBlockSize is the fixed block size for packed blocks. In packed block, bit width is
* determined by the largest integer. Smaller block size result in smaller variance among width
* of integers hence smaller indexes. Larger block size result in more efficient bulk i/o hence
* better acceleration. This value should always be a multiple of 64, currently fixed as 128 as
* a tradeoff. It is also the skip interval used to accelerate {@link org.apache.lucene.index.PostingsEnum#advance(int)}.
* <li>DocFPDelta determines the position of this term's TermFreqs within the .doc file.
* In particular, it is the difference of file offset between this term's
* data and previous term's data (or zero, for the first term in the block).On disk it is
* stored as the difference from previous value in sequence. </li>
* <li>PosFPDelta determines the position of this term's TermPositions within the .pos file.
* While PayFPDelta determines the position of this term's <TermPayloads, TermOffsets?> within
* the .pay file. Similar to DocFPDelta, it is the difference between two file positions (or
* neglected, for fields that omit payloads and offsets).</li>
* <li>PosVIntBlockFPDelta determines the position of this term's last TermPosition in last pos packed
* block within the .pos file. It is synonym for PayVIntBlockFPDelta or OffsetVIntBlockFPDelta.
* This is actually used to indicate whether it is necessary to load following
* payloads and offsets from .pos instead of .pay. Every time a new block of positions are to be
* loaded, the PostingsReader will use this value to check whether current block is packed format
* or VInt. When packed format, payloads and offsets are fetched from .pay, otherwise from .pos.
* (this value is neglected when total number of positions i.e. totalTermFreq is less or equal
* to PackedBlockSize).
* <li>SkipFPDelta determines the position of this term's SkipData within the .doc
* file. In particular, it is the length of the TermFreq data.
* SkipDelta is only stored if DocFreq is not smaller than SkipMinimum
* (i.e. 128 in Lucene50PostingsFormat).</li>
* <li>SingletonDocID is an optimization when a term only appears in one document. In this case, instead
* of writing a file pointer to the .doc file (DocFPDelta), and then a VIntBlock at that location, the
* single document ID is written to the term dictionary.</li>
* </ul>
* </dd>
* </dl>
*
* <a name="Termindex"></a>
* <dl>
* <dd>
* <b>Term Index</b>
* <p>The .tip file contains an index into the term dictionary, so that it can be
* accessed randomly. See {@link BlockTreeTermsWriter} for more details on the format.
* </dd>
* </dl>
*
*
* <a name="Frequencies"></a>
* <dl>
* <dd>
* <b>Frequencies and Skip Data</b>
*
* <p>The .doc file contains the lists of documents which contain each term, along
* with the frequency of the term in that document (except when frequencies are
* omitted: {@link IndexOptions#DOCS}). It also saves skip data to the beginning of
* each packed or VInt block, when the length of document list is larger than packed block size.</p>
*
* <ul>
* <li>docFile(.doc) --> Header, <TermFreqs, SkipData?><sup>TermCount</sup>, Footer</li>
* <li>Header --> {@link CodecUtil#writeIndexHeader IndexHeader}</li>
* <li>TermFreqs --> <PackedBlock> <sup>PackedDocBlockNum</sup>,
* VIntBlock? </li>
* <li>PackedBlock --> PackedDocDeltaBlock, PackedFreqBlock?
* <li>VIntBlock --> <DocDelta[, Freq?]><sup>DocFreq-PackedBlockSize*PackedDocBlockNum</sup>
* <li>SkipData --> <<SkipLevelLength, SkipLevel>
* <sup>NumSkipLevels-1</sup>, SkipLevel>, SkipDatum?</li>
* <li>SkipLevel --> <SkipDatum> <sup>TrimmedDocFreq/(PackedBlockSize^(Level + 1))</sup></li>
* <li>SkipDatum --> DocSkip, DocFPSkip, <PosFPSkip, PosBlockOffset, PayLength?,
* PayFPSkip?>?, SkipChildLevelPointer?</li>
* <li>PackedDocDeltaBlock, PackedFreqBlock --> {@link PackedInts PackedInts}</li>
* <li>DocDelta, Freq, DocSkip, DocFPSkip, PosFPSkip, PosBlockOffset, PayByteUpto, PayFPSkip
* -->
* {@link DataOutput#writeVInt VInt}</li>
* <li>SkipChildLevelPointer --> {@link DataOutput#writeVLong VLong}</li>
* <li>Footer --> {@link CodecUtil#writeFooter CodecFooter}</li>
* </ul>
* <p>Notes:
* <ul>
* <li>PackedDocDeltaBlock is theoretically generated from two steps:
* <ol>
* <li>Calculate the difference between each document number and previous one,
* and get a d-gaps list (for the first document, use absolute value); </li>
* <li>For those d-gaps from first one to PackedDocBlockNum*PackedBlockSize<sup>th</sup>,
* separately encode as packed blocks.</li>
* </ol>
* If frequencies are not omitted, PackedFreqBlock will be generated without d-gap step.
* </li>
* <li>VIntBlock stores remaining d-gaps (along with frequencies when possible) with a format
* that encodes DocDelta and Freq:
* <p>DocDelta: if frequencies are indexed, this determines both the document
* number and the frequency. In particular, DocDelta/2 is the difference between
* this document number and the previous document number (or zero when this is the
* first document in a TermFreqs). When DocDelta is odd, the frequency is one.
* When DocDelta is even, the frequency is read as another VInt. If frequencies
* are omitted, DocDelta contains the gap (not multiplied by 2) between document
* numbers and no frequency information is stored.</p>
* <p>For example, the TermFreqs for a term which occurs once in document seven
* and three times in document eleven, with frequencies indexed, would be the
* following sequence of VInts:</p>
* <p>15, 8, 3</p>
* <p>If frequencies were omitted ({@link IndexOptions#DOCS}) it would be this
* sequence of VInts instead:</p>
* <p>7,4</p>
* </li>
* <li>PackedDocBlockNum is the number of packed blocks for current term's docids or frequencies.
* In particular, PackedDocBlockNum = floor(DocFreq/PackedBlockSize) </li>
* <li>TrimmedDocFreq = DocFreq % PackedBlockSize == 0 ? DocFreq - 1 : DocFreq.
* We use this trick since the definition of skip entry is a little different from base interface.
* In {@link MultiLevelSkipListWriter}, skip data is assumed to be saved for
* skipInterval<sup>th</sup>, 2*skipInterval<sup>th</sup> ... posting in the list. However,
* in Lucene50PostingsFormat, the skip data is saved for skipInterval+1<sup>th</sup>,
* 2*skipInterval+1<sup>th</sup> ... posting (skipInterval==PackedBlockSize in this case).
* When DocFreq is multiple of PackedBlockSize, MultiLevelSkipListWriter will expect one
* more skip data than Lucene50SkipWriter. </li>
* <li>SkipDatum is the metadata of one skip entry.
* For the first block (no matter packed or VInt), it is omitted.</li>
* <li>DocSkip records the document number of every PackedBlockSize<sup>th</sup> document number in
* the postings (i.e. last document number in each packed block). On disk it is stored as the
* difference from previous value in the sequence. </li>
* <li>DocFPSkip records the file offsets of each block (excluding )posting at
* PackedBlockSize+1<sup>th</sup>, 2*PackedBlockSize+1<sup>th</sup> ... , in DocFile.
* The file offsets are relative to the start of current term's TermFreqs.
* On disk it is also stored as the difference from previous SkipDatum in the sequence.</li>
* <li>Since positions and payloads are also block encoded, the skip should skip to related block first,
* then fetch the values according to in-block offset. PosFPSkip and PayFPSkip record the file
* offsets of related block in .pos and .pay, respectively. While PosBlockOffset indicates
* which value to fetch inside the related block (PayBlockOffset is unnecessary since it is always
* equal to PosBlockOffset). Same as DocFPSkip, the file offsets are relative to the start of
* current term's TermFreqs, and stored as a difference sequence.</li>
* <li>PayByteUpto indicates the start offset of the current payload. It is equivalent to
* the sum of the payload lengths in the current block up to PosBlockOffset</li>
* </ul>
* </dd>
* </dl>
*
* <a name="Positions"></a>
* <dl>
* <dd>
* <b>Positions</b>
* <p>The .pos file contains the lists of positions that each term occurs at within documents. It also
* sometimes stores part of payloads and offsets for speedup.</p>
* <ul>
* <li>PosFile(.pos) --> Header, <TermPositions> <sup>TermCount</sup>, Footer</li>
* <li>Header --> {@link CodecUtil#writeIndexHeader IndexHeader}</li>
* <li>TermPositions --> <PackedPosDeltaBlock> <sup>PackedPosBlockNum</sup>,
* VIntBlock? </li>
* <li>VIntBlock --> <PositionDelta[, PayloadLength?], PayloadData?,
* OffsetDelta?, OffsetLength?><sup>PosVIntCount</sup>
* <li>PackedPosDeltaBlock --> {@link PackedInts PackedInts}</li>
* <li>PositionDelta, OffsetDelta, OffsetLength -->
* {@link DataOutput#writeVInt VInt}</li>
* <li>PayloadData --> {@link DataOutput#writeByte byte}<sup>PayLength</sup></li>
* <li>Footer --> {@link CodecUtil#writeFooter CodecFooter}</li>
* </ul>
* <p>Notes:
* <ul>
* <li>TermPositions are order by term (terms are implicit, from the term dictionary), and position
* values for each term document pair are incremental, and ordered by document number.</li>
* <li>PackedPosBlockNum is the number of packed blocks for current term's positions, payloads or offsets.
* In particular, PackedPosBlockNum = floor(totalTermFreq/PackedBlockSize) </li>
* <li>PosVIntCount is the number of positions encoded as VInt format. In particular,
* PosVIntCount = totalTermFreq - PackedPosBlockNum*PackedBlockSize</li>
* <li>The procedure how PackedPosDeltaBlock is generated is the same as PackedDocDeltaBlock
* in chapter <a href="#Frequencies">Frequencies and Skip Data</a>.</li>
* <li>PositionDelta is, if payloads are disabled for the term's field, the
* difference between the position of the current occurrence in the document and
* the previous occurrence (or zero, if this is the first occurrence in this
* document). If payloads are enabled for the term's field, then PositionDelta/2
* is the difference between the current and the previous position. If payloads
* are enabled and PositionDelta is odd, then PayloadLength is stored, indicating
* the length of the payload at the current term position.</li>
* <li>For example, the TermPositions for a term which occurs as the fourth term in
* one document, and as the fifth and ninth term in a subsequent document, would
* be the following sequence of VInts (payloads disabled):
* <p>4, 5, 4</p></li>
* <li>PayloadData is metadata associated with the current term position. If
* PayloadLength is stored at the current position, then it indicates the length
* of this payload. If PayloadLength is not stored, then this payload has the same
* length as the payload at the previous position.</li>
* <li>OffsetDelta/2 is the difference between this position's startOffset from the
* previous occurrence (or zero, if this is the first occurrence in this document).
* If OffsetDelta is odd, then the length (endOffset-startOffset) differs from the
* previous occurrence and an OffsetLength follows. Offset data is only written for
* {@link IndexOptions#DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS}.</li>
* </ul>
* </dd>
* </dl>
*
* <a name="Payloads"></a>
* <dl>
* <dd>
* <b>Payloads and Offsets</b>
* <p>The .pay file will store payloads and offsets associated with certain term-document positions.
* Some payloads and offsets will be separated out into .pos file, for performance reasons.
* <ul>
* <li>PayFile(.pay): --> Header, <TermPayloads, TermOffsets?> <sup>TermCount</sup>, Footer</li>
* <li>Header --> {@link CodecUtil#writeIndexHeader IndexHeader}</li>
* <li>TermPayloads --> <PackedPayLengthBlock, SumPayLength, PayData> <sup>PackedPayBlockNum</sup>
* <li>TermOffsets --> <PackedOffsetStartDeltaBlock, PackedOffsetLengthBlock> <sup>PackedPayBlockNum</sup>
* <li>PackedPayLengthBlock, PackedOffsetStartDeltaBlock, PackedOffsetLengthBlock --> {@link PackedInts PackedInts}</li>
* <li>SumPayLength --> {@link DataOutput#writeVInt VInt}</li>
* <li>PayData --> {@link DataOutput#writeByte byte}<sup>SumPayLength</sup></li>
* <li>Footer --> {@link CodecUtil#writeFooter CodecFooter}</li>
* </ul>
* <p>Notes:
* <ul>
* <li>The order of TermPayloads/TermOffsets will be the same as TermPositions, note that part of
* payload/offsets are stored in .pos.</li>
* <li>The procedure how PackedPayLengthBlock and PackedOffsetLengthBlock are generated is the
* same as PackedFreqBlock in chapter <a href="#Frequencies">Frequencies and Skip Data</a>.
* While PackedStartDeltaBlock follows a same procedure as PackedDocDeltaBlock.</li>
* <li>PackedPayBlockNum is always equal to PackedPosBlockNum, for the same term. It is also synonym
* for PackedOffsetBlockNum.</li>
* <li>SumPayLength is the total length of payloads written within one block, should be the sum
* of PayLengths in one packed block.</li>
* <li>PayLength in PackedPayLengthBlock is the length of each payload associated with the current
* position.</li>
* </ul>
* </dd>
* </dl>
*
* @lucene.experimental
*/
public final class Lucene50PostingsFormat extends PostingsFormat {
/**
* Filename extension for document number, frequencies, and skip data.
* See chapter: <a href="#Frequencies">Frequencies and Skip Data</a>
*/
public static final String DOC_EXTENSION = "doc";
/**
* Filename extension for positions.
* See chapter: <a href="#Positions">Positions</a>
*/
public static final String POS_EXTENSION = "pos";
/**
* Filename extension for payloads and offsets.
* See chapter: <a href="#Payloads">Payloads and Offsets</a>
*/
public static final String PAY_EXTENSION = "pay";
/** Attribute key for fst mode. */
static final String MODE_KEY = Lucene50PostingsFormat.class.getSimpleName() + ".fstMode";
/**
* Expert: The maximum number of skip levels. Smaller values result in
* slightly smaller indexes, but slower skipping in big posting lists.
*/
static final int MAX_SKIP_LEVELS = 10;
final static String TERMS_CODEC = "Lucene50PostingsWriterTerms";
final static String DOC_CODEC = "Lucene50PostingsWriterDoc";
final static String POS_CODEC = "Lucene50PostingsWriterPos";
final static String PAY_CODEC = "Lucene50PostingsWriterPay";
// Increment version to change it
final static int VERSION_START = 0;
final static int VERSION_IMPACT_SKIP_DATA = 1;
final static int VERSION_CURRENT = VERSION_IMPACT_SKIP_DATA;
private final int minTermBlockSize;
private final int maxTermBlockSize;
private final FSTLoadMode fstLoadMode;
/**
* An enum that allows to control if term index FSTs are loaded into memory or read off-heap
*/
public enum FSTLoadMode {
/**
* Always read FSTs from disk.
* NOTE: If this option is used the FST will be read off-heap even if buffered directory implementations
* are used.
*/
OFF_HEAP,
/**
* Never read FSTs from disk ie. all fields FSTs are loaded into memory
*/
ON_HEAP,
/**
* Always read FSTs from disk.
* An exception is made for ID fields in an IndexWriter context which are always loaded into memory.
* This is useful to guarantee best update performance even if a non MMapDirectory is used.
* NOTE: If this option is used the FST will be read off-heap even if buffered directory implementations
* are used.
* See {@link FSTLoadMode#AUTO}
*/
OPTIMIZE_UPDATES_OFF_HEAP,
/**
* Automatically make the decision if FSTs are read from disk depending if the segment read from an MMAPDirectory
* An exception is made for ID fields in an IndexWriter context which are always loaded into memory.
*/
AUTO
}
/**
* Fixed packed block size, number of integers encoded in
* a single packed block.
*/
// NOTE: must be multiple of 64 because of PackedInts long-aligned encoding/decoding
public final static int BLOCK_SIZE = 128;
/** Creates {@code Lucene50PostingsFormat} with default
* settings. */
public Lucene50PostingsFormat() {
this(BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE, BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE, FSTLoadMode.AUTO);
}
/** Creates {@code Lucene50PostingsFormat} with custom
* values for {@code minBlockSize} and {@code
* maxBlockSize} passed to block terms dictionary.
* @see BlockTreeTermsWriter#BlockTreeTermsWriter(SegmentWriteState,PostingsWriterBase,int,int) */
public Lucene50PostingsFormat(int minTermBlockSize, int maxTermBlockSize, FSTLoadMode fstLoadMode) {
super("Lucene50");
BlockTreeTermsWriter.validateSettings(minTermBlockSize, maxTermBlockSize);
this.minTermBlockSize = minTermBlockSize;
this.maxTermBlockSize = maxTermBlockSize;
this.fstLoadMode = fstLoadMode;
}
@Override
public String toString() {
return getName() + "(blocksize=" + BLOCK_SIZE + ")";
}
@Override
public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
PostingsWriterBase postingsWriter = new Lucene50PostingsWriter(state);
state.segmentInfo.putAttribute(MODE_KEY, fstLoadMode.name());
boolean success = false;
try {
FieldsConsumer ret = new BlockTreeTermsWriter(state,
postingsWriter,
minTermBlockSize,
maxTermBlockSize);
success = true;
return ret;
} finally {
if (!success) {
IOUtils.closeWhileHandlingException(postingsWriter);
}
}
}
@Override
public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
PostingsReaderBase postingsReader = new Lucene50PostingsReader(state);
String fstLoadModeKey = state.segmentInfo.getAttribute(MODE_KEY);
FSTLoadMode fstLoadMode = FSTLoadMode.AUTO;
if (fstLoadModeKey != null) {
fstLoadMode = FSTLoadMode.valueOf(fstLoadModeKey);
}
boolean success = false;
try {
FieldsProducer ret = new BlockTreeTermsReader(postingsReader, state, fstLoadMode);
success = true;
return ret;
} finally {
if (!success) {
IOUtils.closeWhileHandlingException(postingsReader);
}
}
}
final static class IntBlockTermState extends BlockTermState {
long docStartFP = 0;
long posStartFP = 0;
long payStartFP = 0;
long skipOffset = -1;
long lastPosBlockOffset = -1;
// docid when there is a single pulsed posting, otherwise -1
// freq is always implicitly totalTermFreq in this case.
int singletonDocID = -1;
@Override
public IntBlockTermState clone() {
IntBlockTermState other = new IntBlockTermState();
other.copyFrom(this);
return other;
}
@Override
public void copyFrom(TermState _other) {
super.copyFrom(_other);
IntBlockTermState other = (IntBlockTermState) _other;
docStartFP = other.docStartFP;
posStartFP = other.posStartFP;
payStartFP = other.payStartFP;
lastPosBlockOffset = other.lastPosBlockOffset;
skipOffset = other.skipOffset;
singletonDocID = other.singletonDocID;
}
@Override
public String toString() {
return super.toString() + " docStartFP=" + docStartFP + " posStartFP=" + posStartFP + " payStartFP=" + payStartFP + " lastPosBlockOffset=" + lastPosBlockOffset + " singletonDocID=" + singletonDocID;
}
}
}
| 1 | 28,923 | It's a little spooky that this method throws exception if you try to set the attribute to a different value than it was set before, but then does leave the new value set in the attributes? | apache-lucene-solr | java |
@@ -48,7 +48,7 @@ public class PodDBAdapter {
private static final String TAG = "PodDBAdapter";
public static final String DATABASE_NAME = "Antennapod.db";
- public static final int VERSION = 1090000;
+ public static final int VERSION = 1091000;
/**
* Maximum number of arguments for IN-operator. | 1 | package de.danoeh.antennapod.core.storage;
import android.annotation.SuppressLint;
import android.content.ContentValues;
import android.content.Context;
import android.database.Cursor;
import android.database.DatabaseErrorHandler;
import android.database.DatabaseUtils;
import android.database.DefaultDatabaseErrorHandler;
import android.database.MergeCursor;
import android.database.SQLException;
import android.database.sqlite.SQLiteDatabase;
import android.database.sqlite.SQLiteDatabase.CursorFactory;
import android.database.sqlite.SQLiteOpenHelper;
import android.text.TextUtils;
import android.util.Log;
import androidx.annotation.Nullable;
import org.apache.commons.io.FileUtils;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import de.danoeh.antennapod.core.feed.Chapter;
import de.danoeh.antennapod.core.feed.Feed;
import de.danoeh.antennapod.core.feed.FeedItem;
import de.danoeh.antennapod.core.feed.FeedMedia;
import de.danoeh.antennapod.core.feed.FeedPreferences;
import de.danoeh.antennapod.core.preferences.UserPreferences;
import de.danoeh.antennapod.core.service.download.DownloadStatus;
import de.danoeh.antennapod.core.util.LongIntMap;
import de.danoeh.antennapod.core.util.SortOrder;
import static de.danoeh.antennapod.core.feed.FeedPreferences.SPEED_USE_GLOBAL;
import static de.danoeh.antennapod.core.util.SortOrder.toCodeString;
// TODO Remove media column from feeditem table
/**
* Implements methods for accessing the database
*/
public class PodDBAdapter {
private static final String TAG = "PodDBAdapter";
public static final String DATABASE_NAME = "Antennapod.db";
public static final int VERSION = 1090000;
/**
     * Maximum number of arguments for the SQL IN operator.
*/
private static final int IN_OPERATOR_MAXIMUM = 800;
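    // The IN-lists built by this class are chunked at this size, presumably to stay safely
    // below SQLite's host-parameter limit (historically 999 by default); the exact limit
    // depends on the SQLite build shipped with the Android version.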
// Key-constants
public static final String KEY_ID = "id";
public static final String KEY_TITLE = "title";
public static final String KEY_CUSTOM_TITLE = "custom_title";
public static final String KEY_LINK = "link";
public static final String KEY_DESCRIPTION = "description";
public static final String KEY_FILE_URL = "file_url";
public static final String KEY_DOWNLOAD_URL = "download_url";
public static final String KEY_PUBDATE = "pubDate";
public static final String KEY_READ = "read";
public static final String KEY_DURATION = "duration";
public static final String KEY_POSITION = "position";
public static final String KEY_SIZE = "filesize";
public static final String KEY_MIME_TYPE = "mime_type";
public static final String KEY_IMAGE_URL = "image_url";
public static final String KEY_FEED = "feed";
public static final String KEY_MEDIA = "media";
public static final String KEY_DOWNLOADED = "downloaded";
public static final String KEY_LASTUPDATE = "last_update";
public static final String KEY_FEEDFILE = "feedfile";
public static final String KEY_REASON = "reason";
public static final String KEY_SUCCESSFUL = "successful";
public static final String KEY_FEEDFILETYPE = "feedfile_type";
public static final String KEY_COMPLETION_DATE = "completion_date";
public static final String KEY_FEEDITEM = "feeditem";
public static final String KEY_CONTENT_ENCODED = "content_encoded";
public static final String KEY_PAYMENT_LINK = "payment_link";
public static final String KEY_START = "start";
public static final String KEY_LANGUAGE = "language";
public static final String KEY_AUTHOR = "author";
public static final String KEY_HAS_CHAPTERS = "has_simple_chapters";
public static final String KEY_TYPE = "type";
public static final String KEY_ITEM_IDENTIFIER = "item_identifier";
public static final String KEY_FEED_IDENTIFIER = "feed_identifier";
public static final String KEY_REASON_DETAILED = "reason_detailed";
public static final String KEY_DOWNLOADSTATUS_TITLE = "title";
public static final String KEY_CHAPTER_TYPE = "type";
public static final String KEY_PLAYBACK_COMPLETION_DATE = "playback_completion_date";
public static final String KEY_AUTO_DOWNLOAD = "auto_download";
public static final String KEY_KEEP_UPDATED = "keep_updated";
public static final String KEY_AUTO_DELETE_ACTION = "auto_delete_action";
public static final String KEY_FEED_VOLUME_ADAPTION = "feed_volume_adaption";
public static final String KEY_PLAYED_DURATION = "played_duration";
public static final String KEY_USERNAME = "username";
public static final String KEY_PASSWORD = "password";
public static final String KEY_IS_PAGED = "is_paged";
public static final String KEY_NEXT_PAGE_LINK = "next_page_link";
public static final String KEY_HIDE = "hide";
public static final String KEY_SORT_ORDER = "sort_order";
public static final String KEY_LAST_UPDATE_FAILED = "last_update_failed";
public static final String KEY_HAS_EMBEDDED_PICTURE = "has_embedded_picture";
public static final String KEY_LAST_PLAYED_TIME = "last_played_time";
public static final String KEY_INCLUDE_FILTER = "include_filter";
public static final String KEY_EXCLUDE_FILTER = "exclude_filter";
public static final String KEY_FEED_PLAYBACK_SPEED = "feed_playback_speed";
// Table names
static final String TABLE_NAME_FEEDS = "Feeds";
static final String TABLE_NAME_FEED_ITEMS = "FeedItems";
static final String TABLE_NAME_FEED_IMAGES = "FeedImages";
static final String TABLE_NAME_FEED_MEDIA = "FeedMedia";
static final String TABLE_NAME_DOWNLOAD_LOG = "DownloadLog";
static final String TABLE_NAME_QUEUE = "Queue";
static final String TABLE_NAME_SIMPLECHAPTERS = "SimpleChapters";
static final String TABLE_NAME_FAVORITES = "Favorites";
// SQL Statements for creating new tables
private static final String TABLE_PRIMARY_KEY = KEY_ID
+ " INTEGER PRIMARY KEY AUTOINCREMENT ,";
private static final String CREATE_TABLE_FEEDS = "CREATE TABLE "
+ TABLE_NAME_FEEDS + " (" + TABLE_PRIMARY_KEY + KEY_TITLE
+ " TEXT," + KEY_CUSTOM_TITLE + " TEXT," + KEY_FILE_URL + " TEXT," + KEY_DOWNLOAD_URL + " TEXT,"
+ KEY_DOWNLOADED + " INTEGER," + KEY_LINK + " TEXT,"
+ KEY_DESCRIPTION + " TEXT," + KEY_PAYMENT_LINK + " TEXT,"
+ KEY_LASTUPDATE + " TEXT," + KEY_LANGUAGE + " TEXT," + KEY_AUTHOR
+ " TEXT," + KEY_IMAGE_URL + " TEXT," + KEY_TYPE + " TEXT,"
+ KEY_FEED_IDENTIFIER + " TEXT," + KEY_AUTO_DOWNLOAD + " INTEGER DEFAULT 1,"
+ KEY_USERNAME + " TEXT,"
+ KEY_PASSWORD + " TEXT,"
+ KEY_INCLUDE_FILTER + " TEXT DEFAULT '',"
+ KEY_EXCLUDE_FILTER + " TEXT DEFAULT '',"
+ KEY_KEEP_UPDATED + " INTEGER DEFAULT 1,"
+ KEY_IS_PAGED + " INTEGER DEFAULT 0,"
+ KEY_NEXT_PAGE_LINK + " TEXT,"
+ KEY_HIDE + " TEXT,"
+ KEY_SORT_ORDER + " TEXT,"
+ KEY_LAST_UPDATE_FAILED + " INTEGER DEFAULT 0,"
+ KEY_AUTO_DELETE_ACTION + " INTEGER DEFAULT 0,"
+ KEY_FEED_PLAYBACK_SPEED + " REAL DEFAULT " + SPEED_USE_GLOBAL + ","
+ KEY_FEED_VOLUME_ADAPTION + " INTEGER DEFAULT 0)";
private static final String CREATE_TABLE_FEED_ITEMS = "CREATE TABLE "
+ TABLE_NAME_FEED_ITEMS + " (" + TABLE_PRIMARY_KEY + KEY_TITLE
+ " TEXT," + KEY_CONTENT_ENCODED + " TEXT," + KEY_PUBDATE
+ " INTEGER," + KEY_READ + " INTEGER," + KEY_LINK + " TEXT,"
+ KEY_DESCRIPTION + " TEXT," + KEY_PAYMENT_LINK + " TEXT,"
+ KEY_MEDIA + " INTEGER," + KEY_FEED + " INTEGER,"
+ KEY_HAS_CHAPTERS + " INTEGER," + KEY_ITEM_IDENTIFIER + " TEXT,"
+ KEY_IMAGE_URL + " TEXT,"
+ KEY_AUTO_DOWNLOAD + " INTEGER)";
private static final String CREATE_TABLE_FEED_MEDIA = "CREATE TABLE "
+ TABLE_NAME_FEED_MEDIA + " (" + TABLE_PRIMARY_KEY + KEY_DURATION
+ " INTEGER," + KEY_FILE_URL + " TEXT," + KEY_DOWNLOAD_URL
+ " TEXT," + KEY_DOWNLOADED + " INTEGER," + KEY_POSITION
+ " INTEGER," + KEY_SIZE + " INTEGER," + KEY_MIME_TYPE + " TEXT,"
+ KEY_PLAYBACK_COMPLETION_DATE + " INTEGER,"
+ KEY_FEEDITEM + " INTEGER,"
+ KEY_PLAYED_DURATION + " INTEGER,"
+ KEY_HAS_EMBEDDED_PICTURE + " INTEGER,"
+ KEY_LAST_PLAYED_TIME + " INTEGER" + ")";
private static final String CREATE_TABLE_DOWNLOAD_LOG = "CREATE TABLE "
+ TABLE_NAME_DOWNLOAD_LOG + " (" + TABLE_PRIMARY_KEY + KEY_FEEDFILE
+ " INTEGER," + KEY_FEEDFILETYPE + " INTEGER," + KEY_REASON
+ " INTEGER," + KEY_SUCCESSFUL + " INTEGER," + KEY_COMPLETION_DATE
+ " INTEGER," + KEY_REASON_DETAILED + " TEXT,"
+ KEY_DOWNLOADSTATUS_TITLE + " TEXT)";
private static final String CREATE_TABLE_QUEUE = "CREATE TABLE "
+ TABLE_NAME_QUEUE + "(" + KEY_ID + " INTEGER PRIMARY KEY,"
+ KEY_FEEDITEM + " INTEGER," + KEY_FEED + " INTEGER)";
private static final String CREATE_TABLE_SIMPLECHAPTERS = "CREATE TABLE "
+ TABLE_NAME_SIMPLECHAPTERS + " (" + TABLE_PRIMARY_KEY + KEY_TITLE
+ " TEXT," + KEY_START + " INTEGER," + KEY_FEEDITEM + " INTEGER,"
+ KEY_LINK + " TEXT," + KEY_IMAGE_URL + " TEXT," + KEY_CHAPTER_TYPE + " INTEGER)";
// SQL Statements for creating indexes
static final String CREATE_INDEX_FEEDITEMS_FEED = "CREATE INDEX "
+ TABLE_NAME_FEED_ITEMS + "_" + KEY_FEED + " ON " + TABLE_NAME_FEED_ITEMS + " ("
+ KEY_FEED + ")";
static final String CREATE_INDEX_FEEDITEMS_PUBDATE = "CREATE INDEX IF NOT EXISTS "
+ TABLE_NAME_FEED_ITEMS + "_" + KEY_PUBDATE + " ON " + TABLE_NAME_FEED_ITEMS + " ("
+ KEY_PUBDATE + ")";
static final String CREATE_INDEX_FEEDITEMS_READ = "CREATE INDEX IF NOT EXISTS "
+ TABLE_NAME_FEED_ITEMS + "_" + KEY_READ + " ON " + TABLE_NAME_FEED_ITEMS + " ("
+ KEY_READ + ")";
static final String CREATE_INDEX_QUEUE_FEEDITEM = "CREATE INDEX "
+ TABLE_NAME_QUEUE + "_" + KEY_FEEDITEM + " ON " + TABLE_NAME_QUEUE + " ("
+ KEY_FEEDITEM + ")";
static final String CREATE_INDEX_FEEDMEDIA_FEEDITEM = "CREATE INDEX "
+ TABLE_NAME_FEED_MEDIA + "_" + KEY_FEEDITEM + " ON " + TABLE_NAME_FEED_MEDIA + " ("
+ KEY_FEEDITEM + ")";
static final String CREATE_INDEX_SIMPLECHAPTERS_FEEDITEM = "CREATE INDEX "
+ TABLE_NAME_SIMPLECHAPTERS + "_" + KEY_FEEDITEM + " ON " + TABLE_NAME_SIMPLECHAPTERS + " ("
+ KEY_FEEDITEM + ")";
static final String CREATE_TABLE_FAVORITES = "CREATE TABLE "
+ TABLE_NAME_FAVORITES + "(" + KEY_ID + " INTEGER PRIMARY KEY,"
+ KEY_FEEDITEM + " INTEGER," + KEY_FEED + " INTEGER)";
/**
* Select all columns from the feed-table
*/
private static final String[] FEED_SEL_STD = {
TABLE_NAME_FEEDS + "." + KEY_ID,
TABLE_NAME_FEEDS + "." + KEY_TITLE,
TABLE_NAME_FEEDS + "." + KEY_CUSTOM_TITLE,
TABLE_NAME_FEEDS + "." + KEY_FILE_URL,
TABLE_NAME_FEEDS + "." + KEY_DOWNLOAD_URL,
TABLE_NAME_FEEDS + "." + KEY_DOWNLOADED,
TABLE_NAME_FEEDS + "." + KEY_LINK,
TABLE_NAME_FEEDS + "." + KEY_DESCRIPTION,
TABLE_NAME_FEEDS + "." + KEY_PAYMENT_LINK,
TABLE_NAME_FEEDS + "." + KEY_LASTUPDATE,
TABLE_NAME_FEEDS + "." + KEY_LANGUAGE,
TABLE_NAME_FEEDS + "." + KEY_AUTHOR,
TABLE_NAME_FEEDS + "." + KEY_IMAGE_URL,
TABLE_NAME_FEEDS + "." + KEY_TYPE,
TABLE_NAME_FEEDS + "." + KEY_FEED_IDENTIFIER,
TABLE_NAME_FEEDS + "." + KEY_AUTO_DOWNLOAD,
TABLE_NAME_FEEDS + "." + KEY_KEEP_UPDATED,
TABLE_NAME_FEEDS + "." + KEY_IS_PAGED,
TABLE_NAME_FEEDS + "." + KEY_NEXT_PAGE_LINK,
TABLE_NAME_FEEDS + "." + KEY_USERNAME,
TABLE_NAME_FEEDS + "." + KEY_PASSWORD,
TABLE_NAME_FEEDS + "." + KEY_HIDE,
TABLE_NAME_FEEDS + "." + KEY_SORT_ORDER,
TABLE_NAME_FEEDS + "." + KEY_LAST_UPDATE_FAILED,
TABLE_NAME_FEEDS + "." + KEY_AUTO_DELETE_ACTION,
TABLE_NAME_FEEDS + "." + KEY_FEED_VOLUME_ADAPTION,
TABLE_NAME_FEEDS + "." + KEY_INCLUDE_FILTER,
TABLE_NAME_FEEDS + "." + KEY_EXCLUDE_FILTER,
TABLE_NAME_FEEDS + "." + KEY_FEED_PLAYBACK_SPEED
};
/**
* Select all columns from the feeditems-table except description and
* content-encoded.
*/
private static final String[] FEEDITEM_SEL_FI_SMALL = {
TABLE_NAME_FEED_ITEMS + "." + KEY_ID,
TABLE_NAME_FEED_ITEMS + "." + KEY_TITLE,
TABLE_NAME_FEED_ITEMS + "." + KEY_PUBDATE,
TABLE_NAME_FEED_ITEMS + "." + KEY_READ,
TABLE_NAME_FEED_ITEMS + "." + KEY_LINK,
TABLE_NAME_FEED_ITEMS + "." + KEY_PAYMENT_LINK,
TABLE_NAME_FEED_ITEMS + "." + KEY_MEDIA,
TABLE_NAME_FEED_ITEMS + "." + KEY_FEED,
TABLE_NAME_FEED_ITEMS + "." + KEY_HAS_CHAPTERS,
TABLE_NAME_FEED_ITEMS + "." + KEY_ITEM_IDENTIFIER,
TABLE_NAME_FEED_ITEMS + "." + KEY_IMAGE_URL,
TABLE_NAME_FEED_ITEMS + "." + KEY_AUTO_DOWNLOAD
};
/**
* All the tables in the database
*/
private static final String[] ALL_TABLES = {
TABLE_NAME_FEEDS,
TABLE_NAME_FEED_ITEMS,
TABLE_NAME_FEED_MEDIA,
TABLE_NAME_DOWNLOAD_LOG,
TABLE_NAME_QUEUE,
TABLE_NAME_SIMPLECHAPTERS,
TABLE_NAME_FAVORITES
};
    /**
     * Contains FEEDITEM_SEL_FI_SMALL as a comma-separated list. Useful for raw queries.
     */
    private static final String SEL_FI_SMALL_STR;
    /**
     * Contains FEED_SEL_STD as a comma-separated list. Useful for raw queries.
     */
    private static final String FEED_SEL_STD_STR;
static {
String selFiSmall = Arrays.toString(FEEDITEM_SEL_FI_SMALL);
SEL_FI_SMALL_STR = selFiSmall.substring(1, selFiSmall.length() - 1);
String selFeedSmall = Arrays.toString(FEED_SEL_STD);
FEED_SEL_STD_STR = selFeedSmall.substring(1, selFeedSmall.length() - 1);
}
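    // Minimal sketch of how the comma-separated lists above are meant to be used in a raw
    // query (feedId is a hypothetical caller-supplied feed id):
    //   db.rawQuery("SELECT " + SEL_FI_SMALL_STR + " FROM " + TABLE_NAME_FEED_ITEMS
    //           + " WHERE " + KEY_FEED + "=?", new String[]{String.valueOf(feedId)});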
/**
     * Select id, description, content-encoded and feed columns from feeditems.
*/
private static final String[] SEL_FI_EXTRA = {KEY_ID, KEY_DESCRIPTION,
KEY_CONTENT_ENCODED, KEY_FEED};
private static Context context;
private static volatile SQLiteDatabase db;
public static void init(Context context) {
PodDBAdapter.context = context.getApplicationContext();
}
// Bill Pugh Singleton Implementation
private static class SingletonHolder {
private static final PodDBHelper dbHelper = new PodDBHelper(PodDBAdapter.context, DATABASE_NAME, null);
private static final PodDBAdapter dbAdapter = new PodDBAdapter();
}
public static PodDBAdapter getInstance() {
return SingletonHolder.dbAdapter;
}
private PodDBAdapter() {
}
public synchronized PodDBAdapter open() {
if (db == null || !db.isOpen() || db.isReadOnly()) {
db = openDb();
}
return this;
}
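    // Typical usage sketch (feed is a caller-supplied Feed instance):
    //   PodDBAdapter adapter = PodDBAdapter.getInstance();
    //   adapter.open();
    //   try {
    //       adapter.setCompleteFeed(feed);
    //   } finally {
    //       adapter.close();
    //   }
    // Note that close() is currently a no-op, so the shared connection stays open.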
@SuppressLint("NewApi")
private SQLiteDatabase openDb() {
SQLiteDatabase newDb;
try {
newDb = SingletonHolder.dbHelper.getWritableDatabase();
newDb.disableWriteAheadLogging();
} catch (SQLException ex) {
Log.e(TAG, Log.getStackTraceString(ex));
newDb = SingletonHolder.dbHelper.getReadableDatabase();
}
return newDb;
}
public synchronized void close() {
// do nothing
}
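    /**
     * Deletes the rows of all tables listed in ALL_TABLES. Note that this clears the
     * database contents but does not remove the database file itself.
     *
     * @return true once the table contents have been cleared
     */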
public static boolean deleteDatabase() {
PodDBAdapter adapter = getInstance();
adapter.open();
try {
for (String tableName : ALL_TABLES) {
db.delete(tableName, "1", null);
}
return true;
} finally {
adapter.close();
}
}
/**
* Inserts or updates a feed entry
*
* @return the id of the entry
*/
private long setFeed(Feed feed) {
ContentValues values = new ContentValues();
values.put(KEY_TITLE, feed.getFeedTitle());
values.put(KEY_LINK, feed.getLink());
values.put(KEY_DESCRIPTION, feed.getDescription());
values.put(KEY_PAYMENT_LINK, feed.getPaymentLink());
values.put(KEY_AUTHOR, feed.getAuthor());
values.put(KEY_LANGUAGE, feed.getLanguage());
values.put(KEY_IMAGE_URL, feed.getImageUrl());
values.put(KEY_FILE_URL, feed.getFile_url());
values.put(KEY_DOWNLOAD_URL, feed.getDownload_url());
values.put(KEY_DOWNLOADED, feed.isDownloaded());
values.put(KEY_LASTUPDATE, feed.getLastUpdate());
values.put(KEY_TYPE, feed.getType());
values.put(KEY_FEED_IDENTIFIER, feed.getFeedIdentifier());
values.put(KEY_IS_PAGED, feed.isPaged());
values.put(KEY_NEXT_PAGE_LINK, feed.getNextPageLink());
if (feed.getItemFilter() != null && feed.getItemFilter().getValues().length > 0) {
values.put(KEY_HIDE, TextUtils.join(",", feed.getItemFilter().getValues()));
} else {
values.put(KEY_HIDE, "");
}
values.put(KEY_SORT_ORDER, toCodeString(feed.getSortOrder()));
values.put(KEY_LAST_UPDATE_FAILED, feed.hasLastUpdateFailed());
if (feed.getId() == 0) {
// Create new entry
            Log.d(TAG, "Inserting new Feed into db");
feed.setId(db.insert(TABLE_NAME_FEEDS, null, values));
} else {
            Log.d(TAG, "Updating existing Feed in db");
db.update(TABLE_NAME_FEEDS, values, KEY_ID + "=?",
new String[]{String.valueOf(feed.getId())});
}
return feed.getId();
}
public void setFeedPreferences(FeedPreferences prefs) {
if (prefs.getFeedID() == 0) {
throw new IllegalArgumentException("Feed ID of preference must not be null");
}
ContentValues values = new ContentValues();
values.put(KEY_AUTO_DOWNLOAD, prefs.getAutoDownload());
values.put(KEY_KEEP_UPDATED, prefs.getKeepUpdated());
values.put(KEY_AUTO_DELETE_ACTION, prefs.getAutoDeleteAction().ordinal());
values.put(KEY_FEED_VOLUME_ADAPTION, prefs.getVolumeAdaptionSetting().toInteger());
values.put(KEY_USERNAME, prefs.getUsername());
values.put(KEY_PASSWORD, prefs.getPassword());
values.put(KEY_INCLUDE_FILTER, prefs.getFilter().getIncludeFilter());
values.put(KEY_EXCLUDE_FILTER, prefs.getFilter().getExcludeFilter());
values.put(KEY_FEED_PLAYBACK_SPEED, prefs.getFeedPlaybackSpeed());
db.update(TABLE_NAME_FEEDS, values, KEY_ID + "=?", new String[]{String.valueOf(prefs.getFeedID())});
}
public void setFeedItemFilter(long feedId, Set<String> filterValues) {
String valuesList = TextUtils.join(",", filterValues);
Log.d(TAG, String.format(
"setFeedItemFilter() called with: feedId = [%d], filterValues = [%s]", feedId, valuesList));
ContentValues values = new ContentValues();
values.put(KEY_HIDE, valuesList);
db.update(TABLE_NAME_FEEDS, values, KEY_ID + "=?", new String[]{String.valueOf(feedId)});
}
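    /**
     * Stores the per-feed episode sort order for the given feed (encoded via SortOrder code strings).
     */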
public void setFeedItemSortOrder(long feedId, @Nullable SortOrder sortOrder) {
ContentValues values = new ContentValues();
values.put(KEY_SORT_ORDER, toCodeString(sortOrder));
db.update(TABLE_NAME_FEEDS, values, KEY_ID + "=?", new String[]{String.valueOf(feedId)});
}
/**
* Inserts or updates a media entry
*
* @return the id of the entry
*/
public long setMedia(FeedMedia media) {
ContentValues values = new ContentValues();
values.put(KEY_DURATION, media.getDuration());
values.put(KEY_POSITION, media.getPosition());
values.put(KEY_SIZE, media.getSize());
values.put(KEY_MIME_TYPE, media.getMime_type());
values.put(KEY_DOWNLOAD_URL, media.getDownload_url());
values.put(KEY_DOWNLOADED, media.isDownloaded());
values.put(KEY_FILE_URL, media.getFile_url());
values.put(KEY_HAS_EMBEDDED_PICTURE, media.hasEmbeddedPicture());
values.put(KEY_LAST_PLAYED_TIME, media.getLastPlayedTime());
if (media.getPlaybackCompletionDate() != null) {
values.put(KEY_PLAYBACK_COMPLETION_DATE, media.getPlaybackCompletionDate().getTime());
} else {
values.put(KEY_PLAYBACK_COMPLETION_DATE, 0);
}
if (media.getItem() != null) {
values.put(KEY_FEEDITEM, media.getItem().getId());
}
if (media.getId() == 0) {
media.setId(db.insert(TABLE_NAME_FEED_MEDIA, null, values));
} else {
db.update(TABLE_NAME_FEED_MEDIA, values, KEY_ID + "=?",
new String[]{String.valueOf(media.getId())});
}
return media.getId();
}
public void setFeedMediaPlaybackInformation(FeedMedia media) {
if (media.getId() != 0) {
ContentValues values = new ContentValues();
values.put(KEY_POSITION, media.getPosition());
values.put(KEY_DURATION, media.getDuration());
values.put(KEY_PLAYED_DURATION, media.getPlayedDuration());
values.put(KEY_LAST_PLAYED_TIME, media.getLastPlayedTime());
db.update(TABLE_NAME_FEED_MEDIA, values, KEY_ID + "=?",
new String[]{String.valueOf(media.getId())});
} else {
Log.e(TAG, "setFeedMediaPlaybackInformation: ID of media was 0");
}
}
public void setFeedMediaPlaybackCompletionDate(FeedMedia media) {
if (media.getId() != 0) {
ContentValues values = new ContentValues();
values.put(KEY_PLAYBACK_COMPLETION_DATE, media.getPlaybackCompletionDate().getTime());
values.put(KEY_PLAYED_DURATION, media.getPlayedDuration());
db.update(TABLE_NAME_FEED_MEDIA, values, KEY_ID + "=?",
new String[]{String.valueOf(media.getId())});
} else {
Log.e(TAG, "setFeedMediaPlaybackCompletionDate: ID of media was 0");
}
}
public void resetAllMediaPlayedDuration() {
try {
db.beginTransactionNonExclusive();
ContentValues values = new ContentValues();
values.put(KEY_PLAYED_DURATION, 0);
db.update(TABLE_NAME_FEED_MEDIA, values, null, new String[0]);
db.setTransactionSuccessful();
} catch (SQLException e) {
Log.e(TAG, Log.getStackTraceString(e));
} finally {
db.endTransaction();
}
}
/**
* Insert all FeedItems of a feed and the feed object itself in a single
* transaction
*/
public void setCompleteFeed(Feed... feeds) {
try {
db.beginTransactionNonExclusive();
for (Feed feed : feeds) {
setFeed(feed);
if (feed.getItems() != null) {
for (FeedItem item : feed.getItems()) {
setFeedItem(item, false);
}
}
if (feed.getPreferences() != null) {
setFeedPreferences(feed.getPreferences());
}
}
db.setTransactionSuccessful();
} catch (SQLException e) {
Log.e(TAG, Log.getStackTraceString(e));
} finally {
db.endTransaction();
}
}
/**
* Updates the download URL of a Feed.
*/
public void setFeedDownloadUrl(String original, String updated) {
ContentValues values = new ContentValues();
values.put(KEY_DOWNLOAD_URL, updated);
db.update(TABLE_NAME_FEEDS, values, KEY_DOWNLOAD_URL + "=?", new String[]{original});
}
public void setFeedItemlist(List<FeedItem> items) {
try {
db.beginTransactionNonExclusive();
for (FeedItem item : items) {
setFeedItem(item, true);
}
db.setTransactionSuccessful();
} catch (SQLException e) {
Log.e(TAG, Log.getStackTraceString(e));
} finally {
db.endTransaction();
}
}
public long setSingleFeedItem(FeedItem item) {
long result = 0;
try {
db.beginTransactionNonExclusive();
result = setFeedItem(item, true);
db.setTransactionSuccessful();
} catch (SQLException e) {
Log.e(TAG, Log.getStackTraceString(e));
} finally {
db.endTransaction();
}
return result;
}
/**
* Inserts or updates a feeditem entry
*
* @param item The FeedItem
* @param saveFeed true if the Feed of the item should also be saved. This should be set to
* false if the method is executed on a list of FeedItems of the same Feed.
* @return the id of the entry
*/
private long setFeedItem(FeedItem item, boolean saveFeed) {
ContentValues values = new ContentValues();
values.put(KEY_TITLE, item.getTitle());
values.put(KEY_LINK, item.getLink());
if (item.getDescription() != null) {
values.put(KEY_DESCRIPTION, item.getDescription());
}
if (item.getContentEncoded() != null) {
values.put(KEY_CONTENT_ENCODED, item.getContentEncoded());
}
values.put(KEY_PUBDATE, item.getPubDate().getTime());
values.put(KEY_PAYMENT_LINK, item.getPaymentLink());
if (saveFeed && item.getFeed() != null) {
setFeed(item.getFeed());
}
values.put(KEY_FEED, item.getFeed().getId());
if (item.isNew()) {
values.put(KEY_READ, FeedItem.NEW);
} else if (item.isPlayed()) {
values.put(KEY_READ, FeedItem.PLAYED);
} else {
values.put(KEY_READ, FeedItem.UNPLAYED);
}
values.put(KEY_HAS_CHAPTERS, item.getChapters() != null || item.hasChapters());
values.put(KEY_ITEM_IDENTIFIER, item.getItemIdentifier());
values.put(KEY_AUTO_DOWNLOAD, item.getAutoDownload());
values.put(KEY_IMAGE_URL, item.getImageUrl());
if (item.getId() == 0) {
item.setId(db.insert(TABLE_NAME_FEED_ITEMS, null, values));
} else {
db.update(TABLE_NAME_FEED_ITEMS, values, KEY_ID + "=?",
new String[]{String.valueOf(item.getId())});
}
if (item.getMedia() != null) {
setMedia(item.getMedia());
}
if (item.getChapters() != null) {
setChapters(item);
}
return item.getId();
}
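    /**
     * Sets the 'read' attribute of a single item and optionally resets the playback
     * position of the corresponding media to the beginning.
     */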
public void setFeedItemRead(int played, long itemId, long mediaId,
boolean resetMediaPosition) {
try {
db.beginTransactionNonExclusive();
ContentValues values = new ContentValues();
values.put(KEY_READ, played);
db.update(TABLE_NAME_FEED_ITEMS, values, KEY_ID + "=?", new String[]{String.valueOf(itemId)});
if (resetMediaPosition) {
values.clear();
values.put(KEY_POSITION, 0);
db.update(TABLE_NAME_FEED_MEDIA, values, KEY_ID + "=?", new String[]{String.valueOf(mediaId)});
}
db.setTransactionSuccessful();
} catch (SQLException e) {
Log.e(TAG, Log.getStackTraceString(e));
} finally {
db.endTransaction();
}
}
/**
* Sets the 'read' attribute of the item.
*
* @param read must be one of FeedItem.PLAYED, FeedItem.NEW, FeedItem.UNPLAYED
* @param itemIds items to change the value of
*/
public void setFeedItemRead(int read, long... itemIds) {
try {
db.beginTransactionNonExclusive();
ContentValues values = new ContentValues();
for (long id : itemIds) {
values.clear();
values.put(KEY_READ, read);
db.update(TABLE_NAME_FEED_ITEMS, values, KEY_ID + "=?", new String[]{String.valueOf(id)});
}
db.setTransactionSuccessful();
} catch (SQLException e) {
Log.e(TAG, Log.getStackTraceString(e));
} finally {
db.endTransaction();
}
}
private void setChapters(FeedItem item) {
ContentValues values = new ContentValues();
for (Chapter chapter : item.getChapters()) {
values.put(KEY_TITLE, chapter.getTitle());
values.put(KEY_START, chapter.getStart());
values.put(KEY_FEEDITEM, item.getId());
values.put(KEY_LINK, chapter.getLink());
values.put(KEY_IMAGE_URL, chapter.getImageUrl());
values.put(KEY_CHAPTER_TYPE, chapter.getChapterType());
if (chapter.getId() == 0) {
chapter.setId(db.insert(TABLE_NAME_SIMPLECHAPTERS, null, values));
} else {
db.update(TABLE_NAME_SIMPLECHAPTERS, values, KEY_ID + "=?",
new String[]{String.valueOf(chapter.getId())});
}
}
}
public void setFeedLastUpdateFailed(long feedId, boolean failed) {
final String sql = "UPDATE " + TABLE_NAME_FEEDS
+ " SET " + KEY_LAST_UPDATE_FAILED + "=" + (failed ? "1" : "0")
+ " WHERE " + KEY_ID + "=" + feedId;
db.execSQL(sql);
}
void setFeedCustomTitle(long feedId, String customTitle) {
ContentValues values = new ContentValues();
values.put(KEY_CUSTOM_TITLE, customTitle);
db.update(TABLE_NAME_FEEDS, values, KEY_ID + "=?", new String[]{String.valueOf(feedId)});
}
/**
* Inserts or updates a download status.
*/
public long setDownloadStatus(DownloadStatus status) {
ContentValues values = new ContentValues();
values.put(KEY_FEEDFILE, status.getFeedfileId());
values.put(KEY_FEEDFILETYPE, status.getFeedfileType());
values.put(KEY_REASON, status.getReason().getCode());
values.put(KEY_SUCCESSFUL, status.isSuccessful());
values.put(KEY_COMPLETION_DATE, status.getCompletionDate().getTime());
values.put(KEY_REASON_DETAILED, status.getReasonDetailed());
values.put(KEY_DOWNLOADSTATUS_TITLE, status.getTitle());
if (status.getId() == 0) {
status.setId(db.insert(TABLE_NAME_DOWNLOAD_LOG, null, values));
} else {
db.update(TABLE_NAME_DOWNLOAD_LOG, values, KEY_ID + "=?",
new String[]{String.valueOf(status.getId())});
}
return status.getId();
}
public void setFeedItemAutoDownload(FeedItem feedItem, long autoDownload) {
ContentValues values = new ContentValues();
values.put(KEY_AUTO_DOWNLOAD, autoDownload);
db.update(TABLE_NAME_FEED_ITEMS, values, KEY_ID + "=?",
new String[]{String.valueOf(feedItem.getId())});
}
public void setFeedsItemsAutoDownload(Feed feed, boolean autoDownload) {
final String sql = "UPDATE " + TABLE_NAME_FEED_ITEMS
+ " SET " + KEY_AUTO_DOWNLOAD + "=" + (autoDownload ? "1" : "0")
+ " WHERE " + KEY_FEED + "=" + feed.getId();
db.execSQL(sql);
}
public void setFavorites(List<FeedItem> favorites) {
ContentValues values = new ContentValues();
try {
db.beginTransactionNonExclusive();
db.delete(TABLE_NAME_FAVORITES, null, null);
for (int i = 0; i < favorites.size(); i++) {
FeedItem item = favorites.get(i);
values.put(KEY_ID, i);
values.put(KEY_FEEDITEM, item.getId());
values.put(KEY_FEED, item.getFeed().getId());
db.insertWithOnConflict(TABLE_NAME_FAVORITES, null, values, SQLiteDatabase.CONFLICT_REPLACE);
}
db.setTransactionSuccessful();
} catch (SQLException e) {
Log.e(TAG, Log.getStackTraceString(e));
} finally {
db.endTransaction();
}
}
/**
* Adds the item to favorites
*/
public void addFavoriteItem(FeedItem item) {
// don't add an item that's already there...
if (isItemInFavorites(item)) {
Log.d(TAG, "item already in favorites");
return;
}
ContentValues values = new ContentValues();
values.put(KEY_FEEDITEM, item.getId());
values.put(KEY_FEED, item.getFeedId());
db.insert(TABLE_NAME_FAVORITES, null, values);
}
public void removeFavoriteItem(FeedItem item) {
String deleteClause = String.format("DELETE FROM %s WHERE %s=%s AND %s=%s",
TABLE_NAME_FAVORITES,
KEY_FEEDITEM, item.getId(),
KEY_FEED, item.getFeedId());
db.execSQL(deleteClause);
}
private boolean isItemInFavorites(FeedItem item) {
String query = String.format("SELECT %s from %s WHERE %s=%d",
KEY_ID, TABLE_NAME_FAVORITES, KEY_FEEDITEM, item.getId());
Cursor c = db.rawQuery(query, null);
int count = c.getCount();
c.close();
return count > 0;
}
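    /**
     * Replaces the entire queue with the given list of items, storing each item's
     * position in the list.
     */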
public void setQueue(List<FeedItem> queue) {
ContentValues values = new ContentValues();
try {
db.beginTransactionNonExclusive();
db.delete(TABLE_NAME_QUEUE, null, null);
for (int i = 0; i < queue.size(); i++) {
FeedItem item = queue.get(i);
values.put(KEY_ID, i);
values.put(KEY_FEEDITEM, item.getId());
values.put(KEY_FEED, item.getFeed().getId());
db.insertWithOnConflict(TABLE_NAME_QUEUE, null, values, SQLiteDatabase.CONFLICT_REPLACE);
}
db.setTransactionSuccessful();
} catch (SQLException e) {
Log.e(TAG, Log.getStackTraceString(e));
} finally {
db.endTransaction();
}
}
public void clearQueue() {
db.delete(TABLE_NAME_QUEUE, null, null);
}
private void removeFeedMedia(FeedMedia media) {
// delete download log entries for feed media
db.delete(TABLE_NAME_DOWNLOAD_LOG, KEY_FEEDFILE + "=? AND " + KEY_FEEDFILETYPE + "=?",
new String[]{String.valueOf(media.getId()), String.valueOf(FeedMedia.FEEDFILETYPE_FEEDMEDIA)});
db.delete(TABLE_NAME_FEED_MEDIA, KEY_ID + "=?",
new String[]{String.valueOf(media.getId())});
}
private void removeChaptersOfItem(FeedItem item) {
db.delete(TABLE_NAME_SIMPLECHAPTERS, KEY_FEEDITEM + "=?",
new String[]{String.valueOf(item.getId())});
}
/**
* Remove a FeedItem and its FeedMedia entry.
*/
private void removeFeedItem(FeedItem item) {
if (item.getMedia() != null) {
removeFeedMedia(item.getMedia());
}
if (item.hasChapters() || item.getChapters() != null) {
removeChaptersOfItem(item);
}
db.delete(TABLE_NAME_FEED_ITEMS, KEY_ID + "=?",
new String[]{String.valueOf(item.getId())});
}
/**
* Remove a feed with all its FeedItems and Media entries.
*/
public void removeFeed(Feed feed) {
try {
db.beginTransactionNonExclusive();
if (feed.getItems() != null) {
for (FeedItem item : feed.getItems()) {
removeFeedItem(item);
}
}
// delete download log entries for feed
db.delete(TABLE_NAME_DOWNLOAD_LOG, KEY_FEEDFILE + "=? AND " + KEY_FEEDFILETYPE + "=?",
new String[]{String.valueOf(feed.getId()), String.valueOf(Feed.FEEDFILETYPE_FEED)});
db.delete(TABLE_NAME_FEEDS, KEY_ID + "=?",
new String[]{String.valueOf(feed.getId())});
db.setTransactionSuccessful();
} catch (SQLException e) {
Log.e(TAG, Log.getStackTraceString(e));
} finally {
db.endTransaction();
}
}
public void clearPlaybackHistory() {
ContentValues values = new ContentValues();
values.put(KEY_PLAYBACK_COMPLETION_DATE, 0);
db.update(TABLE_NAME_FEED_MEDIA, values, null, null);
}
public void clearDownloadLog() {
db.delete(TABLE_NAME_DOWNLOAD_LOG, null, null);
}
/**
* Get all Feeds from the Feed Table.
*
* @return The cursor of the query
*/
public final Cursor getAllFeedsCursor() {
return db.query(TABLE_NAME_FEEDS, FEED_SEL_STD, null, null, null, null,
KEY_TITLE + " COLLATE NOCASE ASC");
}
public final Cursor getFeedCursorDownloadUrls() {
return db.query(TABLE_NAME_FEEDS, new String[]{KEY_ID, KEY_DOWNLOAD_URL}, null, null, null, null, null);
}
/**
* Returns a cursor with all FeedItems of a Feed. Uses FEEDITEM_SEL_FI_SMALL
*
* @param feed The feed you want to get the FeedItems from.
* @return The cursor of the query
*/
public final Cursor getAllItemsOfFeedCursor(final Feed feed) {
return getAllItemsOfFeedCursor(feed.getId());
}
private Cursor getAllItemsOfFeedCursor(final long feedId) {
return db.query(TABLE_NAME_FEED_ITEMS, FEEDITEM_SEL_FI_SMALL, KEY_FEED
+ "=?", new String[]{String.valueOf(feedId)}, null, null,
null);
}
/**
* Return a cursor with the SEL_FI_EXTRA selection of a single feeditem.
*/
public final Cursor getExtraInformationOfItem(final FeedItem item) {
return db
.query(TABLE_NAME_FEED_ITEMS, SEL_FI_EXTRA, KEY_ID + "=?",
new String[]{String.valueOf(item.getId())}, null,
null, null);
}
public final Cursor getSimpleChaptersOfFeedItemCursor(final FeedItem item) {
return db.query(TABLE_NAME_SIMPLECHAPTERS, null, KEY_FEEDITEM
+ "=?", new String[]{String.valueOf(item.getId())}, null,
null, null
);
}
public final Cursor getDownloadLog(final int feedFileType, final long feedFileId) {
final String query = "SELECT * FROM " + TABLE_NAME_DOWNLOAD_LOG +
" WHERE " + KEY_FEEDFILE + "=" + feedFileId + " AND " + KEY_FEEDFILETYPE + "=" + feedFileType
+ " ORDER BY " + KEY_ID + " DESC";
return db.rawQuery(query, null);
}
public final Cursor getDownloadLogCursor(final int limit) {
return db.query(TABLE_NAME_DOWNLOAD_LOG, null, null, null, null,
null, KEY_COMPLETION_DATE + " DESC LIMIT " + limit);
}
/**
* Returns a cursor which contains all feed items in the queue. The returned
* cursor uses the FEEDITEM_SEL_FI_SMALL selection.
*/
public final Cursor getQueueCursor() {
Object[] args = new String[]{
SEL_FI_SMALL_STR,
TABLE_NAME_FEED_ITEMS, TABLE_NAME_QUEUE,
TABLE_NAME_FEED_ITEMS + "." + KEY_ID,
TABLE_NAME_QUEUE + "." + KEY_FEEDITEM,
TABLE_NAME_QUEUE + "." + KEY_ID};
String query = String.format("SELECT %s FROM %s INNER JOIN %s ON %s=%s ORDER BY %s", args);
return db.rawQuery(query, null);
}
public Cursor getQueueIDCursor() {
return db.query(TABLE_NAME_QUEUE, new String[]{KEY_FEEDITEM}, null, null, null, null, KEY_ID + " ASC", null);
}
public final Cursor getFavoritesCursor(int offset, int limit) {
Object[] args = new String[]{
SEL_FI_SMALL_STR,
TABLE_NAME_FEED_ITEMS, TABLE_NAME_FAVORITES,
TABLE_NAME_FEED_ITEMS + "." + KEY_ID,
TABLE_NAME_FAVORITES + "." + KEY_FEEDITEM,
TABLE_NAME_FEED_ITEMS + "." + KEY_PUBDATE,
String.valueOf(offset),
String.valueOf(limit)
};
String query = String.format("SELECT %s FROM %s INNER JOIN %s ON %s=%s ORDER BY %s DESC LIMIT %s, %s", args);
return db.rawQuery(query, null);
}
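    /**
     * Bulk-updates the 'read' state of feed items. The overloads allow restricting the
     * update to items that currently have a given state and/or belong to a specific feed.
     */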
public void setFeedItems(int state) {
setFeedItems(Integer.MIN_VALUE, state, 0);
}
public void setFeedItems(int oldState, int newState) {
setFeedItems(oldState, newState, 0);
}
public void setFeedItems(int state, long feedId) {
setFeedItems(Integer.MIN_VALUE, state, feedId);
}
public void setFeedItems(int oldState, int newState, long feedId) {
String sql = "UPDATE " + TABLE_NAME_FEED_ITEMS + " SET " + KEY_READ + "=" + newState;
if (feedId > 0) {
sql += " WHERE " + KEY_FEED + "=" + feedId;
}
if (FeedItem.NEW <= oldState && oldState <= FeedItem.PLAYED) {
sql += feedId > 0 ? " AND " : " WHERE ";
sql += KEY_READ + "=" + oldState;
}
db.execSQL(sql);
}
/**
* Returns a cursor which contains all feed items that are considered new.
* Excludes those feeds that do not have 'Keep Updated' enabled.
* The returned cursor uses the FEEDITEM_SEL_FI_SMALL selection.
*/
public final Cursor getNewItemsCursor(int offset, int limit) {
Object[] args = new String[]{
SEL_FI_SMALL_STR,
TABLE_NAME_FEED_ITEMS,
TABLE_NAME_FEEDS,
TABLE_NAME_FEED_ITEMS + "." + KEY_FEED + "=" + TABLE_NAME_FEEDS + "." + KEY_ID,
TABLE_NAME_FEED_ITEMS + "." + KEY_READ + "=" + FeedItem.NEW + " AND " + TABLE_NAME_FEEDS + "." + KEY_KEEP_UPDATED + " > 0",
KEY_PUBDATE + " DESC",
String.valueOf(offset),
String.valueOf(limit)
};
final String query = String.format("SELECT %s FROM %s INNER JOIN %s ON %s WHERE %s "
+ "ORDER BY %s LIMIT %s, %s", args);
return db.rawQuery(query, null);
}
public final Cursor getRecentlyPublishedItemsCursor(int offset, int limit) {
return db.query(TABLE_NAME_FEED_ITEMS, FEEDITEM_SEL_FI_SMALL, null, null, null, null, KEY_PUBDATE + " DESC LIMIT " + offset + ", " + limit);
}
public Cursor getDownloadedItemsCursor() {
final String query = "SELECT " + SEL_FI_SMALL_STR
+ " FROM " + TABLE_NAME_FEED_ITEMS
+ " INNER JOIN " + TABLE_NAME_FEED_MEDIA
+ " ON " + TABLE_NAME_FEED_ITEMS + "." + KEY_ID + "=" + TABLE_NAME_FEED_MEDIA + "." + KEY_FEEDITEM
+ " WHERE " + TABLE_NAME_FEED_MEDIA + "." + KEY_DOWNLOADED + ">0";
return db.rawQuery(query, null);
}
/**
* Returns a cursor which contains feed media objects with a playback
* completion date in ascending order.
*
* @param limit The maximum row count of the returned cursor. Must be an
* integer >= 0.
* @throws IllegalArgumentException if limit < 0
*/
public final Cursor getCompletedMediaCursor(int limit) {
if (limit < 0) {
throw new IllegalArgumentException("Limit must be >= 0");
}
return db.query(TABLE_NAME_FEED_MEDIA, null,
KEY_PLAYBACK_COMPLETION_DATE + " > 0", null, null,
null, String.format("%s DESC LIMIT %d", KEY_PLAYBACK_COMPLETION_DATE, limit));
}
public final Cursor getSingleFeedMediaCursor(long id) {
return db.query(TABLE_NAME_FEED_MEDIA, null, KEY_ID + "=?", new String[]{String.valueOf(id)}, null, null, null);
}
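    /**
     * Returns a cursor over the FeedMedia rows belonging to the given feed item IDs.
     * If the number of IDs exceeds the SQLite IN-operator limit, the query is split
     * into several cursors that are combined into a single MergeCursor.
     */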
public final Cursor getFeedMediaCursor(String... itemIds) {
int length = itemIds.length;
if (length > IN_OPERATOR_MAXIMUM) {
Log.w(TAG, "Length of id array is larger than "
+ IN_OPERATOR_MAXIMUM + ". Creating multiple cursors");
int numCursors = (int) (((double) length) / (IN_OPERATOR_MAXIMUM)) + 1;
Cursor[] cursors = new Cursor[numCursors];
for (int i = 0; i < numCursors; i++) {
int neededLength;
String[] parts;
final int elementsLeft = length - i * IN_OPERATOR_MAXIMUM;
if (elementsLeft >= IN_OPERATOR_MAXIMUM) {
neededLength = IN_OPERATOR_MAXIMUM;
parts = Arrays.copyOfRange(itemIds, i
* IN_OPERATOR_MAXIMUM, (i + 1)
* IN_OPERATOR_MAXIMUM);
} else {
neededLength = elementsLeft;
parts = Arrays.copyOfRange(itemIds, i
* IN_OPERATOR_MAXIMUM, (i * IN_OPERATOR_MAXIMUM)
+ neededLength);
}
cursors[i] = db.rawQuery("SELECT * FROM "
+ TABLE_NAME_FEED_MEDIA + " WHERE " + KEY_FEEDITEM + " IN "
+ buildInOperator(neededLength), parts);
}
Cursor result = new MergeCursor(cursors);
result.moveToFirst();
return result;
} else {
return db.query(TABLE_NAME_FEED_MEDIA, null, KEY_FEEDITEM + " IN "
+ buildInOperator(length), itemIds, null, null, null);
}
}
/**
* Builds an IN-operator argument depending on the number of items.
*/
private String buildInOperator(int size) {
if (size == 1) {
return "(?)";
}
return "(" + TextUtils.join(",", Collections.nCopies(size, "?")) + ")";
}
public final Cursor getFeedCursor(final long id) {
return db.query(TABLE_NAME_FEEDS, FEED_SEL_STD, KEY_ID + "=" + id, null,
null, null, null);
}
public final Cursor getFeedItemCursor(final String id) {
return getFeedItemCursor(new String[]{id});
}
public final Cursor getFeedItemCursor(final String[] ids) {
if (ids.length > IN_OPERATOR_MAXIMUM) {
throw new IllegalArgumentException(
"number of IDs must not be larger than "
+ IN_OPERATOR_MAXIMUM
);
}
return db.query(TABLE_NAME_FEED_ITEMS, FEEDITEM_SEL_FI_SMALL, KEY_ID + " IN "
+ buildInOperator(ids.length), ids, null, null, null);
}
public final Cursor getFeedItemCursor(final String podcastUrl, final String episodeUrl) {
String escapedPodcastUrl = DatabaseUtils.sqlEscapeString(podcastUrl);
String escapedEpisodeUrl = DatabaseUtils.sqlEscapeString(episodeUrl);
final String query = ""
+ "SELECT " + SEL_FI_SMALL_STR + " FROM " + TABLE_NAME_FEED_ITEMS
+ " INNER JOIN " + TABLE_NAME_FEEDS
+ " ON " + TABLE_NAME_FEED_ITEMS + "." + KEY_FEED + "=" + TABLE_NAME_FEEDS + "." + KEY_ID
+ " INNER JOIN " + TABLE_NAME_FEED_MEDIA
+ " ON " + TABLE_NAME_FEED_MEDIA + "." + KEY_FEEDITEM + "=" + TABLE_NAME_FEED_ITEMS + "." + KEY_ID
+ " WHERE " + TABLE_NAME_FEED_MEDIA + "." + KEY_DOWNLOAD_URL + "=" + escapedEpisodeUrl
+ " AND " + TABLE_NAME_FEEDS + "." + KEY_DOWNLOAD_URL + "=" + escapedPodcastUrl;
Log.d(TAG, "SQL: " + query);
return db.rawQuery(query, null);
}
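    /**
     * Returns the username and password stored for the feed or feed item whose
     * image URL matches the given URL.
     */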
public Cursor getImageAuthenticationCursor(final String imageUrl) {
String downloadUrl = DatabaseUtils.sqlEscapeString(imageUrl);
final String query = ""
+ "SELECT " + KEY_USERNAME + "," + KEY_PASSWORD + " FROM " + TABLE_NAME_FEED_ITEMS
+ " INNER JOIN " + TABLE_NAME_FEEDS
+ " ON " + TABLE_NAME_FEED_ITEMS + "." + KEY_FEED + " = " + TABLE_NAME_FEEDS + "." + KEY_ID
+ " WHERE " + TABLE_NAME_FEED_ITEMS + "." + KEY_IMAGE_URL + "=" + downloadUrl
+ " UNION SELECT " + KEY_USERNAME + "," + KEY_PASSWORD + " FROM " + TABLE_NAME_FEEDS
+ " WHERE " + TABLE_NAME_FEEDS + "." + KEY_IMAGE_URL + "=" + downloadUrl;
return db.rawQuery(query, null);
}
public int getQueueSize() {
final String query = String.format("SELECT COUNT(%s) FROM %s", KEY_ID, TABLE_NAME_QUEUE);
Cursor c = db.rawQuery(query, null);
int result = 0;
if (c.moveToFirst()) {
result = c.getInt(0);
}
c.close();
return result;
}
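    /**
     * Counts the feed items in the NEW state, excluding items of feeds whose
     * 'Keep Updated' setting is disabled.
     */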
public final int getNumberOfNewItems() {
Object[] args = new String[]{
TABLE_NAME_FEED_ITEMS + "." + KEY_ID,
TABLE_NAME_FEED_ITEMS,
TABLE_NAME_FEEDS,
TABLE_NAME_FEED_ITEMS + "." + KEY_FEED + "=" + TABLE_NAME_FEEDS + "." + KEY_ID,
TABLE_NAME_FEED_ITEMS + "." + KEY_READ + "=" + FeedItem.NEW
+ " AND " + TABLE_NAME_FEEDS + "." + KEY_KEEP_UPDATED + " > 0"
};
final String query = String.format("SELECT COUNT(%s) FROM %s INNER JOIN %s ON %s WHERE %s", args);
Cursor c = db.rawQuery(query, null);
int result = 0;
if (c.moveToFirst()) {
result = c.getInt(0);
}
c.close();
return result;
}
public final LongIntMap getFeedCounters(long... feedIds) {
int setting = UserPreferences.getFeedCounterSetting();
String whereRead;
switch (setting) {
case UserPreferences.FEED_COUNTER_SHOW_NEW_UNPLAYED_SUM:
whereRead = "(" + KEY_READ + "=" + FeedItem.NEW +
" OR " + KEY_READ + "=" + FeedItem.UNPLAYED + ")";
break;
case UserPreferences.FEED_COUNTER_SHOW_NEW:
whereRead = KEY_READ + "=" + FeedItem.NEW;
break;
case UserPreferences.FEED_COUNTER_SHOW_UNPLAYED:
whereRead = KEY_READ + "=" + FeedItem.UNPLAYED;
break;
case UserPreferences.FEED_COUNTER_SHOW_DOWNLOADED:
whereRead = KEY_DOWNLOADED + "=1";
break;
case UserPreferences.FEED_COUNTER_SHOW_NONE:
// deliberate fall-through
default: // NONE
return new LongIntMap(0);
}
return conditionalFeedCounterRead(whereRead, feedIds);
}
private LongIntMap conditionalFeedCounterRead(String whereRead, long... feedIds) {
// work around TextUtils.join wanting only boxed items
// and StringUtils.join() causing NoSuchMethodErrors on MIUI
StringBuilder builder = new StringBuilder();
for (long id : feedIds) {
builder.append(id);
builder.append(',');
}
if (feedIds.length > 0) {
// there's an extra ',', get rid of it
builder.deleteCharAt(builder.length() - 1);
}
final String query = "SELECT " + KEY_FEED + ", COUNT(" + TABLE_NAME_FEED_ITEMS + "." + KEY_ID + ") AS count "
+ " FROM " + TABLE_NAME_FEED_ITEMS
+ " LEFT JOIN " + TABLE_NAME_FEED_MEDIA + " ON "
+ TABLE_NAME_FEED_ITEMS + "." + KEY_ID + "=" + TABLE_NAME_FEED_MEDIA + "." + KEY_FEEDITEM
+ " WHERE " + KEY_FEED + " IN (" + builder.toString() + ") "
+ " AND " + whereRead + " GROUP BY " + KEY_FEED;
Cursor c = db.rawQuery(query, null);
LongIntMap result = new LongIntMap(c.getCount());
if (c.moveToFirst()) {
do {
long feedId = c.getLong(0);
int count = c.getInt(1);
result.put(feedId, count);
} while (c.moveToNext());
}
c.close();
return result;
}
public final LongIntMap getPlayedEpisodesCounters(long... feedIds) {
String whereRead = KEY_READ + "=" + FeedItem.PLAYED;
return conditionalFeedCounterRead(whereRead, feedIds);
}
public final int getNumberOfDownloadedEpisodes() {
final String query = "SELECT COUNT(DISTINCT " + KEY_ID + ") AS count FROM " + TABLE_NAME_FEED_MEDIA +
" WHERE " + KEY_DOWNLOADED + " > 0";
Cursor c = db.rawQuery(query, null);
int result = 0;
if (c.moveToFirst()) {
result = c.getInt(0);
}
c.close();
return result;
}
/**
* Uses DatabaseUtils to escape a search query and removes ' at the
* beginning and the end of the string returned by the escape method.
*/
private String prepareSearchQuery(String query) {
StringBuilder builder = new StringBuilder();
DatabaseUtils.appendEscapedSQLString(builder, query);
builder.deleteCharAt(0);
builder.deleteCharAt(builder.length() - 1);
return builder.toString();
}
/**
* Searches for the given query in various values of all items or the items
* of a specified feed.
*
* @return A cursor with all search results in SEL_FI_EXTRA selection.
*/
public Cursor searchItems(long feedID, String searchQuery) {
String preparedQuery = prepareSearchQuery(searchQuery);
String queryFeedId = "";
if (feedID != 0) {
// search items in specific feed
queryFeedId = KEY_FEED + " = " + feedID;
} else {
// search through all items
queryFeedId = "1 = 1";
}
String query = "SELECT " + SEL_FI_SMALL_STR + " FROM " + TABLE_NAME_FEED_ITEMS
+ " WHERE " + queryFeedId + " AND ("
+ KEY_DESCRIPTION + " LIKE '%" + preparedQuery + "%' OR "
+ KEY_CONTENT_ENCODED + " LIKE '%" + preparedQuery + "%' OR "
+ KEY_TITLE + " LIKE '%" + preparedQuery + "%'"
+ ") ORDER BY " + KEY_PUBDATE + " DESC "
+ "LIMIT 300";
return db.rawQuery(query, null);
}
/**
* Searches for the given query in various values of all feeds.
*
* @return A cursor with all search results in SEL_FI_EXTRA selection.
*/
public Cursor searchFeeds(String searchQuery) {
String preparedQuery = prepareSearchQuery(searchQuery);
String query = "SELECT " + FEED_SEL_STD_STR + " FROM " + TABLE_NAME_FEEDS + " WHERE "
+ KEY_TITLE + " LIKE '%" + preparedQuery + "%' OR "
+ KEY_CUSTOM_TITLE + " LIKE '%" + preparedQuery + "%' OR "
+ KEY_AUTHOR + " LIKE '%" + preparedQuery + "%' OR "
+ KEY_DESCRIPTION + " LIKE '%" + preparedQuery + "%' "
+ "ORDER BY " + KEY_TITLE + " ASC "
+ "LIMIT 300";
return db.rawQuery(query, null);
}
/**
* Select number of items, new items, the date of the latest episode and the number of episodes in progress. The result
* is sorted by the title of the feed.
*/
private static final String FEED_STATISTICS_QUERY = "SELECT Feeds.id, num_items, new_items, latest_episode, in_progress FROM " +
" Feeds LEFT JOIN " +
"(SELECT feed,count(*) AS num_items," +
" COUNT(CASE WHEN read=0 THEN 1 END) AS new_items," +
" MAX(pubDate) AS latest_episode," +
" COUNT(CASE WHEN position>0 THEN 1 END) AS in_progress," +
" COUNT(CASE WHEN downloaded=1 THEN 1 END) AS episodes_downloaded " +
" FROM FeedItems LEFT JOIN FeedMedia ON FeedItems.id=FeedMedia.feeditem GROUP BY FeedItems.feed)" +
" ON Feeds.id = feed ORDER BY Feeds.title COLLATE NOCASE ASC;";
public Cursor getFeedStatisticsCursor() {
return db.rawQuery(FEED_STATISTICS_QUERY, null);
}
/**
* Called when a database corruption happens
*/
public static class PodDbErrorHandler implements DatabaseErrorHandler {
@Override
public void onCorruption(SQLiteDatabase db) {
Log.e(TAG, "Database corrupted: " + db.getPath());
File dbPath = new File(db.getPath());
File backupFolder = PodDBAdapter.context.getExternalFilesDir(null);
File backupFile = new File(backupFolder, "CorruptedDatabaseBackup.db");
try {
FileUtils.copyFile(dbPath, backupFile);
Log.d(TAG, "Dumped database to " + backupFile.getPath());
} catch (IOException e) {
Log.d(TAG, Log.getStackTraceString(e));
}
new DefaultDatabaseErrorHandler().onCorruption(db); // This deletes the database
}
}
/**
 * Helper class for opening the AntennaPod database.
*/
private static class PodDBHelper extends SQLiteOpenHelper {
/**
* Constructor.
*
* @param context Context to use
* @param name Name of the database
* @param factory to use for creating cursor objects
*/
public PodDBHelper(final Context context, final String name, final CursorFactory factory) {
super(context, name, factory, VERSION, new PodDbErrorHandler());
}
@Override
public void onCreate(final SQLiteDatabase db) {
db.execSQL(CREATE_TABLE_FEEDS);
db.execSQL(CREATE_TABLE_FEED_ITEMS);
db.execSQL(CREATE_TABLE_FEED_MEDIA);
db.execSQL(CREATE_TABLE_DOWNLOAD_LOG);
db.execSQL(CREATE_TABLE_QUEUE);
db.execSQL(CREATE_TABLE_SIMPLECHAPTERS);
db.execSQL(CREATE_TABLE_FAVORITES);
db.execSQL(CREATE_INDEX_FEEDITEMS_FEED);
db.execSQL(CREATE_INDEX_FEEDITEMS_PUBDATE);
db.execSQL(CREATE_INDEX_FEEDITEMS_READ);
db.execSQL(CREATE_INDEX_FEEDMEDIA_FEEDITEM);
db.execSQL(CREATE_INDEX_QUEUE_FEEDITEM);
db.execSQL(CREATE_INDEX_SIMPLECHAPTERS_FEEDITEM);
}
@Override
public void onUpgrade(final SQLiteDatabase db, final int oldVersion, final int newVersion) {
Log.w("DBAdapter", "Upgrading from version " + oldVersion + " to " + newVersion + ".");
DBUpgrader.upgrade(db, oldVersion, newVersion);
}
}
}
| 1 | 15,927 | Please only increment by 1. | AntennaPod-AntennaPod | java |
@@ -348,7 +348,7 @@ namespace Microsoft.Rest.Generator.Java
else if (primaryType == PrimaryType.TimeSpan ||
primaryType.Name == "Period")
{
- return "java.time.Period";
+ return "org.joda.time.Period";
}
else
{ | 1 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.Globalization;
using System.Linq;
using System.Text.RegularExpressions;
using Microsoft.Rest.Generator.ClientModel;
using Microsoft.Rest.Generator.Utilities;
namespace Microsoft.Rest.Generator.Java
{
public class JavaCodeNamer : CodeNamer
{
private readonly HashSet<IType> _normalizedTypes;
/// <summary>
        /// Initializes a new instance of the JavaCodeNamer class.
/// </summary>
public JavaCodeNamer()
{
// List retrieved from
// http://docs.oracle.com/javase/tutorial/java/nutsandbolts/_keywords.html
new HashSet<string>
{
"abstract", "assert", "boolean", "break", "byte",
"case", "catch", "char", "class", "const",
"continue", "default", "do", "double", "else",
"enum", "extends", "false", "final", "finally",
"float", "for", "goto", "if", "implements",
"import", "int", "long", "interface","instanceof",
"native", "new", "null", "package", "private",
"protected","public", "return", "short", "static",
"strictfp", "super", "switch", "synchronized","this",
"throw", "throws", "transient","true", "try",
"void", "volatile", "while", "date", "datetime",
"period", "stream", "string", "object", "header"
}.ForEach(s => ReservedWords.Add(s));
_normalizedTypes = new HashSet<IType>();
}
public override string GetFieldName(string name)
{
if (string.IsNullOrWhiteSpace(name))
{
return name;
}
return '_' + GetVariableName(name);
}
public override string GetPropertyName(string name)
{
if (string.IsNullOrWhiteSpace(name))
{
return name;
}
return CamelCase(RemoveInvalidCharacters(GetEscapedReservedName(name, "Property")));
}
public override string GetMethodName(string name)
{
name = GetEscapedReservedName(name, "Method");
return CamelCase(name);
}
public override string GetMethodGroupName(string name)
{
if (string.IsNullOrWhiteSpace(name))
{
return name;
}
name = GetEscapedReservedName(name, "Operations");
return PascalCase(name);
}
public override string GetEnumMemberName(string name)
{
if (string.IsNullOrWhiteSpace(name))
{
return name;
}
return RemoveInvalidCharacters(new Regex("[\\ -]+").Replace(name, "_")).ToUpper(CultureInfo.InvariantCulture);
}
public override string GetParameterName(string name)
{
return base.GetParameterName(GetEscapedReservedName(name, "Parameter"));
}
public override string GetVariableName(string name)
{
return base.GetVariableName(GetEscapedReservedName(name, "Variable"));
}
public static string GetServiceName(string name)
{
if (string.IsNullOrWhiteSpace(name))
{
return name;
}
return name + "Service";
}
public override void NormalizeClientModel(ServiceClient client)
{
if (client == null)
{
throw new ArgumentNullException("client");
}
base.NormalizeClientModel(client);
foreach (var method in client.Methods)
{
if (method.Group != null)
{
method.Group = method.Group.ToCamelCase();
}
var scope = new ScopeProvider();
foreach (var parameter in method.Parameters)
{
if (parameter.ClientProperty != null)
{
parameter.Name = string.Format(CultureInfo.InvariantCulture,
"{0}.get{1}()",
method.Group == null ? "this" : "this.client",
parameter.ClientProperty.Name.ToPascalCase());
}
else
{
parameter.Name = scope.GetVariableName(parameter.Name);
}
if (!parameter.IsRequired)
{
parameter.Type = WrapPrimitiveType(parameter.Type);
}
}
}
}
public override IType NormalizeType(IType type)
{
if (type == null)
{
return null;
}
var enumType = type as EnumType;
if (enumType != null && enumType.IsExpandable)
{
type = PrimaryType.String;
}
// Using Any instead of Contains since object hash is bound to a property which is modified during normalization
if (_normalizedTypes.Any(item => type.Equals(item)))
{
return _normalizedTypes.First(item => type.Equals(item));
}
_normalizedTypes.Add(type);
if (type is PrimaryType)
{
return NormalizePrimaryType(type as PrimaryType);
}
if (type is SequenceType)
{
return NormalizeSequenceType(type as SequenceType);
}
if (type is DictionaryType)
{
return NormalizeDictionaryType(type as DictionaryType);
}
if (type is CompositeType)
{
return NormalizeCompositeType(type as CompositeType);
}
if (type is EnumType)
{
return NormalizeEnumType(type as EnumType);
}
throw new NotSupportedException(string.Format(CultureInfo.InvariantCulture,
"Type {0} is not supported.", type.GetType()));
}
private IType NormalizeEnumType(EnumType enumType)
{
if (enumType.IsExpandable)
{
enumType.SerializedName = "string";
enumType.Name = "string";
}
else
{
enumType.Name = GetTypeName(enumType.Name);
}
for (int i = 0; i < enumType.Values.Count; i++)
{
enumType.Values[i].Name = GetEnumMemberName(enumType.Values[i].Name);
}
return enumType;
}
private IType NormalizeCompositeType(CompositeType compositeType)
{
compositeType.Name = GetTypeName(compositeType.Name);
foreach (var property in compositeType.Properties)
{
property.Name = GetPropertyName(property.Name);
property.Type = NormalizeType(property.Type);
if (!property.IsRequired)
{
property.Type = WrapPrimitiveType(property.Type);
}
}
return compositeType;
}
private static PrimaryType NormalizePrimaryType(PrimaryType primaryType)
{
if (primaryType == PrimaryType.Boolean)
{
primaryType.Name = "boolean";
}
else if (primaryType == PrimaryType.ByteArray)
{
primaryType.Name = "byte[]";
}
else if (primaryType == PrimaryType.Date)
{
primaryType.Name = "LocalDate";
}
else if (primaryType == PrimaryType.DateTime)
{
primaryType.Name = "DateTime";
}
else if (primaryType == PrimaryType.Double)
{
primaryType.Name = "double";
}
else if (primaryType == PrimaryType.Int)
{
primaryType.Name = "int";
}
else if (primaryType == PrimaryType.Long)
{
primaryType.Name = "long";
}
else if (primaryType == PrimaryType.Stream)
{
primaryType.Name = "InputStream";
}
else if (primaryType == PrimaryType.String)
{
primaryType.Name = "String";
}
else if (primaryType == PrimaryType.TimeSpan)
{
primaryType.Name = "Period";
}
else if (primaryType == PrimaryType.Object)
{
primaryType.Name = "Object";
}
return primaryType;
}
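        /// <summary>
        /// Wraps Java primitive type names (boolean, double, int, long) in their boxed
        /// equivalents and maps a null type to Void; other types are returned unchanged.
        /// </summary>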
public static IType WrapPrimitiveType(IType type)
{
if (type is PrimaryType)
{
var primaryType = new PrimaryType();
if (type.Name == "boolean")
{
primaryType.Name = "Boolean";
}
else if (type.Name == "double")
{
primaryType.Name = "Double";
}
else if (type.Name == "int")
{
primaryType.Name = "Integer";
}
else if (type.Name == "long")
{
primaryType.Name = "Long";
}
else
{
return type;
}
return primaryType;
}
else if (type == null)
{
var newType = new PrimaryType();
newType.Name = "Void";
return newType;
}
else
{
return type;
}
}
private IType NormalizeSequenceType(SequenceType sequenceType)
{
sequenceType.ElementType = WrapPrimitiveType(NormalizeType(sequenceType.ElementType));
sequenceType.NameFormat = "List<{0}>";
return sequenceType;
}
private IType NormalizeDictionaryType(DictionaryType dictionaryType)
{
dictionaryType.ValueType = WrapPrimitiveType(NormalizeType(dictionaryType.ValueType));
dictionaryType.NameFormat = "Map<String, {0}>";
return dictionaryType;
}
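        /// <summary>
        /// Returns the fully qualified name that needs to be imported for the given
        /// primary type, or null if no import is required.
        /// </summary>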
public static String ImportedFrom(PrimaryType primaryType)
{
if (primaryType == null)
{
return null;
}
if (primaryType == PrimaryType.Date ||
primaryType.Name == "LocalDate")
{
return "org.joda.time.LocalDate";
}
else if (primaryType == PrimaryType.DateTime ||
primaryType.Name == "DateTime")
{
return "org.joda.time.DateTime";
}
else if (primaryType == PrimaryType.Stream ||
primaryType.Name == "InputStream")
{
return "java.io.InputStream";
}
else if (primaryType == PrimaryType.TimeSpan ||
primaryType.Name == "Period")
{
return "java.time.Period";
}
else
{
return null;
}
}
}
} | 1 | 21,149 | Sorry I might have missed some context, but what's the reason you choose `Period` over `Duration` or `Interval`? (Thumbs up for using `org.joda.time` instead!) | Azure-autorest | java |
@@ -2359,6 +2359,19 @@ DefaultSettings.prototype = {
*/
preventOverflow: false,
+ /**
+ * Prevents the wheel event on overlays from performing its default action.
+ *
+ * @type {Boolean}
+ * @default false
+ *
+ * @example
+ * ```js
+ * preventWheel: false,
+ * ```
+ */
+ preventWheel: false,
+
/**
* @description
* Enables the functionality of the {@link BindRowsWithHeaders} plugin which allows binding the table rows with their headers. | 1 | import { isDefined } from './helpers/mixed';
import { isObjectEqual } from './helpers/object';
/**
* @alias Options
* @constructor
* @description
* ## Constructor options
*
* Constructor options are applied using an object literal passed as a second argument to the Handsontable constructor.
*
* ```js
* const container = document.getElementById('example');
* const hot = new Handsontable(container, {
* data: myArray,
* width: 400,
* height: 300
* });
* ```
*
* ---
* ## Cascading configuration
*
* Handsontable is using *Cascading Configuration*, which is a fast way to provide configuration options
* for the entire table, including its columns and particular cells.
*
* Consider the following example:
* ```js
* const container = document.getElementById('example');
* const hot = new Handsontable(container, {
* readOnly: true,
* columns: [
* {readOnly: false},
* {},
* {}
* ],
* cells: function(row, col, prop) {
* var cellProperties = {};
*
* if (row === 0 && col === 0) {
* cellProperties.readOnly = true;
* }
*
* return cellProperties;
* }
* });
* ```
*
* The above notation will result in all TDs being *read only*, except for first column TDs which will be *editable*, except for the TD in top left corner which will still be *read only*.
*
* ### The Cascading Configuration model
*
* ##### 1. Constructor
*
* Configuration options that are provided using first-level `handsontable(container, {option: "value"})` and `updateSettings` method.
*
* ##### 2. Columns
*
* Configuration options that are provided using second-level object `handsontable(container, {columns: {option: "value"}]})`
*
* ##### 3. Cells
*
* Configuration options that are provided using third-level function `handsontable(container, {cells: function: (row, col, prop){ }})`
*
* ---
* ## Architecture performance
*
* The Cascading Configuration model is based on prototypical inheritance. It is much faster and memory efficient
* compared to the previous model that used jQuery extend. See: [http://jsperf.com/extending-settings](http://jsperf.com/extending-settings).
*
* ---
* __Important notice:__ In order for the data separation to work properly, make sure that each instance of Handsontable has a unique `id`.
*/
function DefaultSettings() {}
DefaultSettings.prototype = {
/**
* License key for commercial version of Handsontable.
*
* @type {String}
* @default undefined
* @example
* ```js
* licenseKey: '00000-00000-00000-00000-00000',
* // or
* licenseKey: 'non-commercial-and-evaluation',
* ```
*/
licenseKey: void 0,
/**
* @description
* Initial data source that will be bound to the data grid __by reference__ (editing data grid alters the data source).
* Can be declared as an array of arrays, array of objects or a function.
*
* See [Understanding binding as reference](https://docs.handsontable.com/tutorial-data-binding.html#page-reference).
*
* @type {Array[]|Object[]}
* @default undefined
* @example
* ```js
* // as an array of arrays
* data: [
* ['A', 'B', 'C'],
* ['D', 'E', 'F'],
* ['G', 'H', 'J']
* ]
*
* // as an array of objects
* data: [
* {id: 1, name: 'Ted Right'},
* {id: 2, name: 'Frank Honest'},
* {id: 3, name: 'Joan Well'},
* {id: 4, name: 'Gail Polite'},
* {id: 5, name: 'Michael Fair'},
* ]
* ```
*/
data: void 0,
/**
* @description
* Defines the structure of a new row when data source is an array of objects.
*
* See [data-schema](https://docs.handsontable.com/tutorial-data-sources.html#page-data-schema) for more options.
*
* @type {Object}
* @default undefined
*
* @example
   * ```js
* // with data schema we can start with an empty table
* data: null,
* dataSchema: {id: null, name: {first: null, last: null}, address: null},
* colHeaders: ['ID', 'First Name', 'Last Name', 'Address'],
* columns: [
* {data: 'id'},
* {data: 'name.first'},
* {data: 'name.last'},
* {data: 'address'}
* ],
* startRows: 5,
* minSpareRows: 1
* ```
*/
dataSchema: void 0,
/**
* Width of the grid. Can be a value or a function that returns a value.
*
* @type {Number|String|Function}
* @default undefined
*
* @example
   * ```js
* // as a number
* width: 500,
*
* // as a string
* width: '75vw',
*
* // as a function
* width: function() {
* return 500;
* },
* ```
*/
width: void 0,
/**
* Height of the grid. Can be a number or a function that returns a number.
*
* @type {Number|String|Function}
* @default undefined
*
* @example
* ```js
* // as a number
* height: 500,
*
* // as a string
* height: '75vh',
*
* // as a function
* height: function() {
* return 500;
* },
* ```
*/
height: void 0,
/**
* @description
* Initial number of rows.
*
   * __Note:__ This option only has effect in the Handsontable constructor and only if the `data` option is not provided
*
* @type {Number}
* @default 5
*
* @example
* ```js
* // start with 15 empty rows
* startRows: 15,
* ```
*/
startRows: 5,
/**
* @description
* Initial number of columns.
*
   * __Note:__ This option only has effect in the Handsontable constructor and only if the `data` option is not provided
*
* @type {Number}
* @default 5
*
* @example
* ```js
* // start with 15 empty columns
* startCols: 15,
* ```
*/
startCols: 5,
/**
* Setting `true` or `false` will enable or disable the default row headers (1, 2, 3).
* You can also define an array `['One', 'Two', 'Three', ...]` or a function to define the headers.
   * If a function is set, the index of the row is passed as a parameter.
*
* @type {Boolean|String[]|Function}
* @default undefined
*
* @example
* ```js
* // as a boolean
* rowHeaders: true,
*
* // as an array
* rowHeaders: ['1', '2', '3'],
*
* // as a function
* rowHeaders: function(index) {
* return index + ': AB';
* },
* ```
*/
rowHeaders: void 0,
/**
* Setting `true` or `false` will enable or disable the default column headers (A, B, C).
* You can also define an array `['One', 'Two', 'Three', ...]` or a function to define the headers.
* If a function is set, then the index of the column is passed as a parameter.
*
* @type {Boolean|String[]|Function}
* @default null
*
* @example
* ```js
* // as a boolean
* colHeaders: true,
*
* // as an array
* colHeaders: ['A', 'B', 'C'],
*
* // as a function
* colHeaders: function(index) {
* return index + ': AB';
* },
* ```
*/
colHeaders: null,
/**
* Defines column widths in pixels. Accepts number, string (that will be converted to a number), array of numbers
* (if you want to define column width separately for each column) or a function (if you want to set column width
* dynamically on each render).
*
* @type {Number|Number[]|String|String[]|Function}
* @default undefined
*
* @example
* ```js
* // as a number, for each column.
* colWidths: 100,
*
* // as a string, for each column.
* colWidths: '100px',
*
* // as an array, based on visual indexes. The rest of the columns have a default width.
* colWidths: [100, 120, 90],
*
* // as a function, based on visual indexes.
* colWidths: function(index) {
* return index * 10;
* },
* ```
*/
colWidths: void 0,
/**
* Defines row heights in pixels. Accepts numbers, strings (that will be converted into a number), array of numbers
* (if you want to define row height separately for each row) or a function (if you want to set row height dynamically
* on each render).
*
* If the {@link ManualRowResize} or {@link AutoRowSize} plugins are enabled, this is also the minimum height that can
* be set via either of those two plugins.
*
   * The height should be equal to or greater than 23px. The table is rendered incorrectly if the height is less than 23px.
*
* @type {Number|Number[]|String|String[]|Function}
* @default undefined
*
* @example
* ```js
* // as a number, the same for all rows
* rowHeights: 100,
*
* // as a string, the same for all row
* rowHeights: '100px',
*
* // as an array, based on visual indexes. The rest of the rows have a default height
* rowHeights: [100, 120, 90],
*
* // as a function, based on visual indexes
* rowHeights: function(index) {
* return index * 10;
* },
* ```
*/
rowHeights: void 0,
/**
* @description
* Defines the cell properties and data binding for certain columns.
*
* __Note:__ Using this option sets a fixed number of columns (options `startCols`, `minCols`, `maxCols` will be ignored).
*
* See [documentation -> datasources.html](https://docs.handsontable.com/tutorial-data-sources.html#page-nested) for examples.
*
* @type {Object[]|Function}
* @default undefined
*
* @example
* ```js
* // as an array of objects
* // order of the objects in array is representation of physical indexes.
* columns: [
* {
* // column options for the first column
* type: 'numeric',
* numericFormat: {
* pattern: '0,0.00 $'
* }
* },
* {
* // column options for the second column
* type: 'text',
* readOnly: true
* }
* ],
*
* // or as a function, based on physical indexes
* columns: function(index) {
* return {
* type: index > 0 ? 'numeric' : 'text',
* readOnly: index < 1
* }
* }
* ```
*/
columns: void 0,
/**
* @description
* Defines the cell properties for given `row`, `col`, `prop` coordinates. Any constructor or column option may be
* overwritten for a particular cell (row/column combination) using the `cells` property in the Handsontable constructor.
*
   * __Note:__ Parameters `row` and `col` always represent __physical indexes__. The example below shows how to execute
* operations based on the __visual__ representation of Handsontable.
*
* Possible values of `prop`:
* - property name for column's data source object, when dataset is an [array of objects](/tutorial-data-sources.html#page-object)
* - the same number as `col`, when dataset is an [array of arrays](/tutorial-data-sources.html#page-array)
*
* @type {Function}
* @default undefined
*
* @example
* ```js
* cells: function(row, column, prop) {
* const cellProperties = {};
* const visualRowIndex = this.instance.toVisualRow(row);
* const visualColIndex = this.instance.toVisualColumn(column);
*
* if (visualRowIndex === 0 && visualColIndex === 0) {
* cellProperties.readOnly = true;
* }
*
* return cellProperties;
* },
* ```
*/
cells: void 0,
/**
* Any constructor or column option may be overwritten for a particular cell (row/column combination), using `cell`
* array passed to the Handsontable constructor.
*
* @type {Array[]}
* @default []
*
* @example
* ```js
* // make cell with coordinates (0, 0) read only
* cell: [
* {
* row: 0,
* col: 0,
* readOnly: true
* }
* ],
* ```
*/
cell: [],
/**
* @description
* If `true`, enables the {@link Comments} plugin, which enables an option to apply cell comments through the context menu
* (configurable with context menu keys `commentsAddEdit`, `commentsRemove`).
*
* To initialize Handsontable with predefined comments, provide cell coordinates and comment text values in a form of
* an array.
*
* See [Comments](https://docs.handsontable.com/demo-comments_.html) demo for examples.
*
* @type {Boolean|Object[]}
* @default false
*
* @example
* ```js
* // enable comments plugin
* comments: true,
*
* // or
* // enable comments plugin and add predefined comments
* comments: [
* {
* row: 1,
* col: 1,
* comment: {
* value: "Test comment"
* }
* }
* ],
* ```
*/
comments: false,
/**
* @description
* If `true`, enables the {@link CustomBorders} plugin, which enables an option to apply custom borders through the context
* menu (configurable with context menu key `borders`). To initialize Handsontable with predefined custom borders,
* provide cell coordinates and border styles in a form of an array.
*
* See [Custom Borders](https://docs.handsontable.com/demo-custom-borders.html) demo for examples.
*
* @type {Boolean|Object[]}
* @default false
*
* @example
* ```js
* // enable custom borders
* customBorders: true,
*
* // or
* // enable custom borders and start with predefined left border
* customBorders: [
* {
* range: {
* from: {
* row: 1,
* col: 1
* },
* to: {
* row: 3,
* col: 4
* }
* },
* left: {
* width: 2,
* color: 'red'
* },
* right: {},
* top: {},
* bottom: {}
* }
* ],
*
* // or
* customBorders: [
* {
* row: 2,
* col: 2,
* left: {
* width: 2,
* color: 'red'
* },
* right: {
* width: 1,
* color: 'green'
* },
* top: '',
* bottom: ''
* }
* ],
* ```
*/
customBorders: false,
/**
* Minimum number of rows. At least that number of rows will be created during initialization.
*
* @type {Number}
* @default 0
*
* @example
* ```js
* // set minimum table size to 10 rows
* minRows: 10,
* ```
*/
minRows: 0,
/**
* Minimum number of columns. At least that number of columns will be created during initialization.
*
* @type {Number}
* @default 0
*
* @example
* ```js
* // set minimum table size to 10 columns
* minCols: 10,
* ```
*/
minCols: 0,
/**
* Maximum number of rows. If set to a value lower than the initial row count, the data will be trimmed to the provided
* value as the number of rows.
*
* @type {Number}
* @default Infinity
*
* @example
* ```js
* // limit table size to maximum 300 rows
* maxRows: 300,
* ```
*/
maxRows: Infinity,
/**
* Maximum number of cols. If set to a value lower than the initial col count, the data will be trimmed to the provided
* value as the number of cols.
*
* @type {Number}
* @default Infinity
*
* @example
* ```js
* // limit table size to maximum 300 columns
* maxCols: 300,
* ```
*/
maxCols: Infinity,
/**
   * When set to 1 (or more), Handsontable will add a new row at the end of the grid if there are no more empty rows
   * (unless the number of rows exceeds the one set in the `maxRows` property).
*
* @type {Number}
* @default 0
*
* @example
* ```js
* // always add 3 empty rows at the table end
* minSpareRows: 3,
* ```
*/
minSpareRows: 0,
/**
   * When set to 1 (or more), Handsontable will add a new column at the end of the grid if there are no more empty columns
   * (unless the number of columns exceeds the one set in the `maxCols` property).
*
* @type {Number}
* @default 0
*
* @example
* ```js
* // always add 3 empty columns at the table end
* minSpareCols: 3,
* ```
*/
minSpareCols: 0,
/**
* If set to `false`, there won't be an option to insert new rows in the Context Menu.
*
* @type {Boolean}
* @default true
*
* @example
* ```js
* // hide "Insert row above" and "Insert row below" options from the Context Menu
* allowInsertRow: false,
* ```
*/
allowInsertRow: true,
/**
* If set to `false`, there won't be an option to insert new columns in the Context Menu.
*
* @type {Boolean}
* @default true
*
* @example
* ```js
* // hide "Insert column left" and "Insert column right" options from the Context Menu
* allowInsertColumn: false,
* ```
*/
allowInsertColumn: true,
/**
* If set to `false`, there won't be an option to remove rows in the Context Menu.
*
* @type {Boolean}
* @default true
*
* @example
* ```js
* // hide "Remove row" option from the Context Menu
* allowRemoveRow: false,
* ```
*/
allowRemoveRow: true,
/**
* If set to `false`, there won't be an option to remove columns in the Context Menu.
*
* @type {Boolean}
* @default true
*
* @example
* ```js
* // hide "Remove column" option from the Context Menu
* allowRemoveColumn: false,
* ```
*/
allowRemoveColumn: true,
/**
* @description
   * Defines how the table selection reacts. The selection supports three different behaviors defined as:
* * `'single'` Only a single cell can be selected.
* * `'range'` Multiple cells within a single range can be selected.
* * `'multiple'` Multiple ranges of cells can be selected.
*
* To see how to interact with selection by getting selected data or change styles of the selected cells go to
* [https://docs.handsontable.com/demo-selecting-ranges.html](https://docs.handsontable.com/demo-selecting-ranges.html).
*
* @type {String}
* @default 'multiple'
*
* @example
* ```js
* // only one cell can be selected at a time
* selectionMode: 'single',
* ```
*/
selectionMode: 'multiple',
/**
   * Enables the fill handle (drag-down and copy-down) functionality, which shows a small rectangle in the bottom
   * right corner of the selected area that lets you expand values to the adjacent cells.
*
* Setting to `true` enables the fillHandle plugin. Possible values: `true` (to enable in all directions),
* `'vertical'` or `'horizontal'` (to enable in one direction), `false` (to disable completely), an object with
* options: `autoInsertRow`, `direction`.
*
   * If the `autoInsertRow` option is `true`, the fill handle will create new rows until it reaches the last row.
* It is enabled by default.
*
* @type {Boolean|String|Object}
* @default true
*
* @example
* ```js
* // enable plugin in all directions and with autoInsertRow as true
* fillHandle: true,
*
* // or
* // enable plugin in vertical direction and with autoInsertRow as true
* fillHandle: 'vertical',
*
* // or
* fillHandle: {
* // enable plugin in both directions and with autoInsertRow as false
* autoInsertRow: false,
* },
*
* // or
* fillHandle: {
* // enable plugin in vertical direction and with autoInsertRow as false
* autoInsertRow: false,
* direction: 'vertical'
* },
* ```
*/
fillHandle: {
autoInsertRow: false,
},
/**
* Allows to specify the number of fixed (or *frozen*) rows at the top of the table.
*
* @type {Number}
* @default 0
*
* @example
* ```js
* // freeze the first 3 rows of the table.
* fixedRowsTop: 3,
* ```
*/
fixedRowsTop: 0,
/**
* Allows to specify the number of fixed (or *frozen*) rows at the bottom of the table.
*
* @type {Number}
* @default 0
*
* @example
* ```js
* // freeze the last 3 rows of the table.
* fixedRowsBottom: 3,
* ```
*/
fixedRowsBottom: 0,
/**
* Allows to specify the number of fixed (or *frozen*) columns on the left of the table.
*
* @type {Number}
* @default 0
*
* @example
* ```js
* // freeze first 3 columns of the table.
* fixedColumnsLeft: 3,
* ```
*/
fixedColumnsLeft: 0,
/**
* If `true`, mouse click outside the grid will deselect the current selection. Can be a function that takes the
* click event target and returns a boolean.
*
* @type {Boolean|Function}
* @default true
*
* @example
* ```js
* // don't clear current selection when mouse click was outside the grid
* outsideClickDeselects: false,
*
* // or
* outsideClickDeselects: function(event) {
* return false;
* }
* ```
*/
outsideClickDeselects: true,
/**
* If `true`, <kbd>ENTER</kbd> begins editing mode (like in Google Docs). If `false`, <kbd>ENTER</kbd> moves to next
* row (like Excel) and adds a new row if necessary. <kbd>TAB</kbd> adds new column if necessary.
*
* @type {Boolean}
* @default true
*
* @example
* ```js
* enterBeginsEditing: false,
* ```
*/
enterBeginsEditing: true,
/**
* Defines the cursor movement after <kbd>ENTER</kbd> was pressed (<kbd>SHIFT</kbd> + <kbd>ENTER</kbd> uses a negative vector). Can
* be an object or a function that returns an object. The event argument passed to the function is a DOM Event object
* received after the <kbd>ENTER</kbd> key has been pressed. This event object can be used to check whether user pressed
* <kbd>ENTER</kbd> or <kbd>SHIFT</kbd> + <kbd>ENTER</kbd>.
*
* @type {Object|Function}
* @default {row: 1, col: 0}
*
* @example
* ```js
* // move selection diagonal by 1 cell in x and y axis
* enterMoves: {row: 1, col: 1},
* // or as a function
* enterMoves: function(event) {
* return {row: 1, col: 1};
* },
* ```
*/
enterMoves: { row: 1, col: 0 },
/**
* Defines the cursor movement after <kbd>TAB</kbd> is pressed (<kbd>SHIFT</kbd> + <kbd>TAB</kbd> uses a negative vector). Can
* be an object or a function that returns an object. The event argument passed to the function is a DOM Event object
* received after the <kbd>TAB</kbd> key has been pressed. This event object can be used to check whether user pressed
* <kbd>TAB</kbd> or <kbd>SHIFT</kbd> + <kbd>TAB</kbd>.
*
* @type {Object|Function}
* @default {row: 0, col: 1}
*
* @example
* ```js
* // move selection 2 cells away after TAB pressed.
* tabMoves: {row: 2, col: 2},
* // or as a function
* tabMoves: function(event) {
* return {row: 2, col: 2};
* },
* ```
*/
tabMoves: { row: 0, col: 1 },
/**
* If `true`, pressing <kbd>TAB</kbd> or right arrow in the last column will move to first column in next row.
*
* @type {Boolean}
* @default true
*
* @example
* ```js
* // stop TAB key navigation on the last column
* autoWrapRow: false,
* ```
*/
autoWrapRow: true,
/**
* If `true`, pressing <kbd>ENTER</kbd> or down arrow in the last row will move to the first row in the next column.
*
* @type {Boolean}
* @default true
*
* @example
* ```js
* // stop ENTER key navigation on the last row
* autoWrapCol: false,
* ```
*/
autoWrapCol: true,
/**
* @description
* Turns on saving the state of column sorting, column positions and column sizes in local storage.
*
* You can save any sort of data in local storage to preserve table state between page reloads. In order to enable
* data storage mechanism, `persistentState` option must be set to `true` (you can set it either during Handsontable
* initialization or using the `updateSettings` method). When `persistentState` is enabled it exposes 3 hooks:
*
* __persistentStateSave__ (key: String, value: Mixed)
*
* * Saves value under given key in browser local storage.
*
* __persistentStateLoad__ (key: String, valuePlaceholder: Object)
*
   *   * Loads `value`, saved under the given key, from browser local storage. The loaded `value` will be saved in
   *   `valuePlaceholder.value` (this is due to specific behaviour of the `Hooks.run()` method). If no value has
   *   been saved under the given key, `valuePlaceholder.value` will be `undefined`.
*
* __persistentStateReset__ (key: String)
*
   *   * Clears the value saved under `key`. If no `key` is given, all values associated with the table will be cleared.
*
* __Note:__ The main reason behind using `persistentState` hooks rather than regular LocalStorage API is that it
* ensures separation of data stored by multiple Handsontable instances. In other words, if you have two (or more)
* instances of Handsontable on one page, data saved by one instance won't be accessible by the second instance.
* Those two instances can store data under the same key and no data would be overwritten.
*
* __Important:__ In order for the data separation to work properly, make sure that each instance of Handsontable has a unique `id`.
*
* @type {Boolean}
* @default false
*
* @example
* ```js
* // enable the persistent state plugin
* persistentState: true,
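   *
   * // A sketch of running the exposed hooks through the instance API (assumes
   * // `hot` is your Handsontable instance; the key name is only illustrative):
   * // hot.runHooks('persistentStateSave', 'exampleKey', 100);
   * // const loaded = {};
   * // hot.runHooks('persistentStateLoad', 'exampleKey', loaded); // loaded.value === 100
   * // hot.runHooks('persistentStateReset', 'exampleKey');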
* ```
*/
persistentState: void 0,
/**
* Class name for all visible rows in the current selection.
*
* @type {String}
* @default undefined
*
* @example
* ```js
* // This will add a 'currentRow' class name to appropriate table cells.
* currentRowClassName: 'currentRow',
* ```
*/
currentRowClassName: void 0,
/**
* Class name for all visible columns in the current selection.
*
* @type {String}
* @default undefined
*
* @example
* ```js
* // This will add a 'currentColumn' class name to appropriate table cells.
* currentColClassName: 'currentColumn',
* ```
*/
currentColClassName: void 0,
/**
* Class name for all visible headers in current selection.
*
* @type {String}
* @default 'ht__highlight'
*
* @example
* ```js
* // This will add a 'ht__highlight' class name to appropriate table headers.
* currentHeaderClassName: 'ht__highlight',
* ```
*/
currentHeaderClassName: 'ht__highlight',
/**
* Class name for all active headers in selections. The header will be marked with this class name
* only when a whole column or row will be selected.
*
* @type {String}
* @since 0.38.2
* @default 'ht__active_highlight'
*
* @example
* ```js
* // this will add a 'ht__active_highlight' class name to appropriate table headers.
* activeHeaderClassName: 'ht__active_highlight',
* ```
*/
activeHeaderClassName: 'ht__active_highlight',
/**
* Class name for the Handsontable container element.
*
* @type {String|String[]}
* @default undefined
*
* @example
* ```js
* // set custom class for table container
* className: 'your__class--name',
*
* // or
* className: ['first-class-name', 'second-class-name'],
* ```
*/
className: void 0,
/**
* Class name for all tables inside container element.
*
* @type {String|String[]}
* @default undefined
*
* @example
* ```js
* // set custom class for table element
* tableClassName: 'your__class--name',
*
* // or
* tableClassName: ['first-class-name', 'second-class-name'],
* ```
*/
tableClassName: void 0,
/**
* @description
* Defines how the columns react, when the declared table width is different than the calculated sum of all column widths.
   * [See more](https://docs.handsontable.com/demo-stretching.html) about the stretching mode. Possible values:
* * `'none'` Disable stretching
* * `'last'` Stretch only the last column
* * `'all'` Stretch all the columns evenly
*
* @type {String}
* @default 'none'
*
* @example
* ```js
* // fit table to the container
* stretchH: 'all',
* ```
*/
stretchH: 'none',
/**
   * Overwrites the default `isEmptyRow` method, which checks if the row at the provided index is empty.
*
* @type {Function}
* @param {Number} row Visual row index.
* @returns {Boolean}
*
* @example
* ```js
* // define custom checks for empty row
* isEmptyRow: function(row) {
* ...
* },
* ```
*/
isEmptyRow(row) {
let col;
let colLen;
let value;
let meta;
for (col = 0, colLen = this.countCols(); col < colLen; col++) {
value = this.getDataAtCell(row, col);
if (value !== '' && value !== null && isDefined(value)) {
if (typeof value === 'object') {
meta = this.getCellMeta(row, col);
return isObjectEqual(this.getSchema()[meta.prop], value);
}
return false;
}
}
return true;
},
/**
   * Overwrites the default `isEmptyCol` method, which checks if the column at the provided index is empty.
*
* @type {Function}
* @param {Number} column Visual column index
* @returns {Boolean}
*
* @example
* ```js
* // define custom checks for empty column
* isEmptyCol: function(column) {
* return false;
* },
* ```
*/
isEmptyCol(col) {
let row;
let rowLen;
let value;
for (row = 0, rowLen = this.countRows(); row < rowLen; row++) {
value = this.getDataAtCell(row, col);
if (value !== '' && value !== null && isDefined(value)) {
return false;
}
}
return true;
},
/**
* When set to `true`, the table is re-rendered when it is detected that it was made visible in DOM.
*
* @type {Boolean}
* @default true
*
* @example
* ```js
* // don't rerender the table on visibility changes
* observeDOMVisibility: false,
* ```
*/
observeDOMVisibility: true,
/**
* If set to `true`, Handsontable will accept values that were marked as invalid by the cell `validator`. It will
* result with *invalid* cells being treated as *valid* (will save the *invalid* value into the Handsontable data source).
* If set to `false`, Handsontable will *not* accept the invalid values and won't allow the user to close the editor.
* This option will be particularly useful when used with the Autocomplete's `strict` mode.
*
* @type {Boolean}
* @default true
*
* @example
* ```js
* // don't save the invalid values
* allowInvalid: false,
* ```
*/
allowInvalid: true,
/**
* If set to `true`, Handsontable will accept values that are empty (`null`, `undefined` or `''`). If set to `false`,
* Handsontable will *not* accept the empty values and mark cell as invalid.
*
* @type {Boolean}
* @default true
*
* @example
* ```js
* // allow empty values for all cells (whole table)
* allowEmpty: true,
*
* // or
* columns: [
* {
* data: 'date',
* dateFormat: 'DD/MM/YYYY',
* // allow empty values only for the 'date' column
* allowEmpty: true
* }
* ],
* ```
*/
allowEmpty: true,
/**
* CSS class name for cells that did not pass validation.
*
* @type {String}
* @default 'htInvalid'
*
* @example
* ```js
* // set custom validation error class
* invalidCellClassName: 'highlight--error',
* ```
*/
invalidCellClassName: 'htInvalid',
/**
   * When set to a non-empty string, it is displayed as the cell content for empty cells. If a value of a different type is provided,
* it will be stringified and applied as a string.
*
* @type {String}
* @default undefined
*
* @example
* ```js
* // add custom placeholder content to empty cells
* placeholder: 'Empty Cell',
* ```
*/
placeholder: void 0,
/**
* CSS class name for cells that have a placeholder in use.
*
* @type {String}
* @default 'htPlaceholder'
*
* @example
* ```js
* // set custom placeholder class
* placeholderCellClassName: 'has-placeholder',
* ```
*/
placeholderCellClassName: 'htPlaceholder',
/**
* CSS class name for read-only cells.
*
* @type {String}
* @default 'htDimmed'
*
* @example
* ```js
* // set custom read-only class
* readOnlyCellClassName: 'is-readOnly',
* ```
*/
readOnlyCellClassName: 'htDimmed',
/**
* @description
* If a string is provided, it may be one of the following predefined values:
* * `autocomplete`,
* * `checkbox`,
* * `html`,
* * `numeric`,
   * * `password`,
* * `text`.
*
* Or you can [register](https://docs.handsontable.com/demo-custom-renderers.html) the custom renderer under specified name and use its name as an alias in your
* configuration.
*
* If a function is provided, it will receive the following arguments:
* ```js
* function(instance, TD, row, col, prop, value, cellProperties) {}
* ```
*
   * You can read more about custom renderers [in the documentation](https://docs.handsontable.com/demo-custom-renderers.html).
*
* @type {String|Function}
* @default undefined
*
* @example
* ```js
* // register custom renderer
* Handsontable.renderers.registerRenderer('my.renderer', function(instance, TD, row, col, prop, value, cellProperties) {
* TD.innerHTML = value;
* });
*
* // use it for selected column:
* columns: [
* {
* // as a string with the name of build in renderer
* renderer: 'autocomplete',
* editor: 'select'
* },
* {
* // as an alias to custom renderer registered above
* renderer: 'my.renderer'
* },
* {
* // renderer as custom function
* renderer: function(hotInstance, TD, row, col, prop, value, cellProperties) {
* TD.style.color = 'blue';
* TD.innerHTML = value;
* }
* }
* ],
* ```
*/
renderer: void 0,
/**
* CSS class name added to the commented cells.
*
* @type {String}
* @default 'htCommentCell'
*
* @example
* ```js
* // set custom class for commented cells
* commentedCellClassName: 'has-comment',
* ```
*/
commentedCellClassName: 'htCommentCell',
/**
* If set to `true`, it enables the browser's native selection of a fragment of the text within a single cell, between
* adjacent cells or in a whole table. If set to `'cell'`, it enables the possibility of selecting a fragment of the
* text within a single cell's body.
*
* @type {Boolean|String}
* @default false
*
* @example
* ```js
* // enable text selection within table
* fragmentSelection: true,
*
* // or
* // enable text selection within cells only
* fragmentSelection: 'cell',
* ```
*/
fragmentSelection: false,
/**
* @description
* Makes cell [read only](https://docs.handsontable.com/demo-read-only.html).
*
* @type {Boolean}
* @default false
*
* @example
* ```js
* // set cell as read only
* readOnly: true,
* ```
*/
readOnly: false,
/**
* @description
* When added to a `column` property, it skips the column on paste and pastes the data on the next column to the right.
*
* @type {Boolean}
* @default false
*
* @example
* ```js
* columns: [
* {
* // don't paste data to this column
* skipColumnOnPaste: true
* }
* ],
* ```
*/
skipColumnOnPaste: false,
/**
* @description
* When added to a cell property, it skips the row on paste and pastes the data on the following row.
*
* @type {Boolean}
* @default false
*
* @example
* ```js
* cells: function(row, column) {
* const cellProperties = {};
*
* // don't paste data to the second row
* if (row === 1) {
* cellProperties.skipRowOnPaste = true;
* }
*
* return cellProperties;
* }
* ```
*/
skipRowOnPaste: false,
/**
* @description
* Setting to `true` enables the {@link Search} plugin (see [demo](https://docs.handsontable.com/demo-search-for-values.html)).
*
* @type {Boolean}
* @default false
*
* @example
* ```js
* // enable search plugin
* search: true,
*
* // or
* // as an object with detailed configuration
* search: {
* searchResultClass: 'customClass',
* queryMethod: function(queryStr, value) {
* ...
* },
* callback: function(instance, row, column, value, result) {
* ...
* }
* }
* ```
*/
search: false,
/**
* @description
* Shortcut to define the combination of the cell renderer, editor and validator for the column, cell or whole table.
*
* Possible values:
* * [autocomplete](https://docs.handsontable.com/demo-autocomplete.html)
* * [checkbox](https://docs.handsontable.com/demo-checkbox.html)
* * [date](https://docs.handsontable.com/demo-date.html)
* * [dropdown](https://docs.handsontable.com/demo-dropdown.html)
* * [handsontable](https://docs.handsontable.com/demo-handsontable.html)
* * [numeric](https://docs.handsontable.com/demo-numeric.html)
* * [password](https://docs.handsontable.com/demo-password.html)
* * text
* * [time](https://docs.handsontable.com/demo-time.html)
*
* Or you can register the custom cell type under specified name and use
* its name as an alias in your configuration.
*
* @type {String}
* @default 'text'
*
* @example
* ```js
* // register custom cell type:
* Handsontable.cellTypes.registerCellType('my.type', {
* editor: MyEditorClass,
* renderer: function(hot, td, row, col, prop, value, cellProperties) {
* td.innerHTML = value;
* },
* validator: function(value, callback) {
* callback(value === 'foo' ? true : false);
* }
* });
*
* // use it in column settings:
* columns: [
* {
* type: 'text'
* },
* {
* // an alias to custom type
* type: 'my.type'
* },
* {
* type: 'checkbox'
* }
* ],
* ```
*/
type: 'text',
/**
* @description
* Makes a cell copyable (pressing <kbd>CTRL</kbd> + <kbd>C</kbd> on your keyboard moves its value to system clipboard).
*
* __Note:__ this setting is `false` by default for cells with type `password`.
*
* @type {Boolean}
* @default true
*
* @example
* ```js
* cells: [
* {
* cell: 0,
* row: 0,
* // cell with coordinates (0, 0) can't be copied
* copyable: false,
* }
* ],
* ```
*/
copyable: true,
/**
* Defines the editor for the table/column/cell.
*
* If a string is provided, it may be one of the following predefined values:
* * [autocomplete](https://docs.handsontable.com/demo-autocomplete.html)
* * [checkbox](https://docs.handsontable.com/demo-checkbox.html)
* * [date](https://docs.handsontable.com/demo-date.html)
* * [dropdown](https://docs.handsontable.com/demo-dropdown.html)
* * [handsontable](https://docs.handsontable.com/demo-handsontable.html)
* * [mobile](https://docs.handsontable.com/demo-mobiles-and-tablets.html)
* * [password](https://docs.handsontable.com/demo-password.html)
* * [select](https://docs.handsontable.com/demo-select.html)
* * text
*
* Or you can [register](https://docs.handsontable.com/tutorial-cell-editor.html#registering-an-editor) the custom editor under specified name and use its name as an alias in your
* configuration.
*
* To disable cell editing completely set `editor` property to `false`.
*
* @type {String|Function|Boolean}
* @default undefined
*
* @example
* ```js
* columns: [
* {
* // set editor for the first column
* editor: 'select'
* },
* {
* // disable editor for the second column
* editor: false
* }
* ],
* ```
*/
editor: void 0,
/**
   * Controls the number of choices for the autocomplete (or dropdown) typed cells. After exceeding it, a scrollbar for the
* dropdown list of choices will appear.
*
* @type {Number}
* @default 10
*
* @example
* ```js
* columns: [
* {
* type: 'autocomplete',
* // set autocomplete options list height
* visibleRows: 15,
* }
* ],
* ```
*/
visibleRows: 10,
/**
   * Makes the autocomplete or dropdown width the same as the edited cell width. If `false`, the editor will be scaled
* according to its content.
*
* @type {Boolean}
* @default true
*
* @example
* ```js
* columns: [
* {
* type: 'autocomplete',
* // don't trim dropdown width with column width
* trimDropdown: false,
* }
* ],
* ```
*/
trimDropdown: true,
/**
* Setting to `true` enables the debug mode, currently used to test the correctness of the row and column
* header fixed positioning on a layer above the master table.
*
* @type {Boolean}
* @default false
*
* @example
* ```js
* // enable debug mode
* debug: true,
* ```
*/
debug: false,
/**
* When set to `true`, the text of the cell content is wrapped if it does not fit in the fixed column width.
*
* @type {Boolean}
* @default true
*
* @example
* ```js
* colWidths: 100,
* columns: [
* {
* // fixed column width is set but don't wrap the content
* wordWrap: false,
* }
* ],
* ```
*/
wordWrap: true,
/**
* CSS class name added to cells with cell meta `wordWrap: false`.
*
* @type {String}
* @default 'htNoWrap'
*
* @example
* ```js
* // set custom class for cells which content won't be wrapped
* noWordWrapClassName: 'is-noWrapCell',
* ```
*/
noWordWrapClassName: 'htNoWrap',
/**
* @description
* Defines if the right-click context menu should be enabled. Context menu allows to create new row or column at any
* place in the grid among [other features](https://docs.handsontable.com/demo-context-menu.html).
* Possible values:
* * `true` (to enable default options),
* * `false` (to disable completely)
* * an array of [predefined options](https://docs.handsontable.com/demo-context-menu.html#page-specific),
* * an object [with defined structure](https://docs.handsontable.com/demo-context-menu.html#page-custom)
*
* See [the context menu demo](https://docs.handsontable.com/demo-context-menu.html) for examples.
*
* @type {Boolean|String[]|Object}
* @default undefined
*
* @example
* ```js
* // as a boolean
* contextMenu: true,
*
* // as an array
* contextMenu: ['row_above', 'row_below', '---------', 'undo', 'redo'],
*
* // as an object (`name` attribute is required in the custom keys)
* contextMenu: {
* items: {
* "option1": {
* name: "option1"
* },
* "option2": {
* name: "option2",
* submenu: {
* items: [
* {
* key: "option2:suboption1",
* name: "option2:suboption1",
* callback: function(key, options) {
* ...
* }
* },
* ...
* ]
* }
* }
* }
* },
* ```
*/
contextMenu: void 0,
/**
* Disables or enables the copy/paste functionality.
*
* @type {Boolean}
* @default true
*
* @example
* ```js
* // disable copy and paste
* copyPaste: false,
* ```
*/
copyPaste: true,
/**
* If `true`, undo/redo functionality is enabled.
*
* @type {Boolean}
* @default undefined
*
* @example
* ```js
* // enable undo and redo
* undo: true,
* ```
*/
undo: void 0,
/**
* @description
* Turns on [Column sorting](https://docs.handsontable.com/demo-sorting-data.html). Can be either a boolean (`true` / `false`) or an object with a declared sorting options:
* * `initialConfig` - Object with predefined keys:
* * `column` - sorted column
* * `sortOrder` - order in which column will be sorted
* * `'asc'` = ascending
* * `'desc'` = descending
* * `indicator` - display status for sorting order indicator (an arrow icon in the column header, specifying the sorting order).
* * `true` = show sort indicator for sorted columns
* * `false` = don't show sort indicator for sorted columns
* * `headerAction` - allow to click on the headers to sort
* * `true` = turn on possibility to click on the headers to sort
* * `false` = turn off possibility to click on the headers to sort
* * `sortEmptyCells` - how empty values should be handled
* * `true` = the table sorts empty cells
* * `false` = the table moves all empty cells to the end of the table
   * * `compareFunctionFactory` - curry function returning the compare function; the compare function should work in the same way as a function handled by the native `Array.sort` method; please take a look at the examples below for more information.
*
* @type {Boolean|Object}
* @default undefined
*
* @example
* ```js
* // as boolean
* columnSorting: true
*
* // as an object with initial sort config (sort ascending for column at index 1)
* columnSorting: {
* initialConfig: {
* column: 1,
* sortOrder: 'asc'
* }
* }
*
* // as an object which define specific sorting options for all columns
* columnSorting: {
* sortEmptyCells: true, // true = the table sorts empty cells, false = the table moves all empty cells to the end of the table
* indicator: true, // true = shows indicator for all columns, false = don't show indicator for columns
* headerAction: false, // true = allow to click on the headers to sort, false = turn off possibility to click on the headers to sort
* compareFunctionFactory: function(sortOrder, columnMeta) {
* return function(value, nextValue) {
* // Some value comparisons which will return -1, 0 or 1...
* }
* }
   * }
   * ```
*/
columnSorting: void 0,
/**
   * Turns on [Manual column move](https://docs.handsontable.com/demo-moving-rows-and-columns.html) if set to a boolean, or defines the initial column order if set to an array of column indexes.
*
* @type {Boolean|Number[]}
* @default undefined
*
* @example
* ```js
* // as a boolean to enable column move
* manualColumnMove: true,
*
   * // as an array with initial order
* // (move column index at 0 to 1 and move column index at 1 to 4)
* manualColumnMove: [1, 4],
* ```
*/
manualColumnMove: void 0,
/**
* @description
   * Turns on [Manual column resize](https://docs.handsontable.com/demo-resizing.html) if set to a boolean, or defines the initial column widths if set to an array of widths.
*
* @type {Boolean|Number[]}
* @default undefined
*
* @example
* ```js
* // as a boolean to enable column resize
* manualColumnResize: true,
*
   * // as an array with initial widths
* // (column at 0 index has 40px and column at 1 index has 50px)
* manualColumnResize: [40, 50],
* ```
*/
manualColumnResize: void 0,
/**
* @description
   * Turns on [Manual row move](https://docs.handsontable.com/demo-moving-rows-and-columns.html) if set to a boolean, or defines the initial row order if set to an array of row indexes.
*
* @type {Boolean|Number[]}
* @default undefined
*
* @example
* ```js
* // as a boolean
* manualRowMove: true,
*
   * // as an array with initial order
* // (move row index at 0 to 1 and move row index at 1 to 4)
* manualRowMove: [1, 4],
* ```
*/
manualRowMove: void 0,
/**
* @description
   * Turns on [Manual row resize](https://docs.handsontable.com/demo-resizing.html) if set to a boolean, or defines the initial row heights if set to an array of heights.
*
* @type {Boolean|Number[]}
* @default undefined
*
* @example
* ```js
* // as a boolean to enable row resize
* manualRowResize: true,
*
* // as an array to set initial heights
* // (row at 0 index has 40px and row at 1 index has 50px)
* manualRowResize: [40, 50],
* ```
*/
manualRowResize: void 0,
/**
* @description
* If set to `true`, it enables a possibility to merge cells. If set to an array of objects, it merges the cells provided
* in the objects (see the example below). More information on [the demo page](https://docs.handsontable.com/demo-merge-cells.html).
*
* @type {Boolean|Object[]}
* @default false
*
* @example
* ```js
* // enables the mergeCells plugin
   * mergeCells: true,
*
* // declares a list of merged sections
* mergeCells: [
* // rowspan and colspan properties declare the width and height of a merged section in cells
* {row: 1, col: 1, rowspan: 3, colspan: 3},
* {row: 3, col: 4, rowspan: 2, colspan: 2},
* {row: 5, col: 6, rowspan: 3, colspan: 3}
* ],
* ```
*/
mergeCells: false,
/**
* @description
* Turns on [Multi-column sorting](https://docs.handsontable.com/pro/demo-multicolumn-sorting.html). Can be either a boolean (`true` / `false`) or an object with a declared sorting options:
* * `initialConfig` - Array containing objects, every with predefined keys:
* * `column` - sorted column
* * `sortOrder` - order in which column will be sorted
* * `'asc'` = ascending
* * `'desc'` = descending
* * `indicator` - display status for sorting order indicator (an arrow icon in the column header, specifying the sorting order).
* * `true` = show sort indicator for sorted columns
* * `false` = don't show sort indicator for sorted columns
* * `headerAction` - allow to click on the headers to sort
* * `true` = turn on possibility to click on the headers to sort
* * `false` = turn off possibility to click on the headers to sort
* * `sortEmptyCells` - how empty values should be handled
* * `true` = the table sorts empty cells
* * `false` = the table moves all empty cells to the end of the table
   * * `compareFunctionFactory` - curry function returning the compare function; the compare function should work in the same way as a function handled by the native `Array.sort` method; please take a look at the examples below for more information.
*
* @type {Boolean|Object}
* @default undefined
*
* @example
* ```js
* // as boolean
* multiColumnSorting: true
*
* // as an object with initial sort config (sort ascending for column at index 1 and then sort descending for column at index 0)
* multiColumnSorting: {
* initialConfig: [{
* column: 1,
* sortOrder: 'asc'
* }, {
* column: 0,
* sortOrder: 'desc'
* }]
* }
*
* // as an object which define specific sorting options for all columns
* multiColumnSorting: {
* sortEmptyCells: true, // true = the table sorts empty cells, false = the table moves all empty cells to the end of the table
* indicator: true, // true = shows indicator for all columns, false = don't show indicator for columns
* headerAction: false, // true = allow to click on the headers to sort, false = turn off possibility to click on the headers to sort
* compareFunctionFactory: function(sortOrder, columnMeta) {
* return function(value, nextValue) {
* // Some value comparisons which will return -1, 0 or 1...
* }
* }
   * }
   * ```
*/
multiColumnSorting: void 0,
/**
* @description
* Number of rows to be rendered outside of the visible part of the table. By default, it's set to `'auto'`, which
   * makes Handsontable attempt to calculate the best offset performance-wise.
*
* You may test out different values to find the best one that works for your specific implementation.
*
* @type {Number|String}
* @default 'auto'
*
* @example
* ```js
* viewportRowRenderingOffset: 70,
* ```
*/
viewportRowRenderingOffset: 'auto',
/**
* @description
* Number of columns to be rendered outside of the visible part of the table. By default, it's set to `'auto'`, which
* makes Handsontable try calculating the best offset performance-wise.
*
* You may experiment with the value to find the one that works best for your specific implementation.
*
* @type {Number|String}
* @default 'auto'
*
* @example
* ```js
* viewportColumnRenderingOffset: 70,
* ```
*/
viewportColumnRenderingOffset: 'auto',
/**
* @description
* A function, regular expression or a string, which will be used in the process of cell validation. If a function is
* used, be sure to execute the callback argument with either `true` (`callback(true)`) if the validation passed
* or with `false` (`callback(false)`), if the validation failed.
*
* __Note__, that `this` in the function points to the `cellProperties` object.
*
* If a string is provided, it may be one of the following predefined values:
* * `autocomplete`,
* * `date`,
* * `numeric`,
* * `time`.
*
* Or you can [register](https://docs.handsontable.com/demo-data-validation.html) the validator function under specified name and use its name as an alias in your
* configuration.
*
* See more [in the demo](https://docs.handsontable.com/demo-data-validation.html).
*
* @type {Function|RegExp|String}
* @default undefined
*
* @example
* ```js
* columns: [
* {
* // as a function
* validator: function(value, callback) {
* ...
* }
* },
* {
* // regular expression
* validator: /^[0-9]$/
* },
* {
* // as a string
* validator: 'numeric'
* }
* ],
* ```
*/
validator: void 0,
/**
* @description
* Disables visual cells selection.
*
* Possible values:
* * `true` - Disables any type of visual selection (current and area selection),
* * `false` - Enables any type of visual selection. This is default value.
* * `'current'` - Disables the selection of a currently selected cell, the area selection is still present.
* * `'area'` - Disables the area selection, the currently selected cell selection is still present.
* * `'header'` - Disables the headers selection, the currently selected cell selection is still present.
*
* @type {Boolean|String|String[]}
* @default false
*
* @example
* ```js
* // as a boolean
* disableVisualSelection: true,
*
* // as a string ('current', 'area' or 'header')
* disableVisualSelection: 'current',
*
* // as an array
* disableVisualSelection: ['current', 'area'],
* ```
*/
disableVisualSelection: false,
/**
* Disables or enables {@link ManualColumnFreeze} plugin.
*
* @type {Boolean}
* @default undefined
*
* @example
* ```js
* // enable fixed columns
* manualColumnFreeze: true,
* ```
*/
manualColumnFreeze: void 0,
/**
* Defines whether Handsontable should trim the whitespace at the beginning and the end of the cell contents.
*
* @type {Boolean}
* @default true
*
* @example
* ```js
* columns: [
* {
* // don't remove whitespace
* trimWhitespace: false
* }
* ]
* ```
*/
trimWhitespace: true,
/**
* Defines data source for Autocomplete or Dropdown cell types.
*
* @type {Array|Function}
* @default undefined
*
* @example
* ```js
* // source as a array
* columns: [{
* type: 'autocomplete',
* source: ['A', 'B', 'C', 'D']
* }],
*
* // source as a function
* columns: [{
* type: 'autocomplete',
* source: function(query, callback) {
* fetch('https://example.com/query?q=' + query, function(response) {
* callback(response.items);
* })
* }
* }],
* ```
*/
source: void 0,
/**
* @description
* Defines the column header name.
*
* @type {String}
* @default undefined
*
* @example
* ```js
* // set header names for every column
* columns: [
* {
* title: 'First name',
* type: 'text',
* },
* {
* title: 'Last name',
* type: 'text',
* }
* ],
* ```
*/
title: void 0,
/**
* Data template for `'checkbox'` type when checkbox is checked.
*
* @type {Boolean|String|Number}
* @default true
*
* @example
* ```js
* checkedTemplate: 'good'
*
* // if a checkbox-typed cell is checked, then getDataAtCell(x, y),
* // where x and y are the coordinates of the cell will return 'good'.
* ```
*/
checkedTemplate: void 0,
/**
* Data template for `'checkbox'` type when checkbox is unchecked.
*
* @type {Boolean|String|Number}
* @default false
*
* @example
* ```js
* uncheckedTemplate: 'bad'
*
* // if a checkbox-typed cell is not checked, then getDataAtCell(x,y),
* // where x and y are the coordinates of the cell will return 'bad'.
* ```
*/
uncheckedTemplate: void 0,
/**
* @description
* Object which describes if renderer should create checkbox element with label element as a parent.
*
* __Note__, this option only works for [checkbox-typed](https://docs.handsontable.com/demo-checkbox.html) cells.
*
* By default the [checkbox](https://docs.handsontable.com/demo-checkbox.html) renderer renders the checkbox without a label.
*
* Possible object properties:
   * * `property` - Defines the property name of the data object, which will be used as a label
   * (e.g. `label: {property: 'name.last'}`). This option works only if data was passed as an array of objects.
* * `position` - String which describes where to place the label text (before or after checkbox element).
   * Valid values are `'before'` and `'after'` (defaults to `'after'`).
* * `value` - String or a Function which will be used as label text.
*
* @type {Object}
* @default undefined
*
* @example
* ```js
* columns: [{
* type: 'checkbox',
* // add "My label:" after the checkbox
* label: {position: 'after', value: 'My label: '}
* }],
* ```
*/
label: void 0,
/**
* Display format for numeric typed renderers.
*
* __Note__, this option only works for [numeric-typed](https://docs.handsontable.com/demo-numeric.html) cells.
*
* Format is described by two properties:
* * `pattern` - Handled by `numbro` for purpose of formatting numbers to desired pattern. List of supported patterns can be found [here](http://numbrojs.com/format.html#numbers).
* * `culture` - Handled by `numbro` for purpose of formatting currencies. Examples showing how it works can be found [here](http://numbrojs.com/format.html#currency). List of supported cultures can be found [here](http://numbrojs.com/languages.html#supported-languages).
*
* __Note:__ Please keep in mind that this option is used only to format the displayed output! It has no effect on the input data provided for the cell. The numeric data can be entered to the table only as floats (separated by a dot or a comma) or integers, and are stored in the source dataset as JavaScript numbers.
*
* Handsontable uses [numbro](http://numbrojs.com/) as a main library for numbers formatting.
*
* @since 0.35.0
* @type {Object}
* @default undefined
*
* @example
* ```js
* columns: [
* {
* type: 'numeric',
* // set desired format pattern and
* numericFormat: {
* pattern: '0,00',
* culture: 'en-US'
* }
* }
* ],
* ```
*/
numericFormat: void 0,
/**
* Language for Handsontable translation. Possible language codes are [listed here](https://docs.handsontable.com/tutorial-internationalization.html#available-languages).
*
* @type {String}
* @default 'en-US'
*
* @example
* ```js
* // set Polish language
* language: 'pl-PL',
* ```
*/
language: 'en-US',
/**
* Data source for [select-typed](https://docs.handsontable.com/demo-select.html) cells.
*
* __Note__, this option only works for [select-typed](https://docs.handsontable.com/demo-select.html) cells.
*
* @type {String[]}
* @default undefined
*
* @example
* ```js
* columns: [
* {
* editor: 'select',
* // add three select options to choose from
* selectOptions: ['A', 'B', 'C'],
* }
* ],
* ```
*/
selectOptions: void 0,
/**
* Enables or disables the {@link AutoColumnSize} plugin. Default value is `undefined`, which has the same effect as `true`.
* Disabling this plugin can increase performance, as no size-related calculations would be done.
*
   * Column width calculations are divided into sync and async parts. Each of those parts has its own advantages and
* disadvantages. Synchronous calculations are faster but they block the browser UI, while the slower asynchronous
* operations don't block the browser UI.
*
* To configure the sync/async distribution, you can pass an absolute value (number of columns) or a percentage value.
*
* You can also use the `useHeaders` option to take the column headers width into calculation.
*
* @type {Object|Boolean}
* @default {syncLimit: 50}
*
* @example
* ```js
* // as a number (300 columns in sync, rest async)
* autoColumnSize: {syncLimit: 300},
*
* // as a string (percent)
* autoColumnSize: {syncLimit: '40%'},
*
* // use headers width while calculating the column width
* autoColumnSize: {useHeaders: true},
* ```
*/
autoColumnSize: void 0,
/**
* Enables or disables {@link AutoRowSize} plugin. Default value is `undefined`, which has the same effect as `false`
* (disabled). Enabling this plugin can decrease performance, as size-related calculations would be performed.
*
   * Row height calculations are divided into sync and async stages. Each of these stages has its own advantages and
* disadvantages. Synchronous calculations are faster but they block the browser UI, while the slower asynchronous
* operations don't block the browser UI.
*
   * To configure the sync/async distribution, you can pass an absolute value (number of rows) or a percentage value.
*
* @type {Object|Boolean}
* @default {syncLimit: 500}
*
* @example
* ```js
   * // as a number (300 rows in sync, rest async)
* autoRowSize: {syncLimit: 300},
*
* // as a string (percent)
* autoRowSize: {syncLimit: '40%'},
* ```
*/
autoRowSize: void 0,
/**
* Date validation format.
*
* __Note__, this option only works for [date-typed](https://docs.handsontable.com/demo-date.html) cells.
*
* @type {String}
* @default 'DD/MM/YYYY'
*
* @example
* ```js
* columns: [{
* type: 'date',
* // localise date format
* dateFormat: 'MM/DD/YYYY'
* }],
* ```
*/
dateFormat: 'DD/MM/YYYY',
/**
* If `true` then dates will be automatically formatted to match the desired format.
*
* __Note__, this option only works for [date-typed](https://docs.handsontable.com/demo-date.html) cells.
*
* @type {Boolean}
* @default false
*
* @example
* ```js
* columns: [{
* type: 'date',
* dateFormat: 'YYYY-MM-DD',
* // force selected date format
* correctFormat: true
* }],
* ```
*/
correctFormat: false,
/**
* Definition of default value which will fill the empty cells.
*
* __Note__, this option only works for [date-typed](https://docs.handsontable.com/demo-date.html) cells.
*
* @type {String}
* @default undefined
*
* @example
* ```js
* columns: [
* {
* type: 'date',
* // always set this date for empty cells
* defaultDate: '2015-02-02'
* }
* ],
* ```
*/
defaultDate: void 0,
/**
* If set to `true`, the value entered into the cell must match (case-sensitive) the autocomplete source.
* Otherwise, cell won't pass the validation. When filtering the autocomplete source list, the editor will
* be working in case-insensitive mode.
*
* __Note__, this option only works for [autocomplete-typed](https://docs.handsontable.com/demo-autocomplete.html) cells.
*
* @type {Boolean}
* @default undefined
*
* @example
* ```js
* columns: [{
* type: 'autocomplete',
* source: ['A', 'B', 'C'],
* // force selected value to match the source list
* strict: true
* }],
* ```
*/
strict: void 0,
/**
* If set to `true`, data defined in `source` of the autocomplete or dropdown cell will be treated as HTML.
*
* __Warning:__ Enabling this option can cause serious XSS vulnerabilities.
*
* __Note__, this option only works for [autocomplete-typed](https://docs.handsontable.com/demo-autocomplete.html) cells.
*
* @type {Boolean}
* @default false
*
* @example
* ```js
* columns: [{
* type: 'autocomplete',
* // use HTML in the source list
* allowHtml: true,
* source: ['<strong>foo</strong>', '<strong>bar</strong>']
* }],
* ```
*/
allowHtml: false,
/**
   * If set to `true`, the virtual rendering mechanism for Handsontable will be disabled.
*
* @type {Boolean}
* @default undefined
*
* @example
* ```js
* // disable virtual rows rendering
* renderAllRows: true,
* ```
*/
renderAllRows: void 0,
/**
   * Prevents the table from overlapping outside the parent element. If the `'horizontal'` option is chosen, the table
   * will show a horizontal scrollbar if the parent's width is narrower than the table's width.
   *
   * Possible values:
   * * `false` - Disables functionality.
   * * `horizontal` - Prevents horizontal overflow of the table.
   * * `vertical` - Prevents vertical overflow of the table.
*
* @type {String|Boolean}
* @default false
*
* @example
* ```js
* preventOverflow: 'horizontal',
* ```
*/
preventOverflow: false,
/**
* @description
* Enables the functionality of the {@link BindRowsWithHeaders} plugin which allows binding the table rows with their headers.
   * If the plugin is enabled, the table row headers will "stick" to the rows when they are hidden/moved. Basically,
   * if at initialization row 0 has a header titled "A", it will keep it no matter what you do with the table.
*
* @type {Boolean|String}
* @default undefined
*
* @example
* ```js
* // keep row data and row headers in sync
* bindRowsWithHeaders: true
* ```
*/
bindRowsWithHeaders: void 0,
/**
* @description
* The {@link CollapsibleColumns} plugin allows collapsing of columns, covered by a header with the `colspan` property
* defined.
*
* Clicking the "collapse/expand" button collapses (or expands) all "child" headers except the first one.
*
* Setting the `collapsibleColumns` property to `true` will display a "collapse/expand" button in every
   * header with a defined `colspan` property.
*
* To limit this functionality to a smaller group of headers, define the `collapsibleColumns` property
* as an array of objects, as in the example below.
*
* @type {Boolean|Object[]}
* @default undefined
*
* @example
* ```js
* // enable collapsing for all headers
* collapsibleColumns: true,
*
* // or
* // enable collapsing for selected headers
* collapsibleColumns: [
* {row: -4, col: 1, collapsible: true},
* {row: -3, col: 5, collapsible: true}
* ],
* ```
*/
collapsibleColumns: void 0,
/**
* @description
   * Allows making pre-defined calculations on the cell values and displaying the results within Handsontable.
*
* Possible types:
* * `'sum'`
* * `'min'`
* * `'max'`
* * `'count'`
* * `'average'`
* * `'custom'` - add `customFunction`
*
* [See the demo for more information](https://docs.handsontable.com/pro/demo-summary-calculations.html).
*
* @type {Object[]|Function}
* @default undefined
*
* @example
   * ```js
* columnSummary: [
* {
* destinationRow: 4,
* destinationColumn: 1,
* forceNumeric: true,
* reversedRowCoords: true,
* suppressDataTypeErrors: false,
* readOnly: true,
* roundFloat: false,
* type: 'custom',
* customFunction: function(endpoint) {
* return 100;
* }
* }
* ],
* ```
*/
columnSummary: void 0,
/**
* This plugin allows adding a configurable dropdown menu to the table's column headers. The dropdown menu acts like
* the {@link Options#contextMenu}, but is triggered by clicking the button in the header.
*
* @type {Boolean|Object|String[]}
* @default undefined
*
* @example
* ```js
* // enable dropdown menu
* dropdownMenu: true,
*
* // or
* // enable and configure dropdown menu options
* dropdownMenu: ['remove_col', '---------', 'make_read_only', 'alignment']
* ```
*/
dropdownMenu: void 0,
/**
* The {@link Filters} plugin allows filtering the table data either by the built-in component or with the API.
*
* @type {Boolean}
* @default undefined
*
* @example
* ```js
* // enable filters
* filters: true,
* ```
*/
filters: void 0,
/**
* The {@link Formulas} plugin allows Handsontable to process formula expressions defined in the provided data.
*
* @type {Boolean|Object}
* @default undefined
*
* @example
* ```js
* // enable formulas plugin
* formulas: true,
*
* // or as an object with custom variables to be used in formula expressions
* formulas: {
* variables: {
* FOO: 64,
* BAR: 'baz',
* }
* },
* ```
*/
formulas: void 0,
/**
* @description
* The {@link GanttChart} plugin enables a possibility to create a Gantt chart using a Handsontable instance. In this
* case, the whole table becomes read-only.
*
* @type {Object}
* @default undefined
*/
ganttChart: void 0,
/**
* @description
* Allows adding a tooltip to the table headers.
*
* Available options:
* * the `rows` property defines if tooltips should be added to row headers,
* * the `columns` property defines if tooltips should be added to column headers,
   * * the `onlyTrimmed` property defines if tooltips should be added only to headers whose content is trimmed by the header itself (the content being wider than the header).
*
* @type {Boolean|Object}
* @default undefined
*
* @example
* ```js
* // enable tooltips for all headers
* headerTooltips: true,
*
* // or
* headerTooltips: {
* rows: false,
* columns: true,
* onlyTrimmed: true
* }
* ```
*/
headerTooltips: void 0,
/**
* The {@link HiddenColumns} plugin allows hiding of certain columns. You can pass additional configuration with an
* object notation. Options that are then available are:
   * * `columns` - an array of columns that should be hidden on plugin initialization
   * * `indicators` - enables small UI markers to indicate where hidden columns are
*
* @type {Boolean|Object}
* @default undefined
*
* @example
* ```js
* // enable column hiding
* hiddenColumns: true,
*
* // or
* hiddenColumns: {
* // set columns that are hidden by default
* columns: [5, 10, 15],
* // show where are hidden columns
* indicators: true
* }
* ```
*/
hiddenColumns: void 0,
/**
* The {@link HiddenRows} plugin allows hiding of certain rows. You can pass additional configuration with an
* object notation. Options that are then available are:
* * `rows` - an array of rows that should be hidden on plugin initialization
   * * `indicators` - enables small UI markers to indicate where hidden rows are
*
* @type {Boolean|Object}
* @default undefined
*
* @example
* ```js
* // enable row hiding
* hiddenRows: true,
*
* // or
* hiddenRows: {
* // set rows that are hidden by default
* rows: [5, 10, 15],
* // show where are hidden rows
* indicators: true
* }
* ```
*/
hiddenRows: void 0,
/**
* @description
* Allows creating a nested header structure, using the HTML's colspan attribute.
*
* @type {Array[]}
* @default undefined
*
* @example
   * ```js
* nestedHeaders: [
* ['A', {label: 'B', colspan: 8}, 'C'],
* ['D', {label: 'E', colspan: 4}, {label: 'F', colspan: 4}, 'G'],
* ['H', 'I', 'J', 'K', 'L', 'M', 'N', 'R', 'S', 'T']
* ],
* ```
*/
nestedHeaders: void 0,
/**
* @description
* Plugin allowing hiding of certain rows.
*
* @type {Boolean|Number[]}
* @default undefined
*
* @example
* ```js
* // enable plugin
* trimRows: true,
*
* // or
* // trim selected rows on table initialization
* trimRows: [5, 10, 15],
* ```
*/
trimRows: void 0,
/**
* @description
* Allows setting a custom width of the row headers. You can provide a number or an array of widths, if many row
* header levels are defined.
*
* @type {Number|Number[]}
* @default undefined
*
* @example
* ```js
* // set width for all row headers
* rowHeaderWidth: 25,
*
* // or
* // set width for selected headers only
* rowHeaderWidth: [25, 30, 55],
* ```
*/
rowHeaderWidth: void 0,
/**
* @description
* Allows setting a custom height of the column headers. You can provide a number or an array of heights, if many
* column header levels are defined.
*
* @type {Number|Number[]}
* @default undefined
*
* @example
* ```js
* // set shared height for all headers
* columnHeaderHeight: 35,
*
* // or
* // set height for each header individually
* columnHeaderHeight: [35, 20, 55],
*
* // or
* // skipped headers will fallback to default value
* columnHeaderHeight: [35, undefined, 55],
* ```
*/
columnHeaderHeight: void 0,
/**
* @description
   * Enables the {@link ObserveChanges} plugin, which switches the table into one-way data binding where changes applied
   * to the data source (from outside the table) will be automatically reflected in the table.
*
* For every data change [afterChangesObserved](Hooks.html#event:afterChangesObserved) hook will be fired.
*
* @type {Boolean}
* @default undefined
*
* @example
* ```js
* observeChanges: true,
* ```
*/
observeChanges: void 0,
/**
* If defined as `true`, the Autocomplete's suggestion list would be sorted by relevance (the closer to the left the
* match is, the higher the suggestion).
*
* __Note__, this option only works for [autocomplete-typed](https://docs.handsontable.com/demo-autocomplete.html) cells.
*
* @type {Boolean}
* @default true
*
* @example
* ```js
* columns: [
* {
* type: 'autocomplete',
* source: [ ... ],
* // keep options order as they were defined
* sortByRelevance: false
* }
* ],
* ```
*/
sortByRelevance: true,
/**
* If defined as `true`, when the user types into the input area the Autocomplete's suggestion list is updated to only
* include those choices starting with what has been typed; if defined as `false` all suggestions remain shown, with
* those matching what has been typed marked in bold.
*
* __Note__, this option only works for [autocomplete-typed](https://docs.handsontable.com/demo-autocomplete.html) cells.
*
* @type {Boolean}
* @default true
*
* @example
* ```js
* columns: [
* {
* type: 'autocomplete',
* source: [ ... ],
* // don't hide options that don't match search query
* filter: false
* }
* ],
* ```
*/
filter: true,
/**
* If defined as `true`, filtering in the Autocomplete Editor will be case-sensitive.
*
* __Note__, this option only works for [autocomplete-typed](https://docs.handsontable.com/demo-autocomplete.html) cells.
*
* @type {Boolean}
   * @default false
*
* @example
* ```js
* columns: [
* {
* type: 'autocomplete',
* source: [ ... ],
* // match case while searching autocomplete options
* filteringCaseSensitive: true
* }
* ],
* ```
*/
filteringCaseSensitive: false,
/**
* @description
* Disables or enables the drag to scroll functionality.
*
* @type {Boolean}
* @default true
*
* @example
* ```js
* // don't scroll the viewport when selection gets to the viewport edge
* dragToScroll: false,
* ```
*/
dragToScroll: true,
/**
* @description
   * Disables or enables the nested rows functionality - displaying nested structures in a two-dimensional data table.
   *
   * See [quick setup of the Nested rows](https://docs.handsontable.com/pro/demo-nested-rows.html).
* @example
* ```js
* nestedRows: true,
* ```
*
* @type {Boolean}
* @default false
*/
nestedRows: void 0,
};
export default DefaultSettings;
| 1 | 15,444 | Please check if it is possible to mark it as private. What's more, please add this to the TypeScript definition file. | handsontable-handsontable | js |
@@ -28,6 +28,10 @@ static std::string GeneratedFileName(const std::string &path,
return path + file_name + "_generated.h";
}
+char ToUpper(char c) {
+ return static_cast<char>(::toupper(c));
+}
+
namespace cpp {
class CppGenerator : public BaseGenerator {
public: | 1 | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// independent from idl_parser, since this code is not needed for most clients
#include "flatbuffers/flatbuffers.h"
#include "flatbuffers/idl.h"
#include "flatbuffers/util.h"
#include "flatbuffers/code_generators.h"
namespace flatbuffers {
static std::string GeneratedFileName(const std::string &path,
const std::string &file_name) {
return path + file_name + "_generated.h";
}
namespace cpp {
class CppGenerator : public BaseGenerator {
public:
CppGenerator(const Parser &parser, const std::string &path,
const std::string &file_name)
: BaseGenerator(parser, path, file_name, "", "::"),
cur_name_space_(nullptr) {}
std::string GenIncludeGuard() const {
// Generate include guard.
std::string guard = file_name_;
// Remove any non-alpha-numeric characters that may appear in a filename.
struct IsAlnum {
bool operator()(char c) { return !isalnum(c); }
};
guard.erase(std::remove_if(guard.begin(), guard.end(), IsAlnum()),
guard.end());
guard = "FLATBUFFERS_GENERATED_" + guard;
guard += "_";
// For further uniqueness, also add the namespace.
auto name_space = parser_.namespaces_.back();
for (auto it = name_space->components.begin();
it != name_space->components.end(); ++it) {
guard += *it + "_";
}
guard += "H_";
std::transform(guard.begin(), guard.end(), guard.begin(), ::toupper);
return guard;
}
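  // Emit #include directives for native includes and for the generated headers
  // of any schema files included by the one being compiled.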
void GenIncludeDependencies() {
int num_includes = 0;
for (auto it = parser_.native_included_files_.begin();
it != parser_.native_included_files_.end(); ++it) {
code_ += "#include \"" + *it + "\"";
num_includes++;
}
for (auto it = parser_.included_files_.begin();
it != parser_.included_files_.end(); ++it) {
const auto basename =
flatbuffers::StripPath(flatbuffers::StripExtension(it->first));
if (basename != file_name_) {
code_ += "#include \"" + parser_.opts.include_prefix + basename +
"_generated.h\"";
num_includes++;
}
}
if (num_includes) code_ += "";
}
  // Iterate through all definitions we haven't generated code for (enums,
// structs, and tables) and output them to a single file.
bool generate() {
if (IsEverythingGenerated()) return true;
code_.Clear();
code_ += "// " + std::string(FlatBuffersGeneratedWarning());
const auto include_guard = GenIncludeGuard();
code_ += "#ifndef " + include_guard;
code_ += "#define " + include_guard;
code_ += "";
code_ += "#include \"flatbuffers/flatbuffers.h\"";
code_ += "";
if (parser_.opts.include_dependence_headers) {
GenIncludeDependencies();
}
assert(!cur_name_space_);
// Generate forward declarations for all structs/tables, since they may
// have circular references.
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (!struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
code_ += "struct " + struct_def.name + ";";
if (parser_.opts.generate_object_based_api && !struct_def.fixed) {
code_ += "struct " + NativeName(struct_def.name) + ";";
}
code_ += "";
}
}
// Generate code for all the enum declarations.
for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end();
++it) {
const auto &enum_def = **it;
if (!enum_def.generated) {
SetNameSpace(enum_def.defined_namespace);
GenEnum(enum_def);
}
}
// Generate code for all structs, then all tables.
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (struct_def.fixed && !struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
GenStruct(struct_def);
}
}
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (!struct_def.fixed && !struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
GenTable(struct_def);
}
}
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (!struct_def.fixed && !struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
GenTablePost(struct_def);
}
}
// Generate code for union verifiers.
for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end();
++it) {
const auto &enum_def = **it;
if (enum_def.is_union && !enum_def.generated) {
SetNameSpace(enum_def.defined_namespace);
GenUnionPost(enum_def);
}
}
// Generate convenient global helper functions:
if (parser_.root_struct_def_) {
auto &struct_def = *parser_.root_struct_def_;
SetNameSpace(struct_def.defined_namespace);
const auto &name = struct_def.name;
const auto qualified_name =
parser_.namespaces_.back()->GetFullyQualifiedName(name);
const auto cpp_name = TranslateNameSpace(qualified_name);
code_.SetValue("STRUCT_NAME", name);
code_.SetValue("CPP_NAME", cpp_name);
// The root datatype accessor:
code_ += "inline \\";
code_ += "const {{CPP_NAME}} *Get{{STRUCT_NAME}}(const void *buf) {";
code_ += " return flatbuffers::GetRoot<{{CPP_NAME}}>(buf);";
code_ += "}";
code_ += "";
if (parser_.opts.mutable_buffer) {
code_ += "inline \\";
code_ += "{{STRUCT_NAME}} *GetMutable{{STRUCT_NAME}}(void *buf) {";
code_ += " return flatbuffers::GetMutableRoot<{{STRUCT_NAME}}>(buf);";
code_ += "}";
code_ += "";
}
if (parser_.file_identifier_.length()) {
// Return the identifier
code_ += "inline const char *{{STRUCT_NAME}}Identifier() {";
code_ += " return \"" + parser_.file_identifier_ + "\";";
code_ += "}";
code_ += "";
// Check if a buffer has the identifier.
code_ += "inline \\";
code_ += "bool {{STRUCT_NAME}}BufferHasIdentifier(const void *buf) {";
code_ += " return flatbuffers::BufferHasIdentifier(";
code_ += " buf, {{STRUCT_NAME}}Identifier());";
code_ += "}";
code_ += "";
}
// The root verifier.
if (parser_.file_identifier_.length()) {
code_.SetValue("ID", name + "Identifier()");
} else {
code_.SetValue("ID", "nullptr");
}
code_ += "inline bool Verify{{STRUCT_NAME}}Buffer(";
code_ += " flatbuffers::Verifier &verifier) {";
code_ += " return verifier.VerifyBuffer<{{CPP_NAME}}>({{ID}});";
code_ += "}";
code_ += "";
if (parser_.file_extension_.length()) {
// Return the extension
code_ += "inline const char *{{STRUCT_NAME}}Extension() {";
code_ += " return \"" + parser_.file_extension_ + "\";";
code_ += "}";
code_ += "";
}
// Finish a buffer with a given root object:
code_ += "inline void Finish{{STRUCT_NAME}}Buffer(";
code_ += " flatbuffers::FlatBufferBuilder &fbb,";
code_ += " flatbuffers::Offset<{{CPP_NAME}}> root) {";
if (parser_.file_identifier_.length())
code_ += " fbb.Finish(root, {{STRUCT_NAME}}Identifier());";
else
code_ += " fbb.Finish(root);";
code_ += "}";
code_ += "";
if (parser_.opts.generate_object_based_api) {
// A convenient root unpack function.
auto native_name =
NativeName(WrapInNameSpace(struct_def));
code_.SetValue("UNPACK_RETURN",
GenTypeNativePtr(native_name, nullptr, false));
code_.SetValue("UNPACK_TYPE",
GenTypeNativePtr(native_name, nullptr, true));
code_ += "inline {{UNPACK_RETURN}} UnPack{{STRUCT_NAME}}(";
code_ += " const void *buf,";
code_ += " const flatbuffers::resolver_function_t *res = nullptr) {";
code_ += " return {{UNPACK_TYPE}}\\";
code_ += "(Get{{STRUCT_NAME}}(buf)->UnPack(res));";
code_ += "}";
code_ += "";
}
}
assert(cur_name_space_);
SetNameSpace(nullptr);
// Close the include guard.
code_ += "#endif // " + include_guard;
const auto file_path = GeneratedFileName(path_, file_name_);
const auto final_code = code_.ToString();
return SaveFile(file_path.c_str(), final_code, false);
}
private:
CodeWriter code_;
// This tracks the current namespace so we can insert namespace declarations.
const Namespace *cur_name_space_;
const Namespace *CurrentNameSpace() const { return cur_name_space_; }
// Translates a qualified name in flatbuffer text format to the same name in
// the equivalent C++ namespace.
static std::string TranslateNameSpace(const std::string &qualified_name) {
std::string cpp_qualified_name = qualified_name;
size_t start_pos = 0;
while ((start_pos = cpp_qualified_name.find(".", start_pos)) !=
std::string::npos) {
cpp_qualified_name.replace(start_pos, 1, "::");
}
return cpp_qualified_name;
}
void GenComment(const std::vector<std::string> &dc, const char *prefix = "") {
std::string text;
::flatbuffers::GenComment(dc, &text, nullptr, prefix);
code_ += text + "\\";
}
// Return a C++ type from the table in idl.h
std::string GenTypeBasic(const Type &type, bool user_facing_type) const {
static const char *ctypename[] = {
#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \
#CTYPE,
FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
};
if (user_facing_type) {
if (type.enum_def) return WrapInNameSpace(*type.enum_def);
if (type.base_type == BASE_TYPE_BOOL) return "bool";
}
return ctypename[type.base_type];
}
// Return a C++ pointer type, specialized to the actual struct/table types,
// and vector element types.
std::string GenTypePointer(const Type &type) const {
switch (type.base_type) {
case BASE_TYPE_STRING: {
return "flatbuffers::String";
}
case BASE_TYPE_VECTOR: {
const auto type_name = GenTypeWire(type.VectorType(), "", false);
return "flatbuffers::Vector<" + type_name + ">";
}
case BASE_TYPE_STRUCT: {
return WrapInNameSpace(*type.struct_def);
}
case BASE_TYPE_UNION:
// fall through
default: {
return "void";
}
}
}
// Return a C++ type for any type (scalar/pointer) specifically for
// building a flatbuffer.
std::string GenTypeWire(const Type &type, const char *postfix,
bool user_facing_type) const {
if (IsScalar(type.base_type)) {
return GenTypeBasic(type, user_facing_type) + postfix;
} else if (IsStruct(type)) {
return "const " + GenTypePointer(type) + " *";
} else {
return "flatbuffers::Offset<" + GenTypePointer(type) + ">" + postfix;
}
}
// Return a C++ type for any type (scalar/pointer) that reflects its
// serialized size.
std::string GenTypeSize(const Type &type) const {
if (IsScalar(type.base_type)) {
return GenTypeBasic(type, false);
} else if (IsStruct(type)) {
return GenTypePointer(type);
} else {
return "flatbuffers::uoffset_t";
}
}
// TODO(wvo): make this configurable.
static std::string NativeName(const std::string &name) { return name + "T"; }
const std::string &PtrType(const FieldDef *field) {
auto attr = field ? field->attributes.Lookup("cpp_ptr_type") : nullptr;
return attr ? attr->constant : parser_.opts.cpp_object_api_pointer_type;
}
const std::string NativeString(const FieldDef *field) {
auto attr = field ? field->attributes.Lookup("cpp_str_type") : nullptr;
auto &ret = attr ? attr->constant : parser_.opts.cpp_object_api_string_type;
if (ret.empty()) {
return "std::string";
}
return ret;
}
std::string GenTypeNativePtr(const std::string &type, const FieldDef *field,
bool is_constructor) {
auto &ptr_type = PtrType(field);
if (ptr_type != "naked") {
return ptr_type + "<" + type + ">";
} else if (is_constructor) {
return "";
} else {
return type + " *";
}
}
std::string GenPtrGet(const FieldDef &field) {
auto &ptr_type = PtrType(&field);
return ptr_type == "naked" ? "" : ".get()";
}
std::string GenTypeNative(const Type &type, bool invector,
const FieldDef &field) {
switch (type.base_type) {
case BASE_TYPE_STRING: {
return NativeString(&field);
}
case BASE_TYPE_VECTOR: {
const auto type_name = GenTypeNative(type.VectorType(), true, field);
return "std::vector<" + type_name + ">";
}
case BASE_TYPE_STRUCT: {
auto type_name = WrapInNameSpace(*type.struct_def);
if (IsStruct(type)) {
auto native_type = type.struct_def->attributes.Lookup("native_type");
if (native_type) {
type_name = native_type->constant;
}
if (invector || field.native_inline) {
return type_name;
} else {
return GenTypeNativePtr(type_name, &field, false);
}
} else {
return GenTypeNativePtr(NativeName(type_name), &field, false);
}
}
case BASE_TYPE_UNION: {
return type.enum_def->name + "Union";
}
default: {
return GenTypeBasic(type, true);
}
}
}
// Return a C++ type for any type (scalar/pointer) specifically for
// using a flatbuffer.
std::string GenTypeGet(const Type &type, const char *afterbasic,
const char *beforeptr, const char *afterptr,
bool user_facing_type) {
if (IsScalar(type.base_type)) {
return GenTypeBasic(type, user_facing_type) + afterbasic;
} else {
return beforeptr + GenTypePointer(type) + afterptr;
}
}
std::string GenEnumDecl(const EnumDef &enum_def) const {
const IDLOptions &opts = parser_.opts;
return (opts.scoped_enums ? "enum class " : "enum ") + enum_def.name;
}
std::string GenEnumValDecl(const EnumDef &enum_def,
const std::string &enum_val) const {
const IDLOptions &opts = parser_.opts;
return opts.prefixed_enums ? enum_def.name + "_" + enum_val : enum_val;
}
std::string GetEnumValUse(const EnumDef &enum_def,
const EnumVal &enum_val) const {
const IDLOptions &opts = parser_.opts;
if (opts.scoped_enums) {
return enum_def.name + "::" + enum_val.name;
} else if (opts.prefixed_enums) {
return enum_def.name + "_" + enum_val.name;
} else {
return enum_val.name;
}
}
static std::string UnionVerifySignature(const EnumDef &enum_def) {
return "bool Verify" + enum_def.name +
"(flatbuffers::Verifier &verifier, const void *obj, " +
enum_def.name + " type)";
}
static std::string UnionVectorVerifySignature(const EnumDef &enum_def) {
return "bool Verify" + enum_def.name + "Vector" +
"(flatbuffers::Verifier &verifier, " +
"const flatbuffers::Vector<flatbuffers::Offset<void>> *values, " +
"const flatbuffers::Vector<uint8_t> *types)";
}
static std::string UnionUnPackSignature(const EnumDef &enum_def,
bool inclass) {
return (inclass ? "static " : "") +
std::string("flatbuffers::NativeTable *") +
(inclass ? "" : enum_def.name + "Union::") +
"UnPack(const void *obj, " + enum_def.name +
" type, const flatbuffers::resolver_function_t *resolver)";
}
static std::string UnionPackSignature(const EnumDef &enum_def, bool inclass) {
return "flatbuffers::Offset<void> " +
(inclass ? "" : enum_def.name + "Union::") +
"Pack(flatbuffers::FlatBufferBuilder &_fbb, " +
"const flatbuffers::rehasher_function_t *_rehasher" +
(inclass ? " = nullptr" : "") + ") const";
}
static std::string TableCreateSignature(const StructDef &struct_def,
bool predecl) {
return "flatbuffers::Offset<" + struct_def.name + "> Create" +
struct_def.name +
"(flatbuffers::FlatBufferBuilder &_fbb, const " +
NativeName(struct_def.name) +
" *_o, const flatbuffers::rehasher_function_t *_rehasher" +
(predecl ? " = nullptr" : "") + ")";
}
static std::string TablePackSignature(const StructDef &struct_def,
bool inclass) {
return std::string(inclass ? "static " : "") +
"flatbuffers::Offset<" + struct_def.name + "> " +
(inclass ? "" : struct_def.name + "::") +
"Pack(flatbuffers::FlatBufferBuilder &_fbb, " +
"const " + NativeName(struct_def.name) + "* _o, " +
"const flatbuffers::rehasher_function_t *_rehasher" +
(inclass ? " = nullptr" : "") + ")";
}
static std::string TableUnPackSignature(const StructDef &struct_def,
bool inclass) {
return NativeName(struct_def.name) + " *" +
(inclass ? "" : struct_def.name + "::") +
"UnPack(const flatbuffers::resolver_function_t *_resolver" +
(inclass ? " = nullptr" : "") + ") const";
}
static std::string TableUnPackToSignature(const StructDef &struct_def,
bool inclass) {
return "void " + (inclass ? "" : struct_def.name + "::") +
"UnPackTo(" + NativeName(struct_def.name) + " *" + "_o, " +
"const flatbuffers::resolver_function_t *_resolver" +
(inclass ? " = nullptr" : "") + ") const";
}
// Generate an enum declaration and an enum string lookup table.
void GenEnum(const EnumDef &enum_def) {
code_.SetValue("ENUM_NAME", enum_def.name);
code_.SetValue("BASE_TYPE", GenTypeBasic(enum_def.underlying_type, false));
code_.SetValue("SEP", "");
GenComment(enum_def.doc_comment);
code_ += GenEnumDecl(enum_def) + "\\";
if (parser_.opts.scoped_enums)
code_ += " : {{BASE_TYPE}}\\";
code_ += " {";
int64_t anyv = 0;
const EnumVal *minv = nullptr, *maxv = nullptr;
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
GenComment(ev.doc_comment, " ");
code_.SetValue("KEY", GenEnumValDecl(enum_def, ev.name));
code_.SetValue("VALUE", NumToString(ev.value));
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
code_.SetValue("SEP", ",\n");
minv = !minv || minv->value > ev.value ? &ev : minv;
maxv = !maxv || maxv->value < ev.value ? &ev : maxv;
anyv |= ev.value;
}
if (parser_.opts.scoped_enums || parser_.opts.prefixed_enums) {
assert(minv && maxv);
code_.SetValue("SEP", ",\n");
if (enum_def.attributes.Lookup("bit_flags")) {
code_.SetValue("KEY", GenEnumValDecl(enum_def, "NONE"));
code_.SetValue("VALUE", "0");
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
code_.SetValue("KEY", GenEnumValDecl(enum_def, "ANY"));
code_.SetValue("VALUE", NumToString(anyv));
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
} else { // MIN & MAX are useless for bit_flags
code_.SetValue("KEY",GenEnumValDecl(enum_def, "MIN"));
code_.SetValue("VALUE", GenEnumValDecl(enum_def, minv->name));
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
code_.SetValue("KEY",GenEnumValDecl(enum_def, "MAX"));
code_.SetValue("VALUE", GenEnumValDecl(enum_def, maxv->name));
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
}
}
code_ += "";
code_ += "};";
if (parser_.opts.scoped_enums && enum_def.attributes.Lookup("bit_flags")) {
code_ += "DEFINE_BITMASK_OPERATORS({{ENUM_NAME}}, {{BASE_TYPE}})";
}
code_ += "";
    // Generate a string table for enum values.
// Problem is, if values are very sparse that could generate really big
// tables. Ideally in that case we generate a map lookup instead, but for
// the moment we simply don't output a table at all.
auto range =
enum_def.vals.vec.back()->value - enum_def.vals.vec.front()->value + 1;
// Average distance between values above which we consider a table
// "too sparse". Change at will.
static const int kMaxSparseness = 5;
if (range / static_cast<int64_t>(enum_def.vals.vec.size()) <
kMaxSparseness) {
code_ += "inline const char **EnumNames{{ENUM_NAME}}() {";
code_ += " static const char *names[] = {";
auto val = enum_def.vals.vec.front()->value;
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
while (val++ != ev.value) {
code_ += " \"\",";
}
code_ += " \"" + ev.name + "\",";
}
code_ += " nullptr";
code_ += " };";
code_ += " return names;";
code_ += "}";
code_ += "";
code_ += "inline const char *EnumName{{ENUM_NAME}}({{ENUM_NAME}} e) {";
code_ += " const size_t index = static_cast<int>(e)\\";
if (enum_def.vals.vec.front()->value) {
auto vals = GetEnumValUse(enum_def, *enum_def.vals.vec.front());
code_ += " - static_cast<int>(" + vals + ")\\";
}
code_ += ";";
code_ += " return EnumNames{{ENUM_NAME}}()[index];";
code_ += "}";
code_ += "";
}
// Generate type traits for unions to map from a type to union enum value.
if (enum_def.is_union) {
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
if (it == enum_def.vals.vec.begin()) {
code_ += "template<typename T> struct {{ENUM_NAME}}Traits {";
        } else {
auto name = WrapInNameSpace(*ev.struct_def);
code_ += "template<> struct {{ENUM_NAME}}Traits<" + name + "> {";
}
auto value = GetEnumValUse(enum_def, ev);
code_ += " static const {{ENUM_NAME}} enum_value = " + value + ";";
code_ += "};";
code_ += "";
}
}
if (parser_.opts.generate_object_based_api && enum_def.is_union) {
// Generate a union type
code_.SetValue("NAME", enum_def.name);
code_.SetValue("NONE",
GetEnumValUse(enum_def, *enum_def.vals.Lookup("NONE")));
code_ += "struct {{NAME}}Union {";
code_ += " {{NAME}} type;";
code_ += " flatbuffers::NativeTable *table;";
code_ += "";
code_ += " {{NAME}}Union() : type({{NONE}}), table(nullptr) {}";
code_ += " {{NAME}}Union({{NAME}}Union&& u) FLATBUFFERS_NOEXCEPT :";
code_ += " type({{NONE}}), table(nullptr)";
code_ += " { std::swap(type, u.type); std::swap(table, u.table); }";
code_ += " {{NAME}}Union(const {{NAME}}Union &);";
code_ += " {{NAME}}Union &operator=(const {{NAME}}Union &);";
code_ += " {{NAME}}Union &operator=({{NAME}}Union &&u) FLATBUFFERS_NOEXCEPT";
code_ += " { std::swap(type, u.type); std::swap(table, u.table); return *this; }";
code_ += " ~{{NAME}}Union() { Reset(); }";
code_ += "";
code_ += " void Reset();";
code_ += "";
code_ += " template <typename T>";
code_ += " void Set(T&& value) {";
code_ += " Reset();";
code_ += " type = {{NAME}}Traits<typename T::TableType>::enum_value;";
code_ += " if (type != {{NONE}}) {";
code_ += " table = new T(std::forward<T>(value));";
code_ += " }";
code_ += " }";
code_ += "";
code_ += " " + UnionUnPackSignature(enum_def, true) + ";";
code_ += " " + UnionPackSignature(enum_def, true) + ";";
code_ += "";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
if (!ev.value) {
continue;
}
const auto native_type = NativeName(WrapInNameSpace(*ev.struct_def));
code_.SetValue("NATIVE_TYPE", native_type);
code_.SetValue("NATIVE_NAME", ev.name);
code_.SetValue("NATIVE_ID", GetEnumValUse(enum_def, ev));
code_ += " {{NATIVE_TYPE}} *As{{NATIVE_NAME}}() {";
code_ += " return type == {{NATIVE_ID}} ?";
code_ += " reinterpret_cast<{{NATIVE_TYPE}} *>(table) : nullptr;";
code_ += " }";
}
code_ += "};";
code_ += "";
}
if (enum_def.is_union) {
code_ += UnionVerifySignature(enum_def) + ";";
code_ += UnionVectorVerifySignature(enum_def) + ";";
code_ += "";
}
}
void GenUnionPost(const EnumDef &enum_def) {
// Generate a verifier function for this union that can be called by the
// table verifier functions. It uses a switch case to select a specific
// verifier function to call, this should be safe even if the union type
// has been corrupted, since the verifiers will simply fail when called
// on the wrong type.
code_.SetValue("ENUM_NAME", enum_def.name);
code_ += "inline " + UnionVerifySignature(enum_def) + " {";
code_ += " switch (type) {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
if (ev.value) {
code_.SetValue("TYPE", WrapInNameSpace(*ev.struct_def));
code_ += " case {{LABEL}}: {";
code_ += " auto ptr = reinterpret_cast<const {{TYPE}} *>(obj);";
code_ += " return verifier.VerifyTable(ptr);";
code_ += " }";
} else {
code_ += " case {{LABEL}}: {";
code_ += " return true;"; // "NONE" enum value.
code_ += " }";
}
}
code_ += " default: return false;";
code_ += " }";
code_ += "}";
code_ += "";
code_ += "inline " + UnionVectorVerifySignature(enum_def) + " {";
code_ += " if (values->size() != types->size()) return false;";
code_ += " for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {";
code_ += " if (!Verify" + enum_def.name + "(";
code_ += " verifier, values->Get(i), types->GetEnum<" + enum_def.name + ">(i))) {";
code_ += " return false;";
code_ += " }";
code_ += " }";
code_ += " return true;";
code_ += "}";
code_ += "";
if (parser_.opts.generate_object_based_api) {
// Generate union Unpack() and Pack() functions.
code_ += "inline " + UnionUnPackSignature(enum_def, false) + " {";
code_ += " switch (type) {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
if (!ev.value) {
continue;
}
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
code_.SetValue("TYPE", WrapInNameSpace(*ev.struct_def));
code_ += " case {{LABEL}}: {";
code_ += " auto ptr = reinterpret_cast<const {{TYPE}} *>(obj);";
code_ += " return ptr->UnPack(resolver);";
code_ += " }";
}
code_ += " default: return nullptr;";
code_ += " }";
code_ += "}";
code_ += "";
code_ += "inline " + UnionPackSignature(enum_def, false) + " {";
code_ += " switch (type) {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
auto &ev = **it;
if (!ev.value) {
continue;
}
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
code_.SetValue("TYPE", NativeName(WrapInNameSpace(*ev.struct_def)));
code_.SetValue("NAME", ev.struct_def->name);
code_ += " case {{LABEL}}: {";
code_ += " auto ptr = reinterpret_cast<const {{TYPE}} *>(table);";
code_ += " return Create{{NAME}}(_fbb, ptr, _rehasher).Union();";
code_ += " }";
}
code_ += " default: return 0;";
code_ += " }";
code_ += "}";
code_ += "";
// Union Reset() function.
code_.SetValue("NONE",
GetEnumValUse(enum_def, *enum_def.vals.Lookup("NONE")));
code_ += "inline void {{ENUM_NAME}}Union::Reset() {";
code_ += " switch (type) {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
if (!ev.value) {
continue;
}
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
code_.SetValue("TYPE", NativeName(WrapInNameSpace(*ev.struct_def)));
code_ += " case {{LABEL}}: {";
code_ += " auto ptr = reinterpret_cast<{{TYPE}} *>(table);";
code_ += " delete ptr;";
code_ += " break;";
code_ += " }";
}
code_ += " default: break;";
code_ += " }";
code_ += " table = nullptr;";
code_ += " type = {{NONE}};";
code_ += "}";
code_ += "";
}
}
  // Generates a value, with a cast applied if the field has a different
  // underlying type from its interface type (currently only the case for
  // enums). "from" specifies the direction, true meaning from the
  // underlying type to the interface type.
std::string GenUnderlyingCast(const FieldDef &field, bool from,
const std::string &val) {
if (from && field.value.type.base_type == BASE_TYPE_BOOL) {
return val + " != 0";
} else if ((field.value.type.enum_def &&
IsScalar(field.value.type.base_type)) ||
field.value.type.base_type == BASE_TYPE_BOOL) {
return "static_cast<" + GenTypeBasic(field.value.type, from) + ">(" +
val + ")";
} else {
return val;
}
}
std::string GenFieldOffsetName(const FieldDef &field) {
std::string uname = field.name;
std::transform(uname.begin(), uname.end(), uname.begin(), ::toupper);
return "VT_" + uname;
}
void GenFullyQualifiedNameGetter(const std::string &name) {
if (!parser_.opts.generate_name_strings) {
return;
}
auto fullname = parser_.namespaces_.back()->GetFullyQualifiedName(name);
code_.SetValue("NAME", fullname);
code_.SetValue("CONSTEXPR", "FLATBUFFERS_CONSTEXPR");
code_ += " static {{CONSTEXPR}} const char *GetFullyQualifiedName() {";
code_ += " return \"{{NAME}}\";";
code_ += " }";
}
std::string GenDefaultConstant(const FieldDef &field) {
return field.value.type.base_type == BASE_TYPE_FLOAT
? field.value.constant + "f"
: field.value.constant;
}
std::string GetDefaultScalarValue(const FieldDef &field) {
if (field.value.type.enum_def && IsScalar(field.value.type.base_type)) {
auto ev = field.value.type.enum_def->ReverseLookup(
static_cast<int>(StringToInt(field.value.constant.c_str())), false);
if (ev) {
return WrapInNameSpace(
field.value.type.enum_def->defined_namespace,
GetEnumValUse(*field.value.type.enum_def, *ev));
} else {
return GenUnderlyingCast(field, true, field.value.constant);
}
} else if (field.value.type.base_type == BASE_TYPE_BOOL) {
return field.value.constant == "0" ? "false" : "true";
} else {
return GenDefaultConstant(field);
}
}
void GenParam(const FieldDef &field, bool direct, const char *prefix) {
code_.SetValue("PRE", prefix);
code_.SetValue("PARAM_NAME", field.name);
if (direct && field.value.type.base_type == BASE_TYPE_STRING) {
code_.SetValue("PARAM_TYPE", "const char *");
code_.SetValue("PARAM_VALUE", "nullptr");
} else if (direct && field.value.type.base_type == BASE_TYPE_VECTOR) {
auto type = GenTypeWire(field.value.type.VectorType(), "", false);
code_.SetValue("PARAM_TYPE", "const std::vector<" + type + "> *");
code_.SetValue("PARAM_VALUE", "nullptr");
} else {
code_.SetValue("PARAM_TYPE", GenTypeWire(field.value.type, " ", true));
code_.SetValue("PARAM_VALUE", GetDefaultScalarValue(field));
}
code_ += "{{PRE}}{{PARAM_TYPE}}{{PARAM_NAME}} = {{PARAM_VALUE}}\\";
}
// Generate a member, including a default value for scalars and raw pointers.
void GenMember(const FieldDef &field) {
if (!field.deprecated && // Deprecated fields won't be accessible.
field.value.type.base_type != BASE_TYPE_UTYPE) {
auto type = GenTypeNative(field.value.type, false, field);
auto cpp_type = field.attributes.Lookup("cpp_type");
auto full_type = (cpp_type ? cpp_type->constant + " *" : type + " ");
code_.SetValue("FIELD_TYPE", full_type);
code_.SetValue("FIELD_NAME", field.name);
code_ += " {{FIELD_TYPE}}{{FIELD_NAME}};";
}
}
// Generate the default constructor for this struct. Properly initialize all
// scalar members with default values.
void GenDefaultConstructor(const StructDef& struct_def) {
std::string initializer_list;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated && // Deprecated fields won't be accessible.
field.value.type.base_type != BASE_TYPE_UTYPE) {
auto cpp_type = field.attributes.Lookup("cpp_type");
// Scalar types get parsed defaults, raw pointers get nullptrs.
if (IsScalar(field.value.type.base_type)) {
if (!initializer_list.empty()) {
initializer_list += ",\n ";
}
initializer_list += field.name;
initializer_list += "(" + GetDefaultScalarValue(field) + ")";
} else if (field.value.type.base_type == BASE_TYPE_STRUCT) {
if (IsStruct(field.value.type)) {
auto native_default = field.attributes.Lookup("native_default");
if (native_default) {
if (!initializer_list.empty()) {
initializer_list += ",\n ";
}
initializer_list +=
field.name + "(" + native_default->constant + ")";
}
}
} else if (cpp_type) {
if (!initializer_list.empty()) {
initializer_list += ",\n ";
}
initializer_list += field.name + "(0)";
}
}
}
if (!initializer_list.empty()) {
initializer_list = "\n : " + initializer_list;
}
code_.SetValue("NATIVE_NAME", NativeName(struct_def.name));
code_.SetValue("INIT_LIST", initializer_list);
code_ += " {{NATIVE_NAME}}(){{INIT_LIST}} {";
code_ += " }";
}
void GenNativeTable(const StructDef &struct_def) {
const auto native_name = NativeName(struct_def.name);
code_.SetValue("STRUCT_NAME", struct_def.name);
code_.SetValue("NATIVE_NAME", native_name);
// Generate a C++ object that can hold an unpacked version of this table.
code_ += "struct {{NATIVE_NAME}} : public flatbuffers::NativeTable {";
code_ += " typedef {{STRUCT_NAME}} TableType;";
GenFullyQualifiedNameGetter(native_name);
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
GenMember(**it);
}
GenDefaultConstructor(struct_def);
code_ += "};";
code_ += "";
}
// Generate the code to call the appropriate Verify function(s) for a field.
void GenVerifyCall(const FieldDef &field, const char* prefix) {
code_.SetValue("PRE", prefix);
code_.SetValue("NAME", field.name);
code_.SetValue("REQUIRED", field.required ? "Required" : "");
code_.SetValue("SIZE", GenTypeSize(field.value.type));
code_.SetValue("OFFSET", GenFieldOffsetName(field));
code_ += "{{PRE}}VerifyField{{REQUIRED}}<{{SIZE}}>(verifier, {{OFFSET}})\\";
switch (field.value.type.base_type) {
case BASE_TYPE_UNION: {
code_.SetValue("ENUM_NAME", field.value.type.enum_def->name);
code_.SetValue("SUFFIX", UnionTypeFieldSuffix());
code_ += "{{PRE}}Verify{{ENUM_NAME}}(verifier, {{NAME}}(), "
"{{NAME}}{{SUFFIX}}())\\";
break;
}
case BASE_TYPE_STRUCT: {
if (!field.value.type.struct_def->fixed) {
code_ += "{{PRE}}verifier.VerifyTable({{NAME}}())\\";
}
break;
}
case BASE_TYPE_STRING: {
code_ += "{{PRE}}verifier.Verify({{NAME}}())\\";
break;
}
case BASE_TYPE_VECTOR: {
code_ += "{{PRE}}verifier.Verify({{NAME}}())\\";
switch (field.value.type.element) {
case BASE_TYPE_STRING: {
code_ += "{{PRE}}verifier.VerifyVectorOfStrings({{NAME}}())\\";
break;
}
case BASE_TYPE_STRUCT: {
if (!field.value.type.struct_def->fixed) {
code_ += "{{PRE}}verifier.VerifyVectorOfTables({{NAME}}())\\";
}
break;
}
case BASE_TYPE_UNION: {
code_.SetValue("ENUM_NAME", field.value.type.enum_def->name);
code_ += "{{PRE}}Verify{{ENUM_NAME}}Vector(verifier, {{NAME}}(), {{NAME}}_type())\\";
break;
}
default:
break;
}
break;
}
default: {
break;
}
}
}
// Generate an accessor struct, builder structs & function for a table.
void GenTable(const StructDef &struct_def) {
if (parser_.opts.generate_object_based_api) {
GenNativeTable(struct_def);
}
// Generate an accessor struct, with methods of the form:
// type name() const { return GetField<type>(offset, defaultval); }
GenComment(struct_def.doc_comment);
code_.SetValue("STRUCT_NAME", struct_def.name);
code_ += "struct {{STRUCT_NAME}} FLATBUFFERS_FINAL_CLASS"
" : private flatbuffers::Table {";
if (parser_.opts.generate_object_based_api) {
code_ += " typedef {{NATIVE_NAME}} NativeTableType;";
}
GenFullyQualifiedNameGetter(struct_def.name);
// Generate field id constants.
if (struct_def.fields.vec.size() > 0) {
// We need to add a trailing comma to all elements except the last one as
// older versions of gcc complain about this.
code_.SetValue("SEP", "");
code_ += " enum {";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated) {
// Deprecated fields won't be accessible.
continue;
}
code_.SetValue("OFFSET_NAME", GenFieldOffsetName(field));
code_.SetValue("OFFSET_VALUE", NumToString(field.value.offset));
code_ += "{{SEP}} {{OFFSET_NAME}} = {{OFFSET_VALUE}}\\";
code_.SetValue("SEP", ",\n");
}
code_ += "";
code_ += " };";
}
// Generate the accessors.
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated) {
// Deprecated fields won't be accessible.
continue;
}
const bool is_struct = IsStruct(field.value.type);
const bool is_scalar = IsScalar(field.value.type.base_type);
code_.SetValue("FIELD_NAME", field.name);
      // Call a different accessor for pointers, one that indirects.
std::string accessor = "";
if (is_scalar) {
accessor = "GetField<";
} else if (is_struct) {
accessor = "GetStruct<";
} else {
accessor = "GetPointer<";
}
auto offset_str = GenFieldOffsetName(field);
auto offset_type =
GenTypeGet(field.value.type, "", "const ", " *", false);
auto call = accessor + offset_type + ">(" + offset_str;
// Default value as second arg for non-pointer types.
if (is_scalar) {
call += ", " + GenDefaultConstant(field);
}
call += ")";
GenComment(field.doc_comment, " ");
code_.SetValue("FIELD_TYPE",
GenTypeGet(field.value.type, " ", "const ", " *", true));
code_.SetValue("FIELD_VALUE", GenUnderlyingCast(field, true, call));
code_ += " {{FIELD_TYPE}}{{FIELD_NAME}}() const {";
code_ += " return {{FIELD_VALUE}};";
code_ += " }";
if (field.value.type.base_type == BASE_TYPE_UNION) {
auto u = field.value.type.enum_def;
code_ += " template<typename T> "
"const T *{{FIELD_NAME}}_as() const;";
for (auto u_it = u->vals.vec.begin();
u_it != u->vals.vec.end(); ++u_it) {
if (!(*u_it)->struct_def) {
continue;
}
auto arg_struct_def = (*u_it)->struct_def;
auto full_struct_name = WrapInNameSpace(*arg_struct_def);
          // @TODO: Maybe make this decision more universal? How?
code_.SetValue("U_GET_TYPE", field.name + UnionTypeFieldSuffix());
code_.SetValue("U_ELEMENT_TYPE", WrapInNameSpace(
u->defined_namespace, GetEnumValUse(*u, **u_it)));
code_.SetValue("U_FIELD_TYPE", "const " + full_struct_name + " *");
code_.SetValue("U_ELEMENT_NAME", full_struct_name);
code_.SetValue("U_FIELD_NAME",
field.name + "_as_" + (*u_it)->name);
// `const Type *union_name_asType() const` accessor.
code_ += " {{U_FIELD_TYPE}}{{U_FIELD_NAME}}() const {";
code_ += " return ({{U_GET_TYPE}}() == {{U_ELEMENT_TYPE}})? "
"static_cast<{{U_FIELD_TYPE}}>({{FIELD_NAME}}()) "
": nullptr;";
code_ += " }";
}
}
if (parser_.opts.mutable_buffer) {
if (is_scalar) {
const auto type = GenTypeWire(field.value.type, "", false);
code_.SetValue("SET_FN", "SetField<" + type + ">");
code_.SetValue("OFFSET_NAME", offset_str);
code_.SetValue("FIELD_TYPE", GenTypeBasic(field.value.type, true));
code_.SetValue("FIELD_VALUE",
GenUnderlyingCast(field, false, "_" + field.name));
code_.SetValue("DEFAULT_VALUE", GenDefaultConstant(field));
code_ += " bool mutate_{{FIELD_NAME}}({{FIELD_TYPE}} "
"_{{FIELD_NAME}}) {";
code_ += " return {{SET_FN}}({{OFFSET_NAME}}, {{FIELD_VALUE}}, {{DEFAULT_VALUE}});";
code_ += " }";
} else {
auto type = GenTypeGet(field.value.type, " ", "", " *", true);
auto underlying = accessor + type + ">(" + offset_str + ")";
code_.SetValue("FIELD_TYPE", type);
code_.SetValue("FIELD_VALUE",
GenUnderlyingCast(field, true, underlying));
code_ += " {{FIELD_TYPE}}mutable_{{FIELD_NAME}}() {";
code_ += " return {{FIELD_VALUE}};";
code_ += " }";
}
}
auto nested = field.attributes.Lookup("nested_flatbuffer");
if (nested) {
std::string qualified_name =
parser_.namespaces_.back()->GetFullyQualifiedName(
nested->constant);
auto nested_root = parser_.structs_.Lookup(qualified_name);
assert(nested_root); // Guaranteed to exist by parser.
(void)nested_root;
code_.SetValue("CPP_NAME", TranslateNameSpace(qualified_name));
code_ += " const {{CPP_NAME}} *{{FIELD_NAME}}_nested_root() const {";
code_ += " const uint8_t* data = {{FIELD_NAME}}()->Data();";
code_ += " return flatbuffers::GetRoot<{{CPP_NAME}}>(data);";
code_ += " }";
}
// Generate a comparison function for this field if it is a key.
if (field.key) {
const bool is_string = (field.value.type.base_type == BASE_TYPE_STRING);
code_ += " bool KeyCompareLessThan(const {{STRUCT_NAME}} *o) const {";
if (is_string) {
code_ += " return *{{FIELD_NAME}}() < *o->{{FIELD_NAME}}();";
} else {
code_ += " return {{FIELD_NAME}}() < o->{{FIELD_NAME}}();";
}
code_ += " }";
if (is_string) {
code_ += " int KeyCompareWithValue(const char *val) const {";
code_ += " return strcmp({{FIELD_NAME}}()->c_str(), val);";
code_ += " }";
} else {
auto type = GenTypeBasic(field.value.type, false);
if (parser_.opts.scoped_enums && field.value.type.enum_def &&
IsScalar(field.value.type.base_type)) {
type = GenTypeGet(field.value.type, " ", "const ", " *", true);
}
code_.SetValue("KEY_TYPE", type);
code_ += " int KeyCompareWithValue({{KEY_TYPE}} val) const {";
code_ += " const auto key = {{FIELD_NAME}}();";
code_ += " if (key < val) {";
code_ += " return -1;";
code_ += " } else if (key > val) {";
code_ += " return 1;";
code_ += " } else {";
code_ += " return 0;";
code_ += " }";
code_ += " }";
}
}
}
// Generate a verifier function that can check a buffer from an untrusted
// source will never cause reads outside the buffer.
code_ += " bool Verify(flatbuffers::Verifier &verifier) const {";
code_ += " return VerifyTableStart(verifier)\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated) {
continue;
}
GenVerifyCall(field, " &&\n ");
}
code_ += " &&\n verifier.EndTable();";
code_ += " }";
if (parser_.opts.generate_object_based_api) {
// Generate the UnPack() pre declaration.
code_ += " " + TableUnPackSignature(struct_def, true) + ";";
code_ += " " + TableUnPackToSignature(struct_def, true) + ";";
code_ += " " + TablePackSignature(struct_def, true) + ";";
}
code_ += "};"; // End of table.
code_ += "";
// Explicit specializations for union accessors
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated ||
field.value.type.base_type != BASE_TYPE_UNION) {
continue;
}
auto u = field.value.type.enum_def;
code_.SetValue("FIELD_NAME", field.name);
for (auto u_it = u->vals.vec.begin();
u_it != u->vals.vec.end(); ++u_it) {
if (!(*u_it)->struct_def) {
continue;
}
auto arg_struct_def = (*u_it)->struct_def;
auto full_struct_name = WrapInNameSpace(*arg_struct_def);
code_.SetValue("U_ELEMENT_TYPE", WrapInNameSpace(
u->defined_namespace, GetEnumValUse(*u, **u_it)));
code_.SetValue("U_FIELD_TYPE", "const " + full_struct_name + " *");
code_.SetValue("U_ELEMENT_NAME", full_struct_name);
code_.SetValue("U_FIELD_NAME",
field.name + "_as_" + (*u_it)->name);
// `template<> const T *union_name_as<T>() const` accessor.
code_ += "template<> "
"inline {{U_FIELD_TYPE}}{{STRUCT_NAME}}::{{FIELD_NAME}}_as"
"<{{U_ELEMENT_NAME}}>() const {";
code_ += " return {{U_FIELD_NAME}}();";
code_ += "}";
code_ += "";
}
}
GenBuilders(struct_def);
if (parser_.opts.generate_object_based_api) {
// Generate a pre-declaration for a CreateX method that works with an
// unpacked C++ object.
code_ += TableCreateSignature(struct_def, true) + ";";
code_ += "";
}
}
void GenBuilders(const StructDef &struct_def) {
code_.SetValue("STRUCT_NAME", struct_def.name);
// Generate a builder struct:
code_ += "struct {{STRUCT_NAME}}Builder {";
code_ += " flatbuffers::FlatBufferBuilder &fbb_;";
code_ += " flatbuffers::uoffset_t start_;";
bool has_string_or_vector_fields = false;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) {
const bool is_scalar = IsScalar(field.value.type.base_type);
const bool is_string = field.value.type.base_type == BASE_TYPE_STRING;
const bool is_vector = field.value.type.base_type == BASE_TYPE_VECTOR;
if (is_string || is_vector) {
has_string_or_vector_fields = true;
}
std::string offset = GenFieldOffsetName(field);
std::string name = GenUnderlyingCast(field, false, field.name);
std::string value = is_scalar ? GenDefaultConstant(field) : "";
// Generate accessor functions of the form:
// void add_name(type name) {
// fbb_.AddElement<type>(offset, name, default);
// }
code_.SetValue("FIELD_NAME", field.name);
code_.SetValue("FIELD_TYPE", GenTypeWire(field.value.type, " ", true));
code_.SetValue("ADD_OFFSET", struct_def.name + "::" + offset);
code_.SetValue("ADD_NAME", name);
code_.SetValue("ADD_VALUE", value);
if (is_scalar) {
const auto type = GenTypeWire(field.value.type, "", false);
code_.SetValue("ADD_FN", "AddElement<" + type + ">");
} else if (IsStruct(field.value.type)) {
code_.SetValue("ADD_FN", "AddStruct");
} else {
code_.SetValue("ADD_FN", "AddOffset");
}
code_ += " void add_{{FIELD_NAME}}({{FIELD_TYPE}}{{FIELD_NAME}}) {";
code_ += " fbb_.{{ADD_FN}}(\\";
if (is_scalar) {
code_ += "{{ADD_OFFSET}}, {{ADD_NAME}}, {{ADD_VALUE}});";
} else {
code_ += "{{ADD_OFFSET}}, {{ADD_NAME}});";
}
code_ += " }";
}
}
// Builder constructor
code_ += " {{STRUCT_NAME}}Builder(flatbuffers::FlatBufferBuilder &_fbb)";
code_ += " : fbb_(_fbb) {";
code_ += " start_ = fbb_.StartTable();";
code_ += " }";
// Assignment operator;
code_ += " {{STRUCT_NAME}}Builder &operator="
"(const {{STRUCT_NAME}}Builder &);";
// Finish() function.
auto num_fields = NumToString(struct_def.fields.vec.size());
code_ += " flatbuffers::Offset<{{STRUCT_NAME}}> Finish() {";
code_ += " const auto end = fbb_.EndTable(start_, " + num_fields + ");";
code_ += " auto o = flatbuffers::Offset<{{STRUCT_NAME}}>(end);";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated && field.required) {
code_.SetValue("FIELD_NAME", field.name);
code_.SetValue("OFFSET_NAME", GenFieldOffsetName(field));
code_ += " fbb_.Required(o, {{STRUCT_NAME}}::{{OFFSET_NAME}});";
}
}
code_ += " return o;";
code_ += " }";
code_ += "};";
code_ += "";
// Generate a convenient CreateX function that uses the above builder
// to create a table in one go.
code_ += "inline flatbuffers::Offset<{{STRUCT_NAME}}> "
"Create{{STRUCT_NAME}}(";
code_ += " flatbuffers::FlatBufferBuilder &_fbb\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) {
GenParam(field, false, ",\n ");
}
}
code_ += ") {";
code_ += " {{STRUCT_NAME}}Builder builder_(_fbb);";
for (size_t size = struct_def.sortbysize ? sizeof(largest_scalar_t) : 1;
size; size /= 2) {
for (auto it = struct_def.fields.vec.rbegin();
it != struct_def.fields.vec.rend(); ++it) {
const auto &field = **it;
if (!field.deprecated && (!struct_def.sortbysize ||
size == SizeOf(field.value.type.base_type))) {
code_.SetValue("FIELD_NAME", field.name);
code_ += " builder_.add_{{FIELD_NAME}}({{FIELD_NAME}});";
}
}
}
code_ += " return builder_.Finish();";
code_ += "}";
code_ += "";
// Generate a CreateXDirect function with vector types as parameters
if (has_string_or_vector_fields) {
code_ += "inline flatbuffers::Offset<{{STRUCT_NAME}}> "
"Create{{STRUCT_NAME}}Direct(";
code_ += " flatbuffers::FlatBufferBuilder &_fbb\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) {
GenParam(field, true, ",\n ");
}
}
// Need to call "Create" with the struct namespace.
const auto qualified_create_name = struct_def.defined_namespace->GetFullyQualifiedName("Create");
code_.SetValue("CREATE_NAME", TranslateNameSpace(qualified_create_name));
code_ += ") {";
code_ += " return {{CREATE_NAME}}{{STRUCT_NAME}}(";
code_ += " _fbb\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) {
code_.SetValue("FIELD_NAME", field.name);
if (field.value.type.base_type == BASE_TYPE_STRING) {
code_ += ",\n {{FIELD_NAME}} ? "
"_fbb.CreateString({{FIELD_NAME}}) : 0\\";
} else if (field.value.type.base_type == BASE_TYPE_VECTOR) {
auto type = GenTypeWire(field.value.type.VectorType(), "", false);
code_ += ",\n {{FIELD_NAME}} ? "
"_fbb.CreateVector<" + type + ">(*{{FIELD_NAME}}) : 0\\";
} else {
code_ += ",\n {{FIELD_NAME}}\\";
}
}
}
code_ += ");";
code_ += "}";
code_ += "";
}
}
std::string GenUnpackVal(const Type &type, const std::string &val,
bool invector, const FieldDef &afield) {
switch (type.base_type) {
case BASE_TYPE_STRING: {
return val + "->str()";
}
case BASE_TYPE_STRUCT: {
const auto name = WrapInNameSpace(*type.struct_def);
if (IsStruct(type)) {
auto native_type = type.struct_def->attributes.Lookup("native_type");
if (native_type) {
return "flatbuffers::UnPack(*" + val + ")";
} else if (invector || afield.native_inline) {
return "*" + val;
} else {
const auto ptype = GenTypeNativePtr(name, &afield, true);
return ptype + "(new " + name + "(*" + val + "))";
}
} else {
const auto ptype = GenTypeNativePtr(NativeName(name), &afield, true);
return ptype + "(" + val + "->UnPack(_resolver))";
}
}
      default: {
        return val;
      }
    }
  }
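  // Builds the C++ statement that copies one deserialized field from |this|
  // (bound to the local _e) into the native object |_o|; for union type
  // fields, |union_field| is the union value field that follows.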
std::string GenUnpackFieldStatement(const FieldDef &field,
const FieldDef *union_field) {
std::string code;
switch (field.value.type.base_type) {
case BASE_TYPE_VECTOR: {
std::string indexing;
if (field.value.type.enum_def) {
indexing += "(" + field.value.type.enum_def->name + ")";
}
indexing += "_e->Get(_i)";
if (field.value.type.element == BASE_TYPE_BOOL) {
indexing += " != 0";
}
// Generate code that pushes data from _e to _o in the form:
// for (uoffset_t i = 0; i < _e->size(); ++i) {
// _o->field.push_back(_e->Get(_i));
// }
code += "{ _o->" + field.name + ".resize(_e->size()); ";
code += "for (flatbuffers::uoffset_t _i = 0;";
code += " _i < _e->size(); _i++) { ";
code += "_o->" + field.name + "[_i] = ";
code += GenUnpackVal(field.value.type.VectorType(),
indexing, true, field);
code += "; } }";
break;
}
case BASE_TYPE_UTYPE: {
assert(union_field->value.type.base_type == BASE_TYPE_UNION);
// Generate code that sets the union type, of the form:
// _o->field.type = _e;
code += "_o->" + union_field->name + ".type = _e;";
break;
}
case BASE_TYPE_UNION: {
// Generate code that sets the union table, of the form:
// _o->field.table = Union::Unpack(_e, field_type(), resolver);
code += "_o->" + field.name + ".table = ";
code += field.value.type.enum_def->name + "Union::UnPack(";
code += "_e, " + field.name + UnionTypeFieldSuffix() + "(),";
code += "_resolver);";
break;
}
default: {
auto cpp_type = field.attributes.Lookup("cpp_type");
if (cpp_type) {
// Generate code that resolves the cpp pointer type, of the form:
// if (resolver)
// (*resolver)(&_o->field, (hash_value_t)(_e));
// else
// _o->field = nullptr;
code += "if (_resolver) ";
code += "(*_resolver)";
code += "(reinterpret_cast<void **>(&_o->" + field.name + "), ";
code += "static_cast<flatbuffers::hash_value_t>(_e));";
code += " else ";
code += "_o->" + field.name + " = nullptr;";
} else {
// Generate code for assigning the value, of the form:
// _o->field = value;
code += "_o->" + field.name + " = ";
code += GenUnpackVal(field.value.type, "_e", false, field) + ";";
}
break;
}
}
return code;
}
std::string GenCreateParam(const FieldDef &field) {
std::string value = "_o->";
if (field.value.type.base_type == BASE_TYPE_UTYPE) {
value += field.name.substr(0, field.name.size() -
strlen(UnionTypeFieldSuffix()));
value += ".type";
} else {
value += field.name;
}
if (field.attributes.Lookup("cpp_type")) {
auto type = GenTypeBasic(field.value.type, false);
value = "_rehasher ? "
"static_cast<" + type + ">((*_rehasher)(" + value + ")) : 0";
}
std::string code;
switch (field.value.type.base_type) {
// String fields are of the form:
// _fbb.CreateString(_o->field)
case BASE_TYPE_STRING: {
code += "_fbb.CreateString(" + value + ")";
// For optional fields, check to see if there actually is any data
// in _o->field before attempting to access it.
if (!field.required) {
code = value + ".size() ? " + code + " : 0";
}
break;
}
// Vector fields come in several flavours, of the forms:
// _fbb.CreateVector(_o->field);
// _fbb.CreateVector((const utype*)_o->field.data(), _o->field.size());
// _fbb.CreateVectorOfStrings(_o->field)
// _fbb.CreateVectorOfStructs(_o->field)
      //   _fbb.CreateVector<Offset<T>>(_o->field.size(), [&](size_t i) {
// return CreateT(_fbb, _o->Get(i), rehasher);
// });
case BASE_TYPE_VECTOR: {
auto vector_type = field.value.type.VectorType();
switch (vector_type.base_type) {
case BASE_TYPE_STRING: {
code += "_fbb.CreateVectorOfStrings(" + value + ")";
break;
}
case BASE_TYPE_STRUCT: {
if (IsStruct(vector_type)) {
code += "_fbb.CreateVectorOfStructs(" + value + ")";
} else {
code += "_fbb.CreateVector<flatbuffers::Offset<";
code += WrapInNameSpace(*vector_type.struct_def) + ">>";
code += "(" + value + ".size(), [&](size_t i) {";
code += " return Create" + vector_type.struct_def->name;
code += "(_fbb, " + value + "[i]" + GenPtrGet(field) + ", ";
code += "_rehasher); })";
}
break;
}
case BASE_TYPE_BOOL: {
code += "_fbb.CreateVector(" + value + ")";
break;
}
default: {
if (field.value.type.enum_def) {
// For enumerations, we need to get access to the array data for
// the underlying storage type (eg. uint8_t).
const auto basetype = GenTypeBasic(
field.value.type.enum_def->underlying_type, false);
code += "_fbb.CreateVector((const " + basetype + "*)" + value +
".data(), " + value + ".size())";
} else {
code += "_fbb.CreateVector(" + value + ")";
}
break;
}
}
// For optional fields, check to see if there actually is any data
// in _o->field before attempting to access it.
if (!field.required) {
code = value + ".size() ? " + code + " : 0";
}
break;
}
case BASE_TYPE_UNION: {
// _o->field.Pack(_fbb);
code += value + ".Pack(_fbb)";
break;
}
case BASE_TYPE_STRUCT: {
if (IsStruct(field.value.type)) {
auto native_type =
field.value.type.struct_def->attributes.Lookup("native_type");
if (native_type) {
code += "flatbuffers::Pack(" + value + ")";
} else if (field.native_inline) {
code += "&" + value;
} else {
code += value + " ? " + value + GenPtrGet(field) + " : 0";
}
} else {
        // _o->field ? CreateT(_fbb, _o->field.get(), _rehasher) : 0;
const auto type = field.value.type.struct_def->name;
code += value + " ? Create" + type;
code += "(_fbb, " + value + GenPtrGet(field) + ", _rehasher)";
code += " : 0";
}
break;
}
default: {
code += value;
break;
}
}
return code;
}
// Generate code for tables that needs to come after the regular definition.
void GenTablePost(const StructDef &struct_def) {
code_.SetValue("STRUCT_NAME", struct_def.name);
code_.SetValue("NATIVE_NAME", NativeName(struct_def.name));
if (parser_.opts.generate_object_based_api) {
// Generate the X::UnPack() method.
code_ += "inline " + TableUnPackSignature(struct_def, false) + " {";
code_ += " auto _o = new {{NATIVE_NAME}}();";
code_ += " UnPackTo(_o, _resolver);";
code_ += " return _o;";
code_ += "}";
code_ += "";
code_ += "inline " + TableUnPackToSignature(struct_def, false) + " {";
code_ += " (void)_o;";
code_ += " (void)_resolver;";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated) {
continue;
}
// Assign a value from |this| to |_o|. Values from |this| are stored
// in a variable |_e| by calling this->field_type(). The value is then
// assigned to |_o| using the GenUnpackFieldStatement.
const bool is_union = field.value.type.base_type == BASE_TYPE_UTYPE;
const auto statement =
GenUnpackFieldStatement(field, is_union ? *(it + 1) : nullptr);
code_.SetValue("FIELD_NAME", field.name);
auto prefix = " { auto _e = {{FIELD_NAME}}(); ";
auto check = IsScalar(field.value.type.base_type) ? "" : "if (_e) ";
auto postfix = " };";
code_ += std::string(prefix) + check + statement + postfix;
}
code_ += "}";
code_ += "";
// Generate the X::Pack member function that simply calls the global
// CreateX function.
code_ += "inline " + TablePackSignature(struct_def, false) + " {";
code_ += " return Create{{STRUCT_NAME}}(_fbb, _o, _rehasher);";
code_ += "}";
code_ += "";
// Generate a CreateX method that works with an unpacked C++ object.
code_ += "inline " + TableCreateSignature(struct_def, false) + " {";
code_ += " (void)_rehasher;";
code_ += " (void)_o;";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
auto &field = **it;
if (field.deprecated) {
continue;
}
code_ += " auto _" + field.name + " = " + GenCreateParam(field) + ";";
}
// Need to call "Create" with the struct namespace.
const auto qualified_create_name = struct_def.defined_namespace->GetFullyQualifiedName("Create");
code_.SetValue("CREATE_NAME", TranslateNameSpace(qualified_create_name));
code_ += " return {{CREATE_NAME}}{{STRUCT_NAME}}(";
code_ += " _fbb\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
auto &field = **it;
if (field.deprecated) {
continue;
}
bool pass_by_address = false;
if (field.value.type.base_type == BASE_TYPE_STRUCT) {
if (IsStruct(field.value.type)) {
auto native_type =
field.value.type.struct_def->attributes.Lookup("native_type");
if (native_type) {
pass_by_address = true;
}
}
}
// Call the CreateX function using values from |_o|.
if (pass_by_address) {
code_ += ",\n &_" + field.name + "\\";
} else {
code_ += ",\n _" + field.name + "\\";
}
}
code_ += ");";
code_ += "}";
code_ += "";
}
}
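  // Generates padding code for a struct field: |field.padding| is a bitmask
  // in which bit i requests a (1 << i)-byte pad, and |f| is invoked once per
  // set bit with the pad size in bits.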
static void GenPadding(
const FieldDef &field, std::string *code_ptr, int *id,
const std::function<void(int bits, std::string *code_ptr, int *id)> &f) {
if (field.padding) {
for (int i = 0; i < 4; i++) {
if (static_cast<int>(field.padding) & (1 << i)) {
f((1 << i) * 8, code_ptr, id);
}
}
assert(!(field.padding & ~0xF));
}
}
static void PaddingDefinition(int bits, std::string *code_ptr, int *id) {
*code_ptr += " int" + NumToString(bits) + "_t padding" +
NumToString((*id)++) + "__;";
}
static void PaddingInitializer(int bits, std::string *code_ptr, int *id) {
(void)bits;
*code_ptr += ",\n padding" + NumToString((*id)++) + "__(0)";
}
static void PaddingNoop(int bits, std::string *code_ptr, int *id) {
(void)bits;
*code_ptr += " (void)padding" + NumToString((*id)++) + "__;";
}
// Generate an accessor struct with constructor for a flatbuffers struct.
void GenStruct(const StructDef &struct_def) {
// Generate an accessor struct, with private variables of the form:
// type name_;
// Generates manual padding and alignment.
// Variables are private because they contain little endian data on all
// platforms.
GenComment(struct_def.doc_comment);
code_.SetValue("ALIGN", NumToString(struct_def.minalign));
code_.SetValue("STRUCT_NAME", struct_def.name);
code_ += "MANUALLY_ALIGNED_STRUCT({{ALIGN}}) "
"{{STRUCT_NAME}} FLATBUFFERS_FINAL_CLASS {";
code_ += " private:";
int padding_id = 0;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
code_.SetValue("FIELD_TYPE",
GenTypeGet(field.value.type, " ", "", " ", false));
code_.SetValue("FIELD_NAME", field.name);
code_ += " {{FIELD_TYPE}}{{FIELD_NAME}}_;";
if (field.padding) {
std::string padding;
GenPadding(field, &padding, &padding_id, PaddingDefinition);
code_ += padding;
}
}
// Generate GetFullyQualifiedName
code_ += "";
code_ += " public:";
GenFullyQualifiedNameGetter(struct_def.name);
// Generate a default constructor.
code_ += " {{STRUCT_NAME}}() {";
code_ += " memset(this, 0, sizeof({{STRUCT_NAME}}));";
code_ += " }";
// Generate a copy constructor.
code_ += " {{STRUCT_NAME}}(const {{STRUCT_NAME}} &_o) {";
code_ += " memcpy(this, &_o, sizeof({{STRUCT_NAME}}));";
code_ += " }";
// Generate a constructor that takes all fields as arguments.
std::string arg_list;
std::string init_list;
padding_id = 0;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
const auto member_name = field.name + "_";
const auto arg_name = "_" + field.name;
const auto arg_type =
GenTypeGet(field.value.type, " ", "const ", " &", true);
if (it != struct_def.fields.vec.begin()) {
arg_list += ", ";
init_list += ",\n ";
}
arg_list += arg_type;
arg_list += arg_name;
init_list += member_name;
if (IsScalar(field.value.type.base_type)) {
auto type = GenUnderlyingCast(field, false, arg_name);
init_list += "(flatbuffers::EndianScalar(" + type + "))";
} else {
init_list += "(" + arg_name + ")";
}
if (field.padding) {
GenPadding(field, &init_list, &padding_id, PaddingInitializer);
}
}
code_.SetValue("ARG_LIST", arg_list);
code_.SetValue("INIT_LIST", init_list);
code_ += " {{STRUCT_NAME}}({{ARG_LIST}})";
code_ += " : {{INIT_LIST}} {";
padding_id = 0;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.padding) {
std::string padding;
GenPadding(field, &padding, &padding_id, PaddingNoop);
code_ += padding;
}
}
code_ += " }";
// Generate accessor methods of the form:
// type name() const { return flatbuffers::EndianScalar(name_); }
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
auto field_type = GenTypeGet(field.value.type, " ", "const ", " &", true);
auto is_scalar = IsScalar(field.value.type.base_type);
auto member = field.name + "_";
auto value = is_scalar ? "flatbuffers::EndianScalar(" + member + ")"
: member;
code_.SetValue("FIELD_NAME", field.name);
code_.SetValue("FIELD_TYPE", field_type);
code_.SetValue("FIELD_VALUE", GenUnderlyingCast(field, true, value));
GenComment(field.doc_comment, " ");
code_ += " {{FIELD_TYPE}}{{FIELD_NAME}}() const {";
code_ += " return {{FIELD_VALUE}};";
code_ += " }";
if (parser_.opts.mutable_buffer) {
auto mut_field_type = GenTypeGet(field.value.type, " ", "", " &", true);
code_.SetValue("FIELD_TYPE", mut_field_type);
if (is_scalar) {
code_.SetValue("ARG", GenTypeBasic(field.value.type, true));
code_.SetValue("FIELD_VALUE",
GenUnderlyingCast(field, false, "_" + field.name));
code_ += " void mutate_{{FIELD_NAME}}({{ARG}} _{{FIELD_NAME}}) {";
code_ += " flatbuffers::WriteScalar(&{{FIELD_NAME}}_, "
"{{FIELD_VALUE}});";
code_ += " }";
} else {
code_ += " {{FIELD_TYPE}}mutable_{{FIELD_NAME}}() {";
code_ += " return {{FIELD_NAME}}_;";
code_ += " }";
}
}
// Generate a comparison function for this field if it is a key.
if (field.key) {
code_ += " bool KeyCompareLessThan(const {{STRUCT_NAME}} *o) const {";
code_ += " return {{FIELD_NAME}}() < o->{{FIELD_NAME}}();";
code_ += " }";
auto type = GenTypeBasic(field.value.type, false);
if (parser_.opts.scoped_enums && field.value.type.enum_def &&
IsScalar(field.value.type.base_type)) {
type = GenTypeGet(field.value.type, " ", "const ", " *", true);
}
code_.SetValue("KEY_TYPE", type);
code_ += " int KeyCompareWithValue({{KEY_TYPE}} val) const {";
code_ += " const auto key = {{FIELD_NAME}}();";
code_ += " return static_cast<int>(key > val) - static_cast<int>(key < val);";
code_ += " }";
}
}
code_ += "};";
code_.SetValue("STRUCT_BYTE_SIZE", NumToString(struct_def.bytesize));
code_ += "STRUCT_END({{STRUCT_NAME}}, {{STRUCT_BYTE_SIZE}});";
code_ += "";
}
// Set up the correct namespace. Only open a namespace if the existing one is
// different (closing/opening only what is necessary).
//
// The file must start and end with an empty (or null) namespace so that
// namespaces are properly opened and closed.
void SetNameSpace(const Namespace *ns) {
if (cur_name_space_ == ns) {
return;
}
// Compute the size of the longest common namespace prefix.
// If cur_name_space is A::B::C::D and ns is A::B::E::F::G,
// the common prefix is A::B:: and we have old_size = 4, new_size = 5
// and common_prefix_size = 2
size_t old_size = cur_name_space_ ? cur_name_space_->components.size() : 0;
size_t new_size = ns ? ns->components.size() : 0;
size_t common_prefix_size = 0;
while (common_prefix_size < old_size && common_prefix_size < new_size &&
ns->components[common_prefix_size] ==
cur_name_space_->components[common_prefix_size]) {
common_prefix_size++;
}
// Close cur_name_space in reverse order to reach the common prefix.
// In the previous example, D then C are closed.
for (size_t j = old_size; j > common_prefix_size; --j) {
code_ += "} // namespace " + cur_name_space_->components[j - 1];
}
if (old_size != common_prefix_size) {
code_ += "";
}
// open namespace parts to reach the ns namespace
// in the previous example, E, then F, then G are opened
for (auto j = common_prefix_size; j != new_size; ++j) {
code_ += "namespace " + ns->components[j] + " {";
}
if (new_size != common_prefix_size) {
code_ += "";
}
cur_name_space_ = ns;
}
};
} // namespace cpp
bool GenerateCPP(const Parser &parser, const std::string &path,
const std::string &file_name) {
cpp::CppGenerator generator(parser, path, file_name);
return generator.generate();
}
std::string CPPMakeRule(const Parser &parser, const std::string &path,
const std::string &file_name) {
const auto filebase =
flatbuffers::StripPath(flatbuffers::StripExtension(file_name));
const auto included_files = parser.GetIncludedFilesRecursive(file_name);
std::string make_rule = GeneratedFileName(path, filebase) + ": ";
for (auto it = included_files.begin(); it != included_files.end(); ++it) {
make_rule += " " + *it;
}
return make_rule;
}
} // namespace flatbuffers
| 1 | 11,580 | Maybe move this to `util.h` ? | google-flatbuffers | java |
@@ -4,7 +4,8 @@
package Example
-import "github.com/google/flatbuffers/go"
+#include "flatbuffers/grpc.h"
+
import (
context "golang.org/x/net/context" | 1 | //Generated by gRPC Go plugin
//If you make any local changes, they will be lost
//source: monster_test
package Example
import "github.com/google/flatbuffers/go"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Client API for MonsterStorage service
type MonsterStorageClient interface{
Store(ctx context.Context, in *flatbuffers.Builder,
opts... grpc.CallOption) (* Stat, error)
Retrieve(ctx context.Context, in *flatbuffers.Builder,
opts... grpc.CallOption) (MonsterStorage_RetrieveClient, error)
}
type monsterStorageClient struct {
cc *grpc.ClientConn
}
func NewMonsterStorageClient(cc *grpc.ClientConn) MonsterStorageClient {
return &monsterStorageClient{cc}
}
func (c *monsterStorageClient) Store(ctx context.Context, in *flatbuffers.Builder,
opts... grpc.CallOption) (* Stat, error) {
out := new(Stat)
err := grpc.Invoke(ctx, "/Example.MonsterStorage/Store", in, out, c.cc, opts...)
if err != nil { return nil, err }
return out, nil
}
func (c *monsterStorageClient) Retrieve(ctx context.Context, in *flatbuffers.Builder,
opts... grpc.CallOption) (MonsterStorage_RetrieveClient, error) {
stream, err := grpc.NewClientStream(ctx, &_MonsterStorage_serviceDesc.Streams[0], c.cc, "/Example.MonsterStorage/Retrieve", opts...)
if err != nil { return nil, err }
x := &monsterStorageRetrieveClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }
if err := x.ClientStream.CloseSend(); err != nil { return nil, err }
  return x, nil
}
type MonsterStorage_RetrieveClient interface {
Recv() (*Monster, error)
grpc.ClientStream
}
type monsterStorageRetrieveClient struct{
grpc.ClientStream
}
func (x *monsterStorageRetrieveClient) Recv() (*Monster, error) {
m := new(Monster)
if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }
return m, nil
}
// Server API for MonsterStorage service
type MonsterStorageServer interface {
Store(context.Context, *Monster) (*flatbuffers.Builder, error)
Retrieve(*Stat, MonsterStorage_RetrieveServer) error
}
func RegisterMonsterStorageServer(s *grpc.Server, srv MonsterStorageServer) {
s.RegisterService(&_MonsterStorage_serviceDesc, srv)
}
func _MonsterStorage_Store_Handler(srv interface{}, ctx context.Context,
dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Monster)
if err := dec(in); err != nil { return nil, err }
if interceptor == nil { return srv.(MonsterStorageServer).Store(ctx, in) }
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/Example.MonsterStorage/Store",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MonsterStorageServer).Store(ctx, req.(* Monster))
}
return interceptor(ctx, in, info, handler)
}
func _MonsterStorage_Retrieve_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(Stat)
if err := stream.RecvMsg(m); err != nil { return err }
return srv.(MonsterStorageServer).Retrieve(m, &monsterStorageRetrieveServer{stream})
}
type MonsterStorage_RetrieveServer interface {
Send(* flatbuffers.Builder) error
grpc.ServerStream
}
type monsterStorageRetrieveServer struct {
grpc.ServerStream
}
func (x *monsterStorageRetrieveServer) Send(m *flatbuffers.Builder) error {
return x.ServerStream.SendMsg(m)
}
var _MonsterStorage_serviceDesc = grpc.ServiceDesc{
ServiceName: "Example.MonsterStorage",
HandlerType: (*MonsterStorageServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Store",
Handler: _MonsterStorage_Store_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "Retrieve",
Handler: _MonsterStorage_Retrieve_Handler,
ServerStreams: true,
},
},
}
| 1 | 11,736 | oops. this won't work will it | google-flatbuffers | java |
@@ -208,12 +208,16 @@ func (extra ExtraMetadataV3) IsReaderKeyBundleNew() bool {
// must be done separately.
func MakeInitialRootMetadataV3(tlfID tlf.ID, h tlf.Handle) (
*RootMetadataV3, error) {
- if tlfID.Type() != h.Type() {
+ switch {
+ case h.TypeForKeying() == tlf.TeamKeying && tlfID.Type() != tlf.SingleTeam:
+ fallthrough
+ case h.TypeForKeying() != tlf.TeamKeying && tlfID.Type() != h.Type():
return nil, errors.New("TlfID and TlfHandle disagree on TLF type")
+ default:
}
var writers []keybase1.UserOrTeamID
- if tlfID.Type() != tlf.Private {
+ if h.TypeForKeying() != tlf.PrivateKeying {
writers = make([]keybase1.UserOrTeamID, len(h.Writers))
copy(writers, h.Writers)
} | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package kbfsmd
import (
"fmt"
"runtime"
goerrors "github.com/go-errors/errors"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/go-codec/codec"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/tlf"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
// WriterMetadataV3 stores the metadata for a TLF that is
// only editable by users with writer permissions.
type WriterMetadataV3 struct {
// Serialized, possibly encrypted, version of the PrivateMetadata
SerializedPrivateMetadata []byte `codec:"data"`
// The last KB user with writer permissions to this TLF
// who modified this WriterMetadata
LastModifyingWriter keybase1.UID `codec:"lmw"`
// For public and single-team TLFs (since those don't have any
// keys at all).
Writers []keybase1.UserOrTeamID `codec:",omitempty"`
// Writers identified by unresolved social assertions.
UnresolvedWriters []keybase1.SocialAssertion `codec:"uw,omitempty"`
// Pointer to the writer key bundle for private TLFs.
WKeyBundleID TLFWriterKeyBundleID `codec:"wkid,omitempty"`
// Latest key generation.
LatestKeyGen KeyGen `codec:"lkg"`
// The directory ID, signed over to make verification easier
ID tlf.ID
// The branch ID, currently only set if this is in unmerged per-device history.
BID BranchID
// Flags
WFlags WriterFlags
// Estimated disk usage at this revision
DiskUsage uint64
// Estimated MD disk usage at this revision
MDDiskUsage uint64 `codec:",omitempty"`
// The total number of bytes in new data blocks
RefBytes uint64
// The total number of bytes in unreferenced blocks
UnrefBytes uint64
// The total number of bytes in new MD blocks
MDRefBytes uint64 `codec:",omitempty"`
codec.UnknownFieldSetHandler
}
// RootMetadataV3 is the MD that is signed by the reader or
// writer. Unlike RootMetadata, it contains exactly the serializable
// metadata.
type RootMetadataV3 struct {
// The metadata that is only editable by the writer.
WriterMetadata WriterMetadataV3 `codec:"wmd"`
// The last KB user who modified this RootMetadata
LastModifyingUser keybase1.UID
// Flags
Flags MetadataFlags
// The revision number
Revision Revision
// Pointer to the previous root block ID
PrevRoot ID
// For private TLFs. Any unresolved social assertions for readers.
UnresolvedReaders []keybase1.SocialAssertion `codec:"ur,omitempty"`
// Pointer to the reader key bundle for private TLFs.
RKeyBundleID TLFReaderKeyBundleID `codec:"rkid,omitempty"`
// ConflictInfo is set if there's a conflict for the given folder's
// handle after a social assertion resolution.
ConflictInfo *tlf.HandleExtension `codec:"ci,omitempty"`
// FinalizedInfo is set if there are no more valid writer keys capable
// of writing to the given folder.
FinalizedInfo *tlf.HandleExtension `codec:"fi,omitempty"`
// The root of the global Keybase Merkle tree at the time this
// update was created (from the writer's perspective). This field
// was added to V3 after it was live for a while, and older
// clients that don't know about this field yet might copy it into
// new updates via the unknown fields copier. Which means new MD
// updates might end up referring to older Merkle roots. That's
// ok since this is just a hint anyway, and shouldn't be fully
// trusted when checking MD updates against the Merkle tree.
// NOTE: this is a pointer in order to get the correct "omitempty"
// behavior, so that old MDs are still verifiable.
KBMerkleRoot *keybase1.MerkleRootV2 `codec:"mr,omitempty"`
codec.UnknownFieldSetHandler
}
// TODO: Use pkg/errors instead.
type missingKeyBundlesError struct {
stack []uintptr
}
func (e missingKeyBundlesError) Error() string {
s := "Missing key bundles: \n"
for _, pc := range e.stack {
f := goerrors.NewStackFrame(pc)
s += f.String()
}
return s
}
func makeMissingKeyBundlesError() missingKeyBundlesError {
stack := make([]uintptr, 20)
n := runtime.Callers(2, stack)
return missingKeyBundlesError{stack[:n]}
}
// ExtraMetadataV3 contains references to key bundles stored outside of metadata
// blocks. This only ever exists in memory and is never serialized itself.
type ExtraMetadataV3 struct {
wkb TLFWriterKeyBundleV3
rkb TLFReaderKeyBundleV3
// Set if wkb is new and should be sent to the server on an MD
// put.
wkbNew bool
// Set if rkb is new and should be sent to the server on an MD
// put.
rkbNew bool
}
// NewExtraMetadataV3 creates a new ExtraMetadataV3 given a pair of key bundles
func NewExtraMetadataV3(
wkb TLFWriterKeyBundleV3, rkb TLFReaderKeyBundleV3,
wkbNew, rkbNew bool) *ExtraMetadataV3 {
return &ExtraMetadataV3{wkb, rkb, wkbNew, rkbNew}
}
// MetadataVersion implements the ExtraMetadata interface for ExtraMetadataV3.
func (extra ExtraMetadataV3) MetadataVersion() MetadataVer {
return SegregatedKeyBundlesVer
}
func (extra *ExtraMetadataV3) updateNew(wkbNew, rkbNew bool) {
extra.wkbNew = extra.wkbNew || wkbNew
extra.rkbNew = extra.rkbNew || rkbNew
}
// DeepCopy implements the ExtraMetadata interface for ExtraMetadataV3.
func (extra ExtraMetadataV3) DeepCopy(codec kbfscodec.Codec) (
ExtraMetadata, error) {
wkb, err := extra.wkb.DeepCopy(codec)
if err != nil {
return nil, err
}
rkb, err := extra.rkb.DeepCopy(codec)
if err != nil {
return nil, err
}
return NewExtraMetadataV3(wkb, rkb, extra.wkbNew, extra.rkbNew), nil
}
// MakeSuccessorCopy implements the ExtraMetadata interface for ExtraMetadataV3.
func (extra ExtraMetadataV3) MakeSuccessorCopy(codec kbfscodec.Codec) (
ExtraMetadata, error) {
wkb, err := extra.wkb.DeepCopy(codec)
if err != nil {
return nil, err
}
rkb, err := extra.rkb.DeepCopy(codec)
if err != nil {
return nil, err
}
return NewExtraMetadataV3(wkb, rkb, false, false), nil
}
// GetWriterKeyBundle returns the contained writer key bundle.
func (extra ExtraMetadataV3) GetWriterKeyBundle() TLFWriterKeyBundleV3 {
return extra.wkb
}
// GetReaderKeyBundle returns the contained reader key bundle.
func (extra ExtraMetadataV3) GetReaderKeyBundle() TLFReaderKeyBundleV3 {
return extra.rkb
}
// IsWriterKeyBundleNew returns whether or not the writer key bundle
// is new and should be sent to the server on an MD put.
func (extra ExtraMetadataV3) IsWriterKeyBundleNew() bool {
return extra.wkbNew
}
// IsReaderKeyBundleNew returns whether or not the reader key bundle
// is new and should be sent to the server on an MD put.
func (extra ExtraMetadataV3) IsReaderKeyBundleNew() bool {
return extra.rkbNew
}
// MakeInitialRootMetadataV3 creates a new RootMetadataV3
// object with revision RevisionInitial, and the given TLF ID
// and handle. Note that if the given ID/handle are private, rekeying
// must be done separately.
func MakeInitialRootMetadataV3(tlfID tlf.ID, h tlf.Handle) (
*RootMetadataV3, error) {
if tlfID.Type() != h.Type() {
return nil, errors.New("TlfID and TlfHandle disagree on TLF type")
}
var writers []keybase1.UserOrTeamID
if tlfID.Type() != tlf.Private {
writers = make([]keybase1.UserOrTeamID, len(h.Writers))
copy(writers, h.Writers)
}
var unresolvedWriters, unresolvedReaders []keybase1.SocialAssertion
if len(h.UnresolvedWriters) > 0 {
unresolvedWriters = make(
[]keybase1.SocialAssertion, len(h.UnresolvedWriters))
copy(unresolvedWriters, h.UnresolvedWriters)
}
if len(h.UnresolvedReaders) > 0 {
unresolvedReaders = make(
[]keybase1.SocialAssertion, len(h.UnresolvedReaders))
copy(unresolvedReaders, h.UnresolvedReaders)
}
return &RootMetadataV3{
WriterMetadata: WriterMetadataV3{
Writers: writers,
ID: tlfID,
UnresolvedWriters: unresolvedWriters,
},
Revision: RevisionInitial,
UnresolvedReaders: unresolvedReaders,
// Normally an MD wouldn't start out with extensions, but this
// is useful for tests.
ConflictInfo: h.ConflictInfo,
FinalizedInfo: h.FinalizedInfo,
}, nil
}
// TlfID implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) TlfID() tlf.ID {
return md.WriterMetadata.ID
}
// KeyGenerationsToUpdate implements the RootMetadata interface
// for RootMetadataV3.
func (md *RootMetadataV3) KeyGenerationsToUpdate() (KeyGen, KeyGen) {
latest := md.LatestKeyGeneration()
if latest < FirstValidKeyGen {
return 0, 0
}
// We only keep track of the latest key generation in extra.
return latest, latest + 1
}
// LatestKeyGeneration implements the RootMetadata interface for
// RootMetadataV3.
func (md *RootMetadataV3) LatestKeyGeneration() KeyGen {
if md.TlfID().Type() == tlf.Public {
return PublicKeyGen
}
return md.WriterMetadata.LatestKeyGen
}
func (md *RootMetadataV3) haveOnlyUserRKeysChanged(
codec kbfscodec.Codec, prevMD *RootMetadataV3,
user keybase1.UID, prevRkb, rkb TLFReaderKeyBundleV3) (bool, error) {
if len(rkb.Keys) != len(prevRkb.Keys) {
return false, nil
}
for u, keys := range rkb.Keys {
if u != user {
prevKeys := prevRkb.Keys[u]
keysEqual, err := kbfscodec.Equal(codec, keys, prevKeys)
if err != nil {
return false, err
}
if !keysEqual {
return false, nil
}
}
}
return true, nil
}
// IsValidRekeyRequest implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) IsValidRekeyRequest(
codec kbfscodec.Codec, prevBareMd RootMetadata,
user keybase1.UID, prevExtra, extra ExtraMetadata) (
bool, error) {
if !md.IsWriterMetadataCopiedSet() {
// Not a copy.
return false, nil
}
prevMd, ok := prevBareMd.(*RootMetadataV3)
if !ok {
// Not the same type so not a copy.
return false, nil
}
prevExtraV3, ok := prevExtra.(*ExtraMetadataV3)
if !ok {
return false, errors.New("Invalid previous extra metadata")
}
extraV3, ok := extra.(*ExtraMetadataV3)
if !ok {
return false, errors.New("Invalid extra metadata")
}
writerEqual, err := kbfscodec.Equal(
codec, md.WriterMetadata, prevMd.WriterMetadata)
if err != nil {
return false, err
}
if !writerEqual {
// Copy mismatch.
return false, nil
}
onlyUserRKeysChanged, err := md.haveOnlyUserRKeysChanged(
codec, prevMd, user, prevExtraV3.rkb, extraV3.rkb)
if err != nil {
return false, err
}
if !onlyUserRKeysChanged {
// Keys outside of this user's reader key set have changed.
return false, nil
}
return true, nil
}
// MergedStatus implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) MergedStatus() MergeStatus {
if md.WriterMetadata.WFlags&MetadataFlagUnmerged != 0 {
return Unmerged
}
return Merged
}
// IsRekeySet implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) IsRekeySet() bool {
return md.Flags&MetadataFlagRekey != 0
}
// IsWriterMetadataCopiedSet implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) IsWriterMetadataCopiedSet() bool {
return md.Flags&MetadataFlagWriterMetadataCopied != 0
}
// IsFinal implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) IsFinal() bool {
return md.Flags&MetadataFlagFinal != 0
}
func (md *RootMetadataV3) checkNonPrivateExtra(extra ExtraMetadata) error {
if md.TlfID().Type() == tlf.Private {
return errors.New("checkNonPrivateExtra called on private TLF")
}
if extra != nil {
return errors.Errorf("Expected nil, got %T", extra)
}
return nil
}
func (md *RootMetadataV3) getTLFKeyBundles(extra ExtraMetadata) (
*TLFWriterKeyBundleV3, *TLFReaderKeyBundleV3, error) {
if md.TlfID().Type() != tlf.Private {
return nil, nil, InvalidNonPrivateTLFOperation{
md.TlfID(), "getTLFKeyBundles", md.Version(),
}
}
if extra == nil {
return nil, nil, makeMissingKeyBundlesError()
}
extraV3, ok := extra.(*ExtraMetadataV3)
if !ok {
return nil, nil, errors.Errorf(
"Expected *ExtraMetadataV3, got %T", extra)
}
return &extraV3.wkb, &extraV3.rkb, nil
}
// GetTLFKeyBundlesForTest returns the writer and reader key bundles
// from extra.
func (md *RootMetadataV3) GetTLFKeyBundlesForTest(extra ExtraMetadata) (
*TLFWriterKeyBundleV3, *TLFReaderKeyBundleV3, error) {
return md.getTLFKeyBundles(extra)
}
func (md *RootMetadataV3) isNonTeamWriter(
ctx context.Context, user keybase1.UID,
cryptKey kbfscrypto.CryptPublicKey, extra ExtraMetadata) (bool, error) {
switch md.TlfID().Type() {
case tlf.Public:
err := md.checkNonPrivateExtra(extra)
if err != nil {
return false, err
}
for _, w := range md.WriterMetadata.Writers {
if w == user.AsUserOrTeam() {
return true, nil
}
}
return false, nil
case tlf.Private:
wkb, _, err := md.getTLFKeyBundles(extra)
if err != nil {
return false, err
}
return wkb.IsWriter(user, cryptKey), nil
default:
return false, errors.Errorf("Unknown TLF type: %s", md.TlfID().Type())
}
}
// IsWriter implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) IsWriter(
ctx context.Context, user keybase1.UID,
cryptKey kbfscrypto.CryptPublicKey, verifyingKey kbfscrypto.VerifyingKey,
teamMemChecker TeamMembershipChecker, extra ExtraMetadata) (bool, error) {
switch md.TlfID().Type() {
case tlf.SingleTeam:
err := md.checkNonPrivateExtra(extra)
if err != nil {
return false, err
}
tid, err := md.WriterMetadata.Writers[0].AsTeam()
if err != nil {
return false, err
}
// TODO: Eventually this will have to use a Merkle sequence
// number to check historic versions.
isWriter, err := teamMemChecker.IsTeamWriter(
ctx, tid, user, verifyingKey)
if err != nil {
return false, err
}
return isWriter, nil
default:
return md.isNonTeamWriter(ctx, user, cryptKey, extra)
}
}
// IsReader implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) IsReader(
ctx context.Context, user keybase1.UID,
cryptKey kbfscrypto.CryptPublicKey, teamMemChecker TeamMembershipChecker,
extra ExtraMetadata) (bool, error) {
switch md.TlfID().Type() {
case tlf.Public:
err := md.checkNonPrivateExtra(extra)
if err != nil {
return false, err
}
return true, nil
case tlf.Private:
// Writers are also readers.
isWriter, err := md.isNonTeamWriter(ctx, user, cryptKey, extra)
if err != nil {
return false, err
}
if isWriter {
return true, nil
}
_, rkb, err := md.getTLFKeyBundles(extra)
if err != nil {
return false, err
}
return rkb.IsReader(user, cryptKey), nil
case tlf.SingleTeam:
err := md.checkNonPrivateExtra(extra)
if err != nil {
return false, err
}
tid, err := md.WriterMetadata.Writers[0].AsTeam()
if err != nil {
return false, err
}
// TODO: Eventually this will have to use a Merkle sequence
// number to check historic versions.
isReader, err := teamMemChecker.IsTeamReader(ctx, tid, user)
if err != nil {
return false, err
}
return isReader, nil
default:
panic(fmt.Sprintf("Unknown TLF type: %s", md.TlfID().Type()))
}
}
// DeepCopy implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) DeepCopy(
codec kbfscodec.Codec) (MutableRootMetadata, error) {
var newMd RootMetadataV3
if err := kbfscodec.Update(codec, &newMd, md); err != nil {
return nil, err
}
return &newMd, nil
}
// MakeSuccessorCopy implements the ImmutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) MakeSuccessorCopy(
codec kbfscodec.Codec, extra ExtraMetadata, _ MetadataVer,
_ func() ([]kbfscrypto.TLFCryptKey, error), isReadableAndWriter bool) (
MutableRootMetadata, ExtraMetadata, error) {
var extraCopy ExtraMetadata
if extra != nil {
var err error
extraCopy, err = extra.MakeSuccessorCopy(codec)
if err != nil {
return nil, nil, err
}
}
mdCopy, err := md.DeepCopy(codec)
if err != nil {
return nil, nil, err
}
// TODO: If there is ever a RootMetadataV4 this will need to perform the conversion.
return mdCopy, extraCopy, nil
}
// CheckValidSuccessor implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) CheckValidSuccessor(
currID ID, nextMd RootMetadata) error {
// (1) Verify current metadata is non-final.
if md.IsFinal() {
return MetadataIsFinalError{}
}
// (2) Check TLF ID.
if nextMd.TlfID() != md.TlfID() {
return MDTlfIDMismatch{
CurrID: md.TlfID(),
NextID: nextMd.TlfID(),
}
}
// (3) Check revision.
if nextMd.RevisionNumber() != md.RevisionNumber()+1 {
return MDRevisionMismatch{
Rev: nextMd.RevisionNumber(),
Curr: md.RevisionNumber(),
}
}
// (4) Check PrevRoot pointer.
expectedPrevRoot := currID
if nextMd.IsFinal() {
expectedPrevRoot = md.GetPrevRoot()
}
if nextMd.GetPrevRoot() != expectedPrevRoot {
return MDPrevRootMismatch{
prevRoot: nextMd.GetPrevRoot(),
expectedPrevRoot: expectedPrevRoot,
}
}
// (5) Check branch ID.
if md.MergedStatus() == nextMd.MergedStatus() && md.BID() != nextMd.BID() {
return errors.Errorf("Unexpected branch ID on successor: %s vs. %s",
md.BID(), nextMd.BID())
} else if md.MergedStatus() == Unmerged && nextMd.MergedStatus() == Merged {
return errors.New("merged MD can't follow unmerged MD")
}
// (6) Check disk usage.
expectedUsage := md.DiskUsage()
if !nextMd.IsWriterMetadataCopiedSet() {
expectedUsage += nextMd.RefBytes() - nextMd.UnrefBytes()
}
if nextMd.DiskUsage() != expectedUsage {
return MDDiskUsageMismatch{
expectedDiskUsage: expectedUsage,
actualDiskUsage: nextMd.DiskUsage(),
}
}
expectedMDUsage := md.MDDiskUsage()
if !nextMd.IsWriterMetadataCopiedSet() {
expectedMDUsage += nextMd.MDRefBytes()
}
// Add an exception for the case where MDRefBytes is equal, since
// it probably indicates an older client just copied the previous
// MDRefBytes value as an unknown field.
if nextMd.MDDiskUsage() != expectedMDUsage &&
md.MDRefBytes() != nextMd.MDRefBytes() {
return MDDiskUsageMismatch{
expectedDiskUsage: expectedMDUsage,
actualDiskUsage: nextMd.MDDiskUsage(),
}
}
// TODO: Check that the successor (bare) TLF handle is the
// same or more resolved.
return nil
}
// CheckValidSuccessorForServer implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) CheckValidSuccessorForServer(
currID ID, nextMd RootMetadata) error {
err := md.CheckValidSuccessor(currID, nextMd)
switch err := err.(type) {
case nil:
break
case MDRevisionMismatch:
return ServerErrorConflictRevision{
Expected: err.Curr + 1,
Actual: err.Rev,
}
case MDPrevRootMismatch:
return ServerErrorConflictPrevRoot{
Expected: err.expectedPrevRoot,
Actual: err.prevRoot,
}
case MDDiskUsageMismatch:
return ServerErrorConflictDiskUsage{
Expected: err.expectedDiskUsage,
Actual: err.actualDiskUsage,
}
default:
return ServerError{Err: err}
}
return nil
}
// MakeBareTlfHandle implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) MakeBareTlfHandle(extra ExtraMetadata) (
tlf.Handle, error) {
var writers, readers []keybase1.UserOrTeamID
if md.TlfID().Type() == tlf.Private {
wkb, rkb, err := md.getTLFKeyBundles(extra)
if err != nil {
return tlf.Handle{}, err
}
writers = make([]keybase1.UserOrTeamID, 0, len(wkb.Keys))
readers = make([]keybase1.UserOrTeamID, 0, len(rkb.Keys))
for w := range wkb.Keys {
writers = append(writers, w.AsUserOrTeam())
}
for r := range rkb.Keys {
// TODO: Return an error instead if r is
// PublicUID. Maybe return an error if r is in
// WKeys also. Or do all this in
// MakeBareTlfHandle.
if _, ok := wkb.Keys[r]; !ok &&
r != keybase1.PublicUID {
readers = append(readers, r.AsUserOrTeam())
}
}
} else {
err := md.checkNonPrivateExtra(extra)
if err != nil {
return tlf.Handle{}, err
}
writers = md.WriterMetadata.Writers
if md.TlfID().Type() == tlf.Public {
readers = []keybase1.UserOrTeamID{keybase1.PublicUID.AsUserOrTeam()}
}
}
return tlf.MakeHandle(
writers, readers,
md.WriterMetadata.UnresolvedWriters, md.UnresolvedReaders,
md.TlfHandleExtensions())
}
// TlfHandleExtensions implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) TlfHandleExtensions() (
extensions []tlf.HandleExtension) {
if md.ConflictInfo != nil {
extensions = append(extensions, *md.ConflictInfo)
}
if md.FinalizedInfo != nil {
extensions = append(extensions, *md.FinalizedInfo)
}
return extensions
}
// PromoteReaders implements the RootMetadata interface for
// RootMetadataV3.
func (md *RootMetadataV3) PromoteReaders(
readersToPromote map[keybase1.UID]bool, extra ExtraMetadata) error {
if md.TlfID().Type() != tlf.Private {
return InvalidNonPrivateTLFOperation{md.TlfID(), "PromoteReaders", md.Version()}
}
if len(readersToPromote) == 0 {
return nil
}
wkb, rkb, err := md.getTLFKeyBundles(extra)
if err != nil {
return err
}
for reader := range readersToPromote {
dkim, ok := rkb.Keys[reader]
if !ok {
return errors.Errorf("Could not find %s in rkb", reader)
}
// TODO: This is incorrect, since dkim contains offsets info
// rkb.TLFEphemeralPublicKeys, which don't directly translate
// to offsets into wkb.TLFEphemeralPublicKeys.
//
// Also, doing this may leave some entries in
// rkb.TLFEphemeralPublicKeys unreferenced, so they should be
// removed.
//
// See KBFS-1719.
wkb.Keys[reader] = dkim
delete(rkb.Keys, reader)
}
return nil
}
// RevokeRemovedDevices implements the RootMetadata interface for
// RootMetadataV3.
func (md *RootMetadataV3) RevokeRemovedDevices(
updatedWriterKeys, updatedReaderKeys UserDevicePublicKeys,
extra ExtraMetadata) (ServerHalfRemovalInfo, error) {
if md.TlfID().Type() != tlf.Private {
return nil, InvalidNonPrivateTLFOperation{
md.TlfID(), "RevokeRemovedDevices", md.Version()}
}
wkb, rkb, err := md.getTLFKeyBundles(extra)
if err != nil {
return nil, err
}
wRemovalInfo := wkb.Keys.RemoveDevicesNotIn(updatedWriterKeys)
rRemovalInfo := rkb.Keys.RemoveDevicesNotIn(updatedReaderKeys)
return wRemovalInfo.MergeUsers(rRemovalInfo)
}
// GetUserDevicePublicKeys implements the RootMetadata interface
// for RootMetadataV3.
func (md *RootMetadataV3) GetUserDevicePublicKeys(extra ExtraMetadata) (
writerDeviceKeys, readerDeviceKeys UserDevicePublicKeys, err error) {
if md.TlfID().Type() != tlf.Private {
return nil, nil, InvalidNonPrivateTLFOperation{
md.TlfID(), "GetUserDevicePublicKeys", md.Version()}
}
wkb, rkb, err := md.getTLFKeyBundles(extra)
if err != nil {
return nil, nil, err
}
return wkb.Keys.ToPublicKeys(), rkb.Keys.ToPublicKeys(), nil
}
// GetTLFCryptKeyParams implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) GetTLFCryptKeyParams(
keyGen KeyGen, user keybase1.UID,
key kbfscrypto.CryptPublicKey, extra ExtraMetadata) (
kbfscrypto.TLFEphemeralPublicKey,
kbfscrypto.EncryptedTLFCryptKeyClientHalf,
kbfscrypto.TLFCryptKeyServerHalfID, bool, error) {
if keyGen != md.LatestKeyGeneration() {
return kbfscrypto.TLFEphemeralPublicKey{},
kbfscrypto.EncryptedTLFCryptKeyClientHalf{},
kbfscrypto.TLFCryptKeyServerHalfID{}, false,
TLFCryptKeyNotPerDeviceEncrypted{md.TlfID(), keyGen}
}
wkb, rkb, err := md.getTLFKeyBundles(extra)
if err != nil {
return kbfscrypto.TLFEphemeralPublicKey{},
kbfscrypto.EncryptedTLFCryptKeyClientHalf{},
kbfscrypto.TLFCryptKeyServerHalfID{}, false, err
}
isWriter := true
dkim := wkb.Keys[user]
if dkim == nil {
dkim = rkb.Keys[user]
if dkim == nil {
return kbfscrypto.TLFEphemeralPublicKey{},
kbfscrypto.EncryptedTLFCryptKeyClientHalf{},
kbfscrypto.TLFCryptKeyServerHalfID{}, false, nil
}
isWriter = false
}
info, ok := dkim[key]
if !ok {
return kbfscrypto.TLFEphemeralPublicKey{},
kbfscrypto.EncryptedTLFCryptKeyClientHalf{},
kbfscrypto.TLFCryptKeyServerHalfID{}, false, nil
}
var publicKeys kbfscrypto.TLFEphemeralPublicKeys
var keyType string
if isWriter {
publicKeys = wkb.TLFEphemeralPublicKeys
keyType = "writer"
} else {
publicKeys = rkb.TLFEphemeralPublicKeys
keyType = "reader"
}
keyCount := len(publicKeys)
index := info.EPubKeyIndex
if index >= keyCount {
return kbfscrypto.TLFEphemeralPublicKey{},
kbfscrypto.EncryptedTLFCryptKeyClientHalf{},
kbfscrypto.TLFCryptKeyServerHalfID{}, false,
errors.Errorf("Invalid %s key index %d >= %d",
keyType, index, keyCount)
}
return publicKeys[index], info.ClientHalf, info.ServerHalfID, true, nil
}
// CheckWKBID returns an error if the ID of the given writer key
// bundle doesn't match the given one.
func CheckWKBID(codec kbfscodec.Codec,
wkbID TLFWriterKeyBundleID, wkb TLFWriterKeyBundleV3) error {
computedWKBID, err := MakeTLFWriterKeyBundleID(codec, wkb)
if err != nil {
return err
}
if wkbID != computedWKBID {
return errors.Errorf("Expected WKB ID %s, got %s",
wkbID, computedWKBID)
}
return nil
}
// CheckRKBID returns an error if the ID of the given reader key
// bundle doesn't match the given one.
func CheckRKBID(codec kbfscodec.Codec,
rkbID TLFReaderKeyBundleID, rkb TLFReaderKeyBundleV3) error {
computedRKBID, err := MakeTLFReaderKeyBundleID(codec, rkb)
if err != nil {
return err
}
if rkbID != computedRKBID {
return errors.Errorf("Expected RKB ID %s, got %s",
rkbID, computedRKBID)
}
return nil
}
// IsValidAndSigned implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) IsValidAndSigned(
ctx context.Context, codec kbfscodec.Codec,
teamMemChecker TeamMembershipChecker, extra ExtraMetadata,
writerVerifyingKey kbfscrypto.VerifyingKey) error {
if md.TlfID().Type() == tlf.Private {
wkb, rkb, err := md.getTLFKeyBundles(extra)
if err != nil {
return err
}
err = CheckWKBID(codec, md.GetTLFWriterKeyBundleID(), *wkb)
if err != nil {
return err
}
err = CheckRKBID(codec, md.GetTLFReaderKeyBundleID(), *rkb)
if err != nil {
return err
}
} else {
err := md.checkNonPrivateExtra(extra)
if err != nil {
return err
}
}
if md.IsFinal() {
if md.Revision < RevisionInitial+1 {
return errors.Errorf("Invalid final revision %d", md.Revision)
}
if md.Revision == (RevisionInitial + 1) {
if md.PrevRoot != (ID{}) {
return errors.Errorf("Invalid PrevRoot %s for initial final revision", md.PrevRoot)
}
} else {
if md.PrevRoot == (ID{}) {
return errors.New("No PrevRoot for non-initial final revision")
}
}
} else {
if md.Revision < RevisionInitial {
return errors.Errorf("Invalid revision %d", md.Revision)
}
if md.Revision == RevisionInitial {
if md.PrevRoot != (ID{}) {
return errors.Errorf("Invalid PrevRoot %s for initial revision", md.PrevRoot)
}
} else {
if md.PrevRoot == (ID{}) {
return errors.New("No PrevRoot for non-initial revision")
}
}
}
if len(md.WriterMetadata.SerializedPrivateMetadata) == 0 {
return errors.New("No private metadata")
}
if (md.MergedStatus() == Merged) != (md.BID() == NullBranchID) {
return errors.Errorf("Branch ID %s doesn't match merged status %s",
md.BID(), md.MergedStatus())
}
handle, err := md.MakeBareTlfHandle(extra)
if err != nil {
return err
}
writer := md.LastModifyingWriter()
user := md.LastModifyingUser
isWriter := false
isReader := false
if md.TlfID().Type() == tlf.SingleTeam {
tid, err := md.WriterMetadata.Writers[0].AsTeam()
if err != nil {
return err
}
// TODO: Eventually this will have to use a Merkle sequence
// number to check historic versions.
isWriter, err = teamMemChecker.IsTeamWriter(
ctx, tid, writer, writerVerifyingKey)
if err != nil {
return err
}
isReader, err = teamMemChecker.IsTeamReader(ctx, tid, user)
if err != nil {
return err
}
} else {
isWriter = handle.IsWriter(writer.AsUserOrTeam())
isReader = handle.IsReader(user.AsUserOrTeam())
}
// Make sure the last writer is valid.
if !isWriter {
return errors.Errorf("Invalid modifying writer %s", writer)
}
// Make sure the last modifier is valid.
if !isReader {
return errors.Errorf("Invalid modifying user %s", user)
}
return nil
}
// IsLastModifiedBy implements the RootMetadata interface for
// RootMetadataV3.
func (md *RootMetadataV3) IsLastModifiedBy(
uid keybase1.UID, key kbfscrypto.VerifyingKey) error {
// Verify the user and device are the writer.
writer := md.LastModifyingWriter()
if !md.IsWriterMetadataCopiedSet() {
if writer != uid {
return errors.Errorf("Last writer %s != %s", writer, uid)
}
}
// Verify the user and device are the last modifier.
user := md.GetLastModifyingUser()
if user != uid {
return errors.Errorf("Last modifier %s != %s", user, uid)
}
return nil
}
// LastModifyingWriter implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) LastModifyingWriter() keybase1.UID {
return md.WriterMetadata.LastModifyingWriter
}
// GetLastModifyingUser implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) GetLastModifyingUser() keybase1.UID {
return md.LastModifyingUser
}
// RefBytes implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) RefBytes() uint64 {
return md.WriterMetadata.RefBytes
}
// UnrefBytes implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) UnrefBytes() uint64 {
return md.WriterMetadata.UnrefBytes
}
// MDRefBytes implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) MDRefBytes() uint64 {
return md.WriterMetadata.MDRefBytes
}
// DiskUsage implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) DiskUsage() uint64 {
return md.WriterMetadata.DiskUsage
}
// MDDiskUsage implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) MDDiskUsage() uint64 {
return md.WriterMetadata.MDDiskUsage
}
// SetRefBytes implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) SetRefBytes(refBytes uint64) {
md.WriterMetadata.RefBytes = refBytes
}
// SetUnrefBytes implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) SetUnrefBytes(unrefBytes uint64) {
md.WriterMetadata.UnrefBytes = unrefBytes
}
// SetMDRefBytes implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) SetMDRefBytes(mdRefBytes uint64) {
md.WriterMetadata.MDRefBytes = mdRefBytes
}
// SetDiskUsage implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) SetDiskUsage(diskUsage uint64) {
md.WriterMetadata.DiskUsage = diskUsage
}
// SetMDDiskUsage implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) SetMDDiskUsage(mdDiskUsage uint64) {
md.WriterMetadata.MDDiskUsage = mdDiskUsage
}
// AddRefBytes implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) AddRefBytes(refBytes uint64) {
md.WriterMetadata.RefBytes += refBytes
}
// AddUnrefBytes implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) AddUnrefBytes(unrefBytes uint64) {
md.WriterMetadata.UnrefBytes += unrefBytes
}
// AddMDRefBytes implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) AddMDRefBytes(mdRefBytes uint64) {
md.WriterMetadata.MDRefBytes += mdRefBytes
}
// AddDiskUsage implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) AddDiskUsage(diskUsage uint64) {
md.WriterMetadata.DiskUsage += diskUsage
}
// AddMDDiskUsage implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) AddMDDiskUsage(mdDiskUsage uint64) {
md.WriterMetadata.MDDiskUsage += mdDiskUsage
}
// RevisionNumber implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) RevisionNumber() Revision {
return md.Revision
}
// MerkleRoot implements the RootMetadata interface for
// RootMetadataV3.
func (md *RootMetadataV3) MerkleRoot() keybase1.MerkleRootV2 {
return *md.KBMerkleRoot
}
// BID implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) BID() BranchID {
return md.WriterMetadata.BID
}
// GetPrevRoot implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) GetPrevRoot() ID {
return md.PrevRoot
}
// ClearRekeyBit implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) ClearRekeyBit() {
md.Flags &= ^MetadataFlagRekey
}
// ClearWriterMetadataCopiedBit implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) ClearWriterMetadataCopiedBit() {
md.Flags &= ^MetadataFlagWriterMetadataCopied
}
// IsUnmergedSet implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) IsUnmergedSet() bool {
return (md.WriterMetadata.WFlags & MetadataFlagUnmerged) != 0
}
// SetUnmerged implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) SetUnmerged() {
md.WriterMetadata.WFlags |= MetadataFlagUnmerged
}
// SetBranchID implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) SetBranchID(bid BranchID) {
md.WriterMetadata.BID = bid
}
// SetPrevRoot implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) SetPrevRoot(mdID ID) {
md.PrevRoot = mdID
}
// GetSerializedPrivateMetadata implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) GetSerializedPrivateMetadata() []byte {
return md.WriterMetadata.SerializedPrivateMetadata
}
// SetSerializedPrivateMetadata implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) SetSerializedPrivateMetadata(spmd []byte) {
md.WriterMetadata.SerializedPrivateMetadata = spmd
}
// GetSerializedWriterMetadata implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) GetSerializedWriterMetadata(
codec kbfscodec.Codec) ([]byte, error) {
return codec.Encode(md.WriterMetadata)
}
// SignWriterMetadataInternally implements the MutableRootMetadata interface for RootMetadataV2.
func (md *RootMetadataV3) SignWriterMetadataInternally(
ctx context.Context, codec kbfscodec.Codec,
signer kbfscrypto.Signer) error {
// Nothing to do.
//
// TODO: Set a flag, and a way to check it so that we can
// verify that this is called before sending to the server.
return nil
}
// SetLastModifyingWriter implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) SetLastModifyingWriter(user keybase1.UID) {
md.WriterMetadata.LastModifyingWriter = user
}
// SetLastModifyingUser implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) SetLastModifyingUser(user keybase1.UID) {
md.LastModifyingUser = user
}
// SetRekeyBit implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) SetRekeyBit() {
md.Flags |= MetadataFlagRekey
}
// SetFinalBit implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) SetFinalBit() {
md.Flags |= MetadataFlagFinal
}
// SetWriterMetadataCopiedBit implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) SetWriterMetadataCopiedBit() {
md.Flags |= MetadataFlagWriterMetadataCopied
}
// SetRevision implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) SetRevision(revision Revision) {
md.Revision = revision
}
// SetMerkleRoot implements the MutableRootMetadata interface for
// RootMetadataV3.
func (md *RootMetadataV3) SetMerkleRoot(root keybase1.MerkleRootV2) {
md.KBMerkleRoot = &root
}
func (md *RootMetadataV3) updateKeyBundles(codec kbfscodec.Codec,
extra ExtraMetadata,
updatedWriterKeys, updatedReaderKeys UserDevicePublicKeys,
ePubKey kbfscrypto.TLFEphemeralPublicKey,
ePrivKey kbfscrypto.TLFEphemeralPrivateKey,
tlfCryptKey kbfscrypto.TLFCryptKey) (UserDeviceKeyServerHalves, error) {
if md.TlfID().Type() != tlf.Private {
return nil, InvalidNonPrivateTLFOperation{
md.TlfID(), "updateKeyBundles", md.Version()}
}
wkb, rkb, err := md.getTLFKeyBundles(extra)
if err != nil {
return nil, err
}
// No need to explicitly handle the reader rekey case.
var newWriterIndex int
if len(updatedWriterKeys) > 0 {
newWriterIndex = len(wkb.TLFEphemeralPublicKeys)
}
wServerHalves, err := wkb.Keys.FillInUserInfos(
newWriterIndex, updatedWriterKeys,
ePrivKey, tlfCryptKey)
if err != nil {
return nil, err
}
// If we didn't fill in any new writer infos, don't add a new
// writer ephemeral key.
if len(wServerHalves) > 0 {
wkb.TLFEphemeralPublicKeys =
append(wkb.TLFEphemeralPublicKeys, ePubKey)
}
var newReaderIndex int
if len(updatedReaderKeys) > 0 {
newReaderIndex = len(rkb.TLFEphemeralPublicKeys)
}
rServerHalves, err := rkb.Keys.FillInUserInfos(
newReaderIndex, updatedReaderKeys,
ePrivKey, tlfCryptKey)
if err != nil {
return nil, err
}
// If we didn't fill in any new reader infos, don't add a new
// reader ephemeral key.
if len(rServerHalves) > 0 {
rkb.TLFEphemeralPublicKeys =
append(rkb.TLFEphemeralPublicKeys, ePubKey)
}
return wServerHalves.MergeUsers(rServerHalves)
}
// AddKeyGeneration implements the MutableRootMetadata interface
// for RootMetadataV3.
func (md *RootMetadataV3) AddKeyGeneration(
codec kbfscodec.Codec, currExtra ExtraMetadata,
updatedWriterKeys, updatedReaderKeys UserDevicePublicKeys,
ePubKey kbfscrypto.TLFEphemeralPublicKey,
ePrivKey kbfscrypto.TLFEphemeralPrivateKey,
pubKey kbfscrypto.TLFPublicKey,
currCryptKey, nextCryptKey kbfscrypto.TLFCryptKey) (
nextExtra ExtraMetadata,
serverHalves UserDeviceKeyServerHalves, err error) {
if md.TlfID().Type() != tlf.Private {
return nil, nil, InvalidNonPrivateTLFOperation{
md.TlfID(), "AddKeyGeneration", md.Version()}
}
if len(updatedWriterKeys) == 0 {
return nil, nil, errors.New(
"updatedWriterKeys unexpectedly empty")
}
if nextCryptKey == (kbfscrypto.TLFCryptKey{}) {
return nil, nil, errors.New("Zero next crypt key")
}
latestKeyGen := md.LatestKeyGeneration()
var encryptedHistoricKeys kbfscrypto.EncryptedTLFCryptKeys
if currCryptKey == (kbfscrypto.TLFCryptKey{}) {
if latestKeyGen >= FirstValidKeyGen {
return nil, nil, errors.Errorf(
"Zero current crypt key with latest key generation %d",
latestKeyGen)
}
} else {
currExtraV3, ok := currExtra.(*ExtraMetadataV3)
if !ok {
return nil, nil, errors.New("Invalid curr extra metadata")
}
existingWriterKeys := currExtraV3.wkb.Keys.ToPublicKeys()
if !existingWriterKeys.Equals(updatedWriterKeys) {
return nil, nil, fmt.Errorf(
"existingWriterKeys=%+v != updatedWriterKeys=%+v",
existingWriterKeys, updatedWriterKeys)
}
existingReaderKeys := currExtraV3.rkb.Keys.ToPublicKeys()
if !existingReaderKeys.Equals(updatedReaderKeys) {
return nil, nil, fmt.Errorf(
"existingReaderKeys=%+v != updatedReaderKeys=%+v",
existingReaderKeys, updatedReaderKeys)
}
if latestKeyGen < FirstValidKeyGen {
return nil, nil, errors.New(
"Non-zero current crypt key with no existing key generations")
}
var historicKeys []kbfscrypto.TLFCryptKey
if latestKeyGen > FirstValidKeyGen {
var err error
historicKeys, err = kbfscrypto.DecryptTLFCryptKeys(
codec,
currExtraV3.wkb.EncryptedHistoricTLFCryptKeys,
currCryptKey)
if err != nil {
return nil, nil, err
}
expectedHistoricKeyCount :=
int(md.LatestKeyGeneration() - FirstValidKeyGen)
if len(historicKeys) != expectedHistoricKeyCount {
return nil, nil, errors.Errorf(
"Expected %d historic keys, got %d",
expectedHistoricKeyCount,
len(historicKeys))
}
}
historicKeys = append(historicKeys, currCryptKey)
var err error
encryptedHistoricKeys, err = kbfscrypto.EncryptTLFCryptKeys(
codec, historicKeys, nextCryptKey)
if err != nil {
return nil, nil, err
}
}
newWriterKeys := TLFWriterKeyBundleV3{
Keys: make(UserDeviceKeyInfoMapV3),
TLFPublicKey: pubKey,
EncryptedHistoricTLFCryptKeys: encryptedHistoricKeys,
}
newReaderKeys := TLFReaderKeyBundleV3{
Keys: make(UserDeviceKeyInfoMapV3),
}
md.WriterMetadata.LatestKeyGen++
nextExtra = NewExtraMetadataV3(newWriterKeys, newReaderKeys, true, true)
serverHalves, err = md.updateKeyBundles(codec, nextExtra,
updatedWriterKeys, updatedReaderKeys,
ePubKey, ePrivKey, nextCryptKey)
if err != nil {
return nil, nil, err
}
return nextExtra, serverHalves, nil
}
// SetLatestKeyGenerationForTeamTLF implements the
// MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) SetLatestKeyGenerationForTeamTLF(keyGen KeyGen) {
if md.TlfID().Type() != tlf.SingleTeam {
panic(fmt.Sprintf(
"Can't call SetLatestKeyGenerationForTeamTLF on a %s TLF",
md.TlfID().Type()))
}
md.WriterMetadata.LatestKeyGen = keyGen
}
// SetUnresolvedReaders implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) SetUnresolvedReaders(readers []keybase1.SocialAssertion) {
md.UnresolvedReaders = readers
}
// SetUnresolvedWriters implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) SetUnresolvedWriters(writers []keybase1.SocialAssertion) {
md.WriterMetadata.UnresolvedWriters = writers
}
// SetConflictInfo implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) SetConflictInfo(ci *tlf.HandleExtension) {
md.ConflictInfo = ci
}
// SetFinalizedInfo implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) SetFinalizedInfo(fi *tlf.HandleExtension) {
md.FinalizedInfo = fi
}
// SetWriters implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) SetWriters(writers []keybase1.UserOrTeamID) {
md.WriterMetadata.Writers = writers
}
// SetTlfID implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) SetTlfID(tlf tlf.ID) {
md.WriterMetadata.ID = tlf
}
// ClearFinalBit implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) ClearFinalBit() {
md.Flags &= ^MetadataFlagFinal
}
// Version implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) Version() MetadataVer {
return SegregatedKeyBundlesVer
}
// GetCurrentTLFPublicKey implements the RootMetadata interface
// for RootMetadataV3.
func (md *RootMetadataV3) GetCurrentTLFPublicKey(
extra ExtraMetadata) (kbfscrypto.TLFPublicKey, error) {
wkb, _, err := md.getTLFKeyBundles(extra)
if err != nil {
return kbfscrypto.TLFPublicKey{}, err
}
return wkb.TLFPublicKey, nil
}
// GetUnresolvedParticipants implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) GetUnresolvedParticipants() []keybase1.SocialAssertion {
writers := md.WriterMetadata.UnresolvedWriters
readers := md.UnresolvedReaders
users := make([]keybase1.SocialAssertion, 0, len(writers)+len(readers))
users = append(users, writers...)
users = append(users, readers...)
return users
}
// UpdateKeyBundles implements the MutableRootMetadata interface
// for RootMetadataV3.
func (md *RootMetadataV3) UpdateKeyBundles(codec kbfscodec.Codec,
extra ExtraMetadata,
updatedWriterKeys, updatedReaderKeys UserDevicePublicKeys,
ePubKey kbfscrypto.TLFEphemeralPublicKey,
ePrivKey kbfscrypto.TLFEphemeralPrivateKey,
tlfCryptKeys []kbfscrypto.TLFCryptKey) (
[]UserDeviceKeyServerHalves, error) {
if len(tlfCryptKeys) != 1 {
return nil, fmt.Errorf(
"(MDv3) Expected 1 TLF crypt key, got %d",
len(tlfCryptKeys))
}
serverHalves, err := md.updateKeyBundles(codec, extra,
updatedWriterKeys, updatedReaderKeys,
ePubKey, ePrivKey, tlfCryptKeys[0])
if err != nil {
return nil, err
}
return []UserDeviceKeyServerHalves{serverHalves}, nil
}
// GetTLFWriterKeyBundleID implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) GetTLFWriterKeyBundleID() TLFWriterKeyBundleID {
return md.WriterMetadata.WKeyBundleID
}
// GetTLFReaderKeyBundleID implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) GetTLFReaderKeyBundleID() TLFReaderKeyBundleID {
return md.RKeyBundleID
}
// FinalizeRekey implements the MutableRootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) FinalizeRekey(
codec kbfscodec.Codec, extra ExtraMetadata) error {
extraV3, ok := extra.(*ExtraMetadataV3)
if !ok {
return errors.New("Invalid extra metadata")
}
oldWKBID := md.WriterMetadata.WKeyBundleID
oldRKBID := md.RKeyBundleID
newWKBID, err := MakeTLFWriterKeyBundleID(codec, extraV3.wkb)
if err != nil {
return err
}
newRKBID, err := MakeTLFReaderKeyBundleID(codec, extraV3.rkb)
if err != nil {
return err
}
md.WriterMetadata.WKeyBundleID = newWKBID
md.RKeyBundleID = newRKBID
extraV3.updateNew(newWKBID != oldWKBID, newRKBID != oldRKBID)
return nil
}
// StoresHistoricTLFCryptKeys implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) StoresHistoricTLFCryptKeys() bool {
return true
}
// GetHistoricTLFCryptKey implements the RootMetadata interface for RootMetadataV3.
func (md *RootMetadataV3) GetHistoricTLFCryptKey(codec kbfscodec.Codec,
keyGen KeyGen, currentKey kbfscrypto.TLFCryptKey, extra ExtraMetadata) (
kbfscrypto.TLFCryptKey, error) {
extraV3, ok := extra.(*ExtraMetadataV3)
if !ok {
return kbfscrypto.TLFCryptKey{}, errors.New(
"Invalid extra metadata")
}
if keyGen < FirstValidKeyGen || keyGen >= md.LatestKeyGeneration() {
return kbfscrypto.TLFCryptKey{}, errors.Errorf(
"Invalid key generation %d", keyGen)
}
oldKeys, err := kbfscrypto.DecryptTLFCryptKeys(
codec, extraV3.wkb.EncryptedHistoricTLFCryptKeys, currentKey)
if err != nil {
return kbfscrypto.TLFCryptKey{}, err
}
index := int(keyGen - FirstValidKeyGen)
if index >= len(oldKeys) || index < 0 {
return kbfscrypto.TLFCryptKey{}, errors.Errorf(
"Index %d out of range (max: %d)", index, len(oldKeys))
}
return oldKeys[index], nil
}
| 1 | 18,464 | @strib I assumed this is what you meant; but let me know if I'm wrong! | keybase-kbfs | go |
@@ -106,7 +106,7 @@ func (wh *WorkflowNilCheckHandler) DeprecateNamespace(ctx context.Context, reque
// StartWorkflowExecution starts a new long running workflow instance. It will create the instance with
// 'WorkflowExecutionStarted' event in history and also schedule the first WorkflowTask for the worker to make the
-// first decision for this instance. It will return 'WorkflowExecutionAlreadyStartedError', if an instance already
+// first command for this instance. It will return 'WorkflowExecutionAlreadyStartedError', if an instance already
// exists with same workflowId.
func (wh *WorkflowNilCheckHandler) StartWorkflowExecution(ctx context.Context, request *workflowservice.StartWorkflowExecutionRequest) (_ *workflowservice.StartWorkflowExecutionResponse, retError error) {
resp, err := wh.parentHandler.StartWorkflowExecution(ctx, request) | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package frontend
import (
"context"
"go.temporal.io/api/workflowservice/v1"
)
var _ workflowservice.WorkflowServiceServer = (*WorkflowNilCheckHandler)(nil)
type (
// WorkflowNilCheckHandler - gRPC handler interface for workflow workflowservice
WorkflowNilCheckHandler struct {
parentHandler Handler
}
)
// Due to a bug in gogo/protobuf https://github.com/gogo/protobuf/issues/651 the response can't be nil when the error is also nil.
// This handler makes sure the response is never nil when the error is nil.
// It can be removed from the pipeline when the bug is resolved.
// NewWorkflowNilCheckHandler creates a handler that never returns a nil response when the error is nil
func NewWorkflowNilCheckHandler(
parentHandler Handler,
) *WorkflowNilCheckHandler {
handler := &WorkflowNilCheckHandler{
parentHandler: parentHandler,
}
return handler
}
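// Illustrative wiring sketch (not part of the original pipeline): the nil-check
// handler simply wraps any concrete frontend Handler before the service is
// registered on the gRPC server. The names grpcServer and baseHandler below are
// hypothetical placeholders; RegisterWorkflowServiceServer is the generated
// workflowservice registration entry point.
//
//	wrapped := NewWorkflowNilCheckHandler(baseHandler)
//	workflowservice.RegisterWorkflowServiceServer(grpcServer, wrapped)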
// RegisterNamespace creates a new namespace which can be used as a container for all resources. Namespace is a top level
// entity within Temporal, used as a container for all resources like workflow executions, taskqueues, etc. Namespace
// acts as a sandbox and provides isolation for all resources within the namespace. All resources belong to exactly one
// namespace.
func (wh *WorkflowNilCheckHandler) RegisterNamespace(ctx context.Context, request *workflowservice.RegisterNamespaceRequest) (_ *workflowservice.RegisterNamespaceResponse, retError error) {
resp, err := wh.parentHandler.RegisterNamespace(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.RegisterNamespaceResponse{}
}
return resp, err
}
// DescribeNamespace returns the information and configuration for a registered namespace.
func (wh *WorkflowNilCheckHandler) DescribeNamespace(ctx context.Context, request *workflowservice.DescribeNamespaceRequest) (_ *workflowservice.DescribeNamespaceResponse, retError error) {
resp, err := wh.parentHandler.DescribeNamespace(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.DescribeNamespaceResponse{}
}
return resp, err
}
// ListNamespaces returns the information and configuration for all namespaces.
func (wh *WorkflowNilCheckHandler) ListNamespaces(ctx context.Context, request *workflowservice.ListNamespacesRequest) (_ *workflowservice.ListNamespacesResponse, retError error) {
resp, err := wh.parentHandler.ListNamespaces(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.ListNamespacesResponse{}
}
return resp, err
}
// UpdateNamespace is used to update the information and configuration for a registered namespace.
func (wh *WorkflowNilCheckHandler) UpdateNamespace(ctx context.Context, request *workflowservice.UpdateNamespaceRequest) (_ *workflowservice.UpdateNamespaceResponse, retError error) {
resp, err := wh.parentHandler.UpdateNamespace(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.UpdateNamespaceResponse{}
}
return resp, err
}
// DeprecateNamespace is used to update the status of a registered namespace to DEPRECATED. Once the namespace is deprecated
// it cannot be used to start new workflow executions. Existing workflow executions will continue to run on
// deprecated namespaces.
func (wh *WorkflowNilCheckHandler) DeprecateNamespace(ctx context.Context, request *workflowservice.DeprecateNamespaceRequest) (_ *workflowservice.DeprecateNamespaceResponse, retError error) {
resp, err := wh.parentHandler.DeprecateNamespace(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.DeprecateNamespaceResponse{}
}
return resp, err
}
// StartWorkflowExecution starts a new long running workflow instance. It will create the instance with
// 'WorkflowExecutionStarted' event in history and also schedule the first WorkflowTask for the worker to make the
// first decision for this instance. It will return 'WorkflowExecutionAlreadyStartedError', if an instance already
// exists with same workflowId.
func (wh *WorkflowNilCheckHandler) StartWorkflowExecution(ctx context.Context, request *workflowservice.StartWorkflowExecutionRequest) (_ *workflowservice.StartWorkflowExecutionResponse, retError error) {
resp, err := wh.parentHandler.StartWorkflowExecution(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.StartWorkflowExecutionResponse{}
}
return resp, err
}
// GetWorkflowExecutionHistory returns the history of the specified workflow execution. It fails with 'EntityNotExistError' if the specified workflow
// execution is unknown to the service.
func (wh *WorkflowNilCheckHandler) GetWorkflowExecutionHistory(ctx context.Context, request *workflowservice.GetWorkflowExecutionHistoryRequest) (_ *workflowservice.GetWorkflowExecutionHistoryResponse, retError error) {
resp, err := wh.parentHandler.GetWorkflowExecutionHistory(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.GetWorkflowExecutionHistoryResponse{}
}
return resp, err
}
// PollWorkflowTaskQueue is called by application worker to process WorkflowTask from a specific taskQueue. A
// WorkflowTask is dispatched to callers for active workflow executions, with pending decisions.
// Application is then expected to call 'RespondWorkflowTaskCompleted' API when it is done processing the WorkflowTask.
// It will also create a 'WorkflowTaskStarted' event in the history for that session before handing off WorkflowTask to
// application worker.
func (wh *WorkflowNilCheckHandler) PollWorkflowTaskQueue(ctx context.Context, request *workflowservice.PollWorkflowTaskQueueRequest) (_ *workflowservice.PollWorkflowTaskQueueResponse, retError error) {
resp, err := wh.parentHandler.PollWorkflowTaskQueue(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.PollWorkflowTaskQueueResponse{}
}
return resp, err
}
// RespondWorkflowTaskCompleted is called by application worker to complete a WorkflowTask handed as a result of
// 'PollWorkflowTaskQueue' API call. Completing a WorkflowTask will result in new events for the workflow execution and
// potentially new ActivityTask being created for corresponding decisions. It will also create a WorkflowTaskCompleted
// event in the history for that session. Use the 'taskToken' provided as response of PollWorkflowTaskQueue API call
// for completing the WorkflowTask.
// The response could contain a new workflow task if there is one or if the request asking for one.
func (wh *WorkflowNilCheckHandler) RespondWorkflowTaskCompleted(ctx context.Context, request *workflowservice.RespondWorkflowTaskCompletedRequest) (_ *workflowservice.RespondWorkflowTaskCompletedResponse, retError error) {
resp, err := wh.parentHandler.RespondWorkflowTaskCompleted(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.RespondWorkflowTaskCompletedResponse{}
}
return resp, err
}
// RespondWorkflowTaskFailed is called by application worker to indicate failure. This results in
// WorkflowTaskFailedEvent written to the history and a new WorkflowTask created. This API can be used by client to
// either clear sticky taskqueue or report any panics during WorkflowTask processing. Temporal will only append first
// WorkflowTaskFailed event to the history of workflow execution for consecutive failures.
func (wh *WorkflowNilCheckHandler) RespondWorkflowTaskFailed(ctx context.Context, request *workflowservice.RespondWorkflowTaskFailedRequest) (_ *workflowservice.RespondWorkflowTaskFailedResponse, retError error) {
resp, err := wh.parentHandler.RespondWorkflowTaskFailed(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.RespondWorkflowTaskFailedResponse{}
}
return resp, err
}
// PollActivityTaskQueue is called by application worker to process ActivityTask from a specific taskQueue. ActivityTask
// is dispatched to callers whenever a ScheduleTask decision is made for a workflow execution.
// Application is expected to call 'RespondActivityTaskCompleted' or 'RespondActivityTaskFailed' once it is done
// processing the task.
// Application also needs to call 'RecordActivityTaskHeartbeat' API within 'heartbeatTimeoutSeconds' interval to
// prevent the task from getting timed out. An event 'ActivityTaskStarted' event is also written to workflow execution
// history before the ActivityTask is dispatched to application worker.
func (wh *WorkflowNilCheckHandler) PollActivityTaskQueue(ctx context.Context, request *workflowservice.PollActivityTaskQueueRequest) (_ *workflowservice.PollActivityTaskQueueResponse, retError error) {
resp, err := wh.parentHandler.PollActivityTaskQueue(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.PollActivityTaskQueueResponse{}
}
return resp, err
}
// RecordActivityTaskHeartbeat is called by application worker while it is processing an ActivityTask. If worker fails
// to heartbeat within 'heartbeatTimeoutSeconds' interval for the ActivityTask, then it will be marked as timed out and
// 'ActivityTaskTimedOut' event will be written to the workflow history. Calling 'RecordActivityTaskHeartbeat' will
// fail with 'EntityNotExistsError' in such situations. Use the 'taskToken' provided as response of
// PollActivityTaskQueue API call for heartbeating.
func (wh *WorkflowNilCheckHandler) RecordActivityTaskHeartbeat(ctx context.Context, request *workflowservice.RecordActivityTaskHeartbeatRequest) (_ *workflowservice.RecordActivityTaskHeartbeatResponse, retError error) {
resp, err := wh.parentHandler.RecordActivityTaskHeartbeat(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.RecordActivityTaskHeartbeatResponse{}
}
return resp, err
}
// RecordActivityTaskHeartbeatById is called by application worker while it is processing an ActivityTask. If worker fails
// to heartbeat within 'heartbeatTimeoutSeconds' interval for the ActivityTask, then it will be marked as timed out and
// 'ActivityTaskTimedOut' event will be written to the workflow history. Calling 'RecordActivityTaskHeartbeatById' will
// fail with 'EntityNotExistsError' in such situations. Instead of using 'taskToken' like in RecordActivityTaskHeartbeat,
// use Namespace, WorkflowID and ActivityID
func (wh *WorkflowNilCheckHandler) RecordActivityTaskHeartbeatById(ctx context.Context, request *workflowservice.RecordActivityTaskHeartbeatByIdRequest) (_ *workflowservice.RecordActivityTaskHeartbeatByIdResponse, retError error) {
resp, err := wh.parentHandler.RecordActivityTaskHeartbeatById(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.RecordActivityTaskHeartbeatByIdResponse{}
}
return resp, err
}
// RespondActivityTaskCompleted is called by application worker when it is done processing an ActivityTask. It will
// result in a new 'ActivityTaskCompleted' event being written to the workflow history and a new WorkflowTask
// created for the workflow so new decisions could be made. Use the 'taskToken' provided as response of
// PollActivityTaskQueue API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid
// anymore due to activity timeout.
func (wh *WorkflowNilCheckHandler) RespondActivityTaskCompleted(ctx context.Context, request *workflowservice.RespondActivityTaskCompletedRequest) (_ *workflowservice.RespondActivityTaskCompletedResponse, retError error) {
resp, err := wh.parentHandler.RespondActivityTaskCompleted(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.RespondActivityTaskCompletedResponse{}
}
return resp, err
}
// RespondActivityTaskCompletedById is called by application worker when it is done processing an ActivityTask.
// It will result in a new 'ActivityTaskCompleted' event being written to the workflow history and a new WorkflowTask
// created for the workflow so new decisions could be made. Similar to RespondActivityTaskCompleted but use Namespace,
// WorkflowID and ActivityID instead of 'taskToken' for completion. It fails with 'EntityNotExistsError'
// if these IDs are not valid anymore due to activity timeout.
func (wh *WorkflowNilCheckHandler) RespondActivityTaskCompletedById(ctx context.Context, request *workflowservice.RespondActivityTaskCompletedByIdRequest) (_ *workflowservice.RespondActivityTaskCompletedByIdResponse, retError error) {
resp, err := wh.parentHandler.RespondActivityTaskCompletedById(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.RespondActivityTaskCompletedByIdResponse{}
}
return resp, err
}
// RespondActivityTaskFailed is called by application worker when it is done processing an ActivityTask. It will
// result in a new 'ActivityTaskFailed' event being written to the workflow history and a new WorkflowTask
// created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of
// PollActivityTaskQueue API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid
// anymore due to activity timeout.
func (wh *WorkflowNilCheckHandler) RespondActivityTaskFailed(ctx context.Context, request *workflowservice.RespondActivityTaskFailedRequest) (_ *workflowservice.RespondActivityTaskFailedResponse, retError error) {
resp, err := wh.parentHandler.RespondActivityTaskFailed(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.RespondActivityTaskFailedResponse{}
}
return resp, err
}
// RespondActivityTaskFailedById is called by application worker when it is done processing an ActivityTask.
// It will result in a new 'ActivityTaskFailed' event being written to the workflow history and a new WorkflowTask
// created for the workflow instance so new decisions could be made. Similar to RespondActivityTaskFailed but use
// Namespace, WorkflowID and ActivityID instead of 'taskToken' for completion. It fails with 'EntityNotExistsError'
// if these IDs are not valid anymore due to activity timeout.
func (wh *WorkflowNilCheckHandler) RespondActivityTaskFailedById(ctx context.Context, request *workflowservice.RespondActivityTaskFailedByIdRequest) (_ *workflowservice.RespondActivityTaskFailedByIdResponse, retError error) {
resp, err := wh.parentHandler.RespondActivityTaskFailedById(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.RespondActivityTaskFailedByIdResponse{}
}
return resp, err
}
// RespondActivityTaskCanceled is called by application worker when it is successfully canceled an ActivityTask. It will
// result in a new 'ActivityTaskCanceled' event being written to the workflow history and a new WorkflowTask
// created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of
// PollActivityTaskQueue API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid
// anymore due to activity timeout.
func (wh *WorkflowNilCheckHandler) RespondActivityTaskCanceled(ctx context.Context, request *workflowservice.RespondActivityTaskCanceledRequest) (_ *workflowservice.RespondActivityTaskCanceledResponse, retError error) {
resp, err := wh.parentHandler.RespondActivityTaskCanceled(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.RespondActivityTaskCanceledResponse{}
}
return resp, err
}
// RespondActivityTaskCanceledById is called by application worker when it is successfully canceled an ActivityTask.
// It will result in a new 'ActivityTaskCanceled' event being written to the workflow history and a new WorkflowTask
// created for the workflow instance so new decisions could be made. Similar to RespondActivityTaskCanceled but use
// Namespace, WorkflowID and ActivityID instead of 'taskToken' for completion. It fails with 'EntityNotExistsError'
// if these IDs are not valid anymore due to activity timeout.
func (wh *WorkflowNilCheckHandler) RespondActivityTaskCanceledById(ctx context.Context, request *workflowservice.RespondActivityTaskCanceledByIdRequest) (_ *workflowservice.RespondActivityTaskCanceledByIdResponse, retError error) {
resp, err := wh.parentHandler.RespondActivityTaskCanceledById(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.RespondActivityTaskCanceledByIdResponse{}
}
return resp, err
}
// RequestCancelWorkflowExecution is called by application worker when it wants to request cancellation of a workflow instance.
// It will result in a new 'WorkflowExecutionCancelRequested' event being written to the workflow history and a new WorkflowTask
// created for the workflow instance so new decisions could be made. It fails with 'EntityNotExistsError' if the workflow is not valid
// anymore due to completion or doesn't exist.
func (wh *WorkflowNilCheckHandler) RequestCancelWorkflowExecution(ctx context.Context, request *workflowservice.RequestCancelWorkflowExecutionRequest) (_ *workflowservice.RequestCancelWorkflowExecutionResponse, retError error) {
resp, err := wh.parentHandler.RequestCancelWorkflowExecution(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.RequestCancelWorkflowExecutionResponse{}
}
return resp, err
}
// SignalWorkflowExecution is used to send a signal event to running workflow execution. This results in
// WorkflowExecutionSignaled event recorded in the history and a workflow task being created for the execution.
func (wh *WorkflowNilCheckHandler) SignalWorkflowExecution(ctx context.Context, request *workflowservice.SignalWorkflowExecutionRequest) (_ *workflowservice.SignalWorkflowExecutionResponse, retError error) {
resp, err := wh.parentHandler.SignalWorkflowExecution(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.SignalWorkflowExecutionResponse{}
}
return resp, err
}
// SignalWithStartWorkflowExecution is used to ensure sending signal to a workflow.
// If the workflow is running, this results in WorkflowExecutionSignaled event being recorded in the history
// and a workflow task being created for the execution.
// If the workflow is not running or not found, this results in WorkflowExecutionStarted and WorkflowExecutionSignaled
// events being recorded in history, and a workflow task being created for the execution
func (wh *WorkflowNilCheckHandler) SignalWithStartWorkflowExecution(ctx context.Context, request *workflowservice.SignalWithStartWorkflowExecutionRequest) (_ *workflowservice.SignalWithStartWorkflowExecutionResponse, retError error) {
resp, err := wh.parentHandler.SignalWithStartWorkflowExecution(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.SignalWithStartWorkflowExecutionResponse{}
}
return resp, err
}
// ResetWorkflowExecution resets an existing workflow execution to the WorkflowTaskCompleted event (exclusive)
// and immediately terminates the current execution instance.
func (wh *WorkflowNilCheckHandler) ResetWorkflowExecution(ctx context.Context, request *workflowservice.ResetWorkflowExecutionRequest) (_ *workflowservice.ResetWorkflowExecutionResponse, retError error) {
resp, err := wh.parentHandler.ResetWorkflowExecution(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.ResetWorkflowExecutionResponse{}
}
return resp, err
}
// TerminateWorkflowExecution terminates an existing workflow execution by recording WorkflowExecutionTerminated event
// in the history and immediately terminating the execution instance.
func (wh *WorkflowNilCheckHandler) TerminateWorkflowExecution(ctx context.Context, request *workflowservice.TerminateWorkflowExecutionRequest) (_ *workflowservice.TerminateWorkflowExecutionResponse, retError error) {
resp, err := wh.parentHandler.TerminateWorkflowExecution(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.TerminateWorkflowExecutionResponse{}
}
return resp, err
}
// ListOpenWorkflowExecutions is a visibility API to list the open executions in a specific namespace.
func (wh *WorkflowNilCheckHandler) ListOpenWorkflowExecutions(ctx context.Context, request *workflowservice.ListOpenWorkflowExecutionsRequest) (_ *workflowservice.ListOpenWorkflowExecutionsResponse, retError error) {
resp, err := wh.parentHandler.ListOpenWorkflowExecutions(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.ListOpenWorkflowExecutionsResponse{}
}
return resp, err
}
// ListClosedWorkflowExecutions is a visibility API to list the closed executions in a specific namespace.
func (wh *WorkflowNilCheckHandler) ListClosedWorkflowExecutions(ctx context.Context, request *workflowservice.ListClosedWorkflowExecutionsRequest) (_ *workflowservice.ListClosedWorkflowExecutionsResponse, retError error) {
resp, err := wh.parentHandler.ListClosedWorkflowExecutions(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.ListClosedWorkflowExecutionsResponse{}
}
return resp, err
}
// ListWorkflowExecutions is a visibility API to list workflow executions in a specific namespace.
func (wh *WorkflowNilCheckHandler) ListWorkflowExecutions(ctx context.Context, request *workflowservice.ListWorkflowExecutionsRequest) (_ *workflowservice.ListWorkflowExecutionsResponse, retError error) {
resp, err := wh.parentHandler.ListWorkflowExecutions(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.ListWorkflowExecutionsResponse{}
}
return resp, err
}
// ListArchivedWorkflowExecutions is a visibility API to list archived workflow executions in a specific namespace.
func (wh *WorkflowNilCheckHandler) ListArchivedWorkflowExecutions(ctx context.Context, request *workflowservice.ListArchivedWorkflowExecutionsRequest) (_ *workflowservice.ListArchivedWorkflowExecutionsResponse, retError error) {
resp, err := wh.parentHandler.ListArchivedWorkflowExecutions(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.ListArchivedWorkflowExecutionsResponse{}
}
return resp, err
}
// ScanWorkflowExecutions is a visibility API to list a large amount of workflow executions in a specific namespace without order.
func (wh *WorkflowNilCheckHandler) ScanWorkflowExecutions(ctx context.Context, request *workflowservice.ScanWorkflowExecutionsRequest) (_ *workflowservice.ScanWorkflowExecutionsResponse, retError error) {
resp, err := wh.parentHandler.ScanWorkflowExecutions(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.ScanWorkflowExecutionsResponse{}
}
return resp, err
}
// CountWorkflowExecutions is a visibility API to count workflow executions in a specific namespace.
func (wh *WorkflowNilCheckHandler) CountWorkflowExecutions(ctx context.Context, request *workflowservice.CountWorkflowExecutionsRequest) (_ *workflowservice.CountWorkflowExecutionsResponse, retError error) {
resp, err := wh.parentHandler.CountWorkflowExecutions(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.CountWorkflowExecutionsResponse{}
}
return resp, err
}
// GetSearchAttributes is a visibility API to get all legal keys that could be used in list APIs
func (wh *WorkflowNilCheckHandler) GetSearchAttributes(ctx context.Context, request *workflowservice.GetSearchAttributesRequest) (_ *workflowservice.GetSearchAttributesResponse, retError error) {
resp, err := wh.parentHandler.GetSearchAttributes(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.GetSearchAttributesResponse{}
}
return resp, err
}
// RespondQueryTaskCompleted is called by application worker to complete a QueryTask (which is a WorkflowTask for query)
// as a result of 'PollWorkflowTaskQueue' API call. Completing a QueryTask will unblock the client call to 'QueryWorkflow'
// API and return the query result to client as a response to 'QueryWorkflow' API call.
func (wh *WorkflowNilCheckHandler) RespondQueryTaskCompleted(ctx context.Context, request *workflowservice.RespondQueryTaskCompletedRequest) (_ *workflowservice.RespondQueryTaskCompletedResponse, retError error) {
resp, err := wh.parentHandler.RespondQueryTaskCompleted(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.RespondQueryTaskCompletedResponse{}
}
return resp, err
}
// ResetStickyTaskQueue resets the sticky taskqueue related information in mutable state of a given workflow.
// Things cleared are:
// 1. StickyTaskQueue
// 2. StickyScheduleToStartTimeout
// 3. ClientLibraryVersion
// 4. ClientFeatureVersion
// 5. ClientImpl
func (wh *WorkflowNilCheckHandler) ResetStickyTaskQueue(ctx context.Context, request *workflowservice.ResetStickyTaskQueueRequest) (_ *workflowservice.ResetStickyTaskQueueResponse, retError error) {
resp, err := wh.parentHandler.ResetStickyTaskQueue(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.ResetStickyTaskQueueResponse{}
}
return resp, err
}
// QueryWorkflow returns query result for a specified workflow execution
func (wh *WorkflowNilCheckHandler) QueryWorkflow(ctx context.Context, request *workflowservice.QueryWorkflowRequest) (_ *workflowservice.QueryWorkflowResponse, retError error) {
resp, err := wh.parentHandler.QueryWorkflow(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.QueryWorkflowResponse{}
}
return resp, err
}
// DescribeWorkflowExecution returns information about the specified workflow execution.
func (wh *WorkflowNilCheckHandler) DescribeWorkflowExecution(ctx context.Context, request *workflowservice.DescribeWorkflowExecutionRequest) (_ *workflowservice.DescribeWorkflowExecutionResponse, retError error) {
resp, err := wh.parentHandler.DescribeWorkflowExecution(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.DescribeWorkflowExecutionResponse{}
}
return resp, err
}
// DescribeTaskQueue returns information about the target taskqueue. Right now this API returns the
// pollers which polled this taskqueue in the last few minutes.
func (wh *WorkflowNilCheckHandler) DescribeTaskQueue(ctx context.Context, request *workflowservice.DescribeTaskQueueRequest) (_ *workflowservice.DescribeTaskQueueResponse, retError error) {
resp, err := wh.parentHandler.DescribeTaskQueue(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.DescribeTaskQueueResponse{}
}
return resp, err
}
// GetClusterInfo ...
func (wh *WorkflowNilCheckHandler) GetClusterInfo(ctx context.Context, request *workflowservice.GetClusterInfoRequest) (_ *workflowservice.GetClusterInfoResponse, retError error) {
resp, err := wh.parentHandler.GetClusterInfo(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.GetClusterInfoResponse{}
}
return resp, err
}
// ListTaskQueuePartitions ...
func (wh *WorkflowNilCheckHandler) ListTaskQueuePartitions(ctx context.Context, request *workflowservice.ListTaskQueuePartitionsRequest) (_ *workflowservice.ListTaskQueuePartitionsResponse, retError error) {
resp, err := wh.parentHandler.ListTaskQueuePartitions(ctx, request)
if resp == nil && err == nil {
resp = &workflowservice.ListTaskQueuePartitionsResponse{}
}
return resp, err
}
| 1 | 9,834 | I believe the error has changed | temporalio-temporal | go |
@@ -41,6 +41,16 @@ module Blacklight
self.class.new(params || ActionController::Parameters.new, blacklight_config, controller)
end
+ ##
+ # Check if the query has any constraints defined (a query, facet, etc)
+ #
+ # @return [Boolean]
+ # rubocop:disable Style/PredicateName
+ def has_constraints?
+ params[:q].present? || params[:f].present?
+ end
+ # rubocop:enable Style/PredicateName
+
##
# Extension point for downstream applications
# to provide more interesting routing to | 1 | # frozen_string_literal: true
module Blacklight
# This class encapsulates the search state as represented by the query
# parameters namely: :f, :q, :page, :per_page and, :sort
class SearchState
attr_reader :blacklight_config # Must be called blacklight_config, because Blacklight::Facet calls blacklight_config.
attr_reader :params
# This method is never accessed in this class, but may be used by subclasses that need
# to access the url_helpers
attr_reader :controller
delegate :facet_configuration_for_field, to: :blacklight_config
# @param [ActionController::Parameters] params
# @param [Blacklight::Config] blacklight_config
# @param [ApplicationController] controller used for the routing helpers
def initialize(params, blacklight_config, controller = nil)
if params.respond_to?(:to_unsafe_h)
# This is the typical (not-ActionView::TestCase) code path.
@params = params.to_unsafe_h
# In Rails 5 to_unsafe_h returns a HashWithIndifferentAccess, in Rails 4 it returns Hash
@params = @params.with_indifferent_access if @params.instance_of? Hash
elsif params.is_a? Hash
# This is an ActionView::TestCase workaround for Rails 4.2.
@params = params.dup.with_indifferent_access
else
@params = params.dup.to_h.with_indifferent_access
end
@blacklight_config = blacklight_config
@controller = controller
end
def to_hash
@params
end
alias to_h to_hash
def reset(params = nil)
self.class.new(params || ActionController::Parameters.new, blacklight_config, controller)
end
##
# Extension point for downstream applications
# to provide more interesting routing to
# documents
def url_for_document(doc, options = {})
if respond_to?(:blacklight_config) &&
blacklight_config.show.route &&
(!doc.respond_to?(:to_model) || doc.to_model.is_a?(SolrDocument))
route = blacklight_config.show.route.merge(action: :show, id: doc).merge(options)
route[:controller] = params[:controller] if route[:controller] == :current
route
else
doc
end
end
# adds the value and/or field to params[:f]
# Does NOT remove request keys and otherwise ensure that the hash
# is suitable for a redirect. See
# add_facet_params_and_redirect
def add_facet_params(field, item)
p = reset_search_params
add_facet_param(p, field, item)
if item && item.respond_to?(:fq) && item.fq
Array(item.fq).each do |f, v|
add_facet_param(p, f, v)
end
end
p
end
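    # A minimal illustrative sketch (assumes a facet field configured with key
    # "format"; the names and values below are hypothetical, not from this codebase):
    #
    #   state = Blacklight::SearchState.new({ q: 'maps' }, blacklight_config)
    #   state.add_facet_params('format', 'Book')
    #   # => { "q" => "maps", "f" => { "format" => ["Book"] } }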
# Used in catalog/facet action, facets.rb view, for a click
# on a facet value. Add on the facet params to existing
# search constraints. Remove any paginator-specific request
# params, or other request params that should be removed
# for a 'fresh' display.
# Change the action to 'index' to send them back to
# catalog/index with their new facet choice.
def add_facet_params_and_redirect(field, item)
new_params = add_facet_params(field, item)
# Delete any request params from facet-specific action, needed
# to redir to index action properly.
request_keys = blacklight_config.facet_paginator_class.request_keys
new_params.extract!(*request_keys.values)
new_params
end
# copies the current params (or whatever is passed in as the 3rd arg)
# removes the field value from params[:f]
# removes the field if there are no more values in params[:f][field]
# removes additional params (page, id, etc..)
def remove_facet_params(field, item)
if item.respond_to? :field
field = item.field
end
facet_config = facet_configuration_for_field(field)
url_field = facet_config.key
value = facet_value_for_facet_item(item)
p = reset_search_params
# need to dup the facet values too,
# if the values aren't dup'd, then the values
      # from the session will get removed in the show view...
p[:f] = (p[:f] || {}).dup
p[:f][url_field] = (p[:f][url_field] || []).dup
collection = p[:f][url_field]
# collection should be an array, because we link to ?f[key][]=value,
      # however, Facebook (and maybe some other PHP tools) transform that parameter
# into ?f[key][0]=value, which Rails interprets as a Hash.
if collection.is_a? Hash
collection = collection.values
end
p[:f][url_field] = collection - [value]
p[:f].delete(url_field) if p[:f][url_field].empty?
p.delete(:f) if p[:f].empty?
p
end
# Merge the source params with the params_to_merge hash
# @param [Hash] params_to_merge to merge into above
# @return [ActionController::Parameters] the current search parameters after being sanitized by Blacklight::Parameters.sanitize
# @yield [params] The merged parameters hash before being sanitized
def params_for_search(params_to_merge = {})
# params hash we'll return
my_params = params.dup.merge(self.class.new(params_to_merge, blacklight_config, controller))
if block_given?
yield my_params
end
if my_params[:page] && (my_params[:per_page] != params[:per_page] || my_params[:sort] != params[:sort])
my_params[:page] = 1
end
Parameters.sanitize(my_params)
end
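    # Illustrative behaviour sketch (hypothetical values): when a page is set and the
    # sort or per_page changes, pagination is reset, e.g.
    #
    #   state = Blacklight::SearchState.new({ q: 'maps', page: 3, sort: 'score' }, blacklight_config)
    #   state.params_for_search(sort: 'year')[:page]
    #   # => 1, because the sort changed while a page was present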
private
##
# Reset any search parameters that store search context
# and need to be reset when e.g. constraints change
# @return [ActionController::Parameters]
def reset_search_params
Parameters.sanitize(params).except(:page, :counter)
end
# TODO: this code is duplicated in Blacklight::FacetsHelperBehavior
def facet_value_for_facet_item item
if item.respond_to? :value
item.value
else
item
end
end
def add_facet_param(p, field, item)
if item.respond_to? :field
field = item.field
end
facet_config = facet_configuration_for_field(field)
url_field = facet_config.key
value = facet_value_for_facet_item(item)
p[:f] = (p[:f] || {}).dup # the command above is not deep in rails3, !@#$!@#$
p[:f][url_field] = (p[:f][url_field] || []).dup
if facet_config.single && p[:f][url_field].present?
p[:f][url_field] = []
end
p[:f][url_field].push(value)
end
end
end
| 1 | 6,900 | Lint/UnneededDisable: Unnecessary disabling of Naming/PredicateName. | projectblacklight-blacklight | rb |
@@ -58,6 +58,8 @@ describe ProposalsController do
end
it 'should redirect random users' do
+ skip "flaky spec"
+
proposal = FactoryGirl.create(:proposal)
get :show, id: proposal.id
expect(response).to redirect_to(proposals_path) | 1 | describe ProposalsController do
include ReturnToHelper
let(:user) { FactoryGirl.create(:user) }
describe '#index' do
before do
login_as(user)
end
it 'sets data fields' do
proposal1 = FactoryGirl.create(:proposal, requester: user)
proposal2 = FactoryGirl.create(:proposal)
proposal2.individual_approvals.create!(user: user, status: 'actionable')
get :index
expect(assigns(:pending_data).rows.sort).to eq [proposal1, proposal2]
expect(assigns(:approved_data).rows.sort).to be_empty
expect(assigns(:cancelled_data).rows.sort).to be_empty
end
end
describe '#archive' do
before do
login_as(user)
end
it 'should show all the closed proposals' do
2.times.map do |i|
FactoryGirl.create(:proposal, requester: user, status: 'approved')
end
FactoryGirl.create(:proposal, requester: user)
get :archive
expect(assigns(:proposals_data).rows.size).to eq(2)
end
context 'smoke test' do
render_views
it 'does not explode' do
get :archive
end
end
end
describe '#show' do
before do
login_as(user)
end
context 'visitors' do
it 'should allow the requester to see it' do
proposal = FactoryGirl.create(:proposal, requester: user)
get :show, id: proposal.id
expect(response).not_to redirect_to("/proposals/")
expect(flash[:alert]).not_to be_present
end
it 'should redirect random users' do
proposal = FactoryGirl.create(:proposal)
get :show, id: proposal.id
expect(response).to redirect_to(proposals_path)
expect(flash[:alert]).to be_present
end
end
context 'admins' do
let(:proposal) { FactoryGirl.create(:proposal, requester_id: 5555, client_data_type: 'SomeCompany::SomethingApprovable') }
before do
expect(Proposal).to receive(:client_model_names).and_return(['SomeCompany::SomethingApprovable'])
expect(Proposal).to receive(:client_slugs).and_return(%w(some_company some_other_company))
end
after do
ENV['ADMIN_EMAILS'] = ''
ENV['CLIENT_ADMIN_EMAILS'] = ''
end
it "allows admins to view requests of same client" do
ENV['CLIENT_ADMIN_EMAILS'] = user.email_address
user.update_attributes!(client_slug: 'some_company')
get :show, id: proposal.id
expect(response).not_to redirect_to(proposals_path)
expect(response.request.fullpath).to eq(proposal_path proposal.id)
end
it "allows app admins to view requests outside of related client" do
user.update_attributes!(client_slug: 'some_other_company')
ENV['ADMIN_EMAILS'] = "#{user.email_address}"
get :show, id: proposal.id
expect(response).not_to redirect_to(proposals_path)
expect(response.request.fullpath).to eq(proposal_path proposal.id)
end
end
end
describe '#query' do
let!(:proposal) { FactoryGirl.create(:proposal, requester: user) }
before do
login_as(user)
end
it 'should only include proposals user is a part of' do
get :query
expect(assigns(:proposals_data).rows).to eq([proposal])
end
it 'should filter results by date range' do
past_proposal = FactoryGirl.create(
:proposal, created_at: Date.new(2012, 5, 6), requester: user)
get :query
expect(assigns(:proposals_data).rows).to eq([proposal, past_proposal])
get :query, start_date: '2012-05-04', end_date: '2012-05-07'
expect(assigns(:proposals_data).rows).to eq([past_proposal])
get :query, start_date: '2012-05-04', end_date: '2012-05-06'
expect(assigns(:proposals_data).rows).to eq([])
end
it 'ignores bad input' do
get :query, start_date: 'dasdas'
expect(assigns(:proposals_data).rows).to eq([proposal])
end
context "#datespan_header" do
render_views
it 'has a nice header for month spans' do
get :query, start_date: '2012-05-01', end_date: '2012-06-01'
expect(response.body).to include("May 2012")
end
it 'has a generic header for other dates' do
get :query, start_date: '2012-05-02', end_date: '2012-06-02'
expect(response.body).to include("2012-05-02 - 2012-06-02")
end
end
context 'search' do
it 'plays nicely with TabularData' do
double, single, triple = 3.times.map { FactoryGirl.create(:proposal, requester: user) }
double.update(public_id: 'AAA AAA')
single.update(public_id: 'AAA')
triple.update(public_id: 'AAA AAA AAA')
get :query, text: "AAA"
query = assigns(:proposals_data).rows
expect(query.length).to be(3)
expect(query[0].id).to be(triple.id)
expect(query[1].id).to be(double.id)
expect(query[2].id).to be(single.id)
end
end
end
describe '#cancel_form' do
let(:proposal) { FactoryGirl.create(:proposal) }
it 'should allow the requester to see it' do
login_as(user)
proposal.update_attributes(requester_id: user.id)
get :show, id: proposal.id
expect(response).not_to redirect_to("/proposals/")
expect(flash[:alert]).not_to be_present
end
it 'should redirect random users' do
login_as(user)
get :cancel_form, id: proposal.id
expect(response).to redirect_to(proposal_path)
expect(flash[:alert]).to eq 'You are not the requester'
end
it 'should redirect for cancelled requests' do
proposal.update_attributes(status:'cancelled')
login_as(proposal.requester)
get :cancel_form, id: proposal.id
expect(response).to redirect_to(proposal_path proposal.id)
expect(flash[:alert]).to eq 'Sorry, this proposal has been cancelled.'
end
end
describe "#cancel" do
let!(:proposal) { FactoryGirl.create(:proposal, requester: user) }
before do
login_as(user)
end
it 'sends a cancellation email' do
mock_dispatcher = double('dispatcher').as_null_object
allow(Dispatcher).to receive(:new).and_return(mock_dispatcher)
expect(mock_dispatcher).to receive(:deliver_cancellation_emails)
post :cancel, id: proposal.id, reason_input:'My test cancellation text'
end
end
describe '#approve' do
it "signs the user in via the token" do
proposal = FactoryGirl.create(:proposal, :with_approver)
approval = proposal.individual_approvals.first
token = approval.create_api_token!
post :approve, id: proposal.id, cch: token.access_token
expect(controller.send(:current_user)).to eq(approval.user)
end
it "won't sign the user in via the token if delegated" do
proposal = FactoryGirl.create(:proposal, :with_approver)
approval = proposal.individual_approvals.first
token = approval.create_api_token!
approval.user.add_delegate(FactoryGirl.create(:user))
post :approve, id: proposal.id, cch: token.access_token
# TODO simplify this check
expect(response).to redirect_to(root_path(return_to: self.make_return_to("Previous", request.fullpath)))
end
it "won't allow a missing token when using GET" do
proposal = FactoryGirl.create(:proposal, :with_approver)
login_as(proposal.approvers.first)
get :approve, id: proposal.id
expect(response).to have_http_status(403)
end
it "will allow action if the token is valid" do
proposal = FactoryGirl.create(:proposal, :with_approver)
approval = proposal.individual_approvals.first
token = approval.create_api_token!
get :approve, id: proposal.id, cch: token.access_token
approval.reload
expect(approval.approved?).to be(true)
end
it "doesn't allow a token to be reused" do
proposal = FactoryGirl.create(:proposal, :with_approver)
approval = proposal.individual_approvals.first
token = approval.create_api_token!
token.use!
get :approve, id: proposal.id, cch: token.access_token
expect(flash[:alert]).to include("Please sign in")
end
it "won't allow the approval to be approved twice through the web ui" do
proposal = FactoryGirl.create(:proposal, :with_approver)
login_as(proposal.approvers.first)
post :approve, id: proposal.id
expect(proposal.reload.approved?).to be true
expect(flash[:success]).not_to be_nil
expect(flash[:alert]).to be_nil
flash.clear
post :approve, id: proposal.id
expect(flash[:success]).to be_nil
expect(flash[:alert]).not_to be_nil
end
it "won't allow different delegates to approve" do
proposal = FactoryGirl.create(:proposal, :with_approver)
delegate1, delegate2 = FactoryGirl.create(:user), FactoryGirl.create(:user)
mailbox = proposal.approvers.first
mailbox.add_delegate(delegate1)
mailbox.add_delegate(delegate2)
login_as(delegate1)
post :approve, id: proposal.id
expect(flash[:success]).not_to be_nil
expect(flash[:alert]).to be_nil
flash.clear
login_as(delegate2)
post :approve, id: proposal.id
expect(flash[:success]).to be_nil
expect(flash[:alert]).not_to be_nil
end
it "allows a delegate to approve via the web UI" do
proposal = FactoryGirl.create(:proposal, :with_serial_approvers)
mailbox = proposal.approvers.second
delegate = FactoryGirl.create(:user)
mailbox.add_delegate(delegate)
proposal.individual_approvals.first.approve!
login_as(delegate)
post :approve, id: proposal.id
expect(flash[:success]).not_to be_nil
expect(flash[:alert]).to be_nil
expect(proposal.reload.approved?).to be true
end
end
end
| 1 | 13,757 | Should this be removed now? | 18F-C2 | rb |
@@ -25,6 +25,8 @@ public abstract class MockGrpcMethodView {
public abstract String responseTypeName();
+ public abstract String streamHandle();
+
public abstract GrpcStreamingType grpcStreamingType();
public static Builder newBuilder() { | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.viewmodel.testing;
import com.google.api.codegen.config.GrpcStreamingConfig.GrpcStreamingType;
import com.google.auto.value.AutoValue;
@AutoValue
public abstract class MockGrpcMethodView {
public abstract String name();
public abstract String requestTypeName();
public abstract String responseTypeName();
public abstract GrpcStreamingType grpcStreamingType();
public static Builder newBuilder() {
return new AutoValue_MockGrpcMethodView.Builder()
.grpcStreamingType(GrpcStreamingType.NonStreaming);
}
@AutoValue.Builder
public abstract static class Builder {
public abstract Builder name(String val);
public abstract Builder requestTypeName(String val);
public abstract Builder responseTypeName(String val);
public abstract Builder grpcStreamingType(GrpcStreamingType val);
public abstract MockGrpcMethodView build();
}
}
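// Illustrative builder usage for the class as defined above (the values are
// hypothetical, not taken from any real configuration):
//
//   MockGrpcMethodView view =
//       MockGrpcMethodView.newBuilder()
//           .name("echo")
//           .requestTypeName("EchoRequest")
//           .responseTypeName("EchoResponse")
//           .build();
//
// grpcStreamingType does not need to be set explicitly because newBuilder()
// defaults it to GrpcStreamingType.NonStreaming.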
| 1 | 19,287 | streamHandle sounds vague to me. Can you be more specific? | googleapis-gapic-generator | java |
@@ -736,7 +736,8 @@ Cursor.prototype._initializeCursor = function(callback) {
return;
}
- server.command(cursor.ns, cursor.cmd, cursor.options, queryCallback);
+ const commandOptions = Object.assign({}, cursor.options, cursor.cursorState);
+ server.command(cursor.ns, cursor.cmd, commandOptions, queryCallback);
});
};
| 1 | 'use strict';
const Logger = require('./connection/logger');
const retrieveBSON = require('./connection/utils').retrieveBSON;
const MongoError = require('./error').MongoError;
const MongoNetworkError = require('./error').MongoNetworkError;
const mongoErrorContextSymbol = require('./error').mongoErrorContextSymbol;
const f = require('util').format;
const collationNotSupported = require('./utils').collationNotSupported;
const ReadPreference = require('./topologies/read_preference');
const isUnifiedTopology = require('./utils').isUnifiedTopology;
const BSON = retrieveBSON();
const Long = BSON.Long;
/**
* This is a cursor results callback
*
* @callback resultCallback
* @param {error} error An error object. Set to null if no error present
* @param {object} document
*/
/**
* @fileOverview The **Cursor** class is an internal class that embodies a cursor on MongoDB
* allowing for iteration over the results returned from the underlying query.
*
* **CURSORS Cannot directly be instantiated**
*/
/**
* Creates a new Cursor, not to be used directly
* @class
* @param {object} topology The server topology instance.
* @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
* @param {{object}|Long} cmd The selector (can be a command or a cursorId)
* @param {object} [options=null] Optional settings.
* @param {object} [options.batchSize=1000] Batchsize for the operation
* @param {array} [options.documents=[]] Initial documents list for cursor
* @param {object} [options.transforms=null] Transform methods for the cursor results
* @param {function} [options.transforms.query] Transform the value returned from the initial query
* @param {function} [options.transforms.doc] Transform each document returned from Cursor.prototype.next
* @return {Cursor} A cursor instance
* @property {number} cursorBatchSize The current cursorBatchSize for the cursor
* @property {number} cursorLimit The current cursorLimit for the cursor
* @property {number} cursorSkip The current cursorSkip for the cursor
*/
var Cursor = function(topology, ns, cmd, options) {
options = options || {};
// Cursor pool
this.pool = null;
// Cursor server
this.server = null;
// Do we have a not connected handler
this.disconnectHandler = options.disconnectHandler;
// Set local values
this.bson = topology.s.bson;
this.ns = ns;
this.cmd = cmd;
this.options = options;
this.topology = topology;
// All internal state
this.cursorState = {
cursorId: null,
cmd: cmd,
documents: options.documents || [],
cursorIndex: 0,
dead: false,
killed: false,
init: false,
notified: false,
limit: options.limit || cmd.limit || 0,
skip: options.skip || cmd.skip || 0,
batchSize: options.batchSize || cmd.batchSize || 1000,
currentLimit: 0,
// Result field name if not a cursor (contains the array of results)
transforms: options.transforms,
raw: options.raw || (cmd && cmd.raw)
};
if (typeof options.session === 'object') {
this.cursorState.session = options.session;
}
// Add promoteLong to cursor state
const topologyOptions = topology.s.options;
if (typeof topologyOptions.promoteLongs === 'boolean') {
this.cursorState.promoteLongs = topologyOptions.promoteLongs;
} else if (typeof options.promoteLongs === 'boolean') {
this.cursorState.promoteLongs = options.promoteLongs;
}
// Add promoteValues to cursor state
if (typeof topologyOptions.promoteValues === 'boolean') {
this.cursorState.promoteValues = topologyOptions.promoteValues;
} else if (typeof options.promoteValues === 'boolean') {
this.cursorState.promoteValues = options.promoteValues;
}
// Add promoteBuffers to cursor state
if (typeof topologyOptions.promoteBuffers === 'boolean') {
this.cursorState.promoteBuffers = topologyOptions.promoteBuffers;
} else if (typeof options.promoteBuffers === 'boolean') {
this.cursorState.promoteBuffers = options.promoteBuffers;
}
if (topologyOptions.reconnect) {
this.cursorState.reconnect = topologyOptions.reconnect;
}
// Logger
this.logger = Logger('Cursor', topologyOptions);
//
// Did we pass in a cursor id
if (typeof cmd === 'number') {
this.cursorState.cursorId = Long.fromNumber(cmd);
this.cursorState.lastCursorId = this.cursorState.cursorId;
} else if (cmd instanceof Long) {
this.cursorState.cursorId = cmd;
this.cursorState.lastCursorId = cmd;
}
};
Cursor.prototype.setCursorBatchSize = function(value) {
this.cursorState.batchSize = value;
};
Cursor.prototype.cursorBatchSize = function() {
return this.cursorState.batchSize;
};
Cursor.prototype.setCursorLimit = function(value) {
this.cursorState.limit = value;
};
Cursor.prototype.cursorLimit = function() {
return this.cursorState.limit;
};
Cursor.prototype.setCursorSkip = function(value) {
this.cursorState.skip = value;
};
Cursor.prototype.cursorSkip = function() {
return this.cursorState.skip;
};
Cursor.prototype._endSession = function(options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options = options || {};
const session = this.cursorState.session;
if (session && (options.force || session.owner === this)) {
this.cursorState.session = undefined;
session.endSession(callback);
return true;
}
if (callback) {
callback();
}
return false;
};
//
// Handle callback (including any exceptions thrown)
var handleCallback = function(callback, err, result) {
try {
callback(err, result);
} catch (err) {
process.nextTick(function() {
throw err;
});
}
};
// Internal methods
Cursor.prototype._getMore = function(callback) {
if (this.logger.isDebug())
this.logger.debug(f('schedule getMore call for query [%s]', JSON.stringify(this.query)));
// Set the current batchSize
var batchSize = this.cursorState.batchSize;
if (
this.cursorState.limit > 0 &&
this.cursorState.currentLimit + batchSize > this.cursorState.limit
) {
batchSize = this.cursorState.limit - this.cursorState.currentLimit;
}
this.server.getMore(this.ns, this.cursorState, batchSize, this.options, callback);
};
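// Worked example of the clamping above (numbers are illustrative only): with
// cursorState.limit = 100, cursorState.currentLimit = 95 and a configured
// batchSize of 20, the getMore is issued with batchSize = 5, so the cursor
// never fetches more documents than the imposed limit.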
/**
* Clone the cursor
* @method
* @return {Cursor}
*/
Cursor.prototype.clone = function() {
return this.topology.cursor(this.ns, this.cmd, this.options);
};
/**
* Checks if the cursor is dead
* @method
* @return {boolean} A boolean signifying if the cursor is dead or not
*/
Cursor.prototype.isDead = function() {
return this.cursorState.dead === true;
};
/**
* Checks if the cursor was killed by the application
* @method
* @return {boolean} A boolean signifying if the cursor was killed by the application
*/
Cursor.prototype.isKilled = function() {
return this.cursorState.killed === true;
};
/**
 * Checks if the cursor notified its caller about its death
* @method
* @return {boolean} A boolean signifying if the cursor notified the callback
*/
Cursor.prototype.isNotified = function() {
return this.cursorState.notified === true;
};
/**
* Returns current buffered documents length
* @method
* @return {number} The number of items in the buffered documents
*/
Cursor.prototype.bufferedCount = function() {
return this.cursorState.documents.length - this.cursorState.cursorIndex;
};
/**
* Returns current buffered documents
* @method
* @return {Array} An array of buffered documents
*/
Cursor.prototype.readBufferedDocuments = function(number) {
var unreadDocumentsLength = this.cursorState.documents.length - this.cursorState.cursorIndex;
var length = number < unreadDocumentsLength ? number : unreadDocumentsLength;
var elements = this.cursorState.documents.slice(
this.cursorState.cursorIndex,
this.cursorState.cursorIndex + length
);
// Transform the doc with passed in transformation method if provided
if (this.cursorState.transforms && typeof this.cursorState.transforms.doc === 'function') {
// Transform all the elements
for (var i = 0; i < elements.length; i++) {
elements[i] = this.cursorState.transforms.doc(elements[i]);
}
}
// Ensure we do not return any more documents than the limit imposed
// Just return the number of elements up to the limit
if (
this.cursorState.limit > 0 &&
this.cursorState.currentLimit + elements.length > this.cursorState.limit
) {
elements = elements.slice(0, this.cursorState.limit - this.cursorState.currentLimit);
this.kill();
}
// Adjust current limit
this.cursorState.currentLimit = this.cursorState.currentLimit + elements.length;
this.cursorState.cursorIndex = this.cursorState.cursorIndex + elements.length;
// Return elements
return elements;
};
/**
* Kill the cursor
* @method
* @param {resultCallback} callback A callback function
*/
Cursor.prototype.kill = function(callback) {
// Set cursor to dead
this.cursorState.dead = true;
this.cursorState.killed = true;
// Remove documents
this.cursorState.documents = [];
// If no cursor id just return
if (
this.cursorState.cursorId == null ||
this.cursorState.cursorId.isZero() ||
this.cursorState.init === false
) {
if (callback) callback(null, null);
return;
}
this.server.killCursors(this.ns, this.cursorState, callback);
};
/**
* Resets the cursor
* @method
* @return {null}
*/
Cursor.prototype.rewind = function() {
if (this.cursorState.init) {
if (!this.cursorState.dead) {
this.kill();
}
this.cursorState.currentLimit = 0;
this.cursorState.init = false;
this.cursorState.dead = false;
this.cursorState.killed = false;
this.cursorState.notified = false;
this.cursorState.documents = [];
this.cursorState.cursorId = null;
this.cursorState.cursorIndex = 0;
}
};
/**
* Validate if the pool is dead and return error
*/
var isConnectionDead = function(self, callback) {
if (self.pool && self.pool.isDestroyed()) {
self.cursorState.killed = true;
const err = new MongoNetworkError(
f('connection to host %s:%s was destroyed', self.pool.host, self.pool.port)
);
_setCursorNotifiedImpl(self, () => callback(err));
return true;
}
return false;
};
/**
* Validate if the cursor is dead but was not explicitly killed by user
*/
var isCursorDeadButNotkilled = function(self, callback) {
// Cursor is dead but not marked killed, return null
if (self.cursorState.dead && !self.cursorState.killed) {
self.cursorState.killed = true;
setCursorNotified(self, callback);
return true;
}
return false;
};
/**
* Validate if the cursor is dead and was killed by user
*/
var isCursorDeadAndKilled = function(self, callback) {
if (self.cursorState.dead && self.cursorState.killed) {
handleCallback(callback, new MongoError('cursor is dead'));
return true;
}
return false;
};
/**
* Validate if the cursor was killed by the user
*/
var isCursorKilled = function(self, callback) {
if (self.cursorState.killed) {
setCursorNotified(self, callback);
return true;
}
return false;
};
/**
* Mark cursor as being dead and notified
*/
var setCursorDeadAndNotified = function(self, callback) {
self.cursorState.dead = true;
setCursorNotified(self, callback);
};
/**
* Mark cursor as being notified
*/
var setCursorNotified = function(self, callback) {
_setCursorNotifiedImpl(self, () => handleCallback(callback, null, null));
};
var _setCursorNotifiedImpl = function(self, callback) {
self.cursorState.notified = true;
self.cursorState.documents = [];
self.cursorState.cursorIndex = 0;
if (self._endSession) {
return self._endSession(undefined, () => callback());
}
return callback();
};
var nextFunction = function(self, callback) {
// We have notified about it
if (self.cursorState.notified) {
return callback(new Error('cursor is exhausted'));
}
// Cursor is killed return null
if (isCursorKilled(self, callback)) return;
// Cursor is dead but not marked killed, return null
if (isCursorDeadButNotkilled(self, callback)) return;
// We have a dead and killed cursor, attempting to call next should error
if (isCursorDeadAndKilled(self, callback)) return;
// We have just started the cursor
if (!self.cursorState.init) {
// Topology is not connected, save the call in the provided store to be
// Executed at some point when the handler deems it's reconnected
if (!self.topology.isConnected(self.options)) {
// Only need this for single server, because repl sets and mongos
// will always continue trying to reconnect
if (self.topology._type === 'server' && !self.topology.s.options.reconnect) {
// Reconnect is disabled, so we'll never reconnect
return callback(new MongoError('no connection available'));
}
if (self.disconnectHandler != null) {
if (self.topology.isDestroyed()) {
// Topology was destroyed, so don't try to wait for it to reconnect
return callback(new MongoError('Topology was destroyed'));
}
self.disconnectHandler.addObjectAndMethod('cursor', self, 'next', [callback], callback);
return;
}
}
self._initializeCursor((err, result) => {
if (err || result === null) {
callback(err, result);
return;
}
nextFunction(self, callback);
});
return;
}
if (self.cursorState.limit > 0 && self.cursorState.currentLimit >= self.cursorState.limit) {
// Ensure we kill the cursor on the server
self.kill();
// Set cursor in dead and notified state
return setCursorDeadAndNotified(self, callback);
} else if (
self.cursorState.cursorIndex === self.cursorState.documents.length &&
!Long.ZERO.equals(self.cursorState.cursorId)
) {
// Ensure an empty cursor state
self.cursorState.documents = [];
self.cursorState.cursorIndex = 0;
// Check if topology is destroyed
if (self.topology.isDestroyed())
return callback(
new MongoNetworkError('connection destroyed, not possible to instantiate cursor')
);
// Check if connection is dead and return if not possible to
// execute a getMore on this connection
if (isConnectionDead(self, callback)) return;
// Execute the next get more
self._getMore(function(err, doc, connection) {
if (err) {
if (err instanceof MongoError) {
err[mongoErrorContextSymbol].isGetMore = true;
}
return handleCallback(callback, err);
}
if (self.cursorState.cursorId && self.cursorState.cursorId.isZero() && self._endSession) {
self._endSession();
}
// Save the returned connection to ensure all getMore's fire over the same connection
self.connection = connection;
// Tailable cursor getMore result, notify owner about it
// No attempt is made here to retry, this is left to the user of the
// core module to handle to keep core simple
if (
self.cursorState.documents.length === 0 &&
self.cmd.tailable &&
Long.ZERO.equals(self.cursorState.cursorId)
) {
// No more documents in the tailed cursor
return handleCallback(
callback,
new MongoError({
message: 'No more documents in tailed cursor',
tailable: self.cmd.tailable,
awaitData: self.cmd.awaitData
})
);
} else if (
self.cursorState.documents.length === 0 &&
self.cmd.tailable &&
!Long.ZERO.equals(self.cursorState.cursorId)
) {
return nextFunction(self, callback);
}
if (self.cursorState.limit > 0 && self.cursorState.currentLimit >= self.cursorState.limit) {
return setCursorDeadAndNotified(self, callback);
}
nextFunction(self, callback);
});
} else if (
self.cursorState.documents.length === self.cursorState.cursorIndex &&
self.cmd.tailable &&
Long.ZERO.equals(self.cursorState.cursorId)
) {
return handleCallback(
callback,
new MongoError({
message: 'No more documents in tailed cursor',
tailable: self.cmd.tailable,
awaitData: self.cmd.awaitData
})
);
} else if (
self.cursorState.documents.length === self.cursorState.cursorIndex &&
Long.ZERO.equals(self.cursorState.cursorId)
) {
setCursorDeadAndNotified(self, callback);
} else {
if (self.cursorState.limit > 0 && self.cursorState.currentLimit >= self.cursorState.limit) {
// Ensure we kill the cursor on the server
self.kill();
// Set cursor in dead and notified state
return setCursorDeadAndNotified(self, callback);
}
// Increment the current cursor limit
self.cursorState.currentLimit += 1;
// Get the document
var doc = self.cursorState.documents[self.cursorState.cursorIndex++];
// Doc overflow
if (!doc || doc.$err) {
// Ensure we kill the cursor on the server
self.kill();
// Set cursor in dead and notified state
return setCursorDeadAndNotified(self, function() {
handleCallback(callback, new MongoError(doc ? doc.$err : undefined));
});
}
// Transform the doc with passed in transformation method if provided
if (self.cursorState.transforms && typeof self.cursorState.transforms.doc === 'function') {
doc = self.cursorState.transforms.doc(doc);
}
// Return the document
handleCallback(callback, null, doc);
}
};
Cursor.prototype._initializeCursor = function(callback) {
const cursor = this;
// NOTE: this goes away once cursors use `executeOperation`
if (isUnifiedTopology(cursor.topology) && cursor.topology.shouldCheckForSessionSupport()) {
cursor.topology.selectServer(ReadPreference.primaryPreferred, err => {
if (err) {
callback(err);
return;
}
cursor.next(callback);
});
return;
}
// Very explicitly choose what is passed to selectServer
const serverSelectOptions = {};
if (cursor.cursorState.session) {
serverSelectOptions.session = cursor.cursorState.session;
}
if (cursor.options.readPreference) {
serverSelectOptions.readPreference = cursor.options.readPreference;
}
return cursor.topology.selectServer(serverSelectOptions, (err, server) => {
if (err) {
const disconnectHandler = cursor.disconnectHandler;
if (disconnectHandler != null) {
return disconnectHandler.addObjectAndMethod('cursor', cursor, 'next', [callback], callback);
}
return callback(err);
}
cursor.server = server;
cursor.cursorState.init = true;
if (collationNotSupported(cursor.server, cursor.cmd)) {
return callback(new MongoError(`server ${cursor.server.name} does not support collation`));
}
function done(err, result) {
if (
cursor.cursorState.cursorId &&
cursor.cursorState.cursorId.isZero() &&
cursor._endSession
) {
cursor._endSession();
}
if (
cursor.cursorState.documents.length === 0 &&
cursor.cursorState.cursorId &&
cursor.cursorState.cursorId.isZero() &&
!cursor.cmd.tailable &&
!cursor.cmd.awaitData
) {
return setCursorNotified(cursor, callback);
}
callback(err, result);
}
// NOTE: this is a special internal method for cloning a cursor, consider removing
if (cursor.cursorState.cursorId != null) {
return done();
}
const queryCallback = (err, r) => {
if (err) {
return done(err);
}
const result = r.message;
if (result.queryFailure) {
return done(new MongoError(result.documents[0]), null);
}
// Check if we have a command cursor
if (
Array.isArray(result.documents) &&
result.documents.length === 1 &&
(!cursor.cmd.find || (cursor.cmd.find && cursor.cmd.virtual === false)) &&
(typeof result.documents[0].cursor !== 'string' ||
result.documents[0]['$err'] ||
result.documents[0]['errmsg'] ||
Array.isArray(result.documents[0].result))
) {
// We have an error document, return the error
if (result.documents[0]['$err'] || result.documents[0]['errmsg']) {
return done(new MongoError(result.documents[0]), null);
}
// We have a cursor document
if (result.documents[0].cursor != null && typeof result.documents[0].cursor !== 'string') {
var id = result.documents[0].cursor.id;
// If we have a namespace change set the new namespace for getmores
if (result.documents[0].cursor.ns) {
cursor.ns = result.documents[0].cursor.ns;
}
// Promote id to long if needed
cursor.cursorState.cursorId = typeof id === 'number' ? Long.fromNumber(id) : id;
cursor.cursorState.lastCursorId = cursor.cursorState.cursorId;
cursor.cursorState.operationTime = result.documents[0].operationTime;
// If we have a firstBatch set it
if (Array.isArray(result.documents[0].cursor.firstBatch)) {
cursor.cursorState.documents = result.documents[0].cursor.firstBatch; //.reverse();
}
// Return after processing command cursor
return done(null, result);
}
if (Array.isArray(result.documents[0].result)) {
cursor.cursorState.documents = result.documents[0].result;
cursor.cursorState.cursorId = Long.ZERO;
return done(null, result);
}
}
// Otherwise fall back to regular find path
const cursorId = result.cursorId || 0;
cursor.cursorState.cursorId = cursorId instanceof Long ? cursorId : Long.fromNumber(cursorId);
cursor.cursorState.documents = result.documents;
cursor.cursorState.lastCursorId = result.cursorId;
// Transform the results with passed in transformation method if provided
if (
cursor.cursorState.transforms &&
typeof cursor.cursorState.transforms.query === 'function'
) {
cursor.cursorState.documents = cursor.cursorState.transforms.query(result);
}
done(null, result);
};
if (cursor.logger.isDebug()) {
cursor.logger.debug(
`issue initial query [${JSON.stringify(cursor.cmd)}] with flags [${JSON.stringify(
cursor.query
)}]`
);
}
if (cursor.cmd.find != null) {
server.query(cursor.ns, cursor.cmd, cursor.cursorState, cursor.options, queryCallback);
return;
}
server.command(cursor.ns, cursor.cmd, cursor.options, queryCallback);
});
};
/**
* Retrieve the next document from the cursor
* @method
* @param {resultCallback} callback A callback function
*/
Cursor.prototype.next = function(callback) {
nextFunction(this, callback);
};
module.exports = Cursor;
| 1 | 16,020 | Does everything on cursorState belong in the command options? | mongodb-node-mongodb-native | js |
@@ -559,7 +559,7 @@ func (m *MatchNot) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
for d.Next() {
var mp matcherPair
matcherMap := make(map[string]RequestMatcher)
- for d.NextBlock(0) {
+ for d.NextArg() || d.NextBlock(0) {
matcherName := d.Val()
mod, err := caddy.GetModule("http.matchers." + matcherName)
if err != nil { | 1 | // Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"encoding/json"
"fmt"
"log"
"net"
"net/http"
"net/textproto"
"net/url"
"path/filepath"
"regexp"
"strings"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
type (
// MatchHost matches requests by the Host value (case-insensitive).
//
// When used in a top-level HTTP route,
// [qualifying domain names](/docs/automatic-https#hostname-requirements)
// may trigger [automatic HTTPS](/docs/automatic-https), which automatically
// provisions and renews certificates for you. Before doing this, you
// should ensure that DNS records for these domains are properly configured,
// especially A/AAAA pointed at your server.
//
// Automatic HTTPS can be
// [customized or disabled](/docs/modules/http#servers/automatic_https).
//
// Wildcards (`*`) may be used to represent exactly one label of the
// hostname, in accordance with RFC 1034 (because host matchers are also
// used for automatic HTTPS which influences TLS certificates). Thus,
// a host of `*` matches hosts like `localhost` or `internal` but not
// `example.com`. To catch all hosts, omit the host matcher entirely.
//
// The wildcard can be useful for matching all subdomains, for example:
// `*.example.com` matches `foo.example.com` but not `foo.bar.example.com`.
MatchHost []string
// MatchPath matches requests by the URI's path (case-insensitive). Path
// matches are exact, but wildcards may be used:
//
// - At the end, for a prefix match (`/prefix/*`)
// - At the beginning, for a suffix match (`*.suffix`)
// - On both sides, for a substring match (`*/contains/*`)
// - In the middle, for a globular match (`/accounts/*/info`)
//
// This matcher is fast, so it does not support regular expressions or
// capture groups. For slower but more powerful matching, use the
// path_regexp matcher.
MatchPath []string
// MatchPathRE matches requests by a regular expression on the URI's path.
//
// Upon a match, it adds placeholders to the request: `{http.regexp.name.capture_group}`
// where `name` is the regular expression's name, and `capture_group` is either
// the named or positional capture group from the expression itself. If no name
// is given, then the placeholder omits the name: `{http.regexp.capture_group}`
// (potentially leading to collisions).
MatchPathRE struct{ MatchRegexp }
// MatchMethod matches requests by the method.
MatchMethod []string
// MatchQuery matches requests by URI's query string.
MatchQuery url.Values
// MatchHeader matches requests by header fields. It performs fast,
// exact string comparisons of the field values. Fast prefix, suffix,
// and substring matches can also be done by suffixing, prefixing, or
// surrounding the value with the wildcard `*` character, respectively.
// If a list is null, the header must not exist. If the list is empty,
// the field must simply exist, regardless of its value.
MatchHeader http.Header
// MatchHeaderRE matches requests by a regular expression on header fields.
//
// Upon a match, it adds placeholders to the request: `{http.regexp.name.capture_group}`
// where `name` is the regular expression's name, and `capture_group` is either
// the named or positional capture group from the expression itself. If no name
// is given, then the placeholder omits the name: `{http.regexp.capture_group}`
// (potentially leading to collisions).
MatchHeaderRE map[string]*MatchRegexp
// MatchProtocol matches requests by protocol.
MatchProtocol string
// MatchRemoteIP matches requests by client IP (or CIDR range).
MatchRemoteIP struct {
Ranges []string `json:"ranges,omitempty"`
cidrs []*net.IPNet
}
// MatchNot matches requests by negating the results of its matcher
// sets. A single "not" matcher takes one or more matcher sets. Each
// matcher set is OR'ed; in other words, if any matcher set returns
// true, the final result of the "not" matcher is false. Individual
// matchers within a set work the same (i.e. different matchers in
// the same set are AND'ed).
MatchNot struct {
MatcherSetsRaw []caddy.ModuleMap `json:"-" caddy:"namespace=http.matchers"`
MatcherSets []MatcherSet `json:"-"`
}
)
func init() {
caddy.RegisterModule(MatchHost{})
caddy.RegisterModule(MatchPath{})
caddy.RegisterModule(MatchPathRE{})
caddy.RegisterModule(MatchMethod{})
caddy.RegisterModule(MatchQuery{})
caddy.RegisterModule(MatchHeader{})
caddy.RegisterModule(MatchHeaderRE{})
caddy.RegisterModule(new(MatchProtocol))
caddy.RegisterModule(MatchRemoteIP{})
caddy.RegisterModule(MatchNot{})
}
// CaddyModule returns the Caddy module information.
func (MatchHost) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.host",
New: func() caddy.Module { return new(MatchHost) },
}
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchHost) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
for d.Next() {
*m = append(*m, d.RemainingArgs()...)
}
return nil
}
// Match returns true if r matches m.
func (m MatchHost) Match(r *http.Request) bool {
reqHost, _, err := net.SplitHostPort(r.Host)
if err != nil {
// OK; probably didn't have a port
reqHost = r.Host
// make sure we strip the brackets from IPv6 addresses
reqHost = strings.TrimPrefix(reqHost, "[")
reqHost = strings.TrimSuffix(reqHost, "]")
}
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
outer:
for _, host := range m {
host = repl.ReplaceAll(host, "")
if strings.Contains(host, "*") {
patternParts := strings.Split(host, ".")
incomingParts := strings.Split(reqHost, ".")
if len(patternParts) != len(incomingParts) {
continue
}
for i := range patternParts {
if patternParts[i] == "*" {
continue
}
if !strings.EqualFold(patternParts[i], incomingParts[i]) {
continue outer
}
}
return true
} else if strings.EqualFold(reqHost, host) {
return true
}
}
return false
}
// CaddyModule returns the Caddy module information.
func (MatchPath) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.path",
New: func() caddy.Module { return new(MatchPath) },
}
}
// Provision lower-cases the paths in m to ensure case-insensitive matching.
func (m MatchPath) Provision(_ caddy.Context) error {
for i := range m {
m[i] = strings.ToLower(m[i])
}
return nil
}
// Match returns true if r matches m.
func (m MatchPath) Match(r *http.Request) bool {
lowerPath := strings.ToLower(r.URL.Path)
// see #2917; Windows ignores trailing dots and spaces
// when accessing files (sigh), potentially causing a
// security risk (cry) if PHP files end up being served
// as static files, exposing the source code, instead of
// being matched by *.php to be treated as PHP scripts
lowerPath = strings.TrimRight(lowerPath, ". ")
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
for _, matchPath := range m {
matchPath = repl.ReplaceAll(matchPath, "")
// special case: whole path is wildcard; this is unnecessary
// as it matches all requests, which is the same as no matcher
if matchPath == "*" {
return true
}
// special case: first and last characters are wildcard,
// treat it as a fast substring match
if len(matchPath) > 1 &&
strings.HasPrefix(matchPath, "*") &&
strings.HasSuffix(matchPath, "*") {
if strings.Contains(lowerPath, matchPath[1:len(matchPath)-1]) {
return true
}
continue
}
// special case: first character is a wildcard,
// treat it as a fast suffix match
if strings.HasPrefix(matchPath, "*") {
if strings.HasSuffix(lowerPath, matchPath[1:]) {
return true
}
continue
}
// special case: last character is a wildcard,
// treat it as a fast prefix match
if strings.HasSuffix(matchPath, "*") {
if strings.HasPrefix(lowerPath, matchPath[:len(matchPath)-1]) {
return true
}
continue
}
// for everything else, try globular matching, which also
// is exact matching if there are no glob/wildcard chars;
// can ignore error here because we can't handle it anyway
matches, _ := filepath.Match(matchPath, lowerPath)
if matches {
return true
}
}
return false
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchPath) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
for d.Next() {
*m = append(*m, d.RemainingArgs()...)
}
return nil
}
// CaddyModule returns the Caddy module information.
func (MatchPathRE) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.path_regexp",
New: func() caddy.Module { return new(MatchPathRE) },
}
}
// Match returns true if r matches m.
func (m MatchPathRE) Match(r *http.Request) bool {
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
return m.MatchRegexp.Match(r.URL.Path, repl)
}
// CaddyModule returns the Caddy module information.
func (MatchMethod) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.method",
New: func() caddy.Module { return new(MatchMethod) },
}
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchMethod) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
for d.Next() {
*m = append(*m, d.RemainingArgs()...)
}
return nil
}
// Match returns true if r matches m.
func (m MatchMethod) Match(r *http.Request) bool {
for _, method := range m {
if r.Method == method {
return true
}
}
return false
}
// CaddyModule returns the Caddy module information.
func (MatchQuery) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.query",
New: func() caddy.Module { return new(MatchQuery) },
}
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchQuery) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
if *m == nil {
*m = make(map[string][]string)
}
for d.Next() {
var query string
if !d.Args(&query) {
return d.ArgErr()
}
parts := strings.SplitN(query, "=", 2)
if len(parts) != 2 {
return d.Errf("malformed query matcher token: %s; must be in param=val format", d.Val())
}
url.Values(*m).Set(parts[0], parts[1])
}
return nil
}
// Match returns true if r matches m.
func (m MatchQuery) Match(r *http.Request) bool {
for param, vals := range m {
paramVal, found := r.URL.Query()[param]
if found {
for _, v := range vals {
if paramVal[0] == v || v == "*" {
return true
}
}
}
}
return false
}
// CaddyModule returns the Caddy module information.
func (MatchHeader) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.header",
New: func() caddy.Module { return new(MatchHeader) },
}
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchHeader) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
if *m == nil {
*m = make(map[string][]string)
}
for d.Next() {
var field, val string
if !d.Args(&field, &val) {
return d.Errf("expected both field and value")
}
http.Header(*m).Set(field, val)
}
return nil
}
// Like req.Header.Get(), but that works with Host header.
// go's http module swallows "Host" header.
func getHeader(r *http.Request, field string) []string {
field = textproto.CanonicalMIMEHeaderKey(field)
if field == "Host" {
return []string{r.Host}
}
return r.Header[field]
}
// Match returns true if r matches m.
func (m MatchHeader) Match(r *http.Request) bool {
for field, allowedFieldVals := range m {
actualFieldVals := getHeader(r, field)
if allowedFieldVals != nil && len(allowedFieldVals) == 0 && actualFieldVals != nil {
// a non-nil but empty list of allowed values means
// match if the header field exists at all
continue
}
var match bool
fieldVals:
for _, actualFieldVal := range actualFieldVals {
for _, allowedFieldVal := range allowedFieldVals {
switch {
case allowedFieldVal == "*":
match = true
case strings.HasPrefix(allowedFieldVal, "*") && strings.HasSuffix(allowedFieldVal, "*"):
match = strings.Contains(actualFieldVal, allowedFieldVal[1:len(allowedFieldVal)-1])
case strings.HasPrefix(allowedFieldVal, "*"):
match = strings.HasSuffix(actualFieldVal, allowedFieldVal[1:])
case strings.HasSuffix(allowedFieldVal, "*"):
match = strings.HasPrefix(actualFieldVal, allowedFieldVal[:len(allowedFieldVal)-1])
default:
match = actualFieldVal == allowedFieldVal
}
if match {
break fieldVals
}
}
}
if !match {
return false
}
}
return true
}
// CaddyModule returns the Caddy module information.
func (MatchHeaderRE) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.header_regexp",
New: func() caddy.Module { return new(MatchHeaderRE) },
}
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchHeaderRE) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
if *m == nil {
*m = make(map[string]*MatchRegexp)
}
for d.Next() {
var first, second, third string
if !d.Args(&first, &second) {
return d.ArgErr()
}
var name, field, val string
if d.Args(&third) {
name = first
field = second
val = third
} else {
field = first
val = second
}
(*m)[field] = &MatchRegexp{Pattern: val, Name: name}
}
return nil
}
// Match returns true if r matches m.
func (m MatchHeaderRE) Match(r *http.Request) bool {
for field, rm := range m {
actualFieldVals := getHeader(r, field)
match := false
fieldVal:
for _, actualFieldVal := range actualFieldVals {
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
if rm.Match(actualFieldVal, repl) {
match = true
break fieldVal
}
}
if !match {
return false
}
}
return true
}
// Provision compiles m's regular expressions.
func (m MatchHeaderRE) Provision(ctx caddy.Context) error {
for _, rm := range m {
err := rm.Provision(ctx)
if err != nil {
return err
}
}
return nil
}
// Validate validates m's regular expressions.
func (m MatchHeaderRE) Validate() error {
for _, rm := range m {
err := rm.Validate()
if err != nil {
return err
}
}
return nil
}
// CaddyModule returns the Caddy module information.
func (MatchProtocol) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.protocol",
New: func() caddy.Module { return new(MatchProtocol) },
}
}
// Match returns true if r matches m.
func (m MatchProtocol) Match(r *http.Request) bool {
switch string(m) {
case "grpc":
return r.Header.Get("content-type") == "application/grpc"
case "https":
return r.TLS != nil
case "http":
return r.TLS == nil
}
return false
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchProtocol) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
for d.Next() {
var proto string
if !d.Args(&proto) {
return d.Err("expected exactly one protocol")
}
*m = MatchProtocol(proto)
}
return nil
}
// CaddyModule returns the Caddy module information.
func (MatchNot) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.not",
New: func() caddy.Module { return new(MatchNot) },
}
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchNot) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
// first, unmarshal each matcher in the set from its tokens
type matcherPair struct {
raw caddy.ModuleMap
decoded MatcherSet
}
for d.Next() {
var mp matcherPair
matcherMap := make(map[string]RequestMatcher)
for d.NextBlock(0) {
matcherName := d.Val()
mod, err := caddy.GetModule("http.matchers." + matcherName)
if err != nil {
return d.Errf("getting matcher module '%s': %v", matcherName, err)
}
unm, ok := mod.New().(caddyfile.Unmarshaler)
if !ok {
return d.Errf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName)
}
err = unm.UnmarshalCaddyfile(d.NewFromNextSegment())
if err != nil {
return err
}
rm := unm.(RequestMatcher)
matcherMap[matcherName] = rm
mp.decoded = append(mp.decoded, rm)
}
// we should now have a functional 'not' matcher, but we also
// need to be able to marshal as JSON, otherwise config
// adaptation will be missing the matchers!
mp.raw = make(caddy.ModuleMap)
for name, matcher := range matcherMap {
jsonBytes, err := json.Marshal(matcher)
if err != nil {
return fmt.Errorf("marshaling %T matcher: %v", matcher, err)
}
mp.raw[name] = jsonBytes
}
m.MatcherSetsRaw = append(m.MatcherSetsRaw, mp.raw)
}
return nil
}
// UnmarshalJSON satisfies json.Unmarshaler. It puts the JSON
// bytes directly into m's MatcherSetsRaw field.
func (m *MatchNot) UnmarshalJSON(data []byte) error {
return json.Unmarshal(data, &m.MatcherSetsRaw)
}
// MarshalJSON satisfies json.Marshaler by marshaling
// m's raw matcher sets.
func (m MatchNot) MarshalJSON() ([]byte, error) {
return json.Marshal(m.MatcherSetsRaw)
}
// Provision loads the matcher modules to be negated.
func (m *MatchNot) Provision(ctx caddy.Context) error {
matcherSets, err := ctx.LoadModule(m, "MatcherSetsRaw")
if err != nil {
return fmt.Errorf("loading matcher sets: %v", err)
}
for _, modMap := range matcherSets.([]map[string]interface{}) {
var ms MatcherSet
for _, modIface := range modMap {
ms = append(ms, modIface.(RequestMatcher))
}
m.MatcherSets = append(m.MatcherSets, ms)
}
return nil
}
// Match returns true if r matches m. Since this matcher negates
// the embedded matchers, false is returned if any of its matcher
// sets return true.
func (m MatchNot) Match(r *http.Request) bool {
for _, ms := range m.MatcherSets {
if ms.Match(r) {
return false
}
}
return true
}
// CaddyModule returns the Caddy module information.
func (MatchRemoteIP) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.remote_ip",
New: func() caddy.Module { return new(MatchRemoteIP) },
}
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchRemoteIP) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
for d.Next() {
m.Ranges = append(m.Ranges, d.RemainingArgs()...)
}
return nil
}
// Provision parses m's IP ranges, either from IP or CIDR expressions.
func (m *MatchRemoteIP) Provision(ctx caddy.Context) error {
for _, str := range m.Ranges {
if strings.Contains(str, "/") {
_, ipNet, err := net.ParseCIDR(str)
if err != nil {
return fmt.Errorf("parsing CIDR expression: %v", err)
}
m.cidrs = append(m.cidrs, ipNet)
} else {
ip := net.ParseIP(str)
if ip == nil {
return fmt.Errorf("invalid IP address: %s", str)
}
mask := len(ip) * 8
m.cidrs = append(m.cidrs, &net.IPNet{
IP: ip,
Mask: net.CIDRMask(mask, mask),
})
}
}
return nil
}
func (m MatchRemoteIP) getClientIP(r *http.Request) (net.IP, error) {
var remote string
if fwdFor := r.Header.Get("X-Forwarded-For"); fwdFor != "" {
remote = strings.TrimSpace(strings.Split(fwdFor, ",")[0])
}
if remote == "" {
remote = r.RemoteAddr
}
ipStr, _, err := net.SplitHostPort(remote)
if err != nil {
ipStr = remote // OK; probably didn't have a port
}
ip := net.ParseIP(ipStr)
if ip == nil {
return nil, fmt.Errorf("invalid client IP address: %s", ipStr)
}
return ip, nil
}
// Match returns true if r matches m.
func (m MatchRemoteIP) Match(r *http.Request) bool {
clientIP, err := m.getClientIP(r)
if err != nil {
log.Printf("[ERROR] remote_ip matcher: %v", err)
return false
}
for _, ipRange := range m.cidrs {
if ipRange.Contains(clientIP) {
return true
}
}
return false
}
// MatchRegexp is an embeddable type for matching
// using regular expressions. It adds placeholders
// to the request's replacer.
type MatchRegexp struct {
// A unique name for this regular expression. Optional,
// but useful to prevent overwriting captures from other
// regexp matchers.
Name string `json:"name,omitempty"`
// The regular expression to evaluate, in RE2 syntax,
// which is the same general syntax used by Go, Perl,
// and Python. For details, see
// [Go's regexp package](https://golang.org/pkg/regexp/).
// Captures are accessible via placeholders. Unnamed
// capture groups are exposed as their numeric, 1-based
// index, while named capture groups are available by
// the capture group name.
Pattern string `json:"pattern"`
compiled *regexp.Regexp
phPrefix string
}
// Provision compiles the regular expression.
func (mre *MatchRegexp) Provision(caddy.Context) error {
re, err := regexp.Compile(mre.Pattern)
if err != nil {
return fmt.Errorf("compiling matcher regexp %s: %v", mre.Pattern, err)
}
mre.compiled = re
mre.phPrefix = regexpPlaceholderPrefix
if mre.Name != "" {
mre.phPrefix += "." + mre.Name
}
return nil
}
// Validate ensures mre is set up correctly.
func (mre *MatchRegexp) Validate() error {
if mre.Name != "" && !wordRE.MatchString(mre.Name) {
return fmt.Errorf("invalid regexp name (must contain only word characters): %s", mre.Name)
}
return nil
}
// Match returns true if input matches the compiled regular
// expression in mre. It sets values on the replacer repl
// associated with capture groups, using the given scope
// (namespace).
func (mre *MatchRegexp) Match(input string, repl *caddy.Replacer) bool {
matches := mre.compiled.FindStringSubmatch(input)
if matches == nil {
return false
}
// save all capture groups, first by index
for i, match := range matches {
key := fmt.Sprintf("%s.%d", mre.phPrefix, i)
repl.Set(key, match)
}
// then by name
for i, name := range mre.compiled.SubexpNames() {
if i != 0 && name != "" {
key := fmt.Sprintf("%s.%s", mre.phPrefix, name)
repl.Set(key, matches[i])
}
}
return true
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (mre *MatchRegexp) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
for d.Next() {
args := d.RemainingArgs()
switch len(args) {
case 1:
mre.Pattern = args[0]
case 2:
mre.Name = args[0]
mre.Pattern = args[1]
default:
return d.ArgErr()
}
}
return nil
}
// ResponseMatcher is a type which can determine if an
// HTTP response matches some criteria.
type ResponseMatcher struct {
// If set, one of these status codes would be required.
// A one-digit status can be used to represent all codes
// in that class (e.g. 3 for all 3xx codes).
StatusCode []int `json:"status_code,omitempty"`
// If set, each header specified must be one of the specified values.
Headers http.Header `json:"headers,omitempty"`
}
// Match returns true if the given statusCode and hdr match rm.
func (rm ResponseMatcher) Match(statusCode int, hdr http.Header) bool {
if !rm.matchStatusCode(statusCode) {
return false
}
return rm.matchHeaders(hdr)
}
func (rm ResponseMatcher) matchStatusCode(statusCode int) bool {
if rm.StatusCode == nil {
return true
}
for _, code := range rm.StatusCode {
if StatusCodeMatches(statusCode, code) {
return true
}
}
return false
}
func (rm ResponseMatcher) matchHeaders(hdr http.Header) bool {
for field, allowedFieldVals := range rm.Headers {
actualFieldVals, fieldExists := hdr[textproto.CanonicalMIMEHeaderKey(field)]
if allowedFieldVals != nil && len(allowedFieldVals) == 0 && fieldExists {
// a non-nil but empty list of allowed values means
// match if the header field exists at all
continue
}
var match bool
fieldVals:
for _, actualFieldVal := range actualFieldVals {
for _, allowedFieldVal := range allowedFieldVals {
if actualFieldVal == allowedFieldVal {
match = true
break fieldVals
}
}
}
if !match {
return false
}
}
return true
}
var wordRE = regexp.MustCompile(`\w+`)
const regexpPlaceholderPrefix = "http.regexp"
// Interface guards
var (
_ RequestMatcher = (*MatchHost)(nil)
_ RequestMatcher = (*MatchPath)(nil)
_ RequestMatcher = (*MatchPathRE)(nil)
_ caddy.Provisioner = (*MatchPathRE)(nil)
_ RequestMatcher = (*MatchMethod)(nil)
_ RequestMatcher = (*MatchQuery)(nil)
_ RequestMatcher = (*MatchHeader)(nil)
_ RequestMatcher = (*MatchHeaderRE)(nil)
_ caddy.Provisioner = (*MatchHeaderRE)(nil)
_ RequestMatcher = (*MatchProtocol)(nil)
_ RequestMatcher = (*MatchRemoteIP)(nil)
_ caddy.Provisioner = (*MatchRemoteIP)(nil)
_ RequestMatcher = (*MatchNot)(nil)
_ caddy.Provisioner = (*MatchNot)(nil)
_ caddy.Provisioner = (*MatchRegexp)(nil)
_ caddyfile.Unmarshaler = (*MatchHost)(nil)
_ caddyfile.Unmarshaler = (*MatchPath)(nil)
_ caddyfile.Unmarshaler = (*MatchPathRE)(nil)
_ caddyfile.Unmarshaler = (*MatchMethod)(nil)
_ caddyfile.Unmarshaler = (*MatchQuery)(nil)
_ caddyfile.Unmarshaler = (*MatchHeader)(nil)
_ caddyfile.Unmarshaler = (*MatchHeaderRE)(nil)
_ caddyfile.Unmarshaler = (*MatchProtocol)(nil)
_ caddyfile.Unmarshaler = (*MatchRemoteIP)(nil)
_ json.Marshaler = (*MatchNot)(nil)
_ json.Unmarshaler = (*MatchNot)(nil)
)
| 1 | 14,614 | This is clever, but I do admit I think it's kinda weird. We can go with it for now and fix it later if people complain. | caddyserver-caddy | go |
@@ -31,7 +31,9 @@ import (
)
var (
- gsRegex = regexp.MustCompile(`^gs://([a-z0-9][-_.a-z0-9]*)/(.+)$`)
+ bucket = `([a-z0-9][-_.a-z0-9]*)`
+ bucketRegex = regexp.MustCompile(fmt.Sprintf(`^gs://%s/?$`, bucket))
+ gsRegex = regexp.MustCompile(fmt.Sprintf(`^gs://%s/(.+)$`, bucket))
)
// StorageClient implements domain.StorageClientInterface. It implements main Storage functions | 1 | // Copyright 2019 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storageutils
import (
"context"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"regexp"
"strings"
"cloud.google.com/go/storage"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/domain"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/logging"
"google.golang.org/api/iterator"
)
var (
gsRegex = regexp.MustCompile(`^gs://([a-z0-9][-_.a-z0-9]*)/(.+)$`)
)
// StorageClient implements domain.StorageClientInterface. It implements main Storage functions
// used by image import features.
type StorageClient struct {
ObjectDeleter commondomain.StorageObjectDeleterInterface
StorageClient *storage.Client
Logger logging.LoggerInterface
Ctx context.Context
Oic commondomain.ObjectIteratorCreatorInterface
}
// NewStorageClient creates a StorageClient
func NewStorageClient(ctx context.Context, client *storage.Client,
logger logging.LoggerInterface) (*StorageClient, error) {
sc := &StorageClient{StorageClient: client, Ctx: ctx,
Oic: &ObjectIteratorCreator{ctx: ctx, sc: client}, Logger: logger}
sc.ObjectDeleter = &StorageObjectDeleter{sc}
return sc, nil
}
// CreateBucket creates a GCS bucket
func (sc *StorageClient) CreateBucket(
bucketName string, project string, attrs *storage.BucketAttrs) error {
return sc.StorageClient.Bucket(bucketName).Create(sc.Ctx, project, attrs)
}
// Buckets returns a bucket iterator for all buckets within a project
func (sc *StorageClient) Buckets(projectID string) *storage.BucketIterator {
return sc.StorageClient.Buckets(sc.Ctx, projectID)
}
// GetBucketAttrs returns bucket attributes for given bucket
func (sc *StorageClient) GetBucketAttrs(bucket string) (*storage.BucketAttrs, error) {
return sc.StorageClient.Bucket(bucket).Attrs(sc.Ctx)
}
// GetObjectReader creates a new Reader to read the contents of the object.
func (sc *StorageClient) GetObjectReader(bucket string, objectPath string) (io.ReadCloser, error) {
return sc.GetBucket(bucket).Object(objectPath).NewReader(sc.Ctx)
}
// GetBucket returns a BucketHandle, which provides operations on the named bucket.
func (sc *StorageClient) GetBucket(bucket string) *storage.BucketHandle {
return sc.StorageClient.Bucket(bucket)
}
// GetObjects returns object iterator for given bucket and path
func (sc *StorageClient) GetObjects(bucket string, objectPath string) commondomain.ObjectIteratorInterface {
return sc.Oic.CreateObjectIterator(bucket, objectPath)
}
// DeleteObject deletes GCS object in given bucket and object path
func (sc *StorageClient) DeleteObject(bucket string, objectPath string) error {
return sc.ObjectDeleter.DeleteObject(bucket, objectPath)
}
// DeleteGcsPath deletes a GCS path, including files
func (sc *StorageClient) DeleteGcsPath(gcsPath string) error {
bucketName, objectPath, err := SplitGCSPath(gcsPath)
if err != nil {
return err
}
log.Printf("Deleting content of: %v", gcsPath)
it := sc.GetObjects(bucketName, objectPath)
for {
attrs, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
return err
}
sc.Logger.Log(fmt.Sprintf("Deleting gs://%v/%v\n", bucketName, attrs.Name))
if err := sc.DeleteObject(bucketName, attrs.Name); err != nil {
return err
}
}
return nil
}
// FindGcsFile finds a file in a GCS directory path for given file extension. File extension can
// be a file name as well.
func (sc *StorageClient) FindGcsFile(gcsDirectoryPath string, fileExtension string) (*storage.ObjectHandle, error) {
bucketName, objectPath, err := SplitGCSPath(gcsDirectoryPath)
if err != nil {
return nil, err
}
it := sc.GetObjects(bucketName, objectPath)
for {
attrs, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
return nil, err
}
if !strings.HasSuffix(attrs.Name, fileExtension) {
continue
}
sc.Logger.Log(fmt.Sprintf("Found gs://%v/%v\n", bucketName, attrs.Name))
return sc.GetBucket(bucketName).Object(attrs.Name), nil
}
return nil, fmt.Errorf(
"path %v doesn't contain a file with %v extension", gcsDirectoryPath, fileExtension)
}
// GetGcsFileContent returns content of a GCS object as byte array
func (sc *StorageClient) GetGcsFileContent(gcsObject *storage.ObjectHandle) ([]byte, error) {
reader, err := gcsObject.NewReader(sc.Ctx)
if err != nil {
return nil, err
}
return ioutil.ReadAll(reader)
}
// WriteToGCS writes content from a reader to destination bucket and path
func (sc *StorageClient) WriteToGCS(
destinationBucketName string, destinationObjectPath string, reader io.Reader) error {
destinationBucket := sc.GetBucket(destinationBucketName)
fileWriter := destinationBucket.Object(destinationObjectPath).NewWriter(sc.Ctx)
if _, err := io.Copy(fileWriter, reader); err != nil {
return err
}
return fileWriter.Close()
}
// Close closes the Client.
//
// Close need not be called at program exit.
func (sc *StorageClient) Close() error {
return sc.StorageClient.Close()
}
// SplitGCSPath splits GCS path into bucket and object path portions
func SplitGCSPath(p string) (string, string, error) {
matches := gsRegex.FindStringSubmatch(p)
if matches != nil {
return matches[1], matches[2], nil
}
return "", "", fmt.Errorf("%q is not a valid GCS path", p)
}
// HTTPClient implements domain.HTTPClientInterface which abstracts HTTP functionality used by
// image import features.
type HTTPClient struct {
httpClient *http.Client
}
// Get executes HTTP GET request for given URL
func (hc *HTTPClient) Get(url string) (resp *http.Response, err error) {
return hc.httpClient.Get(url)
}
// StorageObjectDeleter is responsible for deleting object
type StorageObjectDeleter struct {
sc *StorageClient
}
// DeleteObject deletes GCS object in given bucket and path
func (sod *StorageObjectDeleter) DeleteObject(bucket string, objectPath string) error {
return sod.sc.GetBucket(bucket).Object(objectPath).Delete(sod.sc.Ctx)
}
| 1 | 8,739 | The variable names are not entirely clear. "bucket" is essentially the regex that follows the GCS naming pattern, right? If so, it should be changed to bucketregex. And then bucketregex, gsregex should be changed to something more specific to what pattern each regex is supposed to match. | GoogleCloudPlatform-compute-image-tools | go |
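A minimal sketch of the kind of renaming the comment above asks for; the identifiers (bucketNamePattern, bucketOnlyRegex, bucketObjectRegex) are illustrative assumptions rather than the names that were eventually committed, and the snippet reuses the fmt and regexp imports already present in the file:

var (
	// bucketNamePattern is the raw pattern for a legal GCS bucket name.
	bucketNamePattern = `([a-z0-9][-_.a-z0-9]*)`
	// bucketOnlyRegex matches a path naming only a bucket, e.g. gs://my-bucket/.
	bucketOnlyRegex = regexp.MustCompile(fmt.Sprintf(`^gs://%s/?$`, bucketNamePattern))
	// bucketObjectRegex matches a bucket plus an object path, e.g. gs://my-bucket/dir/file.
	bucketObjectRegex = regexp.MustCompile(fmt.Sprintf(`^gs://%s/(.+)$`, bucketNamePattern))
)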
@@ -26,8 +26,8 @@ import com.google.common.collect.Lists;
import java.util.List;
/** Responsible for producing package meta-data related views for Java GAPIC clients */
-public class JavaGapicPackageTransformer extends JavaPackageTransformer
- implements ModelToViewTransformer {
+public class JavaGapicPackageTransformer<T extends ApiModel> extends JavaPackageTransformer
+ implements ModelToViewTransformer<T> {
private final PackageMetadataConfig packageConfig;
public JavaGapicPackageTransformer(PackageMetadataConfig packageConfig) { | 1 | /* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.transformer.java;
import com.google.api.codegen.config.ApiModel;
import com.google.api.codegen.config.GapicProductConfig;
import com.google.api.codegen.config.PackageMetadataConfig;
import com.google.api.codegen.packagegen.java.JavaPackageTransformer;
import com.google.api.codegen.transformer.ModelToViewTransformer;
import com.google.api.codegen.viewmodel.ViewModel;
import com.google.api.codegen.viewmodel.metadata.PackageMetadataView;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import java.util.List;
/** Responsible for producing package meta-data related views for Java GAPIC clients */
public class JavaGapicPackageTransformer extends JavaPackageTransformer
implements ModelToViewTransformer {
private final PackageMetadataConfig packageConfig;
public JavaGapicPackageTransformer(PackageMetadataConfig packageConfig) {
super(ImmutableMap.of("java/build_gapic.gradle.snip", "build.gradle"), null);
this.packageConfig = packageConfig;
}
@Override
public List<ViewModel> transform(ApiModel model, GapicProductConfig productConfig) {
List<ViewModel> viewModels = Lists.newArrayList();
for (PackageMetadataView.Builder builder :
this.generateMetadataViewBuilders(model, packageConfig, null)) {
viewModels.add(builder.build());
}
return viewModels;
}
@Override
public List<String> getTemplateFileNames() {
return Lists.newArrayList(getSnippetsOutput().keySet());
}
}
| 1 | 25,666 | I think `ApiModelT` would be clearer than `T` | googleapis-gapic-generator | java |
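For illustration, the rename suggested above would only touch the type-parameter declaration shown in the patch; a hedged sketch, not the committed code:

// Same class as in the patch, with the type parameter renamed from T to ApiModelT
// for readability; the bound and the implemented interface are unchanged.
public class JavaGapicPackageTransformer<ApiModelT extends ApiModel> extends JavaPackageTransformer
    implements ModelToViewTransformer<ApiModelT> {
  // body identical to the class above
}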
@@ -1,6 +1,5 @@
-// $Id$
//
-// Copyright (C) 2004-2008 Greg Landrum and Rational Discovery LLC
+// Copyright (C) 2004-2016 Greg Landrum and Rational Discovery LLC
//
// @@ All Rights Reserved @@
// This file is part of the RDKit. | 1 | // $Id$
//
// Copyright (C) 2004-2008 Greg Landrum and Rational Discovery LLC
//
// @@ All Rights Reserved @@
// This file is part of the RDKit.
// The contents are covered by the terms of the BSD license
// which is included in the file license.txt, found at the root
// of the RDKit source tree.
//
#include "RingInfo.h"
#include <RDGeneral/Invariant.h>
#include <algorithm>
namespace RDKit {
bool RingInfo::isAtomInRingOfSize(unsigned int idx, unsigned int size) const {
PRECONDITION(df_init, "RingInfo not initialized");
if (idx < d_atomMembers.size()) {
return std::find(d_atomMembers[idx].begin(), d_atomMembers[idx].end(),
static_cast<int>(size)) != d_atomMembers[idx].end();
} else {
return false;
}
}
unsigned int RingInfo::minAtomRingSize(unsigned int idx) const {
PRECONDITION(df_init, "RingInfo not initialized");
if (idx < d_atomMembers.size() && d_atomMembers[idx].size()) {
return *std::min_element(d_atomMembers[idx].begin(),
d_atomMembers[idx].end());
} else {
return 0;
}
}
unsigned int RingInfo::numAtomRings(unsigned int idx) const {
PRECONDITION(df_init, "RingInfo not initialized");
if (idx < d_atomMembers.size()) {
return rdcast<unsigned int>(d_atomMembers[idx].size());
} else {
return 0;
}
}
bool RingInfo::isBondInRingOfSize(unsigned int idx, unsigned int size) const {
PRECONDITION(df_init, "RingInfo not initialized");
if (idx < d_bondMembers.size()) {
return std::find(d_bondMembers[idx].begin(), d_bondMembers[idx].end(),
static_cast<int>(size)) != d_bondMembers[idx].end();
} else {
return false;
}
}
unsigned int RingInfo::minBondRingSize(unsigned int idx) const {
PRECONDITION(df_init, "RingInfo not initialized");
if (idx < d_bondMembers.size() && d_bondMembers[idx].size()) {
return *std::min_element(d_bondMembers[idx].begin(),
d_bondMembers[idx].end());
} else {
return 0;
}
}
unsigned int RingInfo::numBondRings(unsigned int idx) const {
PRECONDITION(df_init, "RingInfo not initialized");
if (idx < d_bondMembers.size()) {
return rdcast<unsigned int>(d_bondMembers[idx].size());
} else {
return 0;
}
}
unsigned int RingInfo::numRings() const {
PRECONDITION(df_init, "RingInfo not initialized");
PRECONDITION(d_atomRings.size() == d_bondRings.size(), "length mismatch");
return rdcast<unsigned int>(d_atomRings.size());
}
unsigned int RingInfo::addRing(const INT_VECT &atomIndices,
const INT_VECT &bondIndices) {
PRECONDITION(df_init, "RingInfo not initialized");
PRECONDITION(atomIndices.size() == bondIndices.size(), "length mismatch");
int sz = rdcast<int>(atomIndices.size());
for (auto i : atomIndices) {
if (i >= static_cast<int>(d_atomMembers.size()))
d_atomMembers.resize((i) + 1);
d_atomMembers[i].push_back(sz);
}
for (auto i : bondIndices) {
if (i >= static_cast<int>(d_bondMembers.size()))
d_bondMembers.resize((i) + 1);
d_bondMembers[i].push_back(sz);
}
d_atomRings.push_back(atomIndices);
d_bondRings.push_back(bondIndices);
POSTCONDITION(d_atomRings.size() == d_bondRings.size(), "length mismatch");
return rdcast<unsigned int>(d_atomRings.size());
}
void RingInfo::initialize() {
PRECONDITION(!df_init, "already initialized");
df_init = true;
};
void RingInfo::reset() {
if (!df_init) return;
df_init = false;
d_atomMembers.clear();
d_bondMembers.clear();
d_atomRings.clear();
d_bondRings.clear();
}
void RingInfo::preallocate(unsigned int numAtoms, unsigned int numBonds) {
d_atomMembers.resize(numAtoms);
d_bondMembers.resize(numBonds);
}
}
| 1 | 19,518 | Shouldn't this be 2019? | rdkit-rdkit | cpp |
@@ -30,6 +30,8 @@ public enum Status {
SUCCEEDED(50),
KILLING(55),
KILLED(60),
+ // POD_FAILED refers to a failed containerized flow due to pod failure
+ POD_FAILED(65),
FAILED(70),
FAILED_FINISHING(80),
SKIPPED(90), | 1 | /*
* Copyright 2014 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.executor;
import com.google.common.collect.ImmutableMap;
import java.util.Arrays;
import java.util.Set;
import java.util.TreeSet;
public enum Status {
READY(10),
DISPATCHING (15),
PREPARING(20),
RUNNING(30),
PAUSED(40),
SUCCEEDED(50),
KILLING(55),
KILLED(60),
FAILED(70),
FAILED_FINISHING(80),
SKIPPED(90),
DISABLED(100),
QUEUED(110),
FAILED_SUCCEEDED(120),
CANCELLED(125);
// status is TINYINT in DB and the value ranges from -128 to 127
private static final ImmutableMap<Integer, Status> numValMap = Arrays.stream(Status.values())
.collect(ImmutableMap.toImmutableMap(status -> status.getNumVal(), status -> status));
public static final Set<Status> nonFinishingStatusAfterFlowStartsSet = new TreeSet<>(
Arrays.asList(Status.RUNNING, Status.QUEUED, Status.PAUSED, Status.FAILED_FINISHING));
private final int numVal;
Status(final int numVal) {
this.numVal = numVal;
}
public static Status fromInteger(final int x) {
return numValMap.getOrDefault(x, READY);
}
public static boolean isStatusFinished(final Status status) {
switch (status) {
case FAILED:
case KILLED:
case SUCCEEDED:
case SKIPPED:
case FAILED_SUCCEEDED:
case CANCELLED:
return true;
default:
return false;
}
}
public static boolean isStatusRunning(final Status status) {
switch (status) {
case RUNNING:
case FAILED_FINISHING:
case QUEUED:
return true;
default:
return false;
}
}
public static boolean isStatusFailed(final Status status) {
switch (status) {
case FAILED:
case KILLED:
case CANCELLED:
return true;
default:
return false;
}
}
public static boolean isStatusSucceeded(final Status status) {
switch (status) {
case SUCCEEDED:
case FAILED_SUCCEEDED:
case SKIPPED:
return true;
default:
return false;
}
}
public int getNumVal() {
return this.numVal;
}
}
| 1 | 22,150 | We should consider giving this state a more generic name which could be used for flows in a similar state in non-containerized Azkaban. For instance, in the event of a bare-metal executor crashing, we could then switch any flows assigned to that executor to this state. Something like `EXECUTE_INFRA_FAILED`; better alternatives should be possible though. | azkaban-azkaban | java |
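A rough sketch of that suggestion; the constant name EXECUTE_INFRA_FAILED is taken from the review comment and is only a proposal, not committed code:

// Generic infrastructure-failure state: would cover a failed pod in containerized
// Azkaban as well as, for example, a crashed bare-metal executor.
EXECUTE_INFRA_FAILED(65),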
@@ -4,8 +4,12 @@ var bad = [],
virtualNode.children.forEach(({ actualNode }) => {
var nodeName = actualNode.nodeName.toUpperCase();
- if (actualNode.nodeType === 1 && nodeName !== 'DT' && nodeName !== 'DD' && permitted.indexOf(nodeName) === -1) {
- bad.push(actualNode);
+
+ if (actualNode.nodeType === 1 && permitted.indexOf(nodeName) === -1) {
+ var role = (actualNode.getAttribute('role') || '').toLowerCase();
+ if ((nodeName !== 'DT' && nodeName !== 'DD') || role) {
+ bad.push(actualNode);
+ }
} else if (actualNode.nodeType === 3 && actualNode.nodeValue.trim() !== '') {
hasNonEmptyTextNode = true;
} | 1 | var bad = [],
permitted = ['STYLE', 'META', 'LINK', 'MAP', 'AREA', 'SCRIPT', 'DATALIST', 'TEMPLATE'],
hasNonEmptyTextNode = false;
virtualNode.children.forEach(({ actualNode }) => {
var nodeName = actualNode.nodeName.toUpperCase();
if (actualNode.nodeType === 1 && nodeName !== 'DT' && nodeName !== 'DD' && permitted.indexOf(nodeName) === -1) {
bad.push(actualNode);
} else if (actualNode.nodeType === 3 && actualNode.nodeValue.trim() !== '') {
hasNonEmptyTextNode = true;
}
});
if (bad.length) {
this.relatedNodes(bad);
}
var retVal = !!bad.length || hasNonEmptyTextNode;
return retVal;
| 1 | 11,593 | This should allow `role=definition` and `role=term`, possibly also `role=list`? | dequelabs-axe-core | js |
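A minimal sketch of how the patched check could accommodate that suggestion; the allowed role list (definition, term, and tentatively list) is an assumption drawn from the comment, not the committed behaviour, and the surrounding variables (actualNode, nodeName, permitted, bad) are the ones from the check shown above:

var allowedDlChildRoles = ['definition', 'term', 'list'];
if (actualNode.nodeType === 1 && permitted.indexOf(nodeName) === -1) {
  var role = (actualNode.getAttribute('role') || '').toLowerCase();
  var isDlChild = nodeName === 'DT' || nodeName === 'DD';
  // DT/DD remain valid with no role or an allowed role; anything else is flagged.
  if (!isDlChild || (role && allowedDlChildRoles.indexOf(role) === -1)) {
    bad.push(actualNode);
  }
}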
@@ -511,6 +511,19 @@ public class Spark3Util {
TableProperties.PARQUET_BATCH_SIZE, TableProperties.PARQUET_BATCH_SIZE_DEFAULT));
}
+ public static Boolean propertyAsBoolean(CaseInsensitiveStringMap options, String property, Boolean defaultValue) {
+ if (defaultValue != null) {
+ return options.getBoolean(property, defaultValue);
+ }
+
+ String value = options.get(property);
+ if (value != null) {
+ return Boolean.parseBoolean(value);
+ }
+
+ return null;
+ }
+
public static Long propertyAsLong(CaseInsensitiveStringMap options, String property, Long defaultValue) {
if (defaultValue != null) {
return options.getLong(property, defaultValue); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.spark;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.iceberg.DistributionMode;
import org.apache.iceberg.FileFormat;
import org.apache.iceberg.MetadataTableType;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.TableProperties;
import org.apache.iceberg.UpdateProperties;
import org.apache.iceberg.UpdateSchema;
import org.apache.iceberg.catalog.Namespace;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.expressions.BoundPredicate;
import org.apache.iceberg.expressions.ExpressionVisitors;
import org.apache.iceberg.expressions.Term;
import org.apache.iceberg.expressions.UnboundPredicate;
import org.apache.iceberg.hadoop.HadoopInputFile;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.io.InputFile;
import org.apache.iceberg.relocated.com.google.common.base.Joiner;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;
import org.apache.iceberg.spark.source.SparkTable;
import org.apache.iceberg.transforms.PartitionSpecVisitor;
import org.apache.iceberg.transforms.SortOrderVisitor;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.types.TypeUtil;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.util.ArrayUtil;
import org.apache.iceberg.util.Pair;
import org.apache.iceberg.util.PropertyUtil;
import org.apache.iceberg.util.SortOrderUtil;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RuntimeConfig;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.catalyst.analysis.NoSuchTableException;
import org.apache.spark.sql.catalyst.parser.ParseException;
import org.apache.spark.sql.catalyst.parser.ParserInterface;
import org.apache.spark.sql.connector.catalog.CatalogManager;
import org.apache.spark.sql.connector.catalog.CatalogPlugin;
import org.apache.spark.sql.connector.catalog.Identifier;
import org.apache.spark.sql.connector.catalog.Table;
import org.apache.spark.sql.connector.catalog.TableCatalog;
import org.apache.spark.sql.connector.catalog.TableChange;
import org.apache.spark.sql.connector.expressions.Expression;
import org.apache.spark.sql.connector.expressions.Expressions;
import org.apache.spark.sql.connector.expressions.Literal;
import org.apache.spark.sql.connector.expressions.Transform;
import org.apache.spark.sql.connector.iceberg.distributions.Distribution;
import org.apache.spark.sql.connector.iceberg.distributions.Distributions;
import org.apache.spark.sql.connector.iceberg.distributions.OrderedDistribution;
import org.apache.spark.sql.connector.iceberg.expressions.SortOrder;
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation;
import org.apache.spark.sql.types.IntegerType;
import org.apache.spark.sql.types.LongType;
import org.apache.spark.sql.util.CaseInsensitiveStringMap;
import scala.Some;
import scala.collection.JavaConverters;
import scala.collection.Seq;
import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE;
import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE_DEFAULT;
import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE_RANGE;
public class Spark3Util {
private static final Set<String> LOCALITY_WHITELIST_FS = ImmutableSet.of("hdfs");
private static final Set<String> RESERVED_PROPERTIES = ImmutableSet.of(
TableCatalog.PROP_LOCATION, TableCatalog.PROP_PROVIDER);
private static final Joiner DOT = Joiner.on(".");
private Spark3Util() {
}
public static Map<String, String> rebuildCreateProperties(Map<String, String> createProperties) {
ImmutableMap.Builder<String, String> tableProperties = ImmutableMap.builder();
createProperties.entrySet().stream()
.filter(entry -> !RESERVED_PROPERTIES.contains(entry.getKey()))
.forEach(tableProperties::put);
String provider = createProperties.get(TableCatalog.PROP_PROVIDER);
if ("parquet".equalsIgnoreCase(provider)) {
tableProperties.put(TableProperties.DEFAULT_FILE_FORMAT, "parquet");
} else if ("avro".equalsIgnoreCase(provider)) {
tableProperties.put(TableProperties.DEFAULT_FILE_FORMAT, "avro");
} else if ("orc".equalsIgnoreCase(provider)) {
tableProperties.put(TableProperties.DEFAULT_FILE_FORMAT, "orc");
} else if (provider != null && !"iceberg".equalsIgnoreCase(provider)) {
throw new IllegalArgumentException("Unsupported format in USING: " + provider);
}
return tableProperties.build();
}
/**
* Applies a list of Spark table changes to an {@link UpdateProperties} operation.
*
* @param pendingUpdate an uncommitted UpdateProperties operation to configure
* @param changes a list of Spark table changes
* @return the UpdateProperties operation configured with the changes
*/
public static UpdateProperties applyPropertyChanges(UpdateProperties pendingUpdate, List<TableChange> changes) {
for (TableChange change : changes) {
if (change instanceof TableChange.SetProperty) {
TableChange.SetProperty set = (TableChange.SetProperty) change;
pendingUpdate.set(set.property(), set.value());
} else if (change instanceof TableChange.RemoveProperty) {
TableChange.RemoveProperty remove = (TableChange.RemoveProperty) change;
pendingUpdate.remove(remove.property());
} else {
throw new UnsupportedOperationException("Cannot apply unknown table change: " + change);
}
}
return pendingUpdate;
}
/**
* Applies a list of Spark table changes to an {@link UpdateSchema} operation.
*
* @param pendingUpdate an uncommitted UpdateSchema operation to configure
* @param changes a list of Spark table changes
* @return the UpdateSchema operation configured with the changes
*/
public static UpdateSchema applySchemaChanges(UpdateSchema pendingUpdate, List<TableChange> changes) {
for (TableChange change : changes) {
if (change instanceof TableChange.AddColumn) {
apply(pendingUpdate, (TableChange.AddColumn) change);
} else if (change instanceof TableChange.UpdateColumnType) {
TableChange.UpdateColumnType update = (TableChange.UpdateColumnType) change;
Type newType = SparkSchemaUtil.convert(update.newDataType());
Preconditions.checkArgument(newType.isPrimitiveType(),
"Cannot update '%s', not a primitive type: %s", DOT.join(update.fieldNames()), update.newDataType());
pendingUpdate.updateColumn(DOT.join(update.fieldNames()), newType.asPrimitiveType());
} else if (change instanceof TableChange.UpdateColumnComment) {
TableChange.UpdateColumnComment update = (TableChange.UpdateColumnComment) change;
pendingUpdate.updateColumnDoc(DOT.join(update.fieldNames()), update.newComment());
} else if (change instanceof TableChange.RenameColumn) {
TableChange.RenameColumn rename = (TableChange.RenameColumn) change;
pendingUpdate.renameColumn(DOT.join(rename.fieldNames()), rename.newName());
} else if (change instanceof TableChange.DeleteColumn) {
TableChange.DeleteColumn delete = (TableChange.DeleteColumn) change;
pendingUpdate.deleteColumn(DOT.join(delete.fieldNames()));
} else if (change instanceof TableChange.UpdateColumnNullability) {
TableChange.UpdateColumnNullability update = (TableChange.UpdateColumnNullability) change;
if (update.nullable()) {
pendingUpdate.makeColumnOptional(DOT.join(update.fieldNames()));
} else {
pendingUpdate.requireColumn(DOT.join(update.fieldNames()));
}
} else if (change instanceof TableChange.UpdateColumnPosition) {
apply(pendingUpdate, (TableChange.UpdateColumnPosition) change);
} else {
throw new UnsupportedOperationException("Cannot apply unknown table change: " + change);
}
}
return pendingUpdate;
}
private static void apply(UpdateSchema pendingUpdate, TableChange.UpdateColumnPosition update) {
Preconditions.checkArgument(update.position() != null, "Invalid position: null");
if (update.position() instanceof TableChange.After) {
TableChange.After after = (TableChange.After) update.position();
String referenceField = peerName(update.fieldNames(), after.column());
pendingUpdate.moveAfter(DOT.join(update.fieldNames()), referenceField);
} else if (update.position() instanceof TableChange.First) {
pendingUpdate.moveFirst(DOT.join(update.fieldNames()));
} else {
throw new IllegalArgumentException("Unknown position for reorder: " + update.position());
}
}
private static void apply(UpdateSchema pendingUpdate, TableChange.AddColumn add) {
Preconditions.checkArgument(add.isNullable(),
"Incompatible change: cannot add required column: %s", leafName(add.fieldNames()));
Type type = SparkSchemaUtil.convert(add.dataType());
pendingUpdate.addColumn(parentName(add.fieldNames()), leafName(add.fieldNames()), type, add.comment());
if (add.position() instanceof TableChange.After) {
TableChange.After after = (TableChange.After) add.position();
String referenceField = peerName(add.fieldNames(), after.column());
pendingUpdate.moveAfter(DOT.join(add.fieldNames()), referenceField);
} else if (add.position() instanceof TableChange.First) {
pendingUpdate.moveFirst(DOT.join(add.fieldNames()));
} else {
Preconditions.checkArgument(add.position() == null,
"Cannot add '%s' at unknown position: %s", DOT.join(add.fieldNames()), add.position());
}
}
public static org.apache.iceberg.Table toIcebergTable(Table table) {
Preconditions.checkArgument(table instanceof SparkTable, "Table %s is not an Iceberg table", table);
SparkTable sparkTable = (SparkTable) table;
return sparkTable.table();
}
/**
* Converts a PartitionSpec to Spark transforms.
*
* @param spec a PartitionSpec
* @return an array of Transforms
*/
public static Transform[] toTransforms(PartitionSpec spec) {
List<Transform> transforms = PartitionSpecVisitor.visit(spec,
new PartitionSpecVisitor<Transform>() {
@Override
public Transform identity(String sourceName, int sourceId) {
return Expressions.identity(sourceName);
}
@Override
public Transform bucket(String sourceName, int sourceId, int numBuckets) {
return Expressions.bucket(numBuckets, sourceName);
}
@Override
public Transform truncate(String sourceName, int sourceId, int width) {
return Expressions.apply("truncate", Expressions.column(sourceName), Expressions.literal(width));
}
@Override
public Transform year(String sourceName, int sourceId) {
return Expressions.years(sourceName);
}
@Override
public Transform month(String sourceName, int sourceId) {
return Expressions.months(sourceName);
}
@Override
public Transform day(String sourceName, int sourceId) {
return Expressions.days(sourceName);
}
@Override
public Transform hour(String sourceName, int sourceId) {
return Expressions.hours(sourceName);
}
@Override
public Transform unknown(int fieldId, String sourceName, int sourceId, String transform) {
return Expressions.apply(transform, Expressions.column(sourceName));
}
});
return transforms.toArray(new Transform[0]);
}
public static Distribution buildRequiredDistribution(org.apache.iceberg.Table table) {
DistributionMode distributionMode = distributionModeFor(table);
switch (distributionMode) {
case NONE:
return Distributions.unspecified();
case HASH:
if (table.spec().isUnpartitioned()) {
return Distributions.unspecified();
} else {
return Distributions.clustered(toTransforms(table.spec()));
}
case RANGE:
if (table.spec().isUnpartitioned() && table.sortOrder().isUnsorted()) {
return Distributions.unspecified();
} else {
org.apache.iceberg.SortOrder requiredSortOrder = SortOrderUtil.buildSortOrder(table);
return Distributions.ordered(convert(requiredSortOrder));
}
default:
throw new IllegalArgumentException("Unsupported distribution mode: " + distributionMode);
}
}
public static SortOrder[] buildRequiredOrdering(Distribution distribution, org.apache.iceberg.Table table) {
if (distribution instanceof OrderedDistribution) {
OrderedDistribution orderedDistribution = (OrderedDistribution) distribution;
return orderedDistribution.ordering();
} else {
org.apache.iceberg.SortOrder requiredSortOrder = SortOrderUtil.buildSortOrder(table);
return convert(requiredSortOrder);
}
}
public static DistributionMode distributionModeFor(org.apache.iceberg.Table table) {
boolean isSortedTable = !table.sortOrder().isUnsorted();
String defaultModeName = isSortedTable ? WRITE_DISTRIBUTION_MODE_RANGE : WRITE_DISTRIBUTION_MODE_DEFAULT;
String modeName = table.properties().getOrDefault(WRITE_DISTRIBUTION_MODE, defaultModeName);
return DistributionMode.fromName(modeName);
}
public static SortOrder[] convert(org.apache.iceberg.SortOrder sortOrder) {
List<OrderField> converted = SortOrderVisitor.visit(sortOrder, new SortOrderToSpark());
return converted.toArray(new OrderField[0]);
}
public static Term toIcebergTerm(Transform transform) {
Preconditions.checkArgument(transform.references().length == 1,
"Cannot convert transform with more than one column reference: %s", transform);
String colName = DOT.join(transform.references()[0].fieldNames());
switch (transform.name()) {
case "identity":
return org.apache.iceberg.expressions.Expressions.ref(colName);
case "bucket":
return org.apache.iceberg.expressions.Expressions.bucket(colName, findWidth(transform));
case "years":
return org.apache.iceberg.expressions.Expressions.year(colName);
case "months":
return org.apache.iceberg.expressions.Expressions.month(colName);
case "date":
case "days":
return org.apache.iceberg.expressions.Expressions.day(colName);
case "date_hour":
case "hours":
return org.apache.iceberg.expressions.Expressions.hour(colName);
case "truncate":
return org.apache.iceberg.expressions.Expressions.truncate(colName, findWidth(transform));
default:
throw new UnsupportedOperationException("Transform is not supported: " + transform);
}
}
/**
* Converts Spark transforms into a {@link PartitionSpec}.
*
* @param schema the table schema
* @param partitioning Spark Transforms
* @return a PartitionSpec
*/
public static PartitionSpec toPartitionSpec(Schema schema, Transform[] partitioning) {
if (partitioning == null || partitioning.length == 0) {
return PartitionSpec.unpartitioned();
}
PartitionSpec.Builder builder = PartitionSpec.builderFor(schema);
for (Transform transform : partitioning) {
Preconditions.checkArgument(transform.references().length == 1,
"Cannot convert transform with more than one column reference: %s", transform);
String colName = DOT.join(transform.references()[0].fieldNames());
switch (transform.name()) {
case "identity":
builder.identity(colName);
break;
case "bucket":
builder.bucket(colName, findWidth(transform));
break;
case "years":
builder.year(colName);
break;
case "months":
builder.month(colName);
break;
case "date":
case "days":
builder.day(colName);
break;
case "date_hour":
case "hours":
builder.hour(colName);
break;
case "truncate":
builder.truncate(colName, findWidth(transform));
break;
default:
throw new UnsupportedOperationException("Transform is not supported: " + transform);
}
}
return builder.build();
}
@SuppressWarnings("unchecked")
private static int findWidth(Transform transform) {
for (Expression expr : transform.arguments()) {
if (expr instanceof Literal) {
if (((Literal) expr).dataType() instanceof IntegerType) {
Literal<Integer> lit = (Literal<Integer>) expr;
Preconditions.checkArgument(lit.value() > 0,
"Unsupported width for transform: %s", transform.describe());
return lit.value();
} else if (((Literal) expr).dataType() instanceof LongType) {
Literal<Long> lit = (Literal<Long>) expr;
Preconditions.checkArgument(lit.value() > 0 && lit.value() < Integer.MAX_VALUE,
"Unsupported width for transform: %s", transform.describe());
return lit.value().intValue();
}
}
}
throw new IllegalArgumentException("Cannot find width for transform: " + transform.describe());
}
private static String leafName(String[] fieldNames) {
Preconditions.checkArgument(fieldNames.length > 0, "Invalid field name: at least one name is required");
return fieldNames[fieldNames.length - 1];
}
private static String peerName(String[] fieldNames, String fieldName) {
if (fieldNames.length > 1) {
String[] peerNames = Arrays.copyOf(fieldNames, fieldNames.length);
peerNames[fieldNames.length - 1] = fieldName;
return DOT.join(peerNames);
}
return fieldName;
}
private static String parentName(String[] fieldNames) {
if (fieldNames.length > 1) {
return DOT.join(Arrays.copyOfRange(fieldNames, 0, fieldNames.length - 1));
}
return null;
}
public static String describe(org.apache.iceberg.expressions.Expression expr) {
return ExpressionVisitors.visit(expr, DescribeExpressionVisitor.INSTANCE);
}
public static String describe(Schema schema) {
return TypeUtil.visit(schema, DescribeSchemaVisitor.INSTANCE);
}
public static String describe(Type type) {
return TypeUtil.visit(type, DescribeSchemaVisitor.INSTANCE);
}
public static boolean isLocalityEnabled(FileIO io, String location, CaseInsensitiveStringMap readOptions) {
InputFile in = io.newInputFile(location);
if (in instanceof HadoopInputFile) {
String scheme = ((HadoopInputFile) in).getFileSystem().getScheme();
return readOptions.getBoolean("locality", LOCALITY_WHITELIST_FS.contains(scheme));
}
return false;
}
public static boolean isVectorizationEnabled(FileFormat fileFormat,
Map<String, String> properties,
RuntimeConfig sessionConf,
CaseInsensitiveStringMap readOptions) {
String readOptionValue = readOptions.get(SparkReadOptions.VECTORIZATION_ENABLED);
if (readOptionValue != null) {
return Boolean.parseBoolean(readOptionValue);
}
String sessionConfValue = sessionConf.get("spark.sql.iceberg.vectorization.enabled", null);
if (sessionConfValue != null) {
return Boolean.parseBoolean(sessionConfValue);
}
switch (fileFormat) {
case PARQUET:
return PropertyUtil.propertyAsBoolean(
properties,
TableProperties.PARQUET_VECTORIZATION_ENABLED,
TableProperties.PARQUET_VECTORIZATION_ENABLED_DEFAULT);
case ORC:
// TODO: add a table property to enable/disable ORC vectorized reads
return false;
default:
return false;
}
}
public static int batchSize(Map<String, String> properties, CaseInsensitiveStringMap readOptions) {
return readOptions.getInt(SparkReadOptions.VECTORIZATION_BATCH_SIZE,
PropertyUtil.propertyAsInt(properties,
TableProperties.PARQUET_BATCH_SIZE, TableProperties.PARQUET_BATCH_SIZE_DEFAULT));
}
public static Long propertyAsLong(CaseInsensitiveStringMap options, String property, Long defaultValue) {
if (defaultValue != null) {
return options.getLong(property, defaultValue);
}
String value = options.get(property);
if (value != null) {
return Long.parseLong(value);
}
return null;
}
public static Integer propertyAsInt(CaseInsensitiveStringMap options, String property, Integer defaultValue) {
if (defaultValue != null) {
return options.getInt(property, defaultValue);
}
String value = options.get(property);
if (value != null) {
return Integer.parseInt(value);
}
return null;
}
public static class DescribeSchemaVisitor extends TypeUtil.SchemaVisitor<String> {
private static final Joiner COMMA = Joiner.on(',');
private static final DescribeSchemaVisitor INSTANCE = new DescribeSchemaVisitor();
private DescribeSchemaVisitor() {
}
@Override
public String schema(Schema schema, String structResult) {
return structResult;
}
@Override
public String struct(Types.StructType struct, List<String> fieldResults) {
return "struct<" + COMMA.join(fieldResults) + ">";
}
@Override
public String field(Types.NestedField field, String fieldResult) {
return field.name() + ": " + fieldResult + (field.isRequired() ? " not null" : "");
}
@Override
public String list(Types.ListType list, String elementResult) {
return "map<" + elementResult + ">";
}
@Override
public String map(Types.MapType map, String keyResult, String valueResult) {
return "map<" + keyResult + ", " + valueResult + ">";
}
@Override
public String primitive(Type.PrimitiveType primitive) {
switch (primitive.typeId()) {
case BOOLEAN:
return "boolean";
case INTEGER:
return "int";
case LONG:
return "bigint";
case FLOAT:
return "float";
case DOUBLE:
return "double";
case DATE:
return "date";
case TIME:
return "time";
case TIMESTAMP:
return "timestamp";
case STRING:
case UUID:
return "string";
case FIXED:
case BINARY:
return "binary";
case DECIMAL:
Types.DecimalType decimal = (Types.DecimalType) primitive;
return "decimal(" + decimal.precision() + "," + decimal.scale() + ")";
}
throw new UnsupportedOperationException("Cannot convert type to SQL: " + primitive);
}
}
private static class DescribeExpressionVisitor extends ExpressionVisitors.ExpressionVisitor<String> {
private static final DescribeExpressionVisitor INSTANCE = new DescribeExpressionVisitor();
private DescribeExpressionVisitor() {
}
@Override
public String alwaysTrue() {
return "true";
}
@Override
public String alwaysFalse() {
return "false";
}
@Override
public String not(String result) {
return "NOT (" + result + ")";
}
@Override
public String and(String leftResult, String rightResult) {
return "(" + leftResult + " AND " + rightResult + ")";
}
@Override
public String or(String leftResult, String rightResult) {
return "(" + leftResult + " OR " + rightResult + ")";
}
@Override
public <T> String predicate(BoundPredicate<T> pred) {
throw new UnsupportedOperationException("Cannot convert bound predicates to SQL");
}
@Override
public <T> String predicate(UnboundPredicate<T> pred) {
switch (pred.op()) {
case IS_NULL:
return pred.ref().name() + " IS NULL";
case NOT_NULL:
return pred.ref().name() + " IS NOT NULL";
case IS_NAN:
return "is_nan(" + pred.ref().name() + ")";
case NOT_NAN:
return "not_nan(" + pred.ref().name() + ")";
case LT:
return pred.ref().name() + " < " + sqlString(pred.literal());
case LT_EQ:
return pred.ref().name() + " <= " + sqlString(pred.literal());
case GT:
return pred.ref().name() + " > " + sqlString(pred.literal());
case GT_EQ:
return pred.ref().name() + " >= " + sqlString(pred.literal());
case EQ:
return pred.ref().name() + " = " + sqlString(pred.literal());
case NOT_EQ:
return pred.ref().name() + " != " + sqlString(pred.literal());
case STARTS_WITH:
return pred.ref().name() + " LIKE '" + pred.literal() + "%'";
case IN:
return pred.ref().name() + " IN (" + sqlString(pred.literals()) + ")";
case NOT_IN:
return pred.ref().name() + " NOT IN (" + sqlString(pred.literals()) + ")";
default:
throw new UnsupportedOperationException("Cannot convert predicate to SQL: " + pred);
}
}
private static <T> String sqlString(List<org.apache.iceberg.expressions.Literal<T>> literals) {
return literals.stream().map(DescribeExpressionVisitor::sqlString).collect(Collectors.joining(", "));
}
private static String sqlString(org.apache.iceberg.expressions.Literal<?> lit) {
if (lit.value() instanceof String) {
return "'" + lit.value() + "'";
} else if (lit.value() instanceof ByteBuffer) {
throw new IllegalArgumentException("Cannot convert bytes to SQL literal: " + lit);
} else {
return lit.value().toString();
}
}
}
/**
* Returns a Metadata Table Dataset if it can be loaded from a Spark V2 Catalog
*
* Because Spark does not allow more than 1 piece in the namespace for a Session Catalog table, we circumvent
* the entire resolution path for tables and instead look up the table directly ourselves. This lets us correctly
   * get metadata tables for the SessionCatalog; if we did not have to work around this, we could just use spark.table.
*
* @param spark SparkSession used for looking up catalog references and tables
* @param name The multipart identifier of the base Iceberg table
* @param type The type of metadata table to load
* @return null if we cannot find the Metadata Table, a Dataset of rows otherwise
*/
private static Dataset<Row> loadCatalogMetadataTable(SparkSession spark, String name, MetadataTableType type) {
try {
CatalogAndIdentifier catalogAndIdentifier = catalogAndIdentifier(spark, name);
if (catalogAndIdentifier.catalog instanceof BaseCatalog) {
BaseCatalog catalog = (BaseCatalog) catalogAndIdentifier.catalog;
Identifier baseId = catalogAndIdentifier.identifier;
Identifier metaId = Identifier.of(ArrayUtil.add(baseId.namespace(), baseId.name()), type.name());
Table metaTable = catalog.loadTable(metaId);
return Dataset.ofRows(spark, DataSourceV2Relation.create(metaTable, Some.apply(catalog), Some.apply(metaId)));
}
} catch (NoSuchTableException | ParseException e) {
// Could not find table
return null;
}
// Could not find table
return null;
}
public static CatalogAndIdentifier catalogAndIdentifier(SparkSession spark, String name) throws ParseException {
return catalogAndIdentifier(spark, name, spark.sessionState().catalogManager().currentCatalog());
}
public static CatalogAndIdentifier catalogAndIdentifier(SparkSession spark, String name,
CatalogPlugin defaultCatalog) throws ParseException {
ParserInterface parser = spark.sessionState().sqlParser();
Seq<String> multiPartIdentifier = parser.parseMultipartIdentifier(name);
List<String> javaMultiPartIdentifier = JavaConverters.seqAsJavaList(multiPartIdentifier);
return catalogAndIdentifier(spark, javaMultiPartIdentifier, defaultCatalog);
}
public static CatalogAndIdentifier catalogAndIdentifier(String description, SparkSession spark, String name) {
return catalogAndIdentifier(description, spark, name, spark.sessionState().catalogManager().currentCatalog());
}
public static CatalogAndIdentifier catalogAndIdentifier(String description, SparkSession spark,
String name, CatalogPlugin defaultCatalog) {
try {
return catalogAndIdentifier(spark, name, defaultCatalog);
} catch (ParseException e) {
throw new IllegalArgumentException("Cannot parse " + description + ": " + name, e);
}
}
public static CatalogAndIdentifier catalogAndIdentifier(SparkSession spark, List<String> nameParts) {
return catalogAndIdentifier(spark, nameParts, spark.sessionState().catalogManager().currentCatalog());
}
/**
* A modified version of Spark's LookupCatalog.CatalogAndIdentifier.unapply
* Attempts to find the catalog and identifier a multipart identifier represents
* @param spark Spark session to use for resolution
* @param nameParts Multipart identifier representing a table
* @param defaultCatalog Catalog to use if none is specified
* @return The CatalogPlugin and Identifier for the table
*/
public static CatalogAndIdentifier catalogAndIdentifier(SparkSession spark, List<String> nameParts,
CatalogPlugin defaultCatalog) {
CatalogManager catalogManager = spark.sessionState().catalogManager();
String[] currentNamespace;
if (defaultCatalog.equals(catalogManager.currentCatalog())) {
currentNamespace = catalogManager.currentNamespace();
} else {
currentNamespace = defaultCatalog.defaultNamespace();
}
Pair<CatalogPlugin, Identifier> catalogIdentifier = SparkUtil.catalogAndIdentifier(nameParts,
catalogName -> {
try {
return catalogManager.catalog(catalogName);
} catch (Exception e) {
return null;
}
},
Identifier::of,
defaultCatalog,
currentNamespace
);
return new CatalogAndIdentifier(catalogIdentifier);
}
/**
* This mimics a class inside of Spark which is private inside of LookupCatalog.
*/
public static class CatalogAndIdentifier {
private final CatalogPlugin catalog;
private final Identifier identifier;
public CatalogAndIdentifier(CatalogPlugin catalog, Identifier identifier) {
this.catalog = catalog;
this.identifier = identifier;
}
public CatalogAndIdentifier(Pair<CatalogPlugin, Identifier> identifier) {
this.catalog = identifier.first();
this.identifier = identifier.second();
}
public CatalogPlugin catalog() {
return catalog;
}
public Identifier identifier() {
return identifier;
}
}
public static TableIdentifier identifierToTableIdentifier(Identifier identifier) {
return TableIdentifier.of(Namespace.of(identifier.namespace()), identifier.name());
}
}
| 1 | 34,219 | Should this use `boolean` instead of `Boolean`? | apache-iceberg | java |
@@ -135,7 +135,6 @@ namespace OpenTelemetry.Exporter.Zipkin.Tests
[Theory]
[InlineData(true, false, false)]
[InlineData(false, false, false)]
- [InlineData(false, true, false)]
[InlineData(false, false, true)]
[InlineData(false, false, false, StatusCode.Ok)]
[InlineData(false, false, false, StatusCode.Ok, null, true)] | 1 | // <copyright file="ZipkinExporterTests.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.Net;
using System.Text;
using OpenTelemetry.Exporter.Zipkin.Implementation;
using OpenTelemetry.Resources;
using OpenTelemetry.Tests;
using OpenTelemetry.Trace;
using Xunit;
namespace OpenTelemetry.Exporter.Zipkin.Tests
{
public class ZipkinExporterTests : IDisposable
{
private const string TraceId = "e8ea7e9ac72de94e91fabc613f9686b2";
private static readonly ConcurrentDictionary<Guid, string> Responses = new ConcurrentDictionary<Guid, string>();
private readonly IDisposable testServer;
private readonly string testServerHost;
private readonly int testServerPort;
static ZipkinExporterTests()
{
Activity.DefaultIdFormat = ActivityIdFormat.W3C;
Activity.ForceDefaultIdFormat = true;
var listener = new ActivityListener
{
ShouldListenTo = _ => true,
Sample = (ref ActivityCreationOptions<ActivityContext> options) => ActivitySamplingResult.AllData,
};
ActivitySource.AddActivityListener(listener);
}
public ZipkinExporterTests()
{
this.testServer = TestHttpServer.RunServer(
ctx => ProcessServerRequest(ctx),
out this.testServerHost,
out this.testServerPort);
static void ProcessServerRequest(HttpListenerContext context)
{
context.Response.StatusCode = 200;
using StreamReader readStream = new StreamReader(context.Request.InputStream);
string requestContent = readStream.ReadToEnd();
Responses.TryAdd(
Guid.Parse(context.Request.QueryString["requestId"]),
requestContent);
context.Response.OutputStream.Close();
}
}
public void Dispose()
{
this.testServer.Dispose();
}
[Fact]
public void BadArgs()
{
TracerProviderBuilder builder = null;
Assert.Throws<ArgumentNullException>(() => builder.AddZipkinExporter());
}
[Fact]
        public void SuppressesInstrumentation()
{
const string ActivitySourceName = "zipkin.test";
Guid requestId = Guid.NewGuid();
TestActivityProcessor testActivityProcessor = new TestActivityProcessor();
int endCalledCount = 0;
testActivityProcessor.EndAction =
(a) =>
{
endCalledCount++;
};
var exporterOptions = new ZipkinExporterOptions
{
ServiceName = "test-zipkin",
Endpoint = new Uri($"http://{this.testServerHost}:{this.testServerPort}/api/v2/spans?requestId={requestId}"),
};
var zipkinExporter = new ZipkinExporter(exporterOptions);
var exportActivityProcessor = new BatchExportProcessor<Activity>(zipkinExporter);
var openTelemetrySdk = Sdk.CreateTracerProviderBuilder()
.AddSource(ActivitySourceName)
.AddProcessor(testActivityProcessor)
.AddProcessor(exportActivityProcessor)
.AddHttpClientInstrumentation()
.Build();
var source = new ActivitySource(ActivitySourceName);
var activity = source.StartActivity("Test Zipkin Activity");
activity?.Stop();
// We call ForceFlush on the exporter twice, so that in the event
// of a regression, this should give any operations performed in
// the Zipkin exporter itself enough time to be instrumented and
// loop back through the exporter.
exportActivityProcessor.ForceFlush();
exportActivityProcessor.ForceFlush();
Assert.Equal(1, endCalledCount);
}
[Theory]
[InlineData(true, false, false)]
[InlineData(false, false, false)]
[InlineData(false, true, false)]
[InlineData(false, false, true)]
[InlineData(false, false, false, StatusCode.Ok)]
[InlineData(false, false, false, StatusCode.Ok, null, true)]
[InlineData(false, false, false, StatusCode.Error)]
[InlineData(false, false, false, StatusCode.Error, "Error description")]
public void IntegrationTest(
bool useShortTraceIds,
bool useTestResource,
bool isRootSpan,
StatusCode statusCode = StatusCode.Unset,
string statusDescription = null,
bool addErrorTag = false)
{
var status = statusCode switch
{
StatusCode.Unset => Status.Unset,
StatusCode.Ok => Status.Ok,
StatusCode.Error => Status.Error,
_ => throw new InvalidOperationException(),
};
if (!string.IsNullOrEmpty(statusDescription))
{
status = status.WithDescription(statusDescription);
}
Guid requestId = Guid.NewGuid();
ZipkinExporter exporter = new ZipkinExporter(
new ZipkinExporterOptions
{
Endpoint = new Uri($"http://{this.testServerHost}:{this.testServerPort}/api/v2/spans?requestId={requestId}"),
UseShortTraceIds = useShortTraceIds,
});
var serviceName = ZipkinExporterOptions.DefaultServiceName;
var resoureTags = string.Empty;
var activity = CreateTestActivity(isRootSpan: isRootSpan, status: status);
if (useTestResource)
{
serviceName = "MyService";
exporter.SetLocalEndpointFromResource(ResourceBuilder.CreateEmpty().AddAttributes(new Dictionary<string, object>
{
[ResourceSemanticConventions.AttributeServiceName] = serviceName,
["service.tag"] = "hello world",
}).Build());
resoureTags = "\"service.tag\":\"hello world\",";
}
else
{
exporter.SetLocalEndpointFromResource(Resource.Empty);
}
if (addErrorTag)
{
activity.SetTag(ZipkinActivityConversionExtensions.ZipkinErrorFlagTagName, "This should be removed.");
}
var processor = new SimpleExportProcessor<Activity>(exporter);
processor.OnEnd(activity);
var context = activity.Context;
var timestamp = activity.StartTimeUtc.ToEpochMicroseconds();
var eventTimestamp = activity.Events.First().Timestamp.ToEpochMicroseconds();
StringBuilder ipInformation = new StringBuilder();
if (!string.IsNullOrEmpty(exporter.LocalEndpoint.Ipv4))
{
ipInformation.Append($@",""ipv4"":""{exporter.LocalEndpoint.Ipv4}""");
}
if (!string.IsNullOrEmpty(exporter.LocalEndpoint.Ipv6))
{
ipInformation.Append($@",""ipv6"":""{exporter.LocalEndpoint.Ipv6}""");
}
var parentId = isRootSpan ? string.Empty : $@"""parentId"":""{ZipkinActivityConversionExtensions.EncodeSpanId(activity.ParentSpanId)}"",";
var traceId = useShortTraceIds ? TraceId.Substring(TraceId.Length - 16, 16) : TraceId;
string statusTag;
string errorTag = string.Empty;
switch (statusCode)
{
case StatusCode.Ok:
statusTag = $@"""{SpanAttributeConstants.StatusCodeKey}"":""OK"",";
break;
case StatusCode.Unset:
statusTag = string.Empty;
break;
case StatusCode.Error:
statusTag = $@"""{SpanAttributeConstants.StatusCodeKey}"":""ERROR"",";
errorTag = $@",""{ZipkinActivityConversionExtensions.ZipkinErrorFlagTagName}"":""{statusDescription}""";
break;
default:
throw new NotSupportedException();
}
Assert.Equal(
$@"[{{""traceId"":""{traceId}"",""name"":""Name"",{parentId}""id"":""{ZipkinActivityConversionExtensions.EncodeSpanId(context.SpanId)}"",""kind"":""CLIENT"",""timestamp"":{timestamp},""duration"":60000000,""localEndpoint"":{{""serviceName"":""{serviceName}""{ipInformation}}},""remoteEndpoint"":{{""serviceName"":""http://localhost:44312/""}},""annotations"":[{{""timestamp"":{eventTimestamp},""value"":""Event1""}},{{""timestamp"":{eventTimestamp},""value"":""Event2""}}],""tags"":{{{resoureTags}""stringKey"":""value"",""longKey"":""1"",""longKey2"":""1"",""doubleKey"":""1"",""doubleKey2"":""1"",""longArrayKey"":""1,2"",""boolKey"":""true"",""boolArrayKey"":""true,false"",""http.host"":""http://localhost:44312/"",{statusTag}""otel.library.name"":""CreateTestActivity"",""peer.service"":""http://localhost:44312/""{errorTag}}}}}]",
Responses[requestId]);
}
internal static Activity CreateTestActivity(
bool isRootSpan = false,
bool setAttributes = true,
Dictionary<string, object> additionalAttributes = null,
bool addEvents = true,
bool addLinks = true,
Resource resource = null,
ActivityKind kind = ActivityKind.Client,
Status? status = null)
{
var startTimestamp = DateTime.UtcNow;
var endTimestamp = startTimestamp.AddSeconds(60);
var eventTimestamp = DateTime.UtcNow;
var traceId = ActivityTraceId.CreateFromString("e8ea7e9ac72de94e91fabc613f9686b2".AsSpan());
var parentSpanId = isRootSpan ? default : ActivitySpanId.CreateFromBytes(new byte[] { 12, 23, 34, 45, 56, 67, 78, 89 });
var attributes = new Dictionary<string, object>
{
{ "stringKey", "value" },
{ "longKey", 1L },
{ "longKey2", 1 },
{ "doubleKey", 1D },
{ "doubleKey2", 1F },
{ "longArrayKey", new long[] { 1, 2 } },
{ "boolKey", true },
{ "boolArrayKey", new bool[] { true, false } },
{ "http.host", "http://localhost:44312/" }, // simulating instrumentation tag adding http.host
};
if (additionalAttributes != null)
{
foreach (var attribute in additionalAttributes)
{
if (!attributes.ContainsKey(attribute.Key))
{
attributes.Add(attribute.Key, attribute.Value);
}
}
}
var events = new List<ActivityEvent>
{
new ActivityEvent(
"Event1",
eventTimestamp,
new ActivityTagsCollection(new Dictionary<string, object>
{
{ "key", "value" },
})),
new ActivityEvent(
"Event2",
eventTimestamp,
new ActivityTagsCollection(new Dictionary<string, object>
{
{ "key", "value" },
})),
};
var linkedSpanId = ActivitySpanId.CreateFromString("888915b6286b9c41".AsSpan());
var activitySource = new ActivitySource(nameof(CreateTestActivity));
var tags = setAttributes ?
attributes.Select(kvp => new KeyValuePair<string, object>(kvp.Key, kvp.Value))
: null;
var links = addLinks ?
new[]
{
new ActivityLink(new ActivityContext(
traceId,
linkedSpanId,
ActivityTraceFlags.Recorded)),
}
: null;
var activity = activitySource.StartActivity(
"Name",
kind,
parentContext: new ActivityContext(traceId, parentSpanId, ActivityTraceFlags.Recorded),
tags,
links,
startTime: startTimestamp);
if (addEvents)
{
foreach (var evnt in events)
{
activity.AddEvent(evnt);
}
}
if (status.HasValue)
{
activity.SetStatus(status.Value);
}
activity.SetEndTime(endTimestamp);
activity.Stop();
return activity;
}
}
}
| 1 | 18,688 | Currently in our Zipkin tests, only one set of passed parameters instantiates a Resource and checks that its tags are populated. I left the if (useTestResource) clause in the code in case we want to return to the old resource-tag checking, but if it makes more sense I can remove that parameter entirely. | open-telemetry-opentelemetry-dotnet | .cs |
@@ -944,6 +944,11 @@ public class ZkStateReader implements SolrCloseable {
return null;
}
+ public boolean isNodeLive(String node) {
+ return liveNodes.contains(node);
+
+ }
+
/**
* Get shard leader properties, with retry if none exist.
*/ | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.common.cloud;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Predicate;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;
import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
import org.apache.solr.common.AlreadyClosedException;
import org.apache.solr.common.Callable;
import org.apache.solr.common.SolrCloseable;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.params.AutoScalingParams;
import org.apache.solr.common.params.CollectionAdminParams;
import org.apache.solr.common.params.CoreAdminParams;
import org.apache.solr.common.util.ExecutorUtil;
import org.apache.solr.common.util.ObjectReleaseTracker;
import org.apache.solr.common.util.Pair;
import org.apache.solr.common.util.SolrjNamedThreadFactory;
import org.apache.solr.common.util.Utils;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.Watcher.Event.EventType;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static java.util.Collections.EMPTY_MAP;
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
import static java.util.Collections.emptySortedSet;
import static org.apache.solr.common.util.Utils.fromJSON;
public class ZkStateReader implements SolrCloseable {
public static final int STATE_UPDATE_DELAY = Integer.getInteger("solr.OverseerStateUpdateDelay", 2000); // delay between cloud state updates
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
public static final String BASE_URL_PROP = "base_url";
public static final String NODE_NAME_PROP = "node_name";
public static final String CORE_NODE_NAME_PROP = "core_node_name";
public static final String ROLES_PROP = "roles";
public static final String STATE_PROP = "state";
// if this flag equals to false and the replica does not exist in cluster state, set state op become no op (default is true)
public static final String FORCE_SET_STATE_PROP = "force_set_state";
/**
* SolrCore name.
*/
public static final String CORE_NAME_PROP = "core";
public static final String COLLECTION_PROP = "collection";
public static final String ELECTION_NODE_PROP = "election_node";
public static final String SHARD_ID_PROP = "shard";
public static final String REPLICA_PROP = "replica";
public static final String SHARD_RANGE_PROP = "shard_range";
public static final String SHARD_STATE_PROP = "shard_state";
public static final String SHARD_PARENT_PROP = "shard_parent";
public static final String NUM_SHARDS_PROP = "numShards";
public static final String LEADER_PROP = "leader";
public static final String SHARED_STORAGE_PROP = "shared_storage";
public static final String PROPERTY_PROP = "property";
public static final String PROPERTY_PROP_PREFIX = "property.";
public static final String PROPERTY_VALUE_PROP = "property.value";
public static final String MAX_AT_ONCE_PROP = "maxAtOnce";
public static final String MAX_WAIT_SECONDS_PROP = "maxWaitSeconds";
public static final String STATE_TIMESTAMP_PROP = "stateTimestamp";
public static final String COLLECTIONS_ZKNODE = "/collections";
public static final String LIVE_NODES_ZKNODE = "/live_nodes";
public static final String ALIASES = "/aliases.json";
public static final String CLUSTER_STATE = "/clusterstate.json";
public static final String CLUSTER_PROPS = "/clusterprops.json";
public static final String COLLECTION_PROPS_ZKNODE = "collectionprops.json";
public static final String REJOIN_AT_HEAD_PROP = "rejoinAtHead";
public static final String SOLR_SECURITY_CONF_PATH = "/security.json";
public static final String SOLR_AUTOSCALING_CONF_PATH = "/autoscaling.json";
public static final String SOLR_AUTOSCALING_EVENTS_PATH = "/autoscaling/events";
public static final String SOLR_AUTOSCALING_TRIGGER_STATE_PATH = "/autoscaling/triggerState";
public static final String SOLR_AUTOSCALING_NODE_ADDED_PATH = "/autoscaling/nodeAdded";
public static final String SOLR_AUTOSCALING_NODE_LOST_PATH = "/autoscaling/nodeLost";
public static final String SOLR_PKGS_PATH = "/packages.json";
public static final String DEFAULT_SHARD_PREFERENCES = "defaultShardPreferences";
public static final String REPLICATION_FACTOR = "replicationFactor";
public static final String MAX_SHARDS_PER_NODE = "maxShardsPerNode";
public static final String AUTO_ADD_REPLICAS = "autoAddReplicas";
public static final String MAX_CORES_PER_NODE = "maxCoresPerNode";
public static final String PULL_REPLICAS = "pullReplicas";
public static final String NRT_REPLICAS = "nrtReplicas";
public static final String TLOG_REPLICAS = "tlogReplicas";
public static final String READ_ONLY = "readOnly";
public static final String ROLES = "/roles.json";
public static final String CONFIGS_ZKNODE = "/configs";
public final static String CONFIGNAME_PROP = "configName";
public static final String LEGACY_CLOUD = "legacyCloud";
public static final String SAMPLE_PERCENTAGE = "samplePercentage";
/**
* @deprecated use {@link org.apache.solr.common.params.CollectionAdminParams#DEFAULTS} instead.
*/
@Deprecated
public static final String COLLECTION_DEF = "collectionDefaults";
public static final String URL_SCHEME = "urlScheme";
private static final String SOLR_ENVIRONMENT = "environment";
public static final String REPLICA_TYPE = "type";
/**
* A view of the current state of all collections; combines all the different state sources into a single view.
*/
protected volatile ClusterState clusterState;
private static final int GET_LEADER_RETRY_INTERVAL_MS = 50;
private static final int GET_LEADER_RETRY_DEFAULT_TIMEOUT = Integer.parseInt(System.getProperty("zkReaderGetLeaderRetryTimeoutMs", "4000"));
public static final String LEADER_ELECT_ZKNODE = "leader_elect";
public static final String SHARD_LEADERS_ZKNODE = "leaders";
public static final String ELECTION_NODE = "election";
/**
* Collections tracked in the legacy (shared) state format, reflects the contents of clusterstate.json.
*/
private Map<String, ClusterState.CollectionRef> legacyCollectionStates = emptyMap();
/**
* Last seen ZK version of clusterstate.json.
*/
private int legacyClusterStateVersion = 0;
/**
* Collections with format2 state.json, "interesting" and actively watched.
*/
private final ConcurrentHashMap<String, DocCollection> watchedCollectionStates = new ConcurrentHashMap<>();
/**
* Collections with format2 state.json, not "interesting" and not actively watched.
*/
private final ConcurrentHashMap<String, LazyCollectionRef> lazyCollectionStates = new ConcurrentHashMap<>();
/**
* Collection properties being actively watched
*/
private final ConcurrentHashMap<String, VersionedCollectionProps> watchedCollectionProps = new ConcurrentHashMap<>();
/**
* Collection properties being actively watched
*/
private final ConcurrentHashMap<String, PropsWatcher> collectionPropsWatchers = new ConcurrentHashMap<>();
private volatile SortedSet<String> liveNodes = emptySortedSet();
private volatile Map<String, Object> clusterProperties = Collections.emptyMap();
private final ZkConfigManager configManager;
private ConfigData securityData;
private final Runnable securityNodeListener;
private ConcurrentHashMap<String, CollectionWatch<DocCollectionWatcher>> collectionWatches = new ConcurrentHashMap<>();
// named this observers so there's less confusion between CollectionPropsWatcher map and the PropsWatcher map.
private ConcurrentHashMap<String, CollectionWatch<CollectionPropsWatcher>> collectionPropsObservers = new ConcurrentHashMap<>();
private Set<CloudCollectionsListener> cloudCollectionsListeners = ConcurrentHashMap.newKeySet();
private final ExecutorService notifications = ExecutorUtil.newMDCAwareCachedThreadPool("watches");
private Set<LiveNodesListener> liveNodesListeners = ConcurrentHashMap.newKeySet();
private Set<ClusterPropertiesListener> clusterPropertiesListeners = ConcurrentHashMap.newKeySet();
/**
* Used to submit notifications to Collection Properties watchers in order
**/
private final ExecutorService collectionPropsNotifications = ExecutorUtil.newMDCAwareSingleThreadExecutor(new SolrjNamedThreadFactory("collectionPropsNotifications"));
private static final long LAZY_CACHE_TIME = TimeUnit.NANOSECONDS.convert(STATE_UPDATE_DELAY, TimeUnit.MILLISECONDS);
private Future<?> collectionPropsCacheCleaner; // only kept to identify if the cleaner has already been started.
/**
* Get current {@link AutoScalingConfig}.
*
* @return current configuration from <code>autoscaling.json</code>. NOTE:
* this data is retrieved from ZK on each call.
*/
public AutoScalingConfig getAutoScalingConfig() throws KeeperException, InterruptedException {
return getAutoScalingConfig(null);
}
/**
* Get current {@link AutoScalingConfig}.
*
* @param watcher optional {@link Watcher} to set on a znode to watch for config changes.
* @return current configuration from <code>autoscaling.json</code>. NOTE:
* this data is retrieved from ZK on each call.
*/
public AutoScalingConfig getAutoScalingConfig(Watcher watcher) throws KeeperException, InterruptedException {
Stat stat = new Stat();
Map<String, Object> map = new HashMap<>();
try {
byte[] bytes = zkClient.getData(SOLR_AUTOSCALING_CONF_PATH, watcher, stat, true);
if (bytes != null && bytes.length > 0) {
map = (Map<String, Object>) fromJSON(bytes);
}
} catch (KeeperException.NoNodeException e) {
// ignore
}
map.put(AutoScalingParams.ZK_VERSION, stat.getVersion());
return new AutoScalingConfig(map);
}
private static class CollectionWatch<T> {
int coreRefCount = 0;
Set<T> stateWatchers = ConcurrentHashMap.newKeySet();
public boolean canBeRemoved() {
return coreRefCount + stateWatchers.size() == 0;
}
}
public static final Set<String> KNOWN_CLUSTER_PROPS = Set.of(
LEGACY_CLOUD,
URL_SCHEME,
AUTO_ADD_REPLICAS,
CoreAdminParams.BACKUP_LOCATION,
DEFAULT_SHARD_PREFERENCES,
MAX_CORES_PER_NODE,
SAMPLE_PERCENTAGE,
SOLR_ENVIRONMENT,
CollectionAdminParams.DEFAULTS);
/**
* Returns config set name for collection.
*
* @param collection to return config set name for
*/
public String readConfigName(String collection) throws KeeperException {
String configName = null;
String path = COLLECTIONS_ZKNODE + "/" + collection;
log.debug("Loading collection config from: [{}]", path);
try {
if (zkClient.exists(path, true) == false) {
log.warn("No collection found at path {}.", path);
throw new KeeperException.NoNodeException("No collection found at path: " + path);
}
byte[] data = zkClient.getData(path, null, null, true);
if (data == null) {
log.warn("No config data found at path {}.", path);
throw new KeeperException.NoNodeException("No config data found at path: " + path);
}
ZkNodeProps props = ZkNodeProps.load(data);
configName = props.getStr(CONFIGNAME_PROP);
if (configName == null) {
log.warn("No config data found at path{}. ", path);
throw new KeeperException.NoNodeException("No config data found at path: " + path);
}
String configPath = CONFIGS_ZKNODE + "/" + configName;
if (zkClient.exists(configPath, true) == false) {
log.error("Specified config=[{}] does not exist in ZooKeeper at location=[{}]", configName, configPath);
throw new KeeperException.NoNodeException("Specified config=[" + configName + "] does not exist in ZooKeeper at location=[" + configPath + "]");
} else {
log.debug("path=[{}] [{}]=[{}] specified config exists in ZooKeeper", configPath, CONFIGNAME_PROP, configName);
}
} catch (InterruptedException e) {
SolrZkClient.checkInterrupted(e);
log.warn("Thread interrupted when loading config name for collection {}", collection);
throw new SolrException(ErrorCode.SERVER_ERROR, "Thread interrupted when loading config name for collection " + collection, e);
}
return configName;
}
private final SolrZkClient zkClient;
private final boolean closeClient;
private volatile boolean closed = false;
private Set<CountDownLatch> waitLatches = ConcurrentHashMap.newKeySet();
public ZkStateReader(SolrZkClient zkClient) {
this(zkClient, null);
}
public ZkStateReader(SolrZkClient zkClient, Runnable securityNodeListener) {
this.zkClient = zkClient;
this.configManager = new ZkConfigManager(zkClient);
this.closeClient = false;
this.securityNodeListener = securityNodeListener;
assert ObjectReleaseTracker.track(this);
}
public ZkStateReader(String zkServerAddress, int zkClientTimeout, int zkClientConnectTimeout) {
this.zkClient = new SolrZkClient(zkServerAddress, zkClientTimeout, zkClientConnectTimeout,
// on reconnect, reload cloud info
new OnReconnect() {
@Override
public void command() {
try {
ZkStateReader.this.createClusterStateWatchersAndUpdate();
} catch (KeeperException e) {
log.error("A ZK error has occurred", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "A ZK error has occurred", e);
} catch (InterruptedException e) {
// Restore the interrupted status
Thread.currentThread().interrupt();
log.error("Interrupted", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "Interrupted", e);
}
}
});
this.configManager = new ZkConfigManager(zkClient);
this.closeClient = true;
this.securityNodeListener = null;
assert ObjectReleaseTracker.track(this);
}
public ZkConfigManager getConfigManager() {
return configManager;
}
/**
* Forcibly refresh cluster state from ZK. Do this only to avoid race conditions because it's expensive.
* <p>
* It is cheaper to call {@link #forceUpdateCollection(String)} on a single collection if you must.
*
* @lucene.internal
*/
public void forciblyRefreshAllClusterStateSlow() throws KeeperException, InterruptedException {
synchronized (getUpdateLock()) {
if (clusterState == null) {
// Never initialized, just run normal initialization.
createClusterStateWatchersAndUpdate();
return;
}
// No need to set watchers because we should already have watchers registered for everything.
refreshCollectionList(null);
refreshLiveNodes(null);
refreshLegacyClusterState(null);
// Need a copy so we don't delete from what we're iterating over.
Collection<String> safeCopy = new ArrayList<>(watchedCollectionStates.keySet());
Set<String> updatedCollections = new HashSet<>();
for (String coll : safeCopy) {
DocCollection newState = fetchCollectionState(coll, null);
if (updateWatchedCollection(coll, newState)) {
updatedCollections.add(coll);
}
}
constructState(updatedCollections);
}
}
/**
* Forcibly refresh a collection's internal state from ZK. Try to avoid having to resort to this when
* a better design is possible.
*/
//TODO shouldn't we call ZooKeeper.sync() at the right places to prevent reading a stale value? We do so for aliases.
public void forceUpdateCollection(String collection) throws KeeperException, InterruptedException {
synchronized (getUpdateLock()) {
if (clusterState == null) {
log.warn("ClusterState watchers have not been initialized");
return;
}
ClusterState.CollectionRef ref = clusterState.getCollectionRef(collection);
if (ref == null || legacyCollectionStates.containsKey(collection)) {
// We either don't know anything about this collection (maybe it's new?) or it's legacy.
// First update the legacy cluster state.
log.debug("Checking legacy cluster state for collection {}", collection);
refreshLegacyClusterState(null);
if (!legacyCollectionStates.containsKey(collection)) {
// No dice, see if a new collection just got created.
LazyCollectionRef tryLazyCollection = new LazyCollectionRef(collection);
if (tryLazyCollection.get() != null) {
// What do you know, it exists!
log.debug("Adding lazily-loaded reference for collection {}", collection);
lazyCollectionStates.putIfAbsent(collection, tryLazyCollection);
constructState(Collections.singleton(collection));
}
}
} else if (ref.isLazilyLoaded()) {
log.debug("Refreshing lazily-loaded state for collection {}", collection);
if (ref.get() != null) {
return;
}
// Edge case: if there's no external collection, try refreshing legacy cluster state in case it's there.
refreshLegacyClusterState(null);
} else if (watchedCollectionStates.containsKey(collection)) {
// Exists as a watched collection, force a refresh.
log.debug("Forcing refresh of watched collection state for {}", collection);
DocCollection newState = fetchCollectionState(collection, null);
if (updateWatchedCollection(collection, newState)) {
constructState(Collections.singleton(collection));
}
} else {
log.error("Collection {} is not lazy or watched!", collection);
}
}
}
/**
* Refresh the set of live nodes.
*/
public void updateLiveNodes() throws KeeperException, InterruptedException {
refreshLiveNodes(null);
}
public Integer compareStateVersions(String coll, int version) {
DocCollection collection = clusterState.getCollectionOrNull(coll);
if (collection == null) return null;
if (collection.getZNodeVersion() < version) {
log.debug("Server older than client {}<{}", collection.getZNodeVersion(), version);
DocCollection nu = getCollectionLive(this, coll);
if (nu == null) return -1;
if (nu.getZNodeVersion() > collection.getZNodeVersion()) {
if (updateWatchedCollection(coll, nu)) {
synchronized (getUpdateLock()) {
constructState(Collections.singleton(coll));
}
}
collection = nu;
}
}
if (collection.getZNodeVersion() == version) {
return null;
}
log.debug("Wrong version from client [{}]!=[{}]", version, collection.getZNodeVersion());
return collection.getZNodeVersion();
}
public synchronized void createClusterStateWatchersAndUpdate() throws KeeperException,
InterruptedException {
// We need to fetch the current cluster state and the set of live nodes
log.debug("Updating cluster state from ZooKeeper... ");
// Sanity check ZK structure.
if (!zkClient.exists(CLUSTER_STATE, true)) {
throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
"Cannot connect to cluster at " + zkClient.getZkServerAddress() + ": cluster not found/not ready");
}
// on reconnect of SolrZkClient force refresh and re-add watches.
loadClusterProperties();
refreshLiveNodes(new LiveNodeWatcher());
refreshLegacyClusterState(new LegacyClusterStateWatcher());
refreshStateFormat2Collections();
refreshCollectionList(new CollectionsChildWatcher());
refreshAliases(aliasesManager);
if (securityNodeListener != null) {
addSecurityNodeWatcher(pair -> {
ConfigData cd = new ConfigData();
cd.data = pair.first() == null || pair.first().length == 0 ? EMPTY_MAP : Utils.getDeepCopy((Map) fromJSON(pair.first()), 4, false);
cd.version = pair.second() == null ? -1 : pair.second().getVersion();
securityData = cd;
securityNodeListener.run();
});
securityData = getSecurityProps(true);
}
collectionPropsObservers.forEach((k, v) -> {
collectionPropsWatchers.computeIfAbsent(k, PropsWatcher::new).refreshAndWatch(true);
});
}
private void addSecurityNodeWatcher(final Callable<Pair<byte[], Stat>> callback)
throws KeeperException, InterruptedException {
zkClient.exists(SOLR_SECURITY_CONF_PATH,
new Watcher() {
@Override
public void process(WatchedEvent event) {
// session events are not change events, and do not remove the watcher
if (EventType.None.equals(event.getType())) {
return;
}
try {
synchronized (ZkStateReader.this.getUpdateLock()) {
log.debug("Updating [{}] ... ", SOLR_SECURITY_CONF_PATH);
// remake watch
final Watcher thisWatch = this;
final Stat stat = new Stat();
final byte[] data = getZkClient().getData(SOLR_SECURITY_CONF_PATH, thisWatch, stat, true);
try {
callback.call(new Pair<>(data, stat));
} catch (Exception e) {
log.error("Error running collections node listener", e);
}
}
} catch (KeeperException.ConnectionLossException | KeeperException.SessionExpiredException e) {
log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
} catch (KeeperException e) {
log.error("A ZK error has occurred", e);
throw new ZooKeeperException(ErrorCode.SERVER_ERROR, "", e);
} catch (InterruptedException e) {
// Restore the interrupted status
Thread.currentThread().interrupt();
log.warn("Interrupted", e);
}
}
}, true);
}
/**
* Construct the total state view from all sources.
* Must hold {@link #getUpdateLock()} before calling this.
*
* @param changedCollections collections that have changed since the last call,
* and that should fire notifications
*/
private void constructState(Set<String> changedCollections) {
Set<String> liveNodes = this.liveNodes; // volatile read
// Legacy clusterstate is authoritative, for backwards compatibility.
// To move a collection's state to format2, first create the new state2 format node, then remove legacy entry.
Map<String, ClusterState.CollectionRef> result = new LinkedHashMap<>(legacyCollectionStates);
// Add state format2 collections, but don't override legacy collection states.
for (Map.Entry<String, DocCollection> entry : watchedCollectionStates.entrySet()) {
result.putIfAbsent(entry.getKey(), new ClusterState.CollectionRef(entry.getValue()));
}
// Finally, add any lazy collections that aren't already accounted for.
for (Map.Entry<String, LazyCollectionRef> entry : lazyCollectionStates.entrySet()) {
result.putIfAbsent(entry.getKey(), entry.getValue());
}
this.clusterState = new ClusterState(liveNodes, result, legacyClusterStateVersion);
log.debug("clusterStateSet: legacy [{}] interesting [{}] watched [{}] lazy [{}] total [{}]",
legacyCollectionStates.keySet().size(),
collectionWatches.keySet().size(),
watchedCollectionStates.keySet().size(),
lazyCollectionStates.keySet().size(),
clusterState.getCollectionStates().size());
if (log.isTraceEnabled()) {
log.trace("clusterStateSet: legacy [{}] interesting [{}] watched [{}] lazy [{}] total [{}]",
legacyCollectionStates.keySet(),
collectionWatches.keySet(),
watchedCollectionStates.keySet(),
lazyCollectionStates.keySet(),
clusterState.getCollectionStates());
}
notifyCloudCollectionsListeners();
for (String collection : changedCollections) {
notifyStateWatchers(collection, clusterState.getCollectionOrNull(collection));
}
}
/**
* Refresh legacy (shared) clusterstate.json
*/
private void refreshLegacyClusterState(Watcher watcher) throws KeeperException, InterruptedException {
try {
final Stat stat = new Stat();
final byte[] data = zkClient.getData(CLUSTER_STATE, watcher, stat, true);
final ClusterState loadedData = ClusterState.load(stat.getVersion(), data, emptySet(), CLUSTER_STATE);
synchronized (getUpdateLock()) {
if (this.legacyClusterStateVersion >= stat.getVersion()) {
// Nothing to do, someone else updated same or newer.
return;
}
Set<String> updatedCollections = new HashSet<>();
for (String coll : this.collectionWatches.keySet()) {
ClusterState.CollectionRef ref = this.legacyCollectionStates.get(coll);
// legacy collections are always in-memory
DocCollection oldState = ref == null ? null : ref.get();
ClusterState.CollectionRef newRef = loadedData.getCollectionStates().get(coll);
DocCollection newState = newRef == null ? null : newRef.get();
if (newState == null) {
// check that we haven't just migrated
newState = watchedCollectionStates.get(coll);
}
if (!Objects.equals(oldState, newState)) {
updatedCollections.add(coll);
}
}
this.legacyCollectionStates = loadedData.getCollectionStates();
this.legacyClusterStateVersion = stat.getVersion();
constructState(updatedCollections);
}
} catch (KeeperException.NoNodeException e) {
// Ignore missing legacy clusterstate.json.
synchronized (getUpdateLock()) {
this.legacyCollectionStates = emptyMap();
this.legacyClusterStateVersion = 0;
constructState(Collections.emptySet());
}
}
}
/**
* Refresh state format2 collections.
*/
private void refreshStateFormat2Collections() {
for (String coll : collectionWatches.keySet()) {
new StateWatcher(coll).refreshAndWatch();
}
}
// We don't get a Stat or track versions on getChildren() calls, so force linearization.
private final Object refreshCollectionListLock = new Object();
/**
* Search for any lazy-loadable state format2 collections.
* <p>
* A stateFormat=1 collection which is not interesting to us can also
* be put into the {@link #lazyCollectionStates} map here. But that is okay
* because {@link #constructState(Set)} will give priority to collections in the
* shared collection state over this map.
* In fact this is a clever way to avoid doing a ZK exists check on
   * the /collections/collection_name/state.json znode.
   * Such an exists check is done in {@link ClusterState#hasCollection(String)} and
   * {@link ClusterState#getCollectionsMap()}, so those methods
   * have a safeguard against exposing wrong collection names to the users.
*/
private void refreshCollectionList(Watcher watcher) throws KeeperException, InterruptedException {
synchronized (refreshCollectionListLock) {
List<String> children = null;
try {
children = zkClient.getChildren(COLLECTIONS_ZKNODE, watcher, true);
} catch (KeeperException.NoNodeException e) {
log.warn("Error fetching collection names: [{}]", e.getMessage());
// fall through
}
if (children == null || children.isEmpty()) {
lazyCollectionStates.clear();
return;
}
// Don't lock getUpdateLock() here, we don't need it and it would cause deadlock.
// Don't mess with watchedCollections, they should self-manage.
// First, drop any children that disappeared.
this.lazyCollectionStates.keySet().retainAll(children);
for (String coll : children) {
// We will create an eager collection for any interesting collections, so don't add to lazy.
if (!collectionWatches.containsKey(coll)) {
// Double check contains just to avoid allocating an object.
LazyCollectionRef existing = lazyCollectionStates.get(coll);
if (existing == null) {
lazyCollectionStates.putIfAbsent(coll, new LazyCollectionRef(coll));
}
}
}
}
}
// We don't get a Stat or track versions on getChildren() calls, so force linearization.
private final Object refreshCollectionsSetLock = new Object();
// Ensures that only the latest getChildren fetch gets applied.
private final AtomicReference<Set<String>> lastFetchedCollectionSet = new AtomicReference<>();
/**
* Register a CloudCollectionsListener to be called when the set of collections within a cloud changes.
*/
public void registerCloudCollectionsListener(CloudCollectionsListener cloudCollectionsListener) {
cloudCollectionsListeners.add(cloudCollectionsListener);
notifyNewCloudCollectionsListener(cloudCollectionsListener);
}
/**
* Remove a registered CloudCollectionsListener.
*/
public void removeCloudCollectionsListener(CloudCollectionsListener cloudCollectionsListener) {
cloudCollectionsListeners.remove(cloudCollectionsListener);
}
private void notifyNewCloudCollectionsListener(CloudCollectionsListener listener) {
listener.onChange(Collections.emptySet(), lastFetchedCollectionSet.get());
}
private void notifyCloudCollectionsListeners() {
notifyCloudCollectionsListeners(false);
}
private void notifyCloudCollectionsListeners(boolean notifyIfSame) {
synchronized (refreshCollectionsSetLock) {
final Set<String> newCollections = getCurrentCollections();
final Set<String> oldCollections = lastFetchedCollectionSet.getAndSet(newCollections);
if (!newCollections.equals(oldCollections) || notifyIfSame) {
cloudCollectionsListeners.forEach(listener -> listener.onChange(oldCollections, newCollections));
}
}
}
private Set<String> getCurrentCollections() {
Set<String> collections = new HashSet<>();
collections.addAll(legacyCollectionStates.keySet());
collections.addAll(watchedCollectionStates.keySet());
collections.addAll(lazyCollectionStates.keySet());
return collections;
}
private class LazyCollectionRef extends ClusterState.CollectionRef {
private final String collName;
private long lastUpdateTime;
private DocCollection cachedDocCollection;
public LazyCollectionRef(String collName) {
super(null);
this.collName = collName;
this.lastUpdateTime = -1;
}
@Override
public synchronized DocCollection get(boolean allowCached) {
gets.incrementAndGet();
if (!allowCached || lastUpdateTime < 0 || System.nanoTime() - lastUpdateTime > LAZY_CACHE_TIME) {
boolean shouldFetch = true;
if (cachedDocCollection != null) {
Stat exists = null;
try {
exists = zkClient.exists(getCollectionPath(collName), null, true);
} catch (Exception e) {
}
if (exists != null && exists.getVersion() == cachedDocCollection.getZNodeVersion()) {
shouldFetch = false;
}
}
if (shouldFetch) {
cachedDocCollection = getCollectionLive(ZkStateReader.this, collName);
lastUpdateTime = System.nanoTime();
}
}
return cachedDocCollection;
}
@Override
public boolean isLazilyLoaded() {
return true;
}
@Override
public String toString() {
return "LazyCollectionRef(" + collName + ")";
}
}
// We don't get a Stat or track versions on getChildren() calls, so force linearization.
private final Object refreshLiveNodesLock = new Object();
// Ensures that only the latest getChildren fetch gets applied.
private final AtomicReference<SortedSet<String>> lastFetchedLiveNodes = new AtomicReference<>();
/**
* Refresh live_nodes.
*/
private void refreshLiveNodes(Watcher watcher) throws KeeperException, InterruptedException {
synchronized (refreshLiveNodesLock) {
SortedSet<String> newLiveNodes;
try {
List<String> nodeList = zkClient.getChildren(LIVE_NODES_ZKNODE, watcher, true);
newLiveNodes = new TreeSet<>(nodeList);
} catch (KeeperException.NoNodeException e) {
newLiveNodes = emptySortedSet();
}
lastFetchedLiveNodes.set(newLiveNodes);
}
// Can't lock getUpdateLock() until we release the other, it would cause deadlock.
SortedSet<String> oldLiveNodes, newLiveNodes;
synchronized (getUpdateLock()) {
newLiveNodes = lastFetchedLiveNodes.getAndSet(null);
if (newLiveNodes == null) {
// Someone else won the race to apply the last update, just exit.
return;
}
oldLiveNodes = this.liveNodes;
this.liveNodes = newLiveNodes;
if (clusterState != null) {
clusterState.setLiveNodes(newLiveNodes);
}
}
if (oldLiveNodes.size() != newLiveNodes.size()) {
log.info("Updated live nodes from ZooKeeper... ({}) -> ({})", oldLiveNodes.size(), newLiveNodes.size());
}
if (log.isDebugEnabled()) {
log.debug("Updated live nodes from ZooKeeper... {} -> {}", oldLiveNodes, newLiveNodes);
}
if (!oldLiveNodes.equals(newLiveNodes)) { // fire listeners
liveNodesListeners.forEach(listener -> {
if (listener.onChange(new TreeSet<>(oldLiveNodes), new TreeSet<>(newLiveNodes))) {
removeLiveNodesListener(listener);
}
});
}
}
public void registerClusterPropertiesListener(ClusterPropertiesListener listener) {
// fire it once with current properties
if (listener.onChange(getClusterProperties())) {
removeClusterPropertiesListener(listener);
} else {
clusterPropertiesListeners.add(listener);
}
}
public void removeClusterPropertiesListener(ClusterPropertiesListener listener) {
clusterPropertiesListeners.remove(listener);
}
  public void registerLiveNodesListener(LiveNodesListener listener) {
    // fire it once with current live nodes
    if (listener.onChange(new TreeSet<>(getClusterState().getLiveNodes()), new TreeSet<>(getClusterState().getLiveNodes()))) {
      removeLiveNodesListener(listener);
    } else {
      liveNodesListeners.add(listener);
    }
  }
public void removeLiveNodesListener(LiveNodesListener listener) {
liveNodesListeners.remove(listener);
}
/**
* @return information about the cluster from ZooKeeper
*/
public ClusterState getClusterState() {
return clusterState;
}
public Object getUpdateLock() {
return this;
}
public void close() {
this.closed = true;
notifications.shutdownNow();
waitLatches.parallelStream().forEach(c -> {
c.countDown();
});
ExecutorUtil.shutdownAndAwaitTermination(notifications);
ExecutorUtil.shutdownAndAwaitTermination(collectionPropsNotifications);
if (closeClient) {
zkClient.close();
}
assert ObjectReleaseTracker.release(this);
}
@Override
public boolean isClosed() {
return closed;
}
public String getLeaderUrl(String collection, String shard, int timeout) throws InterruptedException {
ZkCoreNodeProps props = new ZkCoreNodeProps(getLeaderRetry(collection, shard, timeout));
return props.getCoreUrl();
}
public Replica getLeader(Set<String> liveNodes, DocCollection docCollection, String shard) {
Replica replica = docCollection != null ? docCollection.getLeader(shard) : null;
if (replica != null && liveNodes.contains(replica.getNodeName())) {
return replica;
}
return null;
}
public Replica getLeader(String collection, String shard) {
if (clusterState != null) {
DocCollection docCollection = clusterState.getCollectionOrNull(collection);
Replica replica = docCollection != null ? docCollection.getLeader(shard) : null;
if (replica != null && getClusterState().liveNodesContain(replica.getNodeName())) {
return replica;
}
}
return null;
}
/**
* Get shard leader properties, with retry if none exist.
*/
public Replica getLeaderRetry(String collection, String shard) throws InterruptedException {
return getLeaderRetry(collection, shard, GET_LEADER_RETRY_DEFAULT_TIMEOUT);
}
/**
* Get shard leader properties, with retry if none exist.
*/
public Replica getLeaderRetry(String collection, String shard, int timeout) throws InterruptedException {
AtomicReference<Replica> leader = new AtomicReference<>();
try {
waitForState(collection, timeout, TimeUnit.MILLISECONDS, (n, c) -> {
if (c == null)
return false;
Replica l = getLeader(n, c, shard);
if (l != null) {
leader.set(l);
return true;
}
return false;
});
} catch (TimeoutException e) {
throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "No registered leader was found after waiting for "
+ timeout + "ms " + ", collection: " + collection + " slice: " + shard + " saw state=" + clusterState.getCollectionOrNull(collection)
+ " with live_nodes=" + clusterState.getLiveNodes());
}
return leader.get();
}
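
  // Illustrative sketch (hypothetical helper, not part of the original API): a caller
  // typically resolves a shard leader and derives its core URL like this. The 4000ms
  // timeout is an assumption chosen for the example.
  private String exampleLeaderCoreUrl(String collection, String shard) throws InterruptedException {
    Replica leader = getLeaderRetry(collection, shard, 4000);
    return new ZkCoreNodeProps(leader).getCoreUrl();
  }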
/**
* Get path where shard leader properties live in zookeeper.
*/
public static String getShardLeadersPath(String collection, String shardId) {
return COLLECTIONS_ZKNODE + "/" + collection + "/"
+ SHARD_LEADERS_ZKNODE + (shardId != null ? ("/" + shardId)
: "") + "/leader";
}
/**
* Get path where shard leader elections ephemeral nodes are.
*/
public static String getShardLeadersElectPath(String collection, String shardId) {
return COLLECTIONS_ZKNODE + "/" + collection + "/"
+ LEADER_ELECT_ZKNODE + (shardId != null ? ("/" + shardId + "/" + ELECTION_NODE)
: "");
}
public List<ZkCoreNodeProps> getReplicaProps(String collection, String shardId, String thisCoreNodeName) {
return getReplicaProps(collection, shardId, thisCoreNodeName, null);
}
public List<ZkCoreNodeProps> getReplicaProps(String collection, String shardId, String thisCoreNodeName,
Replica.State mustMatchStateFilter) {
return getReplicaProps(collection, shardId, thisCoreNodeName, mustMatchStateFilter, null);
}
public List<ZkCoreNodeProps> getReplicaProps(String collection, String shardId, String thisCoreNodeName,
Replica.State mustMatchStateFilter, Replica.State mustNotMatchStateFilter) {
    //TODO: We don't need all of these getReplicaProps method overloads. Also, it's odd that the default is to return replicas of type TLOG and NRT only
return getReplicaProps(collection, shardId, thisCoreNodeName, mustMatchStateFilter, null, EnumSet.of(Replica.Type.TLOG, Replica.Type.NRT));
}
public List<ZkCoreNodeProps> getReplicaProps(String collection, String shardId, String thisCoreNodeName,
Replica.State mustMatchStateFilter, Replica.State mustNotMatchStateFilter, final EnumSet<Replica.Type> acceptReplicaType) {
assert thisCoreNodeName != null;
ClusterState clusterState = this.clusterState;
if (clusterState == null) {
return null;
}
final DocCollection docCollection = clusterState.getCollectionOrNull(collection);
if (docCollection == null || docCollection.getSlicesMap() == null) {
throw new ZooKeeperException(ErrorCode.BAD_REQUEST,
"Could not find collection in zk: " + collection);
}
Map<String, Slice> slices = docCollection.getSlicesMap();
Slice replicas = slices.get(shardId);
if (replicas == null) {
throw new ZooKeeperException(ErrorCode.BAD_REQUEST, "Could not find shardId in zk: " + shardId);
}
Map<String, Replica> shardMap = replicas.getReplicasMap();
List<ZkCoreNodeProps> nodes = new ArrayList<>(shardMap.size());
for (Entry<String, Replica> entry : shardMap.entrySet().stream().filter((e) -> acceptReplicaType.contains(e.getValue().getType())).collect(Collectors.toList())) {
ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(entry.getValue());
String coreNodeName = entry.getValue().getName();
if (clusterState.liveNodesContain(nodeProps.getNodeName()) && !coreNodeName.equals(thisCoreNodeName)) {
if (mustMatchStateFilter == null || mustMatchStateFilter == Replica.State.getState(nodeProps.getState())) {
if (mustNotMatchStateFilter == null || mustNotMatchStateFilter != Replica.State.getState(nodeProps.getState())) {
nodes.add(nodeProps);
}
}
}
}
if (nodes.size() == 0) {
// no replicas
return null;
}
return nodes;
}
public SolrZkClient getZkClient() {
return zkClient;
}
/**
* Get a cluster property
* <p>
* N.B. Cluster properties are updated via ZK watchers, and so may not necessarily
* be completely up-to-date. If you need to get the latest version, then use a
* {@link ClusterProperties} instance.
*
* @param key the property to read
* @param defaultValue a default value to use if no such property exists
* @param <T> the type of the property
* @return the cluster property, or a default if the property is not set
*/
@SuppressWarnings("unchecked")
public <T> T getClusterProperty(String key, T defaultValue) {
T value = (T) Utils.getObjectByPath(clusterProperties, false, key);
if (value == null)
return defaultValue;
return value;
}
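
  // Illustrative sketch (hypothetical helper, not part of the original API): reading a
  // simple cluster property with a fallback default, mirroring how getBaseUrlForNodeName
  // consumes the URL scheme below.
  private String exampleUrlScheme() {
    return getClusterProperty(URL_SCHEME, "http");
  }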
/**
* Same as the above but allows a full json path as a list of parts
*
   * @param keyPath path to the property, for example ["collectionDefaults", "numShards"]
* @param defaultValue a default value to use if no such property exists
* @return the cluster property, or a default if the property is not set
*/
public <T> T getClusterProperty(List<String> keyPath, T defaultValue) {
T value = (T) Utils.getObjectByPath(clusterProperties, false, keyPath);
if (value == null)
return defaultValue;
return value;
}
/**
* Get all cluster properties for this cluster
* <p>
* N.B. Cluster properties are updated via ZK watchers, and so may not necessarily
* be completely up-to-date. If you need to get the latest version, then use a
* {@link ClusterProperties} instance.
*
* @return a Map of cluster properties
*/
public Map<String, Object> getClusterProperties() {
return Collections.unmodifiableMap(clusterProperties);
}
private final Watcher clusterPropertiesWatcher = event -> {
// session events are not change events, and do not remove the watcher
if (Watcher.Event.EventType.None.equals(event.getType())) {
return;
}
loadClusterProperties();
};
@SuppressWarnings("unchecked")
private void loadClusterProperties() {
try {
while (true) {
try {
byte[] data = zkClient.getData(ZkStateReader.CLUSTER_PROPS, clusterPropertiesWatcher, new Stat(), true);
this.clusterProperties = ClusterProperties.convertCollectionDefaultsToNestedFormat((Map<String, Object>) Utils.fromJSON(data));
log.debug("Loaded cluster properties: {}", this.clusterProperties);
for (ClusterPropertiesListener listener : clusterPropertiesListeners) {
listener.onChange(getClusterProperties());
}
return;
} catch (KeeperException.NoNodeException e) {
this.clusterProperties = Collections.emptyMap();
log.debug("Loaded empty cluster properties");
// set an exists watch, and if the node has been created since the last call,
// read the data again
if (zkClient.exists(ZkStateReader.CLUSTER_PROPS, clusterPropertiesWatcher, true) == null)
return;
}
}
} catch (KeeperException | InterruptedException e) {
log.error("Error reading cluster properties from zookeeper", SolrZkClient.checkInterrupted(e));
}
}
/**
* Get collection properties for a given collection. If the collection is watched, simply return it from the cache,
* otherwise fetch it directly from zookeeper. This is a convenience for {@code getCollectionProperties(collection,0)}
*
* @param collection the collection for which properties are desired
* @return a map representing the key/value properties for the collection.
*/
public Map<String, String> getCollectionProperties(final String collection) {
return getCollectionProperties(collection, 0);
}
/**
* Get and cache collection properties for a given collection. If the collection is watched, or still cached
* simply return it from the cache, otherwise fetch it directly from zookeeper and retain the value for at
* least cacheForMillis milliseconds. Cached properties are watched in zookeeper and updated automatically.
* This version of {@code getCollectionProperties} should be used when properties need to be consulted
* frequently in the absence of an active {@link CollectionPropsWatcher}.
*
* @param collection The collection for which properties are desired
* @param cacheForMillis The minimum number of milliseconds to maintain a cache for the specified collection's
* properties. Setting a {@code CollectionPropsWatcher} will override this value and retain
* the cache for the life of the watcher. A lack of changes in zookeeper may allow the
* caching to remain for a greater duration up to the cycle time of {@link CacheCleaner}.
* Passing zero for this value will explicitly remove the cached copy if and only if it is
* due to expire and no watch exists. Any positive value will extend the expiration time
* if required.
* @return a map representing the key/value properties for the collection.
*/
public Map<String, String> getCollectionProperties(final String collection, long cacheForMillis) {
synchronized (watchedCollectionProps) { // making decisions based on the result of a get...
Watcher watcher = null;
if (cacheForMillis > 0) {
watcher = collectionPropsWatchers.compute(collection,
(c, w) -> w == null ? new PropsWatcher(c, cacheForMillis) : w.renew(cacheForMillis));
}
VersionedCollectionProps vprops = watchedCollectionProps.get(collection);
boolean haveUnexpiredProps = vprops != null && vprops.cacheUntilNs > System.nanoTime();
long untilNs = System.nanoTime() + TimeUnit.NANOSECONDS.convert(cacheForMillis, TimeUnit.MILLISECONDS);
Map<String, String> properties;
if (haveUnexpiredProps) {
properties = vprops.props;
vprops.cacheUntilNs = Math.max(vprops.cacheUntilNs, untilNs);
} else {
try {
VersionedCollectionProps vcp = fetchCollectionProperties(collection, watcher);
properties = vcp.props;
if (cacheForMillis > 0) {
vcp.cacheUntilNs = untilNs;
watchedCollectionProps.put(collection, vcp);
} else {
// we're synchronized on watchedCollectionProps and we can only get here if we have found an expired
// vprops above, so it is safe to remove the cached value and let the GC free up some mem a bit sooner.
if (!collectionPropsObservers.containsKey(collection)) {
watchedCollectionProps.remove(collection);
}
}
} catch (Exception e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Error reading collection properties", SolrZkClient.checkInterrupted(e));
}
}
return properties;
}
}
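
  // Illustrative sketch (hypothetical helper, not part of the original API): fetch a
  // collection's properties and keep them cached for roughly ten seconds; the duration
  // is an assumption for the example.
  private Map<String, String> exampleCachedCollectionProps(String collection) {
    return getCollectionProperties(collection, 10_000);
  }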
private class VersionedCollectionProps {
int zkVersion;
Map<String, String> props;
long cacheUntilNs = 0;
VersionedCollectionProps(int zkVersion, Map<String, String> props) {
this.zkVersion = zkVersion;
this.props = props;
}
}
static String getCollectionPropsPath(final String collection) {
return COLLECTIONS_ZKNODE + '/' + collection + '/' + COLLECTION_PROPS_ZKNODE;
}
@SuppressWarnings("unchecked")
private VersionedCollectionProps fetchCollectionProperties(String collection, Watcher watcher) throws KeeperException, InterruptedException {
final String znodePath = getCollectionPropsPath(collection);
// lazy init cache cleaner once we know someone is using collection properties.
if (collectionPropsCacheCleaner == null) {
synchronized (this) { // There can be only one! :)
if (collectionPropsCacheCleaner == null) {
collectionPropsCacheCleaner = notifications.submit(new CacheCleaner());
}
}
}
while (true) {
try {
Stat stat = new Stat();
byte[] data = zkClient.getData(znodePath, watcher, stat, true);
return new VersionedCollectionProps(stat.getVersion(), (Map<String, String>) Utils.fromJSON(data));
} catch (ClassCastException e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to parse collection properties for collection " + collection, e);
} catch (KeeperException.NoNodeException e) {
if (watcher != null) {
// Leave an exists watch in place in case a collectionprops.json is created later.
Stat exists = zkClient.exists(znodePath, watcher, true);
if (exists != null) {
// Rare race condition, we tried to fetch the data and couldn't find it, then we found it exists.
// Loop and try again.
continue;
}
}
return new VersionedCollectionProps(-1, EMPTY_MAP);
}
}
}
/**
* Returns the content of /security.json from ZooKeeper as a Map
   * If the file doesn't exist, it returns null.
*/
public ConfigData getSecurityProps(boolean getFresh) {
if (!getFresh) {
if (securityData == null) return new ConfigData(EMPTY_MAP, -1);
return new ConfigData(securityData.data, securityData.version);
}
try {
Stat stat = new Stat();
if (getZkClient().exists(SOLR_SECURITY_CONF_PATH, true)) {
final byte[] data = getZkClient().getData(ZkStateReader.SOLR_SECURITY_CONF_PATH, null, stat, true);
return data != null && data.length > 0 ?
new ConfigData((Map<String, Object>) Utils.fromJSON(data), stat.getVersion()) :
null;
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new SolrException(ErrorCode.SERVER_ERROR, "Error reading security properties", e);
} catch (KeeperException e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Error reading security properties", e);
}
return null;
}
/**
* Returns the baseURL corresponding to a given node's nodeName --
* NOTE: does not (currently) imply that the nodeName (or resulting
* baseURL) exists in the cluster.
*
* @lucene.experimental
*/
public String getBaseUrlForNodeName(final String nodeName) {
return Utils.getBaseUrlForNodeName(nodeName, getClusterProperty(URL_SCHEME, "http"));
}
/**
* Watches a single collection's format2 state.json.
*/
class StateWatcher implements Watcher {
private final String coll;
StateWatcher(String coll) {
this.coll = coll;
}
@Override
public void process(WatchedEvent event) {
// session events are not change events, and do not remove the watcher
if (EventType.None.equals(event.getType())) {
return;
}
if (!collectionWatches.containsKey(coll)) {
// This collection is no longer interesting, stop watching.
log.debug("Uninteresting collection {}", coll);
return;
}
Set<String> liveNodes = ZkStateReader.this.liveNodes;
log.info("A cluster state change: [{}] for collection [{}] has occurred - updating... (live nodes size: [{}])",
event, coll, liveNodes.size());
refreshAndWatch();
}
/**
* Refresh collection state from ZK and leave a watch for future changes.
* As a side effect, updates {@link #clusterState} and {@link #watchedCollectionStates}
* with the results of the refresh.
*/
public void refreshAndWatch() {
try {
DocCollection newState = fetchCollectionState(coll, this);
updateWatchedCollection(coll, newState);
synchronized (getUpdateLock()) {
constructState(Collections.singleton(coll));
}
} catch (KeeperException.SessionExpiredException | KeeperException.ConnectionLossException e) {
log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
} catch (KeeperException e) {
log.error("Unwatched collection: [{}]", coll, e);
throw new ZooKeeperException(ErrorCode.SERVER_ERROR, "A ZK error has occurred", e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
log.error("Unwatched collection: [{}]", coll, e);
}
}
}
/**
* Watches the legacy clusterstate.json.
*/
class LegacyClusterStateWatcher implements Watcher {
@Override
public void process(WatchedEvent event) {
// session events are not change events, and do not remove the watcher
if (EventType.None.equals(event.getType())) {
return;
}
int liveNodesSize = ZkStateReader.this.clusterState == null ? 0 : ZkStateReader.this.clusterState.getLiveNodes().size();
log.debug("A cluster state change: [{}], has occurred - updating... (live nodes size: [{}])", event, liveNodesSize);
refreshAndWatch();
}
/**
* Must hold {@link #getUpdateLock()} before calling this method.
*/
public void refreshAndWatch() {
try {
refreshLegacyClusterState(this);
} catch (KeeperException.NoNodeException e) {
throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
"Cannot connect to cluster at " + zkClient.getZkServerAddress() + ": cluster not found/not ready");
} catch (KeeperException.SessionExpiredException | KeeperException.ConnectionLossException e) {
log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
} catch (KeeperException e) {
log.error("A ZK error has occurred", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "A ZK error has occurred", e);
} catch (InterruptedException e) {
// Restore the interrupted status
Thread.currentThread().interrupt();
log.warn("Interrupted", e);
}
}
}
/**
* Watches collection properties
*/
class PropsWatcher implements Watcher {
private final String coll;
private long watchUntilNs;
PropsWatcher(String coll) {
this.coll = coll;
watchUntilNs = 0;
}
PropsWatcher(String coll, long forMillis) {
this.coll = coll;
watchUntilNs = System.nanoTime() + TimeUnit.NANOSECONDS.convert(forMillis, TimeUnit.MILLISECONDS);
}
public PropsWatcher renew(long forMillis) {
watchUntilNs = System.nanoTime() + TimeUnit.NANOSECONDS.convert(forMillis, TimeUnit.MILLISECONDS);
return this;
}
@Override
public void process(WatchedEvent event) {
// session events are not change events, and do not remove the watcher
if (EventType.None.equals(event.getType())) {
return;
}
boolean expired = System.nanoTime() > watchUntilNs;
if (!collectionPropsObservers.containsKey(coll) && expired) {
// No one can be notified of the change, we can ignore it and "unset" the watch
log.debug("Ignoring property change for collection {}", coll);
return;
}
log.info("A collection property change: [{}] for collection [{}] has occurred - updating...",
event, coll);
refreshAndWatch(true);
}
/**
* Refresh collection properties from ZK and leave a watch for future changes. Updates the properties in
* watchedCollectionProps with the results of the refresh. Optionally notifies watchers
*/
void refreshAndWatch(boolean notifyWatchers) {
try {
synchronized (watchedCollectionProps) { // making decisions based on the result of a get...
VersionedCollectionProps vcp = fetchCollectionProperties(coll, this);
Map<String, String> properties = vcp.props;
VersionedCollectionProps existingVcp = watchedCollectionProps.get(coll);
if (existingVcp == null || // never called before, record what we found
vcp.zkVersion > existingVcp.zkVersion || // newer info we should update
vcp.zkVersion == -1) { // node was deleted start over
watchedCollectionProps.put(coll, vcp);
if (notifyWatchers) {
notifyPropsWatchers(coll, properties);
}
if (vcp.zkVersion == -1 && existingVcp != null) { // Collection DELETE detected
// We should not be caching a collection that has been deleted.
watchedCollectionProps.remove(coll);
// core ref counting not relevant here, don't need canRemove(), we just sent
// a notification of an empty set of properties, no reason to watch what doesn't exist.
collectionPropsObservers.remove(coll);
// This is the one time we know it's safe to throw this out. We just failed to set the watch
// due to an NoNodeException, so it isn't held by ZK and can't re-set itself due to an update.
collectionPropsWatchers.remove(coll);
}
}
}
} catch (KeeperException.SessionExpiredException | KeeperException.ConnectionLossException e) {
log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
} catch (KeeperException e) {
log.error("Lost collection property watcher for {} due to ZK error", coll, e);
throw new ZooKeeperException(ErrorCode.SERVER_ERROR, "A ZK error has occurred", e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
log.error("Lost collection property watcher for {} due to the thread being interrupted", coll, e);
}
}
}
/**
   * Watches /collections children.
*/
class CollectionsChildWatcher implements Watcher {
@Override
public void process(WatchedEvent event) {
if (ZkStateReader.this.closed) {
return;
}
// session events are not change events, and do not remove the watcher
if (EventType.None.equals(event.getType())) {
return;
}
log.debug("A collections change: [{}], has occurred - updating...", event);
refreshAndWatch();
synchronized (getUpdateLock()) {
constructState(Collections.emptySet());
}
}
/**
* Must hold {@link #getUpdateLock()} before calling this method.
*/
public void refreshAndWatch() {
try {
refreshCollectionList(this);
} catch (KeeperException.SessionExpiredException | KeeperException.ConnectionLossException e) {
log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
} catch (KeeperException e) {
log.error("A ZK error has occurred", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "A ZK error has occurred", e);
} catch (InterruptedException e) {
// Restore the interrupted status
Thread.currentThread().interrupt();
log.warn("Interrupted", e);
}
}
}
/**
* Watches the live_nodes and syncs changes.
*/
class LiveNodeWatcher implements Watcher {
@Override
public void process(WatchedEvent event) {
// session events are not change events, and do not remove the watcher
if (EventType.None.equals(event.getType())) {
return;
}
log.debug("A live node change: [{}], has occurred - updating... (live nodes size: [{}])", event, liveNodes.size());
refreshAndWatch();
}
public void refreshAndWatch() {
try {
refreshLiveNodes(this);
} catch (KeeperException.SessionExpiredException | KeeperException.ConnectionLossException e) {
log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
} catch (KeeperException e) {
log.error("A ZK error has occurred", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "A ZK error has occurred", e);
} catch (InterruptedException e) {
// Restore the interrupted status
Thread.currentThread().interrupt();
log.warn("Interrupted", e);
}
}
}
public static DocCollection getCollectionLive(ZkStateReader zkStateReader, String coll) {
try {
return zkStateReader.fetchCollectionState(coll, null);
} catch (KeeperException e) {
throw new SolrException(ErrorCode.BAD_REQUEST, "Could not load collection from ZK: " + coll, e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new SolrException(ErrorCode.BAD_REQUEST, "Could not load collection from ZK: " + coll, e);
}
}
private DocCollection fetchCollectionState(String coll, Watcher watcher) throws KeeperException, InterruptedException {
String collectionPath = getCollectionPath(coll);
while (true) {
try {
Stat stat = new Stat();
byte[] data = zkClient.getData(collectionPath, watcher, stat, true);
ClusterState state = ClusterState.load(stat.getVersion(), data,
Collections.<String>emptySet(), collectionPath);
ClusterState.CollectionRef collectionRef = state.getCollectionStates().get(coll);
return collectionRef == null ? null : collectionRef.get();
} catch (KeeperException.NoNodeException e) {
if (watcher != null) {
// Leave an exists watch in place in case a state.json is created later.
Stat exists = zkClient.exists(collectionPath, watcher, true);
if (exists != null) {
// Rare race condition, we tried to fetch the data and couldn't find it, then we found it exists.
// Loop and try again.
continue;
}
}
return null;
}
}
}
public static String getCollectionPathRoot(String coll) {
return COLLECTIONS_ZKNODE + "/" + coll;
}
public static String getCollectionPath(String coll) {
return getCollectionPathRoot(coll) + "/state.json";
}
/**
* Notify this reader that a local Core is a member of a collection, and so that collection
* state should be watched.
* <p>
* Not a public API. This method should only be called from ZkController.
* <p>
* The number of cores per-collection is tracked, and adding multiple cores from the same
* collection does not increase the number of watches.
*
* @param collection the collection that the core is a member of
* @see ZkStateReader#unregisterCore(String)
*/
public void registerCore(String collection) {
AtomicBoolean reconstructState = new AtomicBoolean(false);
collectionWatches.compute(collection, (k, v) -> {
if (v == null) {
reconstructState.set(true);
v = new CollectionWatch<>();
}
v.coreRefCount++;
return v;
});
if (reconstructState.get()) {
new StateWatcher(collection).refreshAndWatch();
}
}
/**
* Notify this reader that a local core that is a member of a collection has been closed.
* <p>
* Not a public API. This method should only be called from ZkController.
* <p>
* If no cores are registered for a collection, and there are no {@link CollectionStateWatcher}s
* for that collection either, the collection watch will be removed.
*
* @param collection the collection that the core belongs to
*/
public void unregisterCore(String collection) {
AtomicBoolean reconstructState = new AtomicBoolean(false);
collectionWatches.compute(collection, (k, v) -> {
if (v == null)
return null;
if (v.coreRefCount > 0)
v.coreRefCount--;
if (v.canBeRemoved()) {
watchedCollectionStates.remove(collection);
lazyCollectionStates.put(collection, new LazyCollectionRef(collection));
reconstructState.set(true);
return null;
}
return v;
});
if (reconstructState.get()) {
synchronized (getUpdateLock()) {
constructState(Collections.emptySet());
}
}
}
/**
* Register a CollectionStateWatcher to be called when the state of a collection changes
* <em>or</em> the set of live nodes changes.
*
* <p>
   * The Watcher will automatically be removed when its
* <code>onStateChanged</code> returns <code>true</code>
* </p>
*
* <p>
   * This method is just syntactic sugar for registering both a {@link DocCollectionWatcher} and
* a {@link LiveNodesListener}. Callers that only care about one or the other (but not both) are
   * encouraged to use the more specific register methods, as it may reduce the number of
* ZooKeeper watchers needed, and reduce the amount of network/cpu used.
* </p>
*
* @see #registerDocCollectionWatcher
* @see #registerLiveNodesListener
*/
public void registerCollectionStateWatcher(String collection, CollectionStateWatcher stateWatcher) {
final DocCollectionAndLiveNodesWatcherWrapper wrapper
= new DocCollectionAndLiveNodesWatcherWrapper(collection, stateWatcher);
registerDocCollectionWatcher(collection, wrapper);
registerLiveNodesListener(wrapper);
DocCollection state = clusterState.getCollectionOrNull(collection);
if (stateWatcher.onStateChanged(liveNodes, state) == true) {
removeCollectionStateWatcher(collection, stateWatcher);
}
}
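
  // Illustrative sketch (hypothetical helper, not part of the original API): register a
  // watcher that removes itself once a live leader is visible for the given shard.
  private void exampleWatchForShardLeader(String collection, String shard) {
    registerCollectionStateWatcher(collection, (nodes, state) ->
        state != null && getLeader(nodes, state, shard) != null);
  }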
/**
* Register a DocCollectionWatcher to be called when the state of a collection changes
*
* <p>
   * The Watcher will automatically be removed when its
* <code>onStateChanged</code> returns <code>true</code>
* </p>
*/
public void registerDocCollectionWatcher(String collection, DocCollectionWatcher stateWatcher) {
AtomicBoolean watchSet = new AtomicBoolean(false);
collectionWatches.compute(collection, (k, v) -> {
if (v == null) {
v = new CollectionWatch<>();
watchSet.set(true);
}
v.stateWatchers.add(stateWatcher);
return v;
});
if (watchSet.get()) {
new StateWatcher(collection).refreshAndWatch();
}
DocCollection state = clusterState.getCollectionOrNull(collection);
if (stateWatcher.onStateChanged(state) == true) {
removeDocCollectionWatcher(collection, stateWatcher);
}
}
/**
* Block until a CollectionStatePredicate returns true, or the wait times out
*
* <p>
* Note that the predicate may be called again even after it has returned true, so
* implementors should avoid changing state within the predicate call itself.
* </p>
*
* <p>
* This implementation utilizes {@link CollectionStateWatcher} internally.
* Callers that don't care about liveNodes are encouraged to use a {@link DocCollection} {@link Predicate}
* instead
* </p>
*
* @param collection the collection to watch
* @param wait how long to wait
* @param unit the units of the wait parameter
* @param predicate the predicate to call on state changes
* @throws InterruptedException on interrupt
* @throws TimeoutException on timeout
* @see #waitForState(String, long, TimeUnit, Predicate)
* @see #registerCollectionStateWatcher
*/
public void waitForState(final String collection, long wait, TimeUnit unit, CollectionStatePredicate predicate)
throws InterruptedException, TimeoutException {
if (closed) {
throw new AlreadyClosedException();
}
final CountDownLatch latch = new CountDownLatch(1);
waitLatches.add(latch);
AtomicReference<DocCollection> docCollection = new AtomicReference<>();
CollectionStateWatcher watcher = (n, c) -> {
docCollection.set(c);
boolean matches = predicate.matches(n, c);
if (matches)
latch.countDown();
return matches;
};
registerCollectionStateWatcher(collection, watcher);
try {
// wait for the watcher predicate to return true, or time out
if (!latch.await(wait, unit))
throw new TimeoutException("Timeout waiting to see state for collection=" + collection + " :" + docCollection.get());
} finally {
removeCollectionStateWatcher(collection, watcher);
waitLatches.remove(latch);
}
}
/**
* Block until a Predicate returns true, or the wait times out
*
* <p>
* Note that the predicate may be called again even after it has returned true, so
* implementors should avoid changing state within the predicate call itself.
* </p>
*
* @param collection the collection to watch
* @param wait how long to wait
* @param unit the units of the wait parameter
* @param predicate the predicate to call on state changes
* @throws InterruptedException on interrupt
* @throws TimeoutException on timeout
*/
public void waitForState(final String collection, long wait, TimeUnit unit, Predicate<DocCollection> predicate)
throws InterruptedException, TimeoutException {
if (closed) {
throw new AlreadyClosedException();
}
final CountDownLatch latch = new CountDownLatch(1);
waitLatches.add(latch);
AtomicReference<DocCollection> docCollection = new AtomicReference<>();
DocCollectionWatcher watcher = (c) -> {
docCollection.set(c);
boolean matches = predicate.test(c);
if (matches)
latch.countDown();
return matches;
};
registerDocCollectionWatcher(collection, watcher);
try {
// wait for the watcher predicate to return true, or time out
if (!latch.await(wait, unit))
throw new TimeoutException("Timeout waiting to see state for collection=" + collection + " :" + docCollection.get());
} finally {
removeDocCollectionWatcher(collection, watcher);
waitLatches.remove(latch);
}
}
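
  // Illustrative sketch (hypothetical helper, not part of the original API): block for up
  // to 30 seconds until a state.json for the collection is visible to this reader; the
  // timeout value is an assumption for the example.
  private void exampleWaitForCollection(String collection)
      throws InterruptedException, TimeoutException {
    waitForState(collection, 30, TimeUnit.SECONDS, (DocCollection c) -> c != null);
  }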
/**
* Block until a LiveNodesStatePredicate returns true, or the wait times out
* <p>
* Note that the predicate may be called again even after it has returned true, so
* implementors should avoid changing state within the predicate call itself.
* </p>
*
* @param wait how long to wait
* @param unit the units of the wait parameter
* @param predicate the predicate to call on state changes
* @throws InterruptedException on interrupt
* @throws TimeoutException on timeout
*/
public void waitForLiveNodes(long wait, TimeUnit unit, LiveNodesPredicate predicate)
throws InterruptedException, TimeoutException {
if (closed) {
throw new AlreadyClosedException();
}
final CountDownLatch latch = new CountDownLatch(1);
waitLatches.add(latch);
LiveNodesListener listener = (o, n) -> {
boolean matches = predicate.matches(o, n);
if (matches)
latch.countDown();
return matches;
};
registerLiveNodesListener(listener);
try {
// wait for the watcher predicate to return true, or time out
if (!latch.await(wait, unit))
throw new TimeoutException("Timeout waiting for live nodes, currently they are: " + getClusterState().getLiveNodes());
} finally {
removeLiveNodesListener(listener);
waitLatches.remove(latch);
}
}
/**
* Remove a watcher from a collection's watch list.
* <p>
* This allows Zookeeper watches to be removed if there is no interest in the
* collection.
* </p>
*
* @param collection the collection
* @param watcher the watcher
* @see #registerCollectionStateWatcher
*/
public void removeCollectionStateWatcher(String collection, CollectionStateWatcher watcher) {
final DocCollectionAndLiveNodesWatcherWrapper wrapper
= new DocCollectionAndLiveNodesWatcherWrapper(collection, watcher);
removeDocCollectionWatcher(collection, wrapper);
removeLiveNodesListener(wrapper);
}
/**
* Remove a watcher from a collection's watch list.
* <p>
* This allows Zookeeper watches to be removed if there is no interest in the
* collection.
* </p>
*
* @param collection the collection
* @param watcher the watcher
* @see #registerDocCollectionWatcher
*/
public void removeDocCollectionWatcher(String collection, DocCollectionWatcher watcher) {
AtomicBoolean reconstructState = new AtomicBoolean(false);
collectionWatches.compute(collection, (k, v) -> {
if (v == null)
return null;
v.stateWatchers.remove(watcher);
if (v.canBeRemoved()) {
watchedCollectionStates.remove(collection);
lazyCollectionStates.put(collection, new LazyCollectionRef(collection));
reconstructState.set(true);
return null;
}
return v;
});
if (reconstructState.get()) {
synchronized (getUpdateLock()) {
constructState(Collections.emptySet());
}
}
}
/* package-private for testing */
Set<DocCollectionWatcher> getStateWatchers(String collection) {
final Set<DocCollectionWatcher> watchers = new HashSet<>();
collectionWatches.compute(collection, (k, v) -> {
if (v != null) {
watchers.addAll(v.stateWatchers);
}
return v;
});
return watchers;
}
// returns true if the state has changed
private boolean updateWatchedCollection(String coll, DocCollection newState) {
if (newState == null) {
log.debug("Removing cached collection state for [{}]", coll);
watchedCollectionStates.remove(coll);
return true;
}
boolean updated = false;
// CAS update loop
while (true) {
if (!collectionWatches.containsKey(coll)) {
break;
}
DocCollection oldState = watchedCollectionStates.get(coll);
if (oldState == null) {
if (watchedCollectionStates.putIfAbsent(coll, newState) == null) {
log.debug("Add data for [{}] ver [{}]", coll, newState.getZNodeVersion());
updated = true;
break;
}
} else {
if (oldState.getZNodeVersion() >= newState.getZNodeVersion()) {
// no change to state, but we might have been triggered by the addition of a
// state watcher, so run notifications
updated = true;
break;
}
if (watchedCollectionStates.replace(coll, oldState, newState)) {
log.debug("Updating data for [{}] from [{}] to [{}]", coll, oldState.getZNodeVersion(), newState.getZNodeVersion());
updated = true;
break;
}
}
}
// Resolve race with unregisterCore.
if (!collectionWatches.containsKey(coll)) {
watchedCollectionStates.remove(coll);
log.debug("Removing uninteresting collection [{}]", coll);
}
return updated;
}
public void registerCollectionPropsWatcher(final String collection, CollectionPropsWatcher propsWatcher) {
AtomicBoolean watchSet = new AtomicBoolean(false);
collectionPropsObservers.compute(collection, (k, v) -> {
if (v == null) {
v = new CollectionWatch<>();
watchSet.set(true);
}
v.stateWatchers.add(propsWatcher);
return v;
});
if (watchSet.get()) {
collectionPropsWatchers.computeIfAbsent(collection, PropsWatcher::new).refreshAndWatch(false);
}
}
public void removeCollectionPropsWatcher(String collection, CollectionPropsWatcher watcher) {
collectionPropsObservers.compute(collection, (k, v) -> {
if (v == null)
return null;
v.stateWatchers.remove(watcher);
if (v.canBeRemoved()) {
        // don't want this to happen in the middle of other blocks that might add it back.
synchronized (watchedCollectionProps) {
watchedCollectionProps.remove(collection);
}
return null;
}
return v;
});
}
public static class ConfigData {
public Map<String, Object> data;
public int version;
public ConfigData() {
}
public ConfigData(Map<String, Object> data, int version) {
this.data = data;
this.version = version;
}
}
private void notifyStateWatchers(String collection, DocCollection collectionState) {
if (this.closed) {
return;
}
try {
notifications.submit(new Notification(collection, collectionState));
} catch (RejectedExecutionException e) {
if (closed == false) {
log.error("Couldn't run collection notifications for {}", collection, e);
}
}
}
private class Notification implements Runnable {
final String collection;
final DocCollection collectionState;
private Notification(String collection, DocCollection collectionState) {
this.collection = collection;
this.collectionState = collectionState;
}
@Override
public void run() {
List<DocCollectionWatcher> watchers = new ArrayList<>();
collectionWatches.compute(collection, (k, v) -> {
if (v == null)
return null;
watchers.addAll(v.stateWatchers);
return v;
});
for (DocCollectionWatcher watcher : watchers) {
try {
if (watcher.onStateChanged(collectionState)) {
removeDocCollectionWatcher(collection, watcher);
}
} catch (Exception exception) {
log.warn("Error on calling watcher", exception);
}
}
}
}
//
// Aliases related
//
/**
* Access to the {@link Aliases}.
*/
public final AliasesManager aliasesManager = new AliasesManager();
/**
* Get an immutable copy of the present state of the aliases. References to this object should not be retained
* in any context where it will be important to know if aliases have changed.
*
   * @return The current aliases; Aliases.EMPTY if not running SolrCloud or if no aliases have existed yet. Never returns null.
*/
public Aliases getAliases() {
return aliasesManager.getAliases();
}
// called by createClusterStateWatchersAndUpdate()
private void refreshAliases(AliasesManager watcher) throws KeeperException, InterruptedException {
synchronized (getUpdateLock()) {
constructState(Collections.emptySet());
zkClient.exists(ALIASES, watcher, true);
}
aliasesManager.update();
}
/**
* A class to manage the aliases instance, including watching for changes.
* There should only ever be one instance of this class
* per instance of ZkStateReader. Normally it will not be useful to create a new instance since
* this watcher automatically re-registers itself every time it is updated.
*/
public class AliasesManager implements Watcher { // the holder is a Zk watcher
    // note: as of this writing, this class is very generic. Is it useful to use for other ZK managed things?
private final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private volatile Aliases aliases = Aliases.EMPTY;
public Aliases getAliases() {
return aliases; // volatile read
}
/**
* Writes an updated {@link Aliases} to zk.
* It will retry if there are races with other modifications, giving up after 30 seconds with a SolrException.
     * The caller should understand it's possible the aliases have changed further by the time it examines them.
*/
public void applyModificationAndExportToZk(UnaryOperator<Aliases> op) {
      // The current aliases hasn't been update()'ed yet -- which is impossible? Anyway, just update it first.
if (aliases.getZNodeVersion() == -1) {
try {
boolean updated = update();
assert updated;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new ZooKeeperException(ErrorCode.SERVER_ERROR, e.toString(), e);
} catch (KeeperException e) {
throw new ZooKeeperException(ErrorCode.SERVER_ERROR, e.toString(), e);
}
}
final long deadlineNanos = System.nanoTime() + TimeUnit.SECONDS.toNanos(30);
// note: triesLeft tuning is based on ConcurrentCreateRoutedAliasTest
for (int triesLeft = 30; triesLeft > 0; triesLeft--) {
// we could synchronize on "this" but there doesn't seem to be a point; we have a retry loop.
Aliases curAliases = getAliases();
Aliases modAliases = op.apply(curAliases);
final byte[] modAliasesJson = modAliases.toJSON();
if (curAliases == modAliases) {
log.debug("Current aliases has the desired modification; no further ZK interaction needed.");
return;
}
try {
try {
final Stat stat = getZkClient().setData(ALIASES, modAliasesJson, curAliases.getZNodeVersion(), true);
setIfNewer(Aliases.fromJSON(modAliasesJson, stat.getVersion()));
return;
} catch (KeeperException.BadVersionException e) {
log.debug(e.toString(), e);
log.warn("Couldn't save aliases due to race with another modification; will update and retry until timeout");
// considered a backoff here, but we really do want to compete strongly since the normal case is
// that we will do one update and succeed. This is left as a hot loop for limited tries intentionally.
// More failures than that here probably indicate a bug or a very strange high write frequency usage for
// aliases.json, timeouts mean zk is being very slow to respond, or this node is being crushed
// by other processing and just can't find any cpu cycles at all.
update();
if (deadlineNanos < System.nanoTime()) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Timed out trying to update aliases! " +
"Either zookeeper or this node may be overloaded.");
}
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new ZooKeeperException(ErrorCode.SERVER_ERROR, e.toString(), e);
} catch (KeeperException e) {
throw new ZooKeeperException(ErrorCode.SERVER_ERROR, e.toString(), e);
}
}
throw new SolrException(ErrorCode.SERVER_ERROR, "Too many successive version failures trying to update aliases");
}
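
    // Illustrative sketch (not part of the original API): callers typically pass a
    // UnaryOperator that returns a modified copy of the current Aliases, e.g.
    //
    //   zkStateReader.aliasesManager.applyModificationAndExportToZk(
    //       aliases -> aliases.cloneWithCollectionAlias("books", "books_2019"));
    //
    // The alias names above are hypothetical, and the exact Aliases mutator used is an
    // assumption for the example.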
/**
* Ensures the internal aliases is up to date. If there is a change, return true.
*
* @return true if an update was performed
*/
public boolean update() throws KeeperException, InterruptedException {
log.debug("Checking ZK for most up to date Aliases {}", ALIASES);
// Call sync() first to ensure the subsequent read (getData) is up to date.
zkClient.getSolrZooKeeper().sync(ALIASES, null, null);
Stat stat = new Stat();
final byte[] data = zkClient.getData(ALIASES, null, stat, true);
return setIfNewer(Aliases.fromJSON(data, stat.getVersion()));
}
// ZK Watcher interface
@Override
public void process(WatchedEvent event) {
// session events are not change events, and do not remove the watcher
if (EventType.None.equals(event.getType())) {
return;
}
try {
log.debug("Aliases: updating");
// re-register the watch
Stat stat = new Stat();
final byte[] data = zkClient.getData(ALIASES, this, stat, true);
// note: it'd be nice to avoid possibly needlessly parsing if we don't update aliases but not a big deal
setIfNewer(Aliases.fromJSON(data, stat.getVersion()));
} catch (NoNodeException e) {
// /aliases.json will not always exist
} catch (KeeperException.ConnectionLossException | KeeperException.SessionExpiredException e) {
// note: aliases.json is required to be present
log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
} catch (KeeperException e) {
log.error("A ZK error has occurred", e);
throw new ZooKeeperException(ErrorCode.SERVER_ERROR, "A ZK error has occurred", e);
} catch (InterruptedException e) {
// Restore the interrupted status
Thread.currentThread().interrupt();
log.warn("Interrupted", e);
}
}
/**
* Update the internal aliases reference with a new one, provided that its ZK version has increased.
*
* @param newAliases the potentially newer version of Aliases
* @return true if aliases have been updated to a new version, false otherwise
*/
private boolean setIfNewer(Aliases newAliases) {
assert newAliases.getZNodeVersion() >= 0;
synchronized (this) {
int cmp = Integer.compare(aliases.getZNodeVersion(), newAliases.getZNodeVersion());
if (cmp < 0) {
log.debug("Aliases: cmp={}, new definition is: {}", cmp, newAliases);
aliases = newAliases;
this.notifyAll();
return true;
} else {
log.debug("Aliases: cmp={}, not overwriting ZK version.", cmp);
assert cmp != 0 || Arrays.equals(aliases.toJSON(), newAliases.toJSON()) : aliases + " != " + newAliases;
return false;
}
}
}
}
private void notifyPropsWatchers(String collection, Map<String, String> properties) {
try {
collectionPropsNotifications.submit(new PropsNotification(collection, properties));
} catch (RejectedExecutionException e) {
if (!closed) {
log.error("Couldn't run collection properties notifications for {}", collection, e);
}
}
}
private class PropsNotification implements Runnable {
private final String collection;
private final Map<String, String> collectionProperties;
private final List<CollectionPropsWatcher> watchers = new ArrayList<>();
private PropsNotification(String collection, Map<String, String> collectionProperties) {
this.collection = collection;
this.collectionProperties = collectionProperties;
// guarantee delivery of notification regardless of what happens to collectionPropsObservers
// while we wait our turn in the executor by capturing the list on creation.
collectionPropsObservers.compute(collection, (k, v) -> {
if (v == null)
return null;
watchers.addAll(v.stateWatchers);
return v;
});
}
@Override
public void run() {
for (CollectionPropsWatcher watcher : watchers) {
if (watcher.onStateChanged(collectionProperties)) {
removeCollectionPropsWatcher(collection, watcher);
}
}
}
}
private class CacheCleaner implements Runnable {
public void run() {
while (!Thread.interrupted()) {
try {
Thread.sleep(60000);
} catch (InterruptedException e) {
// Executor shutdown will send us an interrupt
break;
}
watchedCollectionProps.entrySet().removeIf(entry ->
entry.getValue().cacheUntilNs < System.nanoTime() && !collectionPropsObservers.containsKey(entry.getKey()));
}
}
}
/**
* Helper class that acts as both a {@link DocCollectionWatcher} and a {@link LiveNodesListener}
   * while wrapping and delegating to a {@link CollectionStateWatcher}
*/
private final class DocCollectionAndLiveNodesWatcherWrapper implements DocCollectionWatcher, LiveNodesListener {
private final String collectionName;
private final CollectionStateWatcher delegate;
public int hashCode() {
return collectionName.hashCode() * delegate.hashCode();
}
public boolean equals(Object other) {
if (other instanceof DocCollectionAndLiveNodesWatcherWrapper) {
DocCollectionAndLiveNodesWatcherWrapper that
= (DocCollectionAndLiveNodesWatcherWrapper) other;
return this.collectionName.equals(that.collectionName)
&& this.delegate.equals(that.delegate);
}
return false;
}
public DocCollectionAndLiveNodesWatcherWrapper(final String collectionName,
final CollectionStateWatcher delegate) {
this.collectionName = collectionName;
this.delegate = delegate;
}
@Override
public boolean onStateChanged(DocCollection collectionState) {
final boolean result = delegate.onStateChanged(ZkStateReader.this.liveNodes,
collectionState);
if (result) {
        // it might be a while before live nodes change, so proactively remove ourselves
removeLiveNodesListener(this);
}
return result;
}
@Override
public boolean onChange(SortedSet<String> oldLiveNodes, SortedSet<String> newLiveNodes) {
final DocCollection collection = ZkStateReader.this.clusterState.getCollectionOrNull(collectionName);
final boolean result = delegate.onStateChanged(newLiveNodes, collection);
if (result) {
// it might be a while before collection changes, so proactively remove ourselves
removeDocCollectionWatcher(collectionName, this);
}
return result;
}
}
}
| 1 | 31,382 | While this is potentially convenient it seems off topic for the PR/Issue. Also if it is kept, in the realm of taste/style I tend to not use get/set/is for things that are not properties of the object. maybe hasLiveNode(String node) thus someone using it might write `if (zkReader.hasLiveNode("foo")) ...` which reads quite nicely. | apache-lucene-solr | java |
@@ -10,11 +10,13 @@ import (
)
var (
- svidPath = path.Join(ProjectRoot(), "test/fixture/certs/svid.pem")
- svidKeyPath = path.Join(ProjectRoot(), "test/fixture/certs/svid_key.pem")
- caPath = path.Join(ProjectRoot(), "test/fixture/certs/ca.pem")
- caKeyPath = path.Join(ProjectRoot(), "test/fixture/certs/ca_key.pem")
- bundlePath = path.Join(ProjectRoot(), "test/fixture/certs/bundle.der")
+ svidPath = path.Join(ProjectRoot(), "test/fixture/certs/svid.pem")
+ svidKeyPath = path.Join(ProjectRoot(), "test/fixture/certs/svid_key.pem")
+ caPath = path.Join(ProjectRoot(), "test/fixture/certs/ca.pem")
+ caKeyPath = path.Join(ProjectRoot(), "test/fixture/certs/ca_key.pem")
+ bundlePath = path.Join(ProjectRoot(), "test/fixture/certs/bundle.der")
+ blogCsrPath = path.Join(ProjectRoot(), "test/fixture/certs/blog_csr.pem")
+ blogSvidPath = path.Join(ProjectRoot(), "test/fixture/certs/blog_cert.pem")
)
// LoadCAFixture reads, parses, and returns the pre-defined CA fixture and key | 1 | package util
import (
"crypto/ecdsa"
"crypto/x509"
"encoding/pem"
"fmt"
"io/ioutil"
"path"
)
var (
svidPath = path.Join(ProjectRoot(), "test/fixture/certs/svid.pem")
svidKeyPath = path.Join(ProjectRoot(), "test/fixture/certs/svid_key.pem")
caPath = path.Join(ProjectRoot(), "test/fixture/certs/ca.pem")
caKeyPath = path.Join(ProjectRoot(), "test/fixture/certs/ca_key.pem")
bundlePath = path.Join(ProjectRoot(), "test/fixture/certs/bundle.der")
)
// LoadCAFixture reads, parses, and returns the pre-defined CA fixture and key
func LoadCAFixture() (ca *x509.Certificate, key *ecdsa.PrivateKey, err error) {
return LoadCertAndKey(caPath, caKeyPath)
}
// LoadSVIDFixture reads, parses, and returns the pre-defined SVID fixture and key
func LoadSVIDFixture() (svid *x509.Certificate, key *ecdsa.PrivateKey, err error) {
return LoadCertAndKey(svidPath, svidKeyPath)
}
func LoadBundleFixture() ([]*x509.Certificate, error) {
return LoadBundle(bundlePath)
}
// LoadCertAndKey reads and parses both a certificate and a private key at once
func LoadCertAndKey(crtPath, keyPath string) (*x509.Certificate, *ecdsa.PrivateKey, error) {
crt, err := LoadCert(crtPath)
if err != nil {
return crt, nil, err
}
key, err := LoadKey(keyPath)
return crt, key, err
}
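
// Illustrative sketch (not part of the original file): a test would typically load the
// SVID fixture and its key like this; error handling is abbreviated for the example.
//
//	svid, key, err := LoadSVIDFixture()
//	if err != nil {
//		panic(err)
//	}
//	_, _ = svid, key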
// LoadCert reads and parses an X.509 certificate at the specified path
func LoadCert(path string) (*x509.Certificate, error) {
block, err := LoadPEM(path)
if err != nil {
return nil, err
}
crt, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return nil, err
}
return crt, nil
}
// LoadKey reads and parses the ECDSA private key at the specified path
func LoadKey(path string) (*ecdsa.PrivateKey, error) {
block, err := LoadPEM(path)
if err != nil {
return nil, err
}
key, err := x509.ParseECPrivateKey(block.Bytes)
if err != nil {
return nil, err
}
return key, nil
}
// LoadPEM reads and parses the PEM structure at the specified path
func LoadPEM(path string) (*pem.Block, error) {
dat, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
blk, rest := pem.Decode(dat)
if len(rest) > 0 {
return nil, fmt.Errorf("error decoding certificate at %s", path)
}
return blk, nil
}
func LoadBundle(path string) ([]*x509.Certificate, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("error reading bundle at %s: %s", path, err)
}
bundle, err := x509.ParseCertificates(data)
if err != nil {
return nil, fmt.Errorf("error parsing bundle at %s: %s", path, err)
}
return bundle, nil
}
| 1 | 9,299 | What's the difference between blogSvid and the SVID above? Looks like the functions that use these aren't being called currently - can they be removed? | spiffe-spire | go |
@@ -67,7 +67,7 @@ namespace Nethermind.Serialization.Json
return (long) reader.Value;
}
- string s = (string) reader.Value;
+ string s = reader.Value?.ToString();
if (s == "0x0")
{
return BigInteger.Zero; | 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.Globalization;
using System.Numerics;
using Nethermind.Core.Extensions;
using Newtonsoft.Json;
namespace Nethermind.Serialization.Json
{
public class BigIntegerConverter : JsonConverter<BigInteger>
{
private readonly NumberConversion _conversion;
public BigIntegerConverter()
: this(NumberConversion.Hex)
{
}
public BigIntegerConverter(NumberConversion conversion)
{
_conversion = conversion;
}
public override void WriteJson(JsonWriter writer, BigInteger value, JsonSerializer serializer)
{
if (value.IsZero)
{
writer.WriteValue("0x0");
return;
}
switch (_conversion)
{
case NumberConversion.Hex:
writer.WriteValue(string.Concat("0x", value.ToByteArray(false, true).ToHexString()));
break;
case NumberConversion.Decimal:
writer.WriteValue(value.ToString());
break;
case NumberConversion.Raw:
writer.WriteValue(value);
break;
default:
throw new NotSupportedException();
}
}
public override BigInteger ReadJson(JsonReader reader, Type objectType, BigInteger existingValue, bool hasExistingValue, JsonSerializer serializer)
{
if (reader.Value is long || reader.Value is int)
{
return (long) reader.Value;
}
string s = (string) reader.Value;
if (s == "0x0")
{
return BigInteger.Zero;
}
bool isHex = false;
Span<char> withZero = null;
if (s.StartsWith("0x0"))
{
withZero = s.AsSpan(2).ToArray();
isHex = true;
}
else if (s.StartsWith("0x"))
{
withZero = new Span<char>(new char[s.Length - 1]);
withZero[0] = '0';
s.AsSpan(2).CopyTo(withZero.Slice(1));
isHex = true;
}
if (isHex)
{
// withZero.Reverse();
return BigInteger.Parse(withZero, NumberStyles.AllowHexSpecifier);
}
return BigInteger.Parse(s, NumberStyles.Integer);
}
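
        // Illustrative round-trip (sketch, not part of the original class):
        //
        //   var serializer = new JsonSerializer();
        //   serializer.Converters.Add(new BigIntegerConverter(NumberConversion.Hex));
        //
        // With hex conversion, BigInteger 42 is written as "0x2a", and the string "0x2a"
        // reads back as BigInteger 42.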
}
} | 1 | 24,601 | can you add numbers with this change? the CLI is a javascript engine and it can worh number - java adds strings vi concatenation | NethermindEth-nethermind | .cs |
@@ -116,6 +116,8 @@ class OrderDirective
// Note the `lower()` in the `addOrderBy()`. It is essential to sorting the
// results correctly. See also https://github.com/bolt/core/issues/1190
// again: lower breaks postgresql jsonb compatibility, first cast as txt
+ // cast as TEXT or CHAR, depending on SQL support. See Bolt\Doctrine\Query\Cast.php
+ // and https://github.com/bolt/core/issues/2241
$query
->getQueryBuilder()
->addOrderBy('lower(CAST(' . $translationsAlias . '.value as TEXT))', $direction); | 1 | <?php
declare(strict_types=1);
namespace Bolt\Storage\Directive;
use Bolt\Doctrine\Version;
use Bolt\Entity\Field\NumberField;
use Bolt\Storage\QueryInterface;
use Bolt\Twig\Notifications;
use Bolt\Utils\ContentHelper;
use Bolt\Utils\LocaleHelper;
use Twig\Environment;
/**
* Directive to alter query based on 'order' parameter.
*
* eg: 'pages', ['order'=>'-publishedAt']
*/
class OrderDirective
{
public const NAME = 'order';
/** @var LocaleHelper */
private $localeHelper;
/** @var Environment */
private $twig;
/** @var Notifications */
private $notifications;
public function __construct(LocaleHelper $localeHelper, Environment $twig, Notifications $notifications)
{
$this->localeHelper = $localeHelper;
$this->twig = $twig;
$this->notifications = $notifications;
}
public function __invoke(QueryInterface $query, string $order): void
{
if ($order === '') {
return;
}
$locale = $this->localeHelper->getCurrentLocale($this->twig)->get('code');
// remove default order
$query->getQueryBuilder()->resetDQLPart('orderBy');
$separatedOrders = $this->getOrderBys($order);
foreach ($separatedOrders as $order) {
[ $order, $direction ] = $this->createSortBy($order);
if ($order === 'title' && $this->getTitleFormat($query) !== null) {
$order = ContentHelper::getFieldNames($this->getTitleFormat($query));
}
if (is_array($order)) {
foreach ($order as $orderitem) {
$this->setOrderBy($query, $orderitem, $direction, $locale);
}
} else {
$this->setOrderBy($query, $order, $direction, $locale);
}
}
}
/**
* Set the query OrderBy directives
* given an order (e.g. 'heading', 'id') and direction (ASC|DESC)
*/
private function setOrderBy(QueryInterface $query, string $order, string $direction, string $locale): void
{
if (in_array($order, $query->getCoreFields(), true)) {
$query->getQueryBuilder()->addOrderBy('content.' . $order, $direction);
} elseif ($order === 'author') {
$query
->getQueryBuilder()
->leftJoin('content.author', 'user')
->addOrderBy('user.username', $direction);
} elseif (in_array($order, $query->getTaxonomyFields(), true)) {
$taxonomy = 'taxonomy_' . $query->getIndex();
$taxonomySlug = 'taxonomy_slug_' . $query->getIndex();
$query
->getQueryBuilder()
->leftJoin('content.taxonomies', $taxonomy)
->andWhere($taxonomy . '.type = :' . $taxonomySlug)
->setParameter($taxonomySlug, $order)
->addOrderBy($taxonomy . '.name', $direction);
} elseif ($this->isActualField($query, $order)) {
$fieldsAlias = 'fields_order_' . $query->getIndex();
$fieldAlias = 'order_' . $query->getIndex();
$translationsAlias = 'translations_order_' . $query->getIndex();
$query
->getQueryBuilder()
->leftJoin('content.fields', $fieldsAlias)
->leftJoin($fieldsAlias . '.translations', $translationsAlias)
->andWhere($fieldsAlias . '.name = :' . $fieldAlias)
->setParameter($fieldAlias, $order);
if ($this->isLocalizedField($query, $order)) {
// If the field is localized, we limit the query to the
// value for the current locale only.
$query
->getQueryBuilder()
->andWhere($translationsAlias . '.locale = :' . $fieldAlias . '_locale')
->setParameter($fieldAlias . '_locale', $locale);
}
if ($this->isNumericField($query, $order)) {
$this->orderByNumericField($query, $translationsAlias, $direction);
} else {
// Note the `lower()` in the `addOrderBy()`. It is essential to sorting the
// results correctly. See also https://github.com/bolt/core/issues/1190
// again: lower breaks postgresql jsonb compatibility, first cast as txt
$query
->getQueryBuilder()
->addOrderBy('lower(CAST(' . $translationsAlias . '.value as TEXT))', $direction);
}
$query->incrementIndex();
} else {
$this->notifications->warning('Incorrect OrderBy clause for field that does not exist',
"A query with ordering on a Field or Taxonomy (`${order}`) that's not defined, will yield unexpected results. Update your `{% setcontent %}`-statement");
}
}
/**
* Cobble together the sorting order, and whether or not it's a column in `content` or `fields`.
*/
private function createSortBy(string $order): array
{
if (mb_strpos($order, '-') === 0) {
$direction = 'DESC';
$order = mb_substr($order, 1);
} elseif (mb_strpos($order, ' DESC') !== false) {
$direction = 'DESC';
$order = str_replace(' DESC', '', $order);
} else {
$order = str_replace(' ASC', '', $order);
$direction = 'ASC';
}
return [$order, $direction];
}
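/**
 * Split a comma-separated 'order' clause into its individual order-by parts.
 */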
protected function getOrderBys(string $order): array
{
$separatedOrders = [$order];
if ($this->isMultiOrderQuery($order)) {
$separatedOrders = explode(',', $order);
}
return $separatedOrders;
}
protected function isMultiOrderQuery(string $order): bool
{
return mb_strpos($order, ',') !== false;
}
protected function isActualField(QueryInterface $query, string $name): bool
{
$contentType = $query->getConfig()->get('contenttypes/' . $query->getContentType());
return in_array($name, $contentType->get('fields')->keys()->all(), true);
}
private function getTitleFormat(QueryInterface $query): ?string
{
$contentType = $query->getConfig()->get('contenttypes/' . $query->getContentType());
return $contentType->get('title_format', null);
}
private function orderByNumericField(QueryInterface $query, string $translationsAlias, string $direction): void
{
$qb = $query->getQueryBuilder();
// For older bundled SQLite in PHP 7.2 that do not have `CAST` built in, we fall
// back to the "dumb" sorting instead. C'est la vie.
$doctrineVersion = new Version($query->getQueryBuilder()->getEntityManager()->getConnection());
if (! $doctrineVersion->hasCast()) {
$qb->addOrderBy($translationsAlias . '.value', $direction);
return;
}
$substring = $qb
->expr()
->substring($translationsAlias . '.value', 3, $query->getQueryBuilder()->expr()->length($translationsAlias . '.value'));
$qb->addOrderBy('CAST(' . $substring . ' as decimal) ', $direction);
}
private function isNumericField(QueryInterface $query, $fieldname): bool
{
$contentType = $query->getConfig()->get('contenttypes/' . $query->getContentType());
$type = $contentType->get('fields')->get($fieldname)->get('type', false);
return $type === NumberField::TYPE;
}
private function isLocalizedField(QueryInterface $query, $fieldname): bool
{
$contentType = $query->getConfig()->get('contenttypes/' . $query->getContentType());
return $contentType->get('fields')->get($fieldname)->get('localize', false);
}
}
| 1 | 12,630 | Shouldn't this `TEXT` also be dependent on the platform, then? | bolt-core | php |
@@ -17,6 +17,15 @@ type Typer interface {
Type() semantic.MonoType
}
+// ITableObject serves as sort of a "marker trait" to allow us to check if a
+// value is a TableObject without having to import TableObject which would be a
+// cyclical import.
+// Identical purpose to the interface in the interpreter package, but sadly we
+// can't import it here because of yet another potential cycle.
+type ITableObject interface {
+ TableObject()
+}
+
type Value interface {
Typer
IsNull() bool | 1 | // Package values declares the flux data types and implements them.
package values
import (
"bytes"
"fmt"
"regexp"
"runtime/debug"
"strconv"
"github.com/influxdata/flux/codes"
"github.com/influxdata/flux/internal/errors"
"github.com/influxdata/flux/semantic"
)
type Typer interface {
Type() semantic.MonoType
}
type Value interface {
Typer
IsNull() bool
Str() string
Bytes() []byte
Int() int64
UInt() uint64
Float() float64
Bool() bool
Time() Time
Duration() Duration
Regexp() *regexp.Regexp
Array() Array
Object() Object
Function() Function
Dict() Dictionary
Equal(Value) bool
}
type value struct {
t semantic.MonoType
v interface{}
}
func (v value) Type() semantic.MonoType {
return v.t
}
func (v value) IsNull() bool {
return v.v == nil
}
func (v value) Str() string {
CheckKind(v.t.Nature(), semantic.String)
return v.v.(string)
}
func (v value) Bytes() []byte {
CheckKind(v.t.Nature(), semantic.Bytes)
return v.v.([]byte)
}
func (v value) Int() int64 {
CheckKind(v.t.Nature(), semantic.Int)
return v.v.(int64)
}
func (v value) UInt() uint64 {
CheckKind(v.t.Nature(), semantic.UInt)
return v.v.(uint64)
}
func (v value) Float() float64 {
CheckKind(v.t.Nature(), semantic.Float)
return v.v.(float64)
}
func (v value) Bool() bool {
CheckKind(v.t.Nature(), semantic.Bool)
return v.v.(bool)
}
func (v value) Time() Time {
CheckKind(v.t.Nature(), semantic.Time)
return v.v.(Time)
}
func (v value) Duration() Duration {
CheckKind(v.t.Nature(), semantic.Duration)
return v.v.(Duration)
}
func (v value) Regexp() *regexp.Regexp {
CheckKind(v.t.Nature(), semantic.Regexp)
return v.v.(*regexp.Regexp)
}
func (v value) Array() Array {
CheckKind(v.t.Nature(), semantic.Array)
return v.v.(Array)
}
func (v value) Object() Object {
CheckKind(v.t.Nature(), semantic.Object)
return v.v.(Object)
}
func (v value) Function() Function {
CheckKind(v.t.Nature(), semantic.Function)
return v.v.(Function)
}
func (v value) Dict() Dictionary {
CheckKind(v.t.Nature(), semantic.Dictionary)
return v.v.(Dictionary)
}
func (v value) Equal(r Value) bool {
if v.Type().Nature() != r.Type().Nature() {
return false
}
if v.IsNull() || r.IsNull() {
return false
}
switch k := v.Type().Nature(); k {
case semantic.Bool:
return v.Bool() == r.Bool()
case semantic.UInt:
return v.UInt() == r.UInt()
case semantic.Int:
return v.Int() == r.Int()
case semantic.Float:
return v.Float() == r.Float()
case semantic.String:
return v.Str() == r.Str()
case semantic.Bytes:
return bytes.Equal(v.Bytes(), r.Bytes())
case semantic.Time:
return v.Time() == r.Time()
case semantic.Duration:
return v.Duration() == r.Duration()
case semantic.Regexp:
return v.Regexp().String() == r.Regexp().String()
case semantic.Object:
return v.Object().Equal(r.Object())
case semantic.Array:
return v.Array().Equal(r.Array())
case semantic.Function:
return v.Function().Equal(r.Function())
case semantic.Dictionary:
return v.Dict().Equal(r.Dict())
default:
return false
}
}
func (v value) String() string {
return fmt.Sprintf("%v", v.v)
}
var (
// InvalidValue is a non-nil value whose type is semantic.Invalid
InvalidValue = value{}
// Null is an untyped nil value.
Null = null{}
// Void is an empty record used to represent a void value.
Void = NewObject(semantic.NewObjectType(nil))
)
// Unwrap will extract the primitive value from the Value interface.
func Unwrap(v Value) interface{} {
if v.IsNull() {
return nil
}
switch n := v.Type().Nature(); n {
case semantic.String:
return v.Str()
case semantic.Bytes:
return v.Bytes()
case semantic.Int:
return v.Int()
case semantic.UInt:
return v.UInt()
case semantic.Float:
return v.Float()
case semantic.Bool:
return v.Bool()
case semantic.Time:
return v.Time()
case semantic.Duration:
return v.Duration()
case semantic.Regexp:
return v.Regexp()
case semantic.Array:
arr := v.Array()
a := make([]interface{}, arr.Len())
arr.Range(func(i int, v Value) {
val := Unwrap(v)
a[i] = val
})
return a
case semantic.Object:
obj := v.Object()
o := make(map[string]interface{}, obj.Len())
obj.Range(func(k string, v Value) {
val := Unwrap(v)
o[k] = val
})
return o
case semantic.Dictionary:
dict := v.Dict()
d := make(map[interface{}]interface{}, dict.Len())
dict.Range(func(key, value Value) {
k := Unwrap(key)
d[k] = Unwrap(value)
})
return d
case semantic.Function:
// there is no primitive value for a Function object, just return itself.
return v
default:
panic(errors.Newf(codes.Unknown, "cannot unwrap a %v type value", n))
}
}
// New constructs a new Value by inferring the type from the interface.
// Note this method will return Null if passed a nil value. If the interface
// does not translate to a valid Value type, then InvalidValue is returned.
func New(v interface{}) Value {
if v == nil {
return Null
}
switch v := v.(type) {
case string:
return NewString(v)
case []byte:
return NewBytes(v)
case int64:
return NewInt(v)
case uint64:
return NewUInt(v)
case float64:
return NewFloat(v)
case bool:
return NewBool(v)
case Time:
return NewTime(v)
case Duration:
return NewDuration(v)
case *regexp.Regexp:
return NewRegexp(v)
default:
return InvalidValue
}
}
func NewNull(t semantic.MonoType) Value {
return value{
t: t,
v: nil,
}
}
func NewString(v string) Value {
return value{
t: semantic.BasicString,
v: v,
}
}
func NewBytes(v []byte) Value {
return value{
t: semantic.BasicBytes,
v: v,
}
}
func NewInt(v int64) Value {
return value{
t: semantic.BasicInt,
v: v,
}
}
func NewUInt(v uint64) Value {
return value{
t: semantic.BasicUint,
v: v,
}
}
func NewFloat(v float64) Value {
return value{
t: semantic.BasicFloat,
v: v,
}
}
func NewTime(v Time) Value {
return value{
t: semantic.BasicTime,
v: v,
}
}
func NewDuration(v Duration) Value {
return value{
t: semantic.BasicDuration,
v: v,
}
}
func NewRegexp(v *regexp.Regexp) Value {
return value{
t: semantic.BasicRegexp,
v: v,
}
}
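// Stringify converts a primitive Value (bool, int, uint, float, time, duration or string)
// into its string representation; any other nature yields an "invalid interpolation type" error.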
func Stringify(v Value) (Value, error) {
val := Unwrap(v)
switch v.Type().Nature() {
case semantic.Bool:
return NewString(strconv.FormatBool(val.(bool))), nil
case semantic.Int:
return NewString(strconv.FormatInt(val.(int64), 10)), nil
case semantic.UInt:
return NewString(strconv.FormatUint(val.(uint64), 10)), nil
case semantic.Float:
return NewString(strconv.FormatFloat(val.(float64), 'f', -1, 64)), nil
case semantic.Time:
return NewString(val.(Time).String()), nil
case semantic.Duration:
return NewString(val.(Duration).String()), nil
case semantic.String:
return v, nil
}
return nil, errors.Newf(codes.Invalid, "invalid interpolation type")
}
func UnexpectedKind(got, exp semantic.Nature) error {
return unexpectedKind{got: got, want: exp}
}
// CheckKind panics if got != exp.
func CheckKind(got, exp semantic.Nature) {
if got != exp {
panic(UnexpectedKind(got, exp))
}
}
type unexpectedKind struct {
got, want semantic.Nature
}
func (e unexpectedKind) Error() string {
// Reuse the Error method from flux.Error by converting
// this to a flux.Error type and then calling Error.
var err *errors.Error
e.As(&err)
return err.Error()
}
func (e unexpectedKind) As(target interface{}) bool {
if err, ok := target.(**errors.Error); ok {
*err = errors.Newf(codes.Internal, "unexpected kind: got %q expected %q, trace: %s", e.got, e.want, string(debug.Stack()))
return true
}
return false
}
// IsTimeable checks if value v is Timeable.
func IsTimeable(v Value) bool {
return v.Type().Nature() == semantic.Time || v.Type().Nature() == semantic.Duration
}
type null struct{}
func (n null) Type() semantic.MonoType { return semantic.MonoType{} }
func (n null) IsNull() bool { return true }
func (n null) Str() string { panic(UnexpectedKind(semantic.Invalid, semantic.String)) }
func (n null) Bytes() []byte { panic(UnexpectedKind(semantic.Invalid, semantic.Bytes)) }
func (n null) Int() int64 { panic(UnexpectedKind(semantic.Invalid, semantic.Int)) }
func (n null) UInt() uint64 { panic(UnexpectedKind(semantic.Invalid, semantic.UInt)) }
func (n null) Float() float64 { panic(UnexpectedKind(semantic.Invalid, semantic.Float)) }
func (n null) Bool() bool { panic(UnexpectedKind(semantic.Invalid, semantic.Bool)) }
func (n null) Time() Time { panic(UnexpectedKind(semantic.Invalid, semantic.Time)) }
func (n null) Duration() Duration { panic(UnexpectedKind(semantic.Invalid, semantic.Duration)) }
func (n null) Regexp() *regexp.Regexp { panic(UnexpectedKind(semantic.Invalid, semantic.Regexp)) }
func (n null) Array() Array { panic(UnexpectedKind(semantic.Invalid, semantic.Array)) }
func (n null) Object() Object { panic(UnexpectedKind(semantic.Invalid, semantic.Object)) }
func (n null) Function() Function { panic(UnexpectedKind(semantic.Invalid, semantic.Function)) }
func (n null) Dict() Dictionary { panic(UnexpectedKind(semantic.Invalid, semantic.Dictionary)) }
func (n null) Equal(Value) bool { return false }
| 1 | 17,564 | Can we name this `TableObject`? I'm not a big fan of the `I` prefix for interfaces and that's not really used in Go. Is it also possible to have the method be unexported? I don't know if that's possible. If it's not, this is good. If it is possible, I'd prefer this method to be unexported. | influxdata-flux | go |
@@ -31,11 +31,15 @@
#include <stdlib.h> // malloc and free
#include <tbb/tbb.h>
#include <tbb/spin_mutex.h>
- #include "tbb/scalable_allocator.h"
+ #include <tbb/scalable_allocator.h>
#include <tbb/global_control.h>
#include <tbb/task_arena.h>
#include "services/daal_atomic_int.h"
+ #if defined(TBB_INTERFACE_VERSION) && TBB_INTERFACE_VERSION >= 12002
+ #include <tbb/task.h>
+ #endif
+
using namespace daal::services;
#else
#include "src/externals/service_service.h" | 1 | /* file: threading.cpp */
/*******************************************************************************
* Copyright 2014-2020 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
/*
//++
// Implementation of threading layer functions.
//--
*/
#include "src/threading/threading.h"
#include "services/daal_memory.h"
#if defined(__DO_TBB_LAYER__)
#define TBB_PREVIEW_GLOBAL_CONTROL 1
#define TBB_PREVIEW_TASK_ARENA 1
#include <stdlib.h> // malloc and free
#include <tbb/tbb.h>
#include <tbb/spin_mutex.h>
#include "tbb/scalable_allocator.h"
#include <tbb/global_control.h>
#include <tbb/task_arena.h>
#include "services/daal_atomic_int.h"
using namespace daal::services;
#else
#include "src/externals/service_service.h"
#endif
DAAL_EXPORT void * _threaded_scalable_malloc(const size_t size, const size_t alignment)
{
#if defined(__DO_TBB_LAYER__)
return scalable_aligned_malloc(size, alignment);
#else
return daal::internal::Service<>::serv_malloc(size, alignment);
#endif
}
DAAL_EXPORT void _threaded_scalable_free(void * ptr)
{
#if defined(__DO_TBB_LAYER__)
scalable_aligned_free(ptr);
#else
daal::internal::Service<>::serv_free(ptr);
#endif
}
DAAL_EXPORT void _daal_tbb_task_scheduler_free(void *& globalControl)
{
#if defined(__DO_TBB_LAYER__)
if (globalControl)
{
delete reinterpret_cast<tbb::global_control *>(globalControl);
globalControl = nullptr;
}
#endif
}
DAAL_EXPORT size_t _setNumberOfThreads(const size_t numThreads, void ** globalControl)
{
#if defined(__DO_TBB_LAYER__)
static tbb::spin_mutex mt;
tbb::spin_mutex::scoped_lock lock(mt);
if (numThreads != 0)
{
_daal_tbb_task_scheduler_free(*globalControl);
*globalControl = reinterpret_cast<void *>(new tbb::global_control(tbb::global_control::max_allowed_parallelism, numThreads));
daal::threader_env()->setNumberOfThreads(numThreads);
return numThreads;
}
#endif
daal::threader_env()->setNumberOfThreads(1);
return 1;
}
DAAL_EXPORT void _daal_threader_for(int n, int threads_request, const void * a, daal::functype func)
{
#if defined(__DO_TBB_LAYER__)
tbb::parallel_for(tbb::blocked_range<int>(0, n, 1), [&](tbb::blocked_range<int> r) {
int i;
for (i = r.begin(); i < r.end(); i++)
{
func(i, a);
}
});
#elif defined(__DO_SEQ_LAYER__)
int i;
for (i = 0; i < n; i++)
{
func(i, a);
}
#endif
}
DAAL_EXPORT void _daal_threader_for_blocked(int n, int threads_request, const void * a, daal::functype2 func)
{
#if defined(__DO_TBB_LAYER__)
tbb::parallel_for(tbb::blocked_range<int>(0, n, 1), [&](tbb::blocked_range<int> r) { func(r.begin(), r.end() - r.begin(), a); });
#elif defined(__DO_SEQ_LAYER__)
func(0, n, a);
#endif
}
DAAL_EXPORT void _daal_threader_for_optional(int n, int threads_request, const void * a, daal::functype func)
{
#if defined(__DO_TBB_LAYER__)
if (_daal_is_in_parallel())
{
int i;
for (i = 0; i < n; i++)
{
func(i, a);
}
}
else
{
_daal_threader_for(n, threads_request, a, func);
}
#elif defined(__DO_SEQ_LAYER__)
_daal_threader_for(n, threads_request, a, func);
#endif
}
DAAL_EXPORT int _daal_threader_get_max_threads()
{
#if defined(__DO_TBB_LAYER__)
return tbb::this_task_arena::max_concurrency();
#elif defined(__DO_SEQ_LAYER__)
return 1;
#endif
}
DAAL_EXPORT void * _daal_get_tls_ptr(void * a, daal::tls_functype func)
{
#if defined(__DO_TBB_LAYER__)
tbb::enumerable_thread_specific<void *> * p = new tbb::enumerable_thread_specific<void *>([=]() -> void * { return func(a); });
return (void *)p;
#elif defined(__DO_SEQ_LAYER__)
return func(a);
#endif
}
DAAL_EXPORT void _daal_del_tls_ptr(void * tlsPtr)
{
#if defined(__DO_TBB_LAYER__)
tbb::enumerable_thread_specific<void *> * p = static_cast<tbb::enumerable_thread_specific<void *> *>(tlsPtr);
delete p;
#elif defined(__DO_SEQ_LAYER__)
#endif
}
DAAL_EXPORT void * _daal_get_tls_local(void * tlsPtr)
{
#if defined(__DO_TBB_LAYER__)
tbb::enumerable_thread_specific<void *> * p = static_cast<tbb::enumerable_thread_specific<void *> *>(tlsPtr);
return p->local();
#elif defined(__DO_SEQ_LAYER__)
return tlsPtr;
#endif
}
DAAL_EXPORT void _daal_reduce_tls(void * tlsPtr, void * a, daal::tls_reduce_functype func)
{
#if defined(__DO_TBB_LAYER__)
tbb::enumerable_thread_specific<void *> * p = static_cast<tbb::enumerable_thread_specific<void *> *>(tlsPtr);
for (auto it = p->begin(); it != p->end(); ++it)
{
func((*it), a);
}
#elif defined(__DO_SEQ_LAYER__)
func(tlsPtr, a);
#endif
}
DAAL_EXPORT void _daal_parallel_reduce_tls(void * tlsPtr, void * a, daal::tls_reduce_functype func)
{
#if defined(__DO_TBB_LAYER__)
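// Snapshot the thread-local values into a plain array, then apply the reduction
// functor to every slot in parallel via tbb::parallel_for.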
size_t n = 0;
tbb::enumerable_thread_specific<void *> * p = static_cast<tbb::enumerable_thread_specific<void *> *>(tlsPtr);
for (auto it = p->begin(); it != p->end(); ++it, ++n)
;
if (n)
{
typedef void * mptr;
mptr * aDataPtr = (mptr *)(::malloc(sizeof(mptr) * n));
if (aDataPtr)
{
size_t i = 0;
for (auto it = p->begin(); it != p->end(); ++it) aDataPtr[i++] = *it;
tbb::parallel_for(tbb::blocked_range<size_t>(0, n, 1), [&](tbb::blocked_range<size_t> r) {
for (size_t i = r.begin(); i < r.end(); i++) func(aDataPtr[i], a);
});
::free(aDataPtr);
}
}
#elif defined(__DO_SEQ_LAYER__)
func(tlsPtr, a);
#endif
}
DAAL_EXPORT void * _daal_new_mutex()
{
#if defined(__DO_TBB_LAYER__)
return new tbb::spin_mutex();
#elif defined(__DO_SEQ_LAYER__)
return NULL;
#endif
}
DAAL_EXPORT void _daal_lock_mutex(void * mutexPtr)
{
#if defined(__DO_TBB_LAYER__)
static_cast<tbb::spin_mutex *>(mutexPtr)->lock();
#endif
}
DAAL_EXPORT void _daal_unlock_mutex(void * mutexPtr)
{
#if defined(__DO_TBB_LAYER__)
static_cast<tbb::spin_mutex *>(mutexPtr)->unlock();
#endif
}
DAAL_EXPORT void _daal_del_mutex(void * mutexPtr)
{
#if defined(__DO_TBB_LAYER__)
delete static_cast<tbb::spin_mutex *>(mutexPtr);
#endif
}
DAAL_EXPORT bool _daal_is_in_parallel()
{
#if defined(__DO_TBB_LAYER__)
#if defined(TBB_INTERFACE_VERSION) && TBB_INTERFACE_VERSION >= 12001
return tbb::detail::d1::task::current_execute_data() != nullptr;
#else
return tbb::task::self().state() == tbb::task::executing;
#endif
#else
return false;
#endif
}
DAAL_EXPORT void * _daal_threader_env()
{
static daal::ThreaderEnvironment env;
return &env;
}
#if defined(__DO_TBB_LAYER__)
template <typename T, typename Key, typename Pred>
// Returns the index of the first element in the range [ar, ar + n) that is not less than (i.e. greater than or equal to) value.
size_t lower_bound(size_t n, const T * ar, const Key & value)
{
const T * first = ar;
while (n > 0)
{
auto it = first;
auto step = (n >> 1);
it += step;
if (Pred::less(*it, value))
{
first = ++it;
n -= step + 1;
}
else
n = step;
}
return first - ar;
}
class SimpleAllocator
{
public:
static void * alloc(size_t n) { return ::malloc(n); }
static void free(void * p) { ::free(p); }
};
template <class T, class Allocator>
class Collection
{
public:
/**
* Default constructor. Sets the size and capacity to 0.
*/
Collection() : _array(NULL), _size(0), _capacity(0) {}
/**
* Destructor
*/
virtual ~Collection()
{
for (size_t i = 0; i < _capacity; i++) _array[i].~T();
Allocator::free(_array);
}
/**
* Element access
* \param[in] index Index of an accessed element
* \return Reference to the element
*/
T & operator[](size_t index) { return _array[index]; }
/**
* Const element access
* \param[in] index Index of an accessed element
* \return Reference to the element
*/
const T & operator[](size_t index) const { return _array[index]; }
/**
* Size of a collection
* \return Size of the collection
*/
size_t size() const { return _size; }
/**
* Changes the size of a storage
* \param[in] newCapacity Size of a new storage.
*/
bool resize(size_t newCapacity)
{
if (newCapacity <= _capacity)
{
return true;
}
T * newArray = (T *)Allocator::alloc(sizeof(T) * newCapacity);
if (!newArray)
{
return false;
}
for (size_t i = 0; i < newCapacity; i++)
{
T * elementMemory = &(newArray[i]);
::new (elementMemory) T;
}
size_t minSize = newCapacity < _size ? newCapacity : _size;
for (size_t i = 0; i < minSize; i++) newArray[i] = _array[i];
for (size_t i = 0; i < _capacity; i++) _array[i].~T();
Allocator::free(_array);
_array = newArray;
_capacity = newCapacity;
return true;
}
/**
* Clears a collection: removes an array, sets the size and capacity to 0
*/
void clear()
{
for (size_t i = 0; i < _capacity; i++) _array[i].~T();
Allocator::free(_array);
_array = NULL;
_size = 0;
_capacity = 0;
}
/**
* Insert an element into a position
* \param[in] pos Position to set
* \param[in] x Element to set
*/
bool insert(const size_t pos, const T & x)
{
if (pos > this->size()) return true;
size_t newSize = 1 + this->size();
if (newSize > _capacity)
{
if (!_resize()) return false;
}
size_t tail = _size - pos;
for (size_t i = 0; i < tail; i++) _array[_size - i] = _array[_size - 1 - i];
_array[pos] = x;
_size = newSize;
return true;
}
/**
* Erase an element from a position
* \param[in] pos Position to erase
*/
void erase(size_t pos)
{
if (pos >= this->size()) return;
_size--;
for (size_t i = 0; i < _size - pos; i++) _array[pos + i] = _array[pos + 1 + i];
}
private:
static const size_t _default_capacity = 16;
bool _resize()
{
size_t newCapacity = 2 * _capacity;
if (_capacity == 0) newCapacity = _default_capacity;
return resize(newCapacity);
}
protected:
T * _array;
size_t _size;
size_t _capacity;
};
#if _WIN32 || _WIN64
typedef DWORD ThreadId;
ThreadId getCurrentThreadId()
{
return ::GetCurrentThreadId();
}
#else
typedef pthread_t ThreadId;
ThreadId getCurrentThreadId()
{
return pthread_self();
}
#endif // _WIN32||_WIN64
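// LocalStorage hands out per-thread values created by a user-supplied factory function.
// Values released by a thread are kept in a free list (sorted by thread id) for later reuse,
// while values currently handed out are tracked in a separate list sorted by pointer value.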
class LocalStorage
{
public:
LocalStorage(void * a, daal::tls_functype func) : _a(a), _func(func) {}
LocalStorage(const LocalStorage & o) = delete;
LocalStorage & operator=(const LocalStorage & o) = delete;
void * get()
{
auto tid = getCurrentThreadId();
{
tbb::spin_mutex::scoped_lock lock(_mt);
size_t i;
if (findFree(tid, i))
{
void * res = _free[i].value;
addUsed(_free[i]);
_free.erase(i);
return res;
}
}
Pair p(tid, _func(_a));
if (p.value)
{
tbb::spin_mutex::scoped_lock lock(_mt);
addUsed(p);
}
return p.value;
}
void release(void * data)
{
tbb::spin_mutex::scoped_lock lock(_mt);
size_t i = findUsed(data);
addFree(_used[i]);
_used.erase(i);
}
void reduce(void * a, daal::tls_reduce_functype func)
{
tbb::spin_mutex::scoped_lock lock(_mt);
for (size_t i = 0; i < _free.size(); ++i) func(_free[i].value, a);
for (size_t i = 0; i < _used.size(); ++i) func(_used[i].value, a);
_free.clear();
_used.clear();
}
private:
struct Pair
{
Pair() : tid(0), value(NULL) {}
Pair(const ThreadId & id, void * v) : tid(id), value(v) {}
Pair(const Pair & o) : tid(o.tid), value(o.value) {}
Pair & operator=(const Pair & o)
{
tid = o.tid;
value = o.value;
return *this;
}
ThreadId tid;
void * value;
};
struct CompareByTid
{
static bool less(const Pair & p, const ThreadId & tid) { return p.tid < tid; }
};
struct CompareByValue
{
static bool less(const Pair & p, const void * val) { return p.value < val; }
};
bool findFree(const ThreadId & tid, size_t & i) const
{
if (!_free.size()) return false;
i = lower_bound<Pair, ThreadId, CompareByTid>(_free.size(), &_free[0], tid);
if (i == _free.size()) --i;
return true;
}
size_t findUsed(void * data) const
{
size_t i = lower_bound<Pair, void *, CompareByValue>(_used.size(), &_used[0], data);
//DAAL_ASSERT(i < _used.size());
return i;
}
void addFree(const Pair & p)
{
size_t i = lower_bound<Pair, ThreadId, CompareByTid>(_free.size(), &_free[0], p.tid);
_free.insert(i, p);
}
void addUsed(const Pair & p)
{
size_t i = lower_bound<Pair, void *, CompareByValue>(_used.size(), &_used[0], p.value);
_used.insert(i, p);
}
private:
void * _a;
daal::tls_functype _func;
Collection<Pair, SimpleAllocator> _free; //sorted by tid
Collection<Pair, SimpleAllocator> _used; //sorted by value
tbb::spin_mutex _mt;
};
DAAL_EXPORT void * _daal_get_ls_ptr(void * a, daal::tls_functype func)
{
return new LocalStorage(a, func);
}
DAAL_EXPORT void * _daal_get_ls_local(void * lsPtr)
{
return ((LocalStorage *)lsPtr)->get();
}
DAAL_EXPORT void _daal_reduce_ls(void * lsPtr, void * a, daal::tls_reduce_functype func)
{
((LocalStorage *)lsPtr)->reduce(a, func);
}
DAAL_EXPORT void _daal_del_ls_ptr(void * lsPtr)
{
delete ((LocalStorage *)lsPtr);
}
DAAL_EXPORT void _daal_release_ls_local(void * lsPtr, void * p)
{
((LocalStorage *)lsPtr)->release(p);
}
DAAL_EXPORT void * _daal_new_task_group()
{
return new tbb::task_group();
}
DAAL_EXPORT void _daal_del_task_group(void * taskGroupPtr)
{
delete (tbb::task_group *)taskGroupPtr;
}
DAAL_EXPORT void _daal_run_task_group(void * taskGroupPtr, daal::task * t)
{
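// Wrap the DAAL task in a copyable, reference-counted functor so that
// tbb::task_group::run() can copy it; the task is destroyed once the last copy is released.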
struct shared_task
{
typedef Atomic<int> RefCounterType;
shared_task(daal::task & t) : _t(t), _nRefs(nullptr)
{
_nRefs = new RefCounterType;
(*_nRefs).set(1);
}
shared_task(const shared_task & o) : _t(o._t), _nRefs(o._nRefs) { (*_nRefs).inc(); }
~shared_task()
{
if (_nRefs && !(*_nRefs).dec())
{
_t.destroy();
delete _nRefs;
}
}
void operator()() const { _t.run(); }
daal::task & _t;
RefCounterType * _nRefs;
private:
shared_task & operator=(const shared_task &);
};
tbb::task_group * group = (tbb::task_group *)taskGroupPtr;
group->run(shared_task(*t));
}
DAAL_EXPORT void _daal_wait_task_group(void * taskGroupPtr)
{
((tbb::task_group *)taskGroupPtr)->wait();
}
#else
DAAL_EXPORT void * _daal_get_ls_ptr(void * a, daal::tls_functype func)
{
return func(a);
}
DAAL_EXPORT void * _daal_get_ls_local(void * lsPtr)
{
return lsPtr;
}
DAAL_EXPORT void _daal_reduce_ls(void * lsPtr, void * a, daal::tls_reduce_functype func)
{
func(lsPtr, a);
}
DAAL_EXPORT void _daal_del_ls_ptr(void * lsPtr) {}
DAAL_EXPORT void _daal_release_ls_local(void * lsPtr, void * p) {}
DAAL_EXPORT void * _daal_new_task_group()
{
return nullptr;
}
DAAL_EXPORT void _daal_del_task_group(void * taskGroupPtr) {}
DAAL_EXPORT void _daal_run_task_group(void * taskGroupPtr, daal::task * task) {}
DAAL_EXPORT void _daal_wait_task_group(void * taskGroupPtr) {}
#endif
namespace daal
{}
| 1 | 22,869 | If tbb.h is included, there is no big sense to include other TBB headers. | oneapi-src-oneDAL | cpp |
@@ -915,12 +915,9 @@ def uses_path_addressing(headers):
def get_bucket_name(path, headers):
parsed = urlparse.urlparse(path)
- # try pick the bucket_name from the path
- bucket_name = parsed.path.split('/')[1]
-
- # is the hostname not starting with a bucket name?
- if uses_path_addressing(headers):
- return normalize_bucket_name(bucket_name)
+ # matches the common endpoints like
+ # - '<bucket_name>.s3.<region>.*'
+ localstack_pattern = re.compile(r'^(.+)\.s3[.\-][a-z]{2}-[a-z]+-[0-9]{1,}.*')
# matches the common endpoints like
# - '<bucket_name>.s3.<region>.amazonaws.com' | 1 | import time
import re
import json
import uuid
import base64
import codecs
import random
import logging
import datetime
import xmltodict
import collections
import dateutil.parser
import urllib.parse
import six
import botocore.config
from pytz import timezone
from urllib.parse import parse_qs
from botocore.compat import urlsplit
from botocore.client import ClientError
from botocore.credentials import Credentials
from localstack.utils.auth import HmacV1QueryAuth
from botocore.awsrequest import create_request_object
from requests.models import Response, Request
from six.moves.urllib import parse as urlparse
from localstack import config, constants
from localstack.config import HOSTNAME, HOSTNAME_EXTERNAL, LOCALHOST_IP
from localstack.constants import TEST_AWS_ACCESS_KEY_ID, TEST_AWS_SECRET_ACCESS_KEY
from localstack.utils.aws import aws_stack
from localstack.services.s3 import multipart_content
from localstack.utils.common import (
short_uid, timestamp_millis, to_str, to_bytes, clone, md5, get_service_protocol, now_utc, is_base64
)
from localstack.utils.analytics import event_publisher
from localstack.utils.http_utils import uses_chunked_encoding
from localstack.utils.persistence import PersistingProxyListener
from localstack.utils.aws.aws_responses import requests_response, requests_error_response_xml_signature_calculation
from localstack.services.cloudformation.service_models import S3Bucket
CONTENT_SHA256_HEADER = 'x-amz-content-sha256'
STREAMING_HMAC_PAYLOAD = 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD'
# backend port (configured in s3_starter.py on startup)
PORT_S3_BACKEND = None
# mappings for S3 bucket notifications
S3_NOTIFICATIONS = {}
# mappings for bucket CORS settings
BUCKET_CORS = {}
# maps bucket name to lifecycle settings
BUCKET_LIFECYCLE = {}
# maps bucket name to replication settings
BUCKET_REPLICATIONS = {}
# maps bucket name to encryption settings
BUCKET_ENCRYPTIONS = {}
# maps bucket name to object lock settings
OBJECT_LOCK_CONFIGS = {}
# map to store the s3 expiry dates
OBJECT_EXPIRY = {}
# set up logger
LOGGER = logging.getLogger(__name__)
# XML namespace constants
XMLNS_S3 = 'http://s3.amazonaws.com/doc/2006-03-01/'
# see https://stackoverflow.com/questions/50480924/regex-for-s3-bucket-name#50484916
BUCKET_NAME_REGEX = (r'(?=^.{3,63}$)(?!^(\d+\.)+\d+$)' +
r'(^(([a-z0-9]|[a-z0-9][a-z0-9\-]*[a-z0-9])\.)*([a-z0-9]|[a-z0-9][a-z0-9\-]*[a-z0-9])$)')
# list of destination types for bucket notifications
NOTIFICATION_DESTINATION_TYPES = ('Queue', 'Topic', 'CloudFunction', 'LambdaFunction')
# prefix for object metadata keys in headers and query params
OBJECT_METADATA_KEY_PREFIX = 'x-amz-meta-'
# response header overrides the client may request
ALLOWED_HEADER_OVERRIDES = {
'response-content-type': 'Content-Type',
'response-content-language': 'Content-Language',
'response-expires': 'Expires',
'response-cache-control': 'Cache-Control',
'response-content-disposition': 'Content-Disposition',
'response-content-encoding': 'Content-Encoding',
}
# STS policy expiration date format
POLICY_EXPIRATION_FORMAT1 = '%Y-%m-%dT%H:%M:%SZ'
POLICY_EXPIRATION_FORMAT2 = '%Y-%m-%dT%H:%M:%S.%IZ'
# IGNORED_HEADERS_LOWER contains headers which are not involved in the signature calculation process.
# These headers are sent by LocalStack by default.
IGNORED_HEADERS_LOWER = [
'remote-addr', 'host', 'user-agent', 'accept-encoding',
'accept', 'connection', 'origin',
'x-forwarded-for', 'x-localstack-edge', 'authorization', 'date'
]
# query params that are required in a presigned URL
PRESIGN_QUERY_PARAMS = ['Signature', 'Expires', 'AWSAccessKeyId']
CORS_HEADERS = [
'Access-Control-Allow-Origin', 'Access-Control-Allow-Methods', 'Access-Control-Allow-Headers',
'Access-Control-Max-Age', 'Access-Control-Allow-Credentials', 'Access-Control-Expose-Headers',
'Access-Control-Request-Headers', 'Access-Control-Request-Method'
]
SIGNATURE_V4_PARAMS = ['X-Amz-Algorithm']
def event_type_matches(events, action, api_method):
""" check whether any of the event types in `events` matches the
given `action` and `api_method`, and return the first match. """
events = events or []
for event in events:
regex = event.replace('*', '[^:]*')
action_string = 's3:%s:%s' % (action, api_method)
match = re.match(regex, action_string)
if match:
return match
return False
def filter_rules_match(filters, object_path):
""" check whether the given object path matches all of the given filters """
filters = filters or {}
s3_filter = _get_s3_filter(filters)
for rule in s3_filter.get('FilterRule', []):
rule_name_lower = rule['Name'].lower()
if rule_name_lower == 'prefix':
if not prefix_with_slash(object_path).startswith(prefix_with_slash(rule['Value'])):
return False
elif rule_name_lower == 'suffix':
if not object_path.endswith(rule['Value']):
return False
else:
LOGGER.warning('Unknown filter name: "%s"' % rule['Name'])
return True
def _get_s3_filter(filters):
return filters.get('S3Key', filters.get('Key', {}))
def prefix_with_slash(s):
return s if s[0] == '/' else '/%s' % s
def get_event_message(event_name, bucket_name, file_name='testfile.txt', etag='', version_id=None, file_size=0):
# Based on: http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html
bucket_name = normalize_bucket_name(bucket_name)
return {
'Records': [{
'eventVersion': '2.1',
'eventSource': 'aws:s3',
'awsRegion': aws_stack.get_region(),
'eventTime': timestamp_millis(),
'eventName': event_name,
'userIdentity': {
'principalId': 'AIDAJDPLRKLG7UEXAMPLE'
},
'requestParameters': {
'sourceIPAddress': '127.0.0.1' # TODO determine real source IP
},
'responseElements': {
'x-amz-request-id': short_uid(),
'x-amz-id-2': 'eftixk72aD6Ap51TnqcoF8eFidJG9Z/2' # Amazon S3 host that processed the request
},
's3': {
's3SchemaVersion': '1.0',
'configurationId': 'testConfigRule',
'bucket': {
'name': bucket_name,
'ownerIdentity': {
'principalId': 'A3NL1KOZZKExample'
},
'arn': 'arn:aws:s3:::%s' % bucket_name
},
'object': {
'key': urllib.parse.quote(file_name),
'size': file_size,
'eTag': etag,
'versionId': version_id,
'sequencer': '0055AED6DCD90281E5'
}
}
}]
}
def send_notifications(method, bucket_name, object_path, version_id):
for bucket, notifs in S3_NOTIFICATIONS.items():
if normalize_bucket_name(bucket) == normalize_bucket_name(bucket_name):
action = {'PUT': 'ObjectCreated', 'POST': 'ObjectCreated', 'DELETE': 'ObjectRemoved'}[method]
# TODO: support more detailed methods, e.g., DeleteMarkerCreated
# http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
if action == 'ObjectCreated' and method == 'POST':
api_method = 'CompleteMultipartUpload'
else:
api_method = {'PUT': 'Put', 'POST': 'Post', 'DELETE': 'Delete'}[method]
event_name = '%s:%s' % (action, api_method)
for notif in notifs:
send_notification_for_subscriber(notif, bucket_name, object_path,
version_id, api_method, action, event_name)
def send_notification_for_subscriber(notif, bucket_name, object_path, version_id, api_method, action, event_name):
bucket_name = normalize_bucket_name(bucket_name)
if not event_type_matches(notif['Event'], action, api_method) or \
not filter_rules_match(notif.get('Filter'), object_path):
return
key = urlparse.unquote(object_path.replace('//', '/'))[1:]
s3_client = aws_stack.connect_to_service('s3')
object_data = {}
try:
object_data = s3_client.head_object(Bucket=bucket_name, Key=key)
except botocore.exceptions.ClientError:
pass
# build event message
message = get_event_message(
event_name=event_name,
bucket_name=bucket_name,
file_name=key,
etag=object_data.get('ETag', ''),
file_size=object_data.get('ContentLength', 0),
version_id=version_id
)
message = json.dumps(message)
if notif.get('Queue'):
sqs_client = aws_stack.connect_to_service('sqs')
try:
queue_url = aws_stack.sqs_queue_url_for_arn(notif['Queue'])
sqs_client.send_message(QueueUrl=queue_url, MessageBody=message)
except Exception as e:
LOGGER.warning('Unable to send notification for S3 bucket "%s" to SQS queue "%s": %s' %
(bucket_name, notif['Queue'], e))
if notif.get('Topic'):
sns_client = aws_stack.connect_to_service('sns')
try:
sns_client.publish(TopicArn=notif['Topic'], Message=message, Subject='Amazon S3 Notification')
except Exception as e:
LOGGER.warning('Unable to send notification for S3 bucket "%s" to SNS topic "%s": %s' %
(bucket_name, notif['Topic'], e))
# CloudFunction and LambdaFunction are semantically identical
lambda_function_config = notif.get('CloudFunction') or notif.get('LambdaFunction')
if lambda_function_config:
# make sure we don't run into a socket timeout
connection_config = botocore.config.Config(read_timeout=300)
lambda_client = aws_stack.connect_to_service('lambda', config=connection_config)
try:
lambda_client.invoke(FunctionName=lambda_function_config,
InvocationType='Event', Payload=message)
except Exception:
LOGGER.warning('Unable to send notification for S3 bucket "%s" to Lambda function "%s".' %
(bucket_name, lambda_function_config))
if not filter(lambda x: notif.get(x), NOTIFICATION_DESTINATION_TYPES):
LOGGER.warning('Neither of %s defined for S3 notification.' %
'/'.join(NOTIFICATION_DESTINATION_TYPES))
# TODO: refactor/unify the 3 functions below...
def get_cors(bucket_name):
bucket_name = normalize_bucket_name(bucket_name)
response = Response()
exists, code = bucket_exists(bucket_name)
if not exists:
response.status_code = int(code)
return response
cors = BUCKET_CORS.get(bucket_name)
if not cors:
cors = {
'CORSConfiguration': []
}
body = xmltodict.unparse(cors)
response._content = body
response.status_code = 200
return response
def set_cors(bucket_name, cors):
bucket_name = normalize_bucket_name(bucket_name)
response = Response()
exists, code = bucket_exists(bucket_name)
if not exists:
response.status_code = int(code)
return response
if not isinstance(cors, dict):
cors = xmltodict.parse(cors)
BUCKET_CORS[bucket_name] = cors
response.status_code = 200
return response
def delete_cors(bucket_name):
bucket_name = normalize_bucket_name(bucket_name)
response = Response()
exists, code = bucket_exists(bucket_name)
if not exists:
response.status_code = int(code)
return response
BUCKET_CORS.pop(bucket_name, {})
response.status_code = 200
return response
def convert_origins_into_list(allowed_origins):
if isinstance(allowed_origins, list):
return allowed_origins
return [allowed_origins]
def get_origin_host(headers):
origin = headers.get('Origin') or get_forwarded_for_host(headers)
return origin
def get_forwarded_for_host(headers):
x_forwarded_header = re.split(r',\s?', headers.get('X-Forwarded-For', ''))
host = x_forwarded_header[len(x_forwarded_header) - 1]
return host
def append_cors_headers(bucket_name, request_method, request_headers, response):
bucket_name = normalize_bucket_name(bucket_name)
# Check whether CORS is configured for this bucket
cors = BUCKET_CORS.get(bucket_name)
if not cors:
return
# Cleaning headers
for header in CORS_HEADERS:
if header in response.headers:
del response.headers[header]
# Fetching origin of the request
origin = get_origin_host(request_headers)
rules = cors['CORSConfiguration']['CORSRule']
if not isinstance(rules, list):
rules = [rules]
response.headers['Access-Control-Allow-Origin'] = ''
response.headers['Access-Control-Allow-Methods'] = ''
response.headers['Access-Control-Allow-Headers'] = ''
response.headers['Access-Control-Expose-Headers'] = ''
for rule in rules:
# add allow-origin header
allowed_methods = rule.get('AllowedMethod', [])
if request_method in allowed_methods:
allowed_origins = rule.get('AllowedOrigin', [])
# when only one origin is being set in cors then the allowed_origins is being
# reflected as a string here,so making it a list and then proceeding.
allowed_origins = convert_origins_into_list(allowed_origins)
for allowed in allowed_origins:
if origin in allowed or re.match(allowed.replace('*', '.*'), origin):
response.headers['Access-Control-Allow-Origin'] = origin
if 'AllowedMethod' in rule:
response.headers['Access-Control-Allow-Methods'] = \
','.join(allowed_methods) if isinstance(allowed_methods, list) else allowed_methods
if 'AllowedHeader' in rule:
allowed_headers = rule['AllowedHeader']
response.headers['Access-Control-Allow-Headers'] = \
','.join(allowed_headers) if isinstance(allowed_headers, list) else allowed_headers
if 'ExposeHeader' in rule:
expose_headers = rule['ExposeHeader']
response.headers['Access-Control-Expose-Headers'] = \
','.join(expose_headers) if isinstance(expose_headers, list) else expose_headers
if 'MaxAgeSeconds' in rule:
maxage_header = rule['MaxAgeSeconds']
response.headers['Access-Control-Max-Age'] = maxage_header
break
if response.headers['Access-Control-Allow-Origin'] != '*':
response.headers['Access-Control-Allow-Credentials'] = 'true'
def append_aws_request_troubleshooting_headers(response):
gen_amz_request_id = ''.join(random.choice('0123456789ABCDEF') for i in range(16))
if response.headers.get('x-amz-request-id') is None:
response.headers['x-amz-request-id'] = gen_amz_request_id
if response.headers.get('x-amz-id-2') is None:
response.headers['x-amz-id-2'] = 'MzRISOwyjmnup' + gen_amz_request_id + '7/JypPGXLh0OVFGcJaaO3KW/hRAqKOpIEEp'
def add_accept_range_header(response):
if response.headers.get('accept-ranges') is None:
response.headers['accept-ranges'] = 'bytes'
def is_object_expired(path):
object_expiry = get_object_expiry(path)
if not object_expiry:
return False
if dateutil.parser.parse(object_expiry) > \
datetime.datetime.now(timezone(dateutil.parser.parse(object_expiry).tzname())):
return False
return True
def set_object_expiry(path, headers):
OBJECT_EXPIRY[path] = headers.get('expires')
def get_object_expiry(path):
return OBJECT_EXPIRY.get(path)
def is_url_already_expired(expiry_timestamp):
if int(expiry_timestamp) < int(now_utc()):
return True
return False
def add_response_metadata_headers(response):
if response.headers.get('content-language') is None:
response.headers['content-language'] = 'en-US'
if response.headers.get('cache-control') is None:
response.headers['cache-control'] = 'no-cache'
if response.headers.get('content-encoding') is None:
if not uses_chunked_encoding(response):
response.headers['content-encoding'] = 'identity'
def append_last_modified_headers(response, content=None):
"""Add Last-Modified header with current time
(if the response content is an XML containing <LastModified>, add that instead)"""
time_format = '%a, %d %b %Y %H:%M:%S GMT' # TimeFormat
try:
if content:
last_modified_str = re.findall(r'<LastModified>([^<]*)</LastModified>', content)
if last_modified_str:
last_modified_str = last_modified_str[0]
last_modified_time_format = dateutil.parser.parse(last_modified_str).strftime(time_format)
response.headers['Last-Modified'] = last_modified_time_format
except TypeError as err:
LOGGER.debug('No parsable content: %s' % err)
except ValueError as err:
LOGGER.error('Failed to parse LastModified: %s' % err)
except Exception as err:
LOGGER.error('Caught generic exception (parsing LastModified): %s' % err)
# if cannot parse any LastModified, just continue
try:
if response.headers.get('Last-Modified', '') == '':
response.headers['Last-Modified'] = datetime.datetime.now().strftime(time_format)
except Exception as err:
LOGGER.error('Caught generic exception (setting LastModified header): %s' % err)
def append_list_objects_marker(method, path, data, response):
if 'marker=' in path:
marker = ''
content = to_str(response.content)
if '<ListBucketResult' in content and '<Marker>' not in content:
parsed = urlparse.urlparse(path)
query_map = urlparse.parse_qs(parsed.query)
if query_map.get('marker') and query_map.get('marker')[0]:
marker = query_map.get('marker')[0]
insert = '<Marker>%s</Marker>' % marker
response._content = content.replace('</ListBucketResult>', '%s</ListBucketResult>' % insert)
response.headers.pop('Content-Length', None)
def append_metadata_headers(method, query_map, headers):
for key, value in query_map.items():
if key.lower().startswith(OBJECT_METADATA_KEY_PREFIX):
if headers.get(key) is None:
headers[key] = value[0]
def fix_location_constraint(response):
""" Make sure we return a valid non-empty LocationConstraint, as this otherwise breaks Serverless. """
try:
content = to_str(response.content or '') or ''
except Exception:
content = ''
if 'LocationConstraint' in content:
pattern = r'<LocationConstraint([^>]*)>\s*</LocationConstraint>'
replace = r'<LocationConstraint\1>%s</LocationConstraint>' % aws_stack.get_region()
response._content = re.sub(pattern, replace, content)
remove_xml_preamble(response)
def fix_range_content_type(bucket_name, path, headers, response):
# Fix content type for Range requests - https://github.com/localstack/localstack/issues/1259
if 'Range' not in headers:
return
if response.status_code >= 400:
return
s3_client = aws_stack.connect_to_service('s3')
path = urlparse.unquote(path)
key_name = get_key_name(path, headers)
result = s3_client.head_object(Bucket=bucket_name, Key=key_name)
content_type = result['ContentType']
if response.headers.get('Content-Type') == 'text/html; charset=utf-8':
response.headers['Content-Type'] = content_type
def fix_delete_objects_response(bucket_name, method, parsed_path, data, headers, response):
# Deleting non-existing keys should not result in errors.
# Fixes https://github.com/localstack/localstack/issues/1893
if not (method == 'POST' and parsed_path.query == 'delete' and '<Delete' in to_str(data or '')):
return
content = to_str(response._content)
if '<Error>' not in content:
return
result = xmltodict.parse(content).get('DeleteResult')
errors = result.get('Error')
errors = errors if isinstance(errors, list) else [errors]
deleted = result.get('Deleted')
if not isinstance(result.get('Deleted'), list):
deleted = result['Deleted'] = [deleted] if deleted else []
for entry in list(errors):
if set(entry.keys()) == set(['Key']):
errors.remove(entry)
deleted.append(entry)
if not errors:
result.pop('Error')
response._content = xmltodict.unparse({'DeleteResult': result})
def fix_metadata_key_underscores(request_headers={}, response=None):
# fix for https://github.com/localstack/localstack/issues/1790
underscore_replacement = '---'
meta_header_prefix = 'x-amz-meta-'
prefix_len = len(meta_header_prefix)
updated = False
for key in list(request_headers.keys()):
if key.lower().startswith(meta_header_prefix):
key_new = meta_header_prefix + key[prefix_len:].replace('_', underscore_replacement)
if key != key_new:
request_headers[key_new] = request_headers.pop(key)
updated = True
if response is not None:
for key in list(response.headers.keys()):
if key.lower().startswith(meta_header_prefix):
key_new = meta_header_prefix + key[prefix_len:].replace(underscore_replacement, '_')
if key != key_new:
response.headers[key_new] = response.headers.pop(key)
return updated
def fix_creation_date(method, path, response):
if method != 'GET' or path != '/':
return
response._content = re.sub(r'(\.[0-9]+)(\+00:00)?</CreationDate>',
r'\1Z</CreationDate>', to_str(response._content))
def fix_delimiter(data, headers, response):
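# drop the literal 'None' from empty <Delimiter> elements in the XML listing response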
if response.status_code == 200 and response._content:
c, xml_prefix, delimiter = response._content, '<?xml', '<Delimiter><'
pattern = '[<]Delimiter[>]None[<]'
if isinstance(c, bytes):
xml_prefix, delimiter = xml_prefix.encode(), delimiter.encode()
pattern = pattern.encode()
if c.startswith(xml_prefix):
response._content = re.compile(pattern).sub(delimiter, c)
def convert_to_chunked_encoding(method, path, response):
if method != 'GET' or path != '/':
return
if response.headers.get('Transfer-Encoding', '').lower() == 'chunked':
return
response.headers['Transfer-Encoding'] = 'chunked'
response.headers.pop('Content-Encoding', None)
response.headers.pop('Content-Length', None)
def unquote(s):
if (s[0], s[-1]) in (('"', '"'), ("'", "'")):
return s[1:-1]
return s
def ret304_on_etag(data, headers, response):
etag = response.headers.get('ETag')
if etag:
match = headers.get('If-None-Match')
if match and unquote(match) == unquote(etag):
response.status_code = 304
response._content = ''
def fix_etag_for_multipart(data, headers, response):
# Fix for https://github.com/localstack/localstack/issues/1978
if headers.get(CONTENT_SHA256_HEADER) == STREAMING_HMAC_PAYLOAD:
try:
if b'chunk-signature=' not in to_bytes(data):
return
correct_hash = md5(strip_chunk_signatures(data))
tags = r'<ETag>%s</ETag>'
pattern = r'(")?([^<&]+)(")?'
replacement = r'\g<1>%s\g<3>' % correct_hash
response._content = re.sub(tags % pattern, tags % replacement, to_str(response.content))
if response.headers.get('ETag'):
response.headers['ETag'] = re.sub(pattern, replacement, response.headers['ETag'])
except Exception:
pass
def remove_xml_preamble(response):
""" Removes <?xml ... ?> from a response content """
response._content = re.sub(r'^<\?[^\?]+\?>', '', to_str(response._content))
# --------------
# HELPER METHODS
# for lifecycle/replication/encryption/...
# --------------
def get_lifecycle(bucket_name):
bucket_name = normalize_bucket_name(bucket_name)
exists, code, body = is_bucket_available(bucket_name)
if not exists:
return requests_response(body, status_code=code)
lifecycle = BUCKET_LIFECYCLE.get(bucket_name)
status_code = 200
if not lifecycle:
lifecycle = {
'Error': {
'Code': 'NoSuchLifecycleConfiguration',
'Message': 'The lifecycle configuration does not exist'
}
}
status_code = 404
body = xmltodict.unparse(lifecycle)
return requests_response(body, status_code=status_code)
def get_replication(bucket_name):
bucket_name = normalize_bucket_name(bucket_name)
exists, code, body = is_bucket_available(bucket_name)
if not exists:
return requests_response(body, status_code=code)
replication = BUCKET_REPLICATIONS.get(bucket_name)
status_code = 200
if not replication:
replication = {
'Error': {
'Code': 'ReplicationConfigurationNotFoundError',
'Message': 'The replication configuration was not found'
}
}
status_code = 404
body = xmltodict.unparse(replication)
return requests_response(body, status_code=status_code)
def get_encryption(bucket_name):
bucket_name = normalize_bucket_name(bucket_name)
exists, code, body = is_bucket_available(bucket_name)
if not exists:
return requests_response(body, status_code=code)
encryption = BUCKET_ENCRYPTIONS.get(bucket_name)
status_code = 200
if not encryption:
encryption = {
'Error': {
'Code': 'ServerSideEncryptionConfigurationNotFoundError',
'Message': 'The server side encryption configuration was not found'
}
}
status_code = 404
body = xmltodict.unparse(encryption)
return requests_response(body, status_code=status_code)
def get_object_lock(bucket_name):
bucket_name = normalize_bucket_name(bucket_name)
exists, code, body = is_bucket_available(bucket_name)
if not exists:
return requests_response(body, status_code=code)
lock_config = OBJECT_LOCK_CONFIGS.get(bucket_name)
status_code = 200
if not lock_config:
lock_config = {
'Error': {
'Code': 'ObjectLockConfigurationNotFoundError',
'Message': 'Object Lock configuration does not exist for this bucket'
}
}
status_code = 404
body = xmltodict.unparse(lock_config)
return requests_response(body, status_code=status_code)
def set_lifecycle(bucket_name, lifecycle):
bucket_name = normalize_bucket_name(bucket_name)
exists, code, body = is_bucket_available(bucket_name)
if not exists:
return requests_response(body, status_code=code)
if isinstance(to_str(lifecycle), six.string_types):
lifecycle = xmltodict.parse(lifecycle)
BUCKET_LIFECYCLE[bucket_name] = lifecycle
return 200
def delete_lifecycle(bucket_name):
bucket_name = normalize_bucket_name(bucket_name)
exists, code, body = is_bucket_available(bucket_name)
if not exists:
return requests_response(body, status_code=code)
if BUCKET_LIFECYCLE.get(bucket_name):
BUCKET_LIFECYCLE.pop(bucket_name)
def set_replication(bucket_name, replication):
bucket_name = normalize_bucket_name(bucket_name)
exists, code, body = is_bucket_available(bucket_name)
if not exists:
return requests_response(body, status_code=code)
if isinstance(to_str(replication), six.string_types):
replication = xmltodict.parse(replication)
BUCKET_REPLICATIONS[bucket_name] = replication
return 200
def set_encryption(bucket_name, encryption):
bucket_name = normalize_bucket_name(bucket_name)
exists, code, body = is_bucket_available(bucket_name)
if not exists:
return requests_response(body, status_code=code)
if isinstance(to_str(encryption), six.string_types):
encryption = xmltodict.parse(encryption)
BUCKET_ENCRYPTIONS[bucket_name] = encryption
return 200
def set_object_lock(bucket_name, lock_config):
bucket_name = normalize_bucket_name(bucket_name)
exists, code, body = is_bucket_available(bucket_name)
if not exists:
return requests_response(body, status_code=code)
if isinstance(to_str(lock_config), six.string_types):
lock_config = xmltodict.parse(lock_config)
OBJECT_LOCK_CONFIGS[bucket_name] = lock_config
return 200
# -------------
# UTIL METHODS
# -------------
def strip_chunk_signatures(data):
# For clients that use streaming v4 authentication, the request contains chunk signatures
# in the HTTP body (see example below) which we need to strip as moto cannot handle them
#
# 17;chunk-signature=6e162122ec4962bea0b18bc624025e6ae4e9322bdc632762d909e87793ac5921
# <payload data ...>
# 0;chunk-signature=927ab45acd82fc90a3c210ca7314d59fedc77ce0c914d79095f8cc9563cf2c70
data_new = ''
if data is not None:
data_new = re.sub(b'(^|\r\n)[0-9a-fA-F]+;chunk-signature=[0-9a-f]{64}(\r\n)(\r\n$)?', b'',
to_bytes(data), flags=re.MULTILINE | re.DOTALL)
return data_new
def is_bucket_available(bucket_name):
body = {'Code': '200'}
exists, code = bucket_exists(bucket_name)
if not exists:
body = {
'Error': {
'Code': code,
'Message': 'The bucket does not exist'
}
}
return exists, code, body
return True, 200, body
def bucket_exists(bucket_name):
"""Tests for the existence of the specified bucket. Returns the error code
if the bucket does not exist (200 if the bucket does exist).
"""
bucket_name = normalize_bucket_name(bucket_name)
s3_client = aws_stack.connect_to_service('s3')
try:
s3_client.head_bucket(Bucket=bucket_name)
except ClientError as err:
error_code = err.response.get('Error').get('Code')
return False, error_code
return True, 200
def check_content_md5(data, headers):
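# compare the MD5 of the payload (with chunk signatures stripped) against the
# base64-encoded Content-MD5 header supplied by the client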
actual = md5(strip_chunk_signatures(data))
try:
md5_header = headers['Content-MD5']
if not is_base64(md5_header):
raise Exception('Content-MD5 header is not in Base64 format: "%s"' % md5_header)
expected = to_str(codecs.encode(base64.b64decode(md5_header), 'hex'))
except Exception:
return error_response('The Content-MD5 you specified is not valid.', 'InvalidDigest', status_code=400)
if actual != expected:
return error_response('The Content-MD5 you specified did not match what we received.',
'BadDigest', status_code=400)
def error_response(message, code, status_code=400):
result = {'Error': {'Code': code, 'Message': message}}
content = xmltodict.unparse(result)
headers = {'content-type': 'application/xml'}
return requests_response(content, status_code=status_code, headers=headers)
def no_such_key_error(resource, requestId=None, status_code=400):
result = {'Error': {'Code': 'NoSuchKey',
'Message': 'The resource you requested does not exist',
'Resource': resource, 'RequestId': requestId}}
content = xmltodict.unparse(result)
headers = {'content-type': 'application/xml'}
return requests_response(content, status_code=status_code, headers=headers)
def token_expired_error(resource, requestId=None, status_code=400):
result = {'Error': {'Code': 'ExpiredToken',
'Message': 'The provided token has expired.',
'Resource': resource, 'RequestId': requestId}}
content = xmltodict.unparse(result)
headers = {'content-type': 'application/xml'}
return requests_response(content, status_code=status_code, headers=headers)
def expand_redirect_url(starting_url, key, bucket):
""" Add key and bucket parameters to starting URL query string. """
parsed = urlparse.urlparse(starting_url)
query = collections.OrderedDict(urlparse.parse_qsl(parsed.query))
query.update([('key', key), ('bucket', bucket)])
redirect_url = urlparse.urlunparse((
parsed.scheme, parsed.netloc, parsed.path,
parsed.params, urlparse.urlencode(query), None))
return redirect_url
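
def _example_expand_redirect_url():
    # Hedged illustration only -- never called and not part of the original listener; the
    # redirect URL below is made up. The helper appends key and bucket to the query string.
    url = expand_redirect_url('https://example.com/done?x=1', 'key.txt', 'my-bucket')
    assert url == 'https://example.com/done?x=1&key=key.txt&bucket=my-bucket'
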
def is_bucket_specified_in_domain_name(path, headers):
host = headers.get('host', '')
return re.match(r'.*s3(\-website)?\.([^\.]+\.)?amazonaws.com', host)
def is_object_specific_request(path, headers):
""" Return whether the given request is specific to a certain S3 object.
Note: the bucket name is usually specified as a path parameter,
but may also be part of the domain name! """
bucket_in_domain = is_bucket_specified_in_domain_name(path, headers)
parts = len(path.split('/'))
return parts > (1 if bucket_in_domain else 2)
def normalize_bucket_name(bucket_name):
return S3Bucket.normalize_bucket_name(bucket_name)
def get_key_name(path, headers):
parsed = urlparse.urlparse(path)
path_parts = parsed.path.lstrip('/').split('/', 1)
if uses_path_addressing(headers):
return path_parts[1]
return path_parts[0]
def uses_path_addressing(headers):
    # we can assume that the host header we are receiving here is actually the header we originally received
# from the client (because the edge service is forwarding the request in memory)
host = headers.get('host') or headers.get(constants.HEADER_LOCALSTACK_EDGE_URL, '').split('://')[-1]
return host.startswith(HOSTNAME) or host.startswith(HOSTNAME_EXTERNAL) or host.startswith(LOCALHOST_IP)
def get_bucket_name(path, headers):
parsed = urlparse.urlparse(path)
    # try to pick the bucket_name from the path
bucket_name = parsed.path.split('/')[1]
# is the hostname not starting with a bucket name?
if uses_path_addressing(headers):
return normalize_bucket_name(bucket_name)
# matches the common endpoints like
# - '<bucket_name>.s3.<region>.amazonaws.com'
# - '<bucket_name>.s3-<region>.amazonaws.com.cn'
common_pattern = re.compile(r'^(.+)\.s3[.\-][a-z]{2}-[a-z]+-[0-9]{1,}'
r'\.amazonaws\.com(\.[a-z]+)?$')
# matches dualstack endpoints like
# - <bucket_name>.s3.dualstack.<region>.amazonaws.com'
# - <bucket_name>.s3.dualstack.<region>.amazonaws.com.cn'
dualstack_pattern = re.compile(r'^(.+)\.s3\.dualstack\.[a-z]{2}-[a-z]+-[0-9]{1,}'
r'\.amazonaws\.com(\.[a-z]+)?$')
# matches legacy endpoints like
# - '<bucket_name>.s3.amazonaws.com'
# - '<bucket_name>.s3-external-1.amazonaws.com.cn'
legacy_patterns = re.compile(r'^(.+)\.s3\.?(-external-1)?\.amazonaws\.com(\.[a-z]+)?$')
# if any of the above patterns match, the first captured group
# will be returned as the bucket name
host = headers['host']
for pattern in [common_pattern, dualstack_pattern, legacy_patterns]:
match = pattern.match(host)
if match:
bucket_name = match.groups()[0]
break
# we're either returning the original bucket_name,
# or a pattern matched the host and we're returning that name instead
return normalize_bucket_name(bucket_name)
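
def _example_get_bucket_name():
    # Hedged illustration only -- never called and not part of the original listener; the
    # virtual-host style Host headers below are made up and should all resolve to 'my-bucket'
    # through the endpoint patterns above.
    for host in ('my-bucket.s3.us-east-2.amazonaws.com',
                 'my-bucket.s3.dualstack.eu-west-1.amazonaws.com',
                 'my-bucket.s3.amazonaws.com'):
        assert get_bucket_name('/some/key', {'host': host}) == normalize_bucket_name('my-bucket')
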
def handle_notification_request(bucket, method, data):
response = Response()
response.status_code = 200
response._content = ''
if method == 'GET':
# TODO check if bucket exists
result = '<NotificationConfiguration xmlns="%s">' % XMLNS_S3
if bucket in S3_NOTIFICATIONS:
notifs = S3_NOTIFICATIONS[bucket]
for notif in notifs:
for dest in NOTIFICATION_DESTINATION_TYPES:
if dest in notif:
dest_dict = {
'%sConfiguration' % dest: {
'Id': notif['Id'],
dest: notif[dest],
'Event': notif['Event'],
'Filter': notif['Filter']
}
}
result += xmltodict.unparse(dest_dict, full_document=False)
result += '</NotificationConfiguration>'
response._content = result
if method == 'PUT':
parsed = xmltodict.parse(data)
notif_config = parsed.get('NotificationConfiguration')
S3_NOTIFICATIONS[bucket] = []
for dest in NOTIFICATION_DESTINATION_TYPES:
config = notif_config.get('%sConfiguration' % (dest))
configs = config if isinstance(config, list) else [config] if config else []
for config in configs:
events = config.get('Event')
if isinstance(events, six.string_types):
events = [events]
event_filter = config.get('Filter', {})
# make sure FilterRule is an array
s3_filter = _get_s3_filter(event_filter)
if s3_filter and not isinstance(s3_filter.get('FilterRule', []), list):
s3_filter['FilterRule'] = [s3_filter['FilterRule']]
# create final details dict
notification_details = {
'Id': config.get('Id', str(uuid.uuid4())),
'Event': events,
dest: config.get(dest),
'Filter': event_filter
}
S3_NOTIFICATIONS[bucket].append(clone(notification_details))
return response
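
# Hedged illustration only (not part of the original listener): a PUT ?notification body such as
#   <NotificationConfiguration>
#     <QueueConfiguration>
#       <Id>1</Id>
#       <Queue>arn:aws:sqs:us-east-1:000000000000:my-queue</Queue>
#       <Event>s3:ObjectCreated:*</Event>
#     </QueueConfiguration>
#   </NotificationConfiguration>
# would be stored (assuming 'Queue' is one of NOTIFICATION_DESTINATION_TYPES) roughly as
#   S3_NOTIFICATIONS[bucket] == [{'Id': '1', 'Event': ['s3:ObjectCreated:*'],
#                                 'Queue': 'arn:aws:sqs:us-east-1:000000000000:my-queue', 'Filter': {}}]
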
def remove_bucket_notification(bucket):
S3_NOTIFICATIONS.pop(bucket, None)
def not_none_or(value, alternative):
return value if value is not None else alternative
class ProxyListenerS3(PersistingProxyListener):
def api_name(self):
return 's3'
@staticmethod
def is_s3_copy_request(headers, path):
return 'x-amz-copy-source' in headers or 'x-amz-copy-source' in path
@staticmethod
def is_create_multipart_request(query):
return query.startswith('uploads')
@staticmethod
def is_multipart_upload(query):
return query.startswith('uploadId')
@staticmethod
def get_201_response(key, bucket_name):
return """
<PostResponse>
<Location>{protocol}://{host}/{encoded_key}</Location>
<Bucket>{bucket}</Bucket>
<Key>{key}</Key>
<ETag>{etag}</ETag>
</PostResponse>
""".format(
protocol=get_service_protocol(),
host=config.HOSTNAME_EXTERNAL,
encoded_key=urlparse.quote(key, safe=''),
key=key,
bucket=bucket_name,
etag='d41d8cd98f00b204e9800998ecf8427f',
)
@staticmethod
def _update_location(content, bucket_name):
bucket_name = normalize_bucket_name(bucket_name)
host = config.HOSTNAME_EXTERNAL
if ':' not in host:
host = '%s:%s' % (host, config.PORT_S3)
return re.sub(r'<Location>\s*([a-zA-Z0-9\-]+)://[^/]+/([^<]+)\s*</Location>',
r'<Location>%s://%s/%s/\2</Location>' % (get_service_protocol(), host, bucket_name),
content, flags=re.MULTILINE)
@staticmethod
def is_query_allowable(method, query):
# Generally if there is a query (some/path/with?query) we don't want to send notifications
if not query:
return True
# Except we do want to notify on multipart and presigned url upload completion
contains_cred = 'X-Amz-Credential' in query and 'X-Amz-Signature' in query
contains_key = 'AWSAccessKeyId' in query and 'Signature' in query
if (method == 'POST' and query.startswith('uploadId')) or contains_cred or contains_key:
return True
@staticmethod
def parse_policy_expiration_date(expiration_string):
try:
return datetime.datetime.strptime(expiration_string, POLICY_EXPIRATION_FORMAT1)
except Exception:
return datetime.datetime.strptime(expiration_string, POLICY_EXPIRATION_FORMAT2)
def forward_request(self, method, path, data, headers):
        # Create a list of query parameters from the URL
parsed = urlparse.urlparse('{}{}'.format(config.get_edge_url(), path))
query_params = parse_qs(parsed.query)
path_orig = path
path = path.replace('#', '%23') # support key names containing hashes (e.g., required by Amplify)
# Detecting pre-sign url and checking signature
if any([p in query_params for p in PRESIGN_QUERY_PARAMS]):
response = authenticate_presign_url(method=method, path=path, data=data, headers=headers)
if response is not None:
return response
# parse path and query params
parsed_path = urlparse.urlparse(path)
# Make sure we use 'localhost' as forward host, to ensure moto uses path style addressing.
# Note that all S3 clients using LocalStack need to enable path style addressing.
if 's3.amazonaws.com' not in headers.get('host', ''):
headers['host'] = 'localhost'
# check content md5 hash integrity if not a copy request or multipart initialization
if 'Content-MD5' in headers and not self.is_s3_copy_request(headers, path) \
and not self.is_create_multipart_request(parsed_path.query):
response = check_content_md5(data, headers)
if response is not None:
return response
modified_data = None
# check bucket name
bucket_name = get_bucket_name(path, headers)
if method == 'PUT' and not re.match(BUCKET_NAME_REGEX, bucket_name):
if len(parsed_path.path) <= 1:
return error_response('Unable to extract valid bucket name. Please ensure that your AWS SDK is ' +
'configured to use path style addressing, or send a valid <Bucket>.s3.amazonaws.com "Host" header',
'InvalidBucketName', status_code=400)
return error_response('The specified bucket is not valid.', 'InvalidBucketName', status_code=400)
# TODO: For some reason, moto doesn't allow us to put a location constraint on us-east-1
to_find1 = to_bytes('<LocationConstraint>us-east-1</LocationConstraint>')
to_find2 = to_bytes('<CreateBucketConfiguration')
if data and data.startswith(to_bytes('<')) and to_find1 in data and to_find2 in data:
# Note: with the latest version, <CreateBucketConfiguration> must either
# contain a valid <LocationConstraint>, or not be present at all in the body.
modified_data = b''
# If this request contains streaming v4 authentication signatures, strip them from the message
        # Related issue: https://github.com/localstack/localstack/issues/98
# TODO we should evaluate whether to replace moto s3 with scality/S3:
# https://github.com/scality/S3/issues/237
is_streaming_payload = headers.get(CONTENT_SHA256_HEADER) == STREAMING_HMAC_PAYLOAD
if is_streaming_payload:
modified_data = strip_chunk_signatures(not_none_or(modified_data, data))
headers['Content-Length'] = headers.get('x-amz-decoded-content-length')
# POST requests to S3 may include a "${filename}" placeholder in the
# key, which should be replaced with an actual file name before storing.
if method == 'POST':
original_data = not_none_or(modified_data, data)
expanded_data = multipart_content.expand_multipart_filename(original_data, headers)
if expanded_data is not original_data:
modified_data = expanded_data
# If no content-type is provided, 'binary/octet-stream' should be used
# src: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
if method == 'PUT' and not headers.get('content-type'):
headers['content-type'] = 'binary/octet-stream'
# parse query params
query = parsed_path.query
path = parsed_path.path
bucket = path.split('/')[1]
query_map = urlparse.parse_qs(query, keep_blank_values=True)
# remap metadata query params (not supported in moto) to request headers
append_metadata_headers(method, query_map, headers)
# apply fixes
headers_changed = fix_metadata_key_underscores(request_headers=headers)
if query == 'notification' or 'notification' in query_map:
# handle and return response for ?notification request
response = handle_notification_request(bucket, method, data)
return response
        # if the Expires value in the URL has already passed, return an error
if method == 'GET' and 'Expires' in query_map:
if is_url_already_expired(query_map.get('Expires')[0]):
return token_expired_error(path, headers.get('x-amz-request-id'), 400)
# If multipart POST with policy in the params, return error if the policy has expired
if method == 'POST':
policy_key, policy_value = multipart_content.find_multipart_key_value(data, headers, 'policy')
if policy_key and policy_value:
policy = json.loads(base64.b64decode(policy_value).decode('utf-8'))
expiration_string = policy.get('expiration', None) # Example: 2020-06-05T13:37:12Z
if expiration_string:
expiration_datetime = self.parse_policy_expiration_date(expiration_string)
expiration_timestamp = expiration_datetime.timestamp()
if is_url_already_expired(expiration_timestamp):
return token_expired_error(path, headers.get('x-amz-request-id'), 400)
if query == 'cors' or 'cors' in query_map:
if method == 'GET':
return get_cors(bucket)
if method == 'PUT':
return set_cors(bucket, data)
if method == 'DELETE':
return delete_cors(bucket)
if query == 'lifecycle' or 'lifecycle' in query_map:
if method == 'GET':
return get_lifecycle(bucket)
if method == 'PUT':
return set_lifecycle(bucket, data)
if method == 'DELETE':
delete_lifecycle(bucket)
if query == 'replication' or 'replication' in query_map:
if method == 'GET':
return get_replication(bucket)
if method == 'PUT':
return set_replication(bucket, data)
if query == 'encryption' or 'encryption' in query_map:
if method == 'GET':
return get_encryption(bucket)
if method == 'PUT':
return set_encryption(bucket, data)
if query == 'object-lock' or 'object-lock' in query_map:
if method == 'GET':
return get_object_lock(bucket)
if method == 'PUT':
return set_object_lock(bucket, data)
if method == 'DELETE' and re.match(BUCKET_NAME_REGEX, bucket_name):
delete_lifecycle(bucket_name)
path_orig_escaped = path_orig.replace('#', '%23')
if modified_data is not None or headers_changed or path_orig != path_orig_escaped:
data_to_return = not_none_or(modified_data, data)
if modified_data is not None:
headers['Content-Length'] = str(len(data_to_return or ''))
return Request(url=path_orig_escaped, data=data_to_return, headers=headers, method=method)
return True
def get_forward_url(self, method, path, data, headers):
def sub(match):
# make sure to convert any bucket names to lower case
bucket_name = normalize_bucket_name(match.group(1))
return '/%s%s' % (bucket_name, match.group(2) or '')
path_new = re.sub(r'/([^?/]+)([?/].*)?', sub, path)
if path == path_new:
return
url = 'http://%s:%s%s' % (constants.LOCALHOST, PORT_S3_BACKEND, path_new)
return url
def return_response(self, method, path, data, headers, response, request_handler=None):
path = to_str(path)
method = to_str(method)
path = path.replace('#', '%23')
# persist this API call to disk
super(ProxyListenerS3, self).return_response(method, path, data, headers, response, request_handler)
# No path-name based bucket name? Try host-based
bucket_name = get_bucket_name(path, headers)
hostname_parts = headers['host'].split('.')
if (not bucket_name or len(bucket_name) == 0) and len(hostname_parts) > 1:
bucket_name = hostname_parts[0]
# POST requests to S3 may include a success_action_redirect or
# success_action_status field, which should be used to redirect a
# client to a new location.
key = None
if method == 'POST':
key, redirect_url = multipart_content.find_multipart_key_value(data, headers)
if key and redirect_url:
response.status_code = 303
response.headers['Location'] = expand_redirect_url(redirect_url, key, bucket_name)
LOGGER.debug('S3 POST {} to {}'.format(response.status_code, response.headers['Location']))
expanded_data = multipart_content.expand_multipart_filename(data, headers)
key, status_code = multipart_content.find_multipart_key_value(
expanded_data, headers, 'success_action_status'
)
if response.status_code == 201 and key:
response._content = self.get_201_response(key, bucket_name)
response.headers['Content-Length'] = str(len(response._content))
response.headers['Content-Type'] = 'application/xml; charset=utf-8'
return response
if method == 'GET' and response.status_code == 416:
return error_response('The requested range cannot be satisfied.', 'InvalidRange', 416)
parsed = urlparse.urlparse(path)
bucket_name_in_host = headers['host'].startswith(bucket_name)
should_send_notifications = all([
method in ('PUT', 'POST', 'DELETE'),
'/' in path[1:] or bucket_name_in_host or key,
# check if this is an actual put object request, because it could also be
# a put bucket request with a path like this: /bucket_name/
bucket_name_in_host or key or (len(path[1:].split('/')) > 1 and len(path[1:].split('/')[1]) > 0),
self.is_query_allowable(method, parsed.query)
])
# get subscribers and send bucket notifications
if should_send_notifications:
# if we already have a good key, use it, otherwise examine the path
if key:
object_path = '/' + key
elif bucket_name_in_host:
object_path = parsed.path
else:
parts = parsed.path[1:].split('/', 1)
object_path = parts[1] if parts[1][0] == '/' else '/%s' % parts[1]
version_id = response.headers.get('x-amz-version-id', None)
send_notifications(method, bucket_name, object_path, version_id)
# publish event for creation/deletion of buckets:
if method in ('PUT', 'DELETE') and ('/' not in path[1:] or len(path[1:].split('/')[1]) <= 0):
event_type = (event_publisher.EVENT_S3_CREATE_BUCKET if method == 'PUT'
else event_publisher.EVENT_S3_DELETE_BUCKET)
event_publisher.fire_event(event_type, payload={'n': event_publisher.get_hash(bucket_name)})
# fix an upstream issue in moto S3 (see https://github.com/localstack/localstack/issues/382)
if method == 'PUT' and parsed.query == 'policy':
response._content = ''
response.status_code = 204
return response
# emulate ErrorDocument functionality if a website is configured
if method == 'GET' and response.status_code == 404 and parsed.query != 'website':
s3_client = aws_stack.connect_to_service('s3')
try:
# Verify the bucket exists in the first place--if not, we want normal processing of the 404
s3_client.head_bucket(Bucket=bucket_name)
website_config = s3_client.get_bucket_website(Bucket=bucket_name)
error_doc_key = website_config.get('ErrorDocument', {}).get('Key')
if error_doc_key:
error_doc_path = '/' + bucket_name + '/' + error_doc_key
if parsed.path != error_doc_path:
error_object = s3_client.get_object(Bucket=bucket_name, Key=error_doc_key)
response.status_code = 200
response._content = error_object['Body'].read()
response.headers['Content-Length'] = str(len(response._content))
except ClientError:
# Pass on the 404 as usual
pass
if response is not None:
reset_content_length = False
# append CORS headers and other annotations/patches to response
append_cors_headers(bucket_name, request_method=method, request_headers=headers, response=response)
append_last_modified_headers(response=response)
append_list_objects_marker(method, path, data, response)
fix_location_constraint(response)
fix_range_content_type(bucket_name, path, headers, response)
fix_delete_objects_response(bucket_name, method, parsed, data, headers, response)
fix_metadata_key_underscores(response=response)
fix_creation_date(method, path, response=response)
fix_etag_for_multipart(data, headers, response)
ret304_on_etag(data, headers, response)
append_aws_request_troubleshooting_headers(response)
fix_delimiter(data, headers, response)
if method == 'PUT':
set_object_expiry(path, headers)
# Remove body from PUT response on presigned URL
# https://github.com/localstack/localstack/issues/1317
if method == 'PUT' and int(response.status_code) < 400 and ('X-Amz-Security-Token=' in path or
'X-Amz-Credential=' in path or 'AWSAccessKeyId=' in path):
response._content = ''
reset_content_length = True
response_content_str = None
try:
response_content_str = to_str(response._content)
except Exception:
pass
# Honor response header overrides
# https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
if method == 'GET':
add_accept_range_header(response)
add_response_metadata_headers(response)
if is_object_expired(path):
return no_such_key_error(path, headers.get('x-amz-request-id'), 400)
query_map = urlparse.parse_qs(parsed.query, keep_blank_values=True)
for param_name, header_name in ALLOWED_HEADER_OVERRIDES.items():
if param_name in query_map:
response.headers[header_name] = query_map[param_name][0]
if response_content_str and response_content_str.startswith('<'):
is_bytes = isinstance(response._content, six.binary_type)
response._content = response_content_str
append_last_modified_headers(response=response, content=response_content_str)
# We need to un-pretty-print the XML, otherwise we run into this issue with Spark:
# https://github.com/jserver/mock-s3/pull/9/files
# https://github.com/localstack/localstack/issues/183
# Note: yet, we need to make sure we have a newline after the first line: <?xml ...>\n
# Note: make sure to return XML docs verbatim: https://github.com/localstack/localstack/issues/1037
if method != 'GET' or not is_object_specific_request(path, headers):
response._content = re.sub(r'([^\?])>\n\s*<', r'\1><', response_content_str, flags=re.MULTILINE)
# update Location information in response payload
response._content = self._update_location(response._content, bucket_name)
# convert back to bytes
if is_bytes:
response._content = to_bytes(response._content)
# fix content-type: https://github.com/localstack/localstack/issues/618
# https://github.com/localstack/localstack/issues/549
# https://github.com/localstack/localstack/issues/854
if 'text/html' in response.headers.get('Content-Type', '') \
and not response_content_str.lower().startswith('<!doctype html'):
response.headers['Content-Type'] = 'application/xml; charset=utf-8'
reset_content_length = True
# update Content-Length headers (fix https://github.com/localstack/localstack/issues/541)
if method == 'DELETE':
reset_content_length = True
if reset_content_length:
response.headers['Content-Length'] = str(len(response._content))
# convert to chunked encoding, for compatibility with certain SDKs (e.g., AWS PHP SDK)
convert_to_chunked_encoding(method, path, response)
def authenticate_presign_url(method, path, headers, data=None):
sign_headers = []
url = '{}{}'.format(config.get_edge_url(), path)
parsed = urlparse.urlparse(url)
query_params = parse_qs(parsed.query)
    # Check whether the required parameters are present in the URL
if not all([p in query_params for p in PRESIGN_QUERY_PARAMS]):
return requests_error_response_xml_signature_calculation(
code=403,
message='Query-string authentication requires the Signature, Expires and AWSAccessKeyId parameters',
code_string='AccessDenied'
)
    # Fetch the headers that were sent with the request
for header in headers:
key = header[0]
if key.lower() not in IGNORED_HEADERS_LOWER:
sign_headers.append(header)
    # The request's headers take precedence over the query parameters in the request.
    # Per AWS behaviour, different values for the same field in the request headers and in the
    # query parameters of the request URL will fail the signature calculation.
presign_params_lower = [p.lower() for p in PRESIGN_QUERY_PARAMS]
if len(query_params) > 2:
for key in query_params:
if key.lower() not in presign_params_lower:
if key.lower() not in (header[0].lower() for header in headers):
sign_headers.append((key, query_params[key][0]))
    # Prepare the request dictionary used to build botocore's AWSRequest object
request_url = url.split('?')[0]
forwarded_for = get_forwarded_for_host(headers)
if forwarded_for:
request_url = re.sub('://[^/]+', '://%s' % forwarded_for, request_url)
request_dict = {
'url_path': path.split('?')[0],
'query_string': {},
'method': method,
'headers': dict(sign_headers),
'body': b'',
'url': request_url,
'context': {
'is_presign_request': True,
'use_global_endpoint': True,
'signing': {
'bucket': str(path.split('?')[0]).split('/')[1]
}
}
}
aws_request = create_request_object(request_dict)
# Calculating Signature
credentials = Credentials(access_key=TEST_AWS_ACCESS_KEY_ID, secret_key=TEST_AWS_SECRET_ACCESS_KEY)
auth = HmacV1QueryAuth(credentials=credentials, expires=query_params['Expires'][0])
split = urlsplit(aws_request.url)
string_to_sign = auth.get_string_to_sign(method=method, split=split, headers=aws_request.headers)
signature = auth.get_signature(string_to_sign=string_to_sign)
    # Compare the signature in the URL with the signature we calculated
query_sig = urlparse.unquote(query_params['Signature'][0])
if query_sig != signature:
# older signature calculation methods are not supported
# so logging a warning to the user to use the v4 calculation method
for param in SIGNATURE_V4_PARAMS:
if param.lower() not in (query_param.lower() for query_param in query_params):
LOGGER.warning(
'Older version of signature calculation method detected. Please use v4 calculation method')
return requests_error_response_xml_signature_calculation(
code=403,
code_string='SignatureDoesNotMatch',
aws_access_token=TEST_AWS_ACCESS_KEY_ID,
string_to_sign=string_to_sign,
signature=signature,
message='The request signature we calculated does not match the signature you provided. \
Check your key and signing method.')
    # Check whether the URL has expired
if int(query_params['Expires'][0]) < time.time():
return requests_error_response_xml_signature_calculation(
code=403,
code_string='AccessDenied',
message='Request has expired',
expires=query_params['Expires'][0]
)
# instantiate listener
UPDATE_S3 = ProxyListenerS3()
| 1 | 12,180 | Let's use the same `S3_HOSTNAME_PATTERN` as above here. | localstack-localstack | py |
@@ -215,6 +215,7 @@ function parseConnectionString(url, options) {
let r = parser.parse(f('mongodb://%s', hosts[i].trim()));
if (r.path && r.path.indexOf('.sock') !== -1) continue;
if (r.path && r.path.indexOf(':') !== -1) {
+ console.log("R: ",r)
// Not connecting to a socket so check for an extra slash in the hostname.
// Using String#split as perf is better than match.
if (r.path.split('/').length > 1 && r.path.indexOf('::') === -1) { | 1 | 'use strict';
const ReadPreference = require('./core').ReadPreference,
parser = require('url'),
f = require('util').format,
Logger = require('./core').Logger,
dns = require('dns');
const ReadConcern = require('./read_concern');
module.exports = function(url, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
let result;
try {
result = parser.parse(url, true);
} catch (e) {
return callback(new Error('URL malformed, cannot be parsed'));
}
if (result.protocol !== 'mongodb:' && result.protocol !== 'mongodb+srv:') {
return callback(new Error('Invalid schema, expected `mongodb` or `mongodb+srv`'));
}
if (result.protocol === 'mongodb:') {
return parseHandler(url, options, callback);
}
// Otherwise parse this as an SRV record
if (result.hostname.split('.').length < 3) {
return callback(new Error('URI does not have hostname, domain name and tld'));
}
result.domainLength = result.hostname.split('.').length;
if (result.pathname && result.pathname.match(',')) {
return callback(new Error('Invalid URI, cannot contain multiple hostnames'));
}
if (result.port) {
return callback(new Error('Ports not accepted with `mongodb+srv` URIs'));
}
let srvAddress = `_mongodb._tcp.${result.host}`;
dns.resolveSrv(srvAddress, function(err, addresses) {
if (err) return callback(err);
if (addresses.length === 0) {
return callback(new Error('No addresses found at host'));
}
for (let i = 0; i < addresses.length; i++) {
if (!matchesParentDomain(addresses[i].name, result.hostname, result.domainLength)) {
return callback(new Error('Server record does not share hostname with parent URI'));
}
}
let base = result.auth ? `mongodb://${result.auth}@` : `mongodb://`;
let connectionStrings = addresses.map(function(address, i) {
if (i === 0) return `${base}${address.name}:${address.port}`;
else return `${address.name}:${address.port}`;
});
let connectionString = connectionStrings.join(',') + '/';
let connectionStringOptions = [];
// Add the default database if needed
if (result.path) {
let defaultDb = result.path.slice(1);
if (defaultDb.indexOf('?') !== -1) {
defaultDb = defaultDb.slice(0, defaultDb.indexOf('?'));
}
connectionString += defaultDb;
}
// Default to SSL true
if (!options.ssl && !result.search) {
connectionStringOptions.push('ssl=true');
} else if (!options.ssl && result.search && !result.search.match('ssl')) {
connectionStringOptions.push('ssl=true');
}
// Keep original uri options
if (result.search) {
connectionStringOptions.push(result.search.replace('?', ''));
}
dns.resolveTxt(result.host, function(err, record) {
if (err && err.code !== 'ENODATA') return callback(err);
if (err && err.code === 'ENODATA') record = null;
if (record) {
if (record.length > 1) {
return callback(new Error('Multiple text records not allowed'));
}
record = record[0];
if (record.length > 1) record = record.join('');
else record = record[0];
if (!record.includes('authSource') && !record.includes('replicaSet')) {
return callback(new Error('Text record must only set `authSource` or `replicaSet`'));
}
connectionStringOptions.push(record);
}
// Add any options to the connection string
if (connectionStringOptions.length) {
connectionString += `?${connectionStringOptions.join('&')}`;
}
parseHandler(connectionString, options, callback);
});
});
};
function matchesParentDomain(srvAddress, parentDomain) {
let regex = /^.*?\./;
let srv = `.${srvAddress.replace(regex, '')}`;
let parent = `.${parentDomain.replace(regex, '')}`;
if (srv.endsWith(parent)) return true;
else return false;
}
function parseHandler(address, options, callback) {
let result, err;
try {
result = parseConnectionString(address, options);
} catch (e) {
err = e;
}
return err ? callback(err, null) : callback(null, result);
}
function parseConnectionString(url, options) {
// Variables
let connection_part = '';
let auth_part = '';
let query_string_part = '';
let dbName = 'admin';
// Url parser result
let result = parser.parse(url, true);
if ((result.hostname == null || result.hostname === '') && url.indexOf('.sock') === -1) {
throw new Error('No hostname or hostnames provided in connection string');
}
if (result.port === '0') {
throw new Error('Invalid port (zero) with hostname');
}
if (!isNaN(parseInt(result.port, 10)) && parseInt(result.port, 10) > 65535) {
throw new Error('Invalid port (larger than 65535) with hostname');
}
if (
result.path &&
result.path.length > 0 &&
result.path[0] !== '/' &&
url.indexOf('.sock') === -1
) {
throw new Error('Missing delimiting slash between hosts and options');
}
if (result.query) {
for (let name in result.query) {
if (name.indexOf('::') !== -1) {
throw new Error('Double colon in host identifier');
}
if (result.query[name] === '') {
throw new Error('Query parameter ' + name + ' is an incomplete value pair');
}
}
}
if (result.auth) {
let parts = result.auth.split(':');
if (url.indexOf(result.auth) !== -1 && parts.length > 2) {
throw new Error('Username with password containing an unescaped colon');
}
if (url.indexOf(result.auth) !== -1 && result.auth.indexOf('@') !== -1) {
throw new Error('Username containing an unescaped at-sign');
}
}
// Remove query
let clean = url.split('?').shift();
// Extract the list of hosts
let strings = clean.split(',');
let hosts = [];
for (let i = 0; i < strings.length; i++) {
let hostString = strings[i];
if (hostString.indexOf('mongodb') !== -1) {
if (hostString.indexOf('@') !== -1) {
hosts.push(hostString.split('@').pop());
} else {
hosts.push(hostString.substr('mongodb://'.length));
}
} else if (hostString.indexOf('/') !== -1) {
hosts.push(hostString.split('/').shift());
} else if (hostString.indexOf('/') === -1) {
hosts.push(hostString.trim());
}
}
for (let i = 0; i < hosts.length; i++) {
let r = parser.parse(f('mongodb://%s', hosts[i].trim()));
if (r.path && r.path.indexOf('.sock') !== -1) continue;
if (r.path && r.path.indexOf(':') !== -1) {
// Not connecting to a socket so check for an extra slash in the hostname.
// Using String#split as perf is better than match.
if (r.path.split('/').length > 1 && r.path.indexOf('::') === -1) {
throw new Error('Slash in host identifier');
} else {
throw new Error('Double colon in host identifier');
}
}
}
// If we have a ? mark cut the query elements off
if (url.indexOf('?') !== -1) {
query_string_part = url.substr(url.indexOf('?') + 1);
connection_part = url.substring('mongodb://'.length, url.indexOf('?'));
} else {
connection_part = url.substring('mongodb://'.length);
}
// Check if we have auth params
if (connection_part.indexOf('@') !== -1) {
auth_part = connection_part.split('@')[0];
connection_part = connection_part.split('@')[1];
}
// Check there is not more than one unescaped slash
if (connection_part.split('/').length > 2) {
throw new Error(
"Unsupported host '" +
connection_part.split('?')[0] +
"', hosts must be URL encoded and contain at most one unencoded slash"
);
}
// Check if the connection string has a db
if (connection_part.indexOf('.sock') !== -1) {
if (connection_part.indexOf('.sock/') !== -1) {
dbName = connection_part.split('.sock/')[1];
// Check if multiple database names provided, or just an illegal trailing backslash
if (dbName.indexOf('/') !== -1) {
if (dbName.split('/').length === 2 && dbName.split('/')[1].length === 0) {
throw new Error('Illegal trailing backslash after database name');
}
throw new Error('More than 1 database name in URL');
}
connection_part = connection_part.split(
'/',
connection_part.indexOf('.sock') + '.sock'.length
);
}
} else if (connection_part.indexOf('/') !== -1) {
// Check if multiple database names provided, or just an illegal trailing backslash
if (connection_part.split('/').length > 2) {
if (connection_part.split('/')[2].length === 0) {
throw new Error('Illegal trailing backslash after database name');
}
throw new Error('More than 1 database name in URL');
}
dbName = connection_part.split('/')[1];
connection_part = connection_part.split('/')[0];
}
// URI decode the host information
connection_part = decodeURIComponent(connection_part);
// Result object
let object = {};
// Pick apart the authentication part of the string
let authPart = auth_part || '';
let auth = authPart.split(':', 2);
// Decode the authentication URI components and verify integrity
let user = decodeURIComponent(auth[0]);
if (auth[0] !== encodeURIComponent(user)) {
throw new Error('Username contains an illegal unescaped character');
}
auth[0] = user;
if (auth[1]) {
let pass = decodeURIComponent(auth[1]);
if (auth[1] !== encodeURIComponent(pass)) {
throw new Error('Password contains an illegal unescaped character');
}
auth[1] = pass;
}
// Add auth to final object if we have 2 elements
if (auth.length === 2) object.auth = { user: auth[0], password: auth[1] };
// if user provided auth options, use that
if (options && options.auth != null) object.auth = options.auth;
// Variables used for temporary storage
let hostPart;
let urlOptions;
let servers;
let compression;
let serverOptions = { socketOptions: {} };
let dbOptions = { read_preference_tags: [] };
let replSetServersOptions = { socketOptions: {} };
let mongosOptions = { socketOptions: {} };
// Add server options to final object
object.server_options = serverOptions;
object.db_options = dbOptions;
object.rs_options = replSetServersOptions;
object.mongos_options = mongosOptions;
// Let's check if we are using a domain socket
if (url.match(/\.sock/)) {
// Split out the socket part
let domainSocket = url.substring(
url.indexOf('mongodb://') + 'mongodb://'.length,
url.lastIndexOf('.sock') + '.sock'.length
);
// Clean out any auth stuff if any
if (domainSocket.indexOf('@') !== -1) domainSocket = domainSocket.split('@')[1];
domainSocket = decodeURIComponent(domainSocket);
servers = [{ domain_socket: domainSocket }];
} else {
// Split up the db
hostPart = connection_part;
// Deduplicate servers
let deduplicatedServers = {};
// Parse all server results
servers = hostPart
.split(',')
.map(function(h) {
let _host, _port, ipv6match;
//check if it matches [IPv6]:port, where the port number is optional
if ((ipv6match = /\[([^\]]+)\](?::(.+))?/.exec(h))) {
_host = ipv6match[1];
_port = parseInt(ipv6match[2], 10) || 27017;
} else {
//otherwise assume it's IPv4, or plain hostname
let hostPort = h.split(':', 2);
_host = hostPort[0] || 'localhost';
_port = hostPort[1] != null ? parseInt(hostPort[1], 10) : 27017;
// Check for localhost?safe=true style case
if (_host.indexOf('?') !== -1) _host = _host.split(/\?/)[0];
}
// No entry returned for duplicate server
if (deduplicatedServers[_host + '_' + _port]) return null;
deduplicatedServers[_host + '_' + _port] = 1;
// Return the mapped object
return { host: _host, port: _port };
})
.filter(function(x) {
return x != null;
});
}
// Get the db name
object.dbName = dbName || 'admin';
// Split up all the options
urlOptions = (query_string_part || '').split(/[&;]/);
// Ugh, we have to figure out which options go to which constructor manually.
urlOptions.forEach(function(opt) {
if (!opt) return;
var splitOpt = opt.split('='),
name = splitOpt[0],
value = splitOpt[1];
// Options implementations
switch (name) {
case 'slaveOk':
case 'slave_ok':
serverOptions.slave_ok = value === 'true';
dbOptions.slaveOk = value === 'true';
break;
case 'maxPoolSize':
case 'poolSize':
serverOptions.poolSize = parseInt(value, 10);
replSetServersOptions.poolSize = parseInt(value, 10);
break;
case 'appname':
object.appname = decodeURIComponent(value);
break;
case 'autoReconnect':
case 'auto_reconnect':
serverOptions.auto_reconnect = value === 'true';
break;
case 'ssl':
if (value === 'prefer') {
serverOptions.ssl = value;
replSetServersOptions.ssl = value;
mongosOptions.ssl = value;
break;
}
serverOptions.ssl = value === 'true';
replSetServersOptions.ssl = value === 'true';
mongosOptions.ssl = value === 'true';
break;
case 'sslValidate':
serverOptions.sslValidate = value === 'true';
replSetServersOptions.sslValidate = value === 'true';
mongosOptions.sslValidate = value === 'true';
break;
case 'replicaSet':
case 'rs_name':
replSetServersOptions.rs_name = value;
break;
case 'reconnectWait':
replSetServersOptions.reconnectWait = parseInt(value, 10);
break;
case 'retries':
replSetServersOptions.retries = parseInt(value, 10);
break;
case 'readSecondary':
case 'read_secondary':
replSetServersOptions.read_secondary = value === 'true';
break;
case 'fsync':
dbOptions.fsync = value === 'true';
break;
case 'journal':
dbOptions.j = value === 'true';
break;
case 'safe':
dbOptions.safe = value === 'true';
break;
case 'nativeParser':
case 'native_parser':
dbOptions.native_parser = value === 'true';
break;
case 'readConcernLevel':
dbOptions.readConcern = new ReadConcern(value);
break;
case 'connectTimeoutMS':
serverOptions.socketOptions.connectTimeoutMS = parseInt(value, 10);
replSetServersOptions.socketOptions.connectTimeoutMS = parseInt(value, 10);
mongosOptions.socketOptions.connectTimeoutMS = parseInt(value, 10);
break;
case 'socketTimeoutMS':
serverOptions.socketOptions.socketTimeoutMS = parseInt(value, 10);
replSetServersOptions.socketOptions.socketTimeoutMS = parseInt(value, 10);
mongosOptions.socketOptions.socketTimeoutMS = parseInt(value, 10);
break;
case 'w':
dbOptions.w = parseInt(value, 10);
if (isNaN(dbOptions.w)) dbOptions.w = value;
break;
case 'authSource':
dbOptions.authSource = value;
break;
case 'gssapiServiceName':
dbOptions.gssapiServiceName = value;
break;
case 'authMechanism':
if (value === 'GSSAPI') {
// If no password provided decode only the principal
if (object.auth == null) {
let urlDecodeAuthPart = decodeURIComponent(authPart);
if (urlDecodeAuthPart.indexOf('@') === -1)
throw new Error('GSSAPI requires a provided principal');
object.auth = { user: urlDecodeAuthPart, password: null };
} else {
object.auth.user = decodeURIComponent(object.auth.user);
}
} else if (value === 'MONGODB-X509') {
object.auth = { user: decodeURIComponent(authPart) };
}
// Only support GSSAPI or MONGODB-CR for now
if (
value !== 'GSSAPI' &&
value !== 'MONGODB-X509' &&
value !== 'MONGODB-CR' &&
value !== 'DEFAULT' &&
value !== 'SCRAM-SHA-1' &&
value !== 'SCRAM-SHA-256' &&
value !== 'PLAIN'
)
throw new Error(
'Only DEFAULT, GSSAPI, PLAIN, MONGODB-X509, or SCRAM-SHA-1 is supported by authMechanism'
);
// Authentication mechanism
dbOptions.authMechanism = value;
break;
case 'authMechanismProperties':
{
// Split up into key, value pairs
let values = value.split(',');
let o = {};
// For each value split into key, value
values.forEach(function(x) {
let v = x.split(':');
o[v[0]] = v[1];
});
// Set all authMechanismProperties
dbOptions.authMechanismProperties = o;
// Set the service name value
if (typeof o.SERVICE_NAME === 'string') dbOptions.gssapiServiceName = o.SERVICE_NAME;
if (typeof o.SERVICE_REALM === 'string') dbOptions.gssapiServiceRealm = o.SERVICE_REALM;
if (typeof o.CANONICALIZE_HOST_NAME === 'string')
dbOptions.gssapiCanonicalizeHostName =
o.CANONICALIZE_HOST_NAME === 'true' ? true : false;
}
break;
case 'wtimeoutMS':
dbOptions.wtimeout = parseInt(value, 10);
break;
case 'readPreference':
if (!ReadPreference.isValid(value))
throw new Error(
'readPreference must be either primary/primaryPreferred/secondary/secondaryPreferred/nearest'
);
dbOptions.readPreference = value;
break;
case 'maxStalenessSeconds':
dbOptions.maxStalenessSeconds = parseInt(value, 10);
break;
case 'readPreferenceTags':
{
// Decode the value
value = decodeURIComponent(value);
// Contains the tag object
let tagObject = {};
if (value == null || value === '') {
dbOptions.read_preference_tags.push(tagObject);
break;
}
// Split up the tags
let tags = value.split(/,/);
for (let i = 0; i < tags.length; i++) {
let parts = tags[i].trim().split(/:/);
tagObject[parts[0]] = parts[1];
}
// Set the preferences tags
dbOptions.read_preference_tags.push(tagObject);
}
break;
case 'compressors':
{
compression = serverOptions.compression || {};
let compressors = value.split(',');
if (
!compressors.every(function(compressor) {
return compressor === 'snappy' || compressor === 'zlib';
})
) {
throw new Error('Compressors must be at least one of snappy or zlib');
}
compression.compressors = compressors;
serverOptions.compression = compression;
}
break;
case 'zlibCompressionLevel':
{
compression = serverOptions.compression || {};
let zlibCompressionLevel = parseInt(value, 10);
if (zlibCompressionLevel < -1 || zlibCompressionLevel > 9) {
throw new Error('zlibCompressionLevel must be an integer between -1 and 9');
}
compression.zlibCompressionLevel = zlibCompressionLevel;
serverOptions.compression = compression;
}
break;
case 'retryWrites':
dbOptions.retryWrites = value === 'true';
break;
case 'minSize':
dbOptions.minSize = parseInt(value, 10);
break;
default:
{
let logger = Logger('URL Parser');
logger.warn(`${name} is not supported as a connection string option`);
}
break;
}
});
// No tags: should be null (not [])
if (dbOptions.read_preference_tags.length === 0) {
dbOptions.read_preference_tags = null;
}
// Validate if there are an invalid write concern combinations
if (
(dbOptions.w === -1 || dbOptions.w === 0) &&
(dbOptions.journal === true || dbOptions.fsync === true || dbOptions.safe === true)
)
throw new Error('w set to -1 or 0 cannot be combined with safe/w/journal/fsync');
// If no read preference set it to primary
if (!dbOptions.readPreference) {
dbOptions.readPreference = 'primary';
}
// make sure that user-provided options are applied with priority
dbOptions = Object.assign(dbOptions, options);
// Add servers to result
object.servers = servers;
// Returned parsed object
return object;
}
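
// Hedged usage sketch (illustration only, not part of the original module), using a made-up
// cluster URI; parseConnectionString would return an object shaped roughly like:
//
//   parseConnectionString('mongodb://user:pass@db1.example.com:27017,db2.example.com/app?replicaSet=rs0&w=majority', {})
//   // => { auth: { user: 'user', password: 'pass' },
//   //      dbName: 'app',
//   //      servers: [{ host: 'db1.example.com', port: 27017 }, { host: 'db2.example.com', port: 27017 }],
//   //      rs_options: { rs_name: 'rs0', ... },
//   //      db_options: { w: 'majority', readPreference: 'primary', ... }, ... }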
| 1 | 16,347 | This shouldn't be here. | mongodb-node-mongodb-native | js |
@@ -24,11 +24,16 @@ module ValueHelper
end
def property_to_s(val)
- # assume all decimals are currency
- if decimal?(val)
+ if decimal?(val) # assume all decimals are currency
number_to_currency(val)
+ elsif val.is_a?(ActiveSupport::TimeWithZone)
+ I18n.l(val, format: :date)
+ elsif val == true
+ "Yes"
+ elsif val == false
+ "No"
else
- val.to_s
+ val
end
end
end | 1 | module ValueHelper
include ActionView::Helpers::NumberHelper
def date_with_tooltip(time, ago = false)
# make sure we are dealing with a Time object
unless time.is_a?(Time)
time = Time.zone.parse(time.to_s)
end
# timezone adjustment is handled via browser-timezone-rails gem
# so coerce into Time.zone explicitly
adjusted_time = time.in_time_zone
adjusted_time_str = adjusted_time.strftime("%b %-d, %Y at %l:%M%P")
if ago
content_tag("span", time_ago_in_words(adjusted_time) + " ago", title: adjusted_time_str)
else
content_tag("span", adjusted_time_str, title: adjusted_time_str)
end
end
def decimal?(val)
val.is_a?(Numeric) && !val.is_a?(Integer)
end
def property_to_s(val)
# assume all decimals are currency
if decimal?(val)
number_to_currency(val)
else
val.to_s
end
end
end
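
# Hedged sketch (illustration only, not part of the original helper): the reviewer's note on
# this change suggests a `case` statement; the patched branching above could read roughly as
#
#   def property_to_s(val)
#     case val
#     when true then "Yes"
#     when false then "No"
#     when ActiveSupport::TimeWithZone then I18n.l(val, format: :date)
#     when Numeric then decimal?(val) ? number_to_currency(val) : val # decimals treated as currency
#     else val
#     end
#   end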
| 1 | 16,978 | A `case` statement may be simpler here. | 18F-C2 | rb |
@@ -1081,10 +1081,15 @@ public class JDBCConnection implements ObjectStoreConnection {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int curTagCount = getDomainTagsCount(domainId);
- int remainingTagsToInsert = domainTagsLimit - curTagCount;
+ int newTagCount = calculateTagCount(tags);
+ if (curTagCount + newTagCount > domainTagsLimit) {
+ throw ZMSUtils.quotaLimitError("domain tag quota exceeded - limit: "
+ + domainTagsLimit + ", current tags count: " + curTagCount + ", new tags count: " + newTagCount, caller);
+ }
+
boolean res = true;
for (Map.Entry<String, StringList> e : tags.entrySet()) {
- for (int i = 0; i < e.getValue().getList().size() && remainingTagsToInsert-- > 0; i++) {
+ for (int i = 0; i < e.getValue().getList().size(); i++) {
String tagValue = e.getValue().getList().get(i);
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_DOMAIN_TAG)) {
ps.setInt(1, domainId); | 1 | /*
* Copyright 2016 Yahoo Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.yahoo.athenz.zms.store.impl.jdbc;
import java.sql.*;
import java.util.*;
import com.yahoo.athenz.auth.AuthorityConsts;
import com.yahoo.athenz.common.server.util.ResourceUtils;
import com.yahoo.athenz.zms.*;
import org.eclipse.jetty.util.StringUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.yahoo.athenz.zms.store.AthenzDomain;
import com.yahoo.athenz.zms.store.ObjectStoreConnection;
import com.yahoo.athenz.zms.utils.ZMSUtils;
import com.yahoo.athenz.zms.ZMSConsts;
import com.yahoo.rdl.JSON;
import com.yahoo.rdl.Struct;
import com.yahoo.rdl.Timestamp;
import com.yahoo.rdl.UUID;
public class JDBCConnection implements ObjectStoreConnection {
private static final Logger LOG = LoggerFactory.getLogger(JDBCConnection.class);
private static final int MYSQL_ER_OPTION_PREVENTS_STATEMENT = 1290;
private static final int MYSQL_ER_OPTION_DUPLICATE_ENTRY = 1062;
private static final String SQL_DELETE_DOMAIN = "DELETE FROM domain WHERE name=?;";
private static final String SQL_GET_DOMAIN = "SELECT * FROM domain WHERE name=?;";
private static final String SQL_GET_DOMAIN_ID = "SELECT domain_id FROM domain WHERE name=?;";
private static final String SQL_GET_ACTIVE_DOMAIN_ID = "SELECT domain_id FROM domain WHERE name=? AND enabled=true;";
private static final String SQL_GET_DOMAINS_WITH_NAME = "SELECT name FROM domain WHERE name LIKE ?;";
private static final String SQL_GET_DOMAIN_WITH_ACCOUNT = "SELECT name FROM domain WHERE account=?;";
private static final String SQL_GET_DOMAIN_WITH_SUBSCRIPTION = "SELECT name FROM domain WHERE azure_subscription=?;";
private static final String SQL_GET_DOMAIN_WITH_PRODUCT_ID = "SELECT name FROM domain WHERE ypm_id=?;";
private static final String SQL_LIST_DOMAIN_WITH_BUSINESS_SERVICE = "SELECT name FROM domain WHERE business_service=?;";
private static final String SQL_INSERT_DOMAIN = "INSERT INTO domain "
+ "(name, description, org, uuid, enabled, audit_enabled, account, ypm_id, application_id, cert_dns_domain,"
+ " member_expiry_days, token_expiry_mins, service_cert_expiry_mins, role_cert_expiry_mins, sign_algorithm,"
+ " service_expiry_days, user_authority_filter, group_expiry_days, azure_subscription, business_service)"
+ " VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);";
private static final String SQL_UPDATE_DOMAIN = "UPDATE domain "
+ "SET description=?, org=?, uuid=?, enabled=?, audit_enabled=?, account=?, ypm_id=?, application_id=?,"
+ " cert_dns_domain=?, member_expiry_days=?, token_expiry_mins=?, service_cert_expiry_mins=?,"
+ " role_cert_expiry_mins=?, sign_algorithm=?, service_expiry_days=?, user_authority_filter=?,"
+ " group_expiry_days=?, azure_subscription=?, business_service=? WHERE name=?;";
private static final String SQL_UPDATE_DOMAIN_MOD_TIMESTAMP = "UPDATE domain "
+ "SET modified=CURRENT_TIMESTAMP(3) WHERE name=?;";
private static final String SQL_GET_DOMAIN_MOD_TIMESTAMP = "SELECT modified FROM domain WHERE name=?;";
private static final String SQL_LIST_DOMAIN = "SELECT * FROM domain;";
private static final String SQL_LIST_DOMAIN_PREFIX = "SELECT name, modified FROM domain WHERE name>=? AND name<?;";
private static final String SQL_LIST_DOMAIN_MODIFIED = "SELECT * FROM domain WHERE modified>?;";
private static final String SQL_LIST_DOMAIN_PREFIX_MODIFIED = "SELECT name, modified FROM domain "
+ "WHERE name>=? AND name<? AND modified>?;";
private static final String SQL_LIST_DOMAIN_ROLE_NAME_MEMBER = "SELECT domain.name FROM domain "
+ "JOIN role ON role.domain_id=domain.domain_id "
+ "JOIN role_member ON role_member.role_id=role.role_id "
+ "JOIN principal ON principal.principal_id=role_member.principal_id "
+ "WHERE principal.name=? AND role.name=?;";
private static final String SQL_LIST_DOMAIN_ROLE_MEMBER = "SELECT domain.name FROM domain "
+ "JOIN role ON role.domain_id=domain.domain_id "
+ "JOIN role_member ON role_member.role_id=role.role_id "
+ "JOIN principal ON principal.principal_id=role_member.principal_id "
+ "WHERE principal.name=?;";
private static final String SQL_LIST_DOMAIN_ROLE_NAME = "SELECT domain.name FROM domain "
+ "JOIN role ON role.domain_id=domain.domain_id WHERE role.name=?;";
private static final String SQL_LIST_DOMAIN_AWS = "SELECT name, account FROM domain WHERE account!='';";
private static final String SQL_GET_ROLE = "SELECT * FROM role "
+ "JOIN domain ON domain.domain_id=role.domain_id "
+ "WHERE domain.name=? AND role.name=?;";
private static final String SQL_GET_ROLE_ID = "SELECT role_id FROM role WHERE domain_id=? AND name=?;";
private static final String SQL_INSERT_ROLE = "INSERT INTO role (name, domain_id, trust, audit_enabled, self_serve,"
+ " member_expiry_days, token_expiry_mins, cert_expiry_mins, sign_algorithm, service_expiry_days,"
+ " member_review_days, service_review_days, review_enabled, notify_roles, user_authority_filter, "
+ " user_authority_expiration, group_expiry_days) "
+ "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);";
private static final String SQL_UPDATE_ROLE = "UPDATE role SET trust=?, audit_enabled=?, self_serve=?, "
+ "member_expiry_days=?, token_expiry_mins=?, cert_expiry_mins=?, sign_algorithm=?, "
+ "service_expiry_days=?, member_review_days=?, service_review_days=?, review_enabled=?, notify_roles=?, "
+ "user_authority_filter=?, user_authority_expiration=?, group_expiry_days=? WHERE role_id=?;";
private static final String SQL_DELETE_ROLE = "DELETE FROM role WHERE domain_id=? AND name=?;";
private static final String SQL_UPDATE_ROLE_MOD_TIMESTAMP = "UPDATE role "
+ "SET modified=CURRENT_TIMESTAMP(3) WHERE role_id=?;";
private static final String SQL_LIST_ROLE = "SELECT name FROM role WHERE domain_id=?;";
private static final String SQL_COUNT_ROLE = "SELECT COUNT(*) FROM role WHERE domain_id=?;";
private static final String SQL_GET_ROLE_MEMBER = "SELECT principal.principal_id, role_member.expiration, "
+ "role_member.review_reminder, role_member.req_principal, role_member.system_disabled FROM principal "
+ "JOIN role_member ON role_member.principal_id=principal.principal_id "
+ "JOIN role ON role.role_id=role_member.role_id "
+ "WHERE role.role_id=? AND principal.name=?;";
private static final String SQL_GET_TEMP_ROLE_MEMBER = "SELECT principal.principal_id, role_member.expiration, "
+ "role_member.review_reminder, role_member.req_principal, role_member.system_disabled FROM principal "
+ "JOIN role_member ON role_member.principal_id=principal.principal_id "
+ "JOIN role ON role.role_id=role_member.role_id "
+ "WHERE role.role_id=? AND principal.name=? AND role_member.expiration=?;";
private static final String SQL_GET_PENDING_ROLE_MEMBER = "SELECT principal.principal_id, pending_role_member.expiration, pending_role_member.review_reminder, pending_role_member.req_principal FROM principal "
+ "JOIN pending_role_member ON pending_role_member.principal_id=principal.principal_id "
+ "JOIN role ON role.role_id=pending_role_member.role_id "
+ "WHERE role.role_id=? AND principal.name=?;";
private static final String SQL_GET_TEMP_PENDING_ROLE_MEMBER = "SELECT principal.principal_id, pending_role_member.expiration, pending_role_member.review_reminder, pending_role_member.req_principal FROM principal "
+ "JOIN pending_role_member ON pending_role_member.principal_id=principal.principal_id "
+ "JOIN role ON role.role_id=pending_role_member.role_id "
+ "WHERE role.role_id=? AND principal.name=? AND pending_role_member.expiration=?;";
private static final String SQL_STD_ROLE_MEMBER_EXISTS = "SELECT principal_id FROM role_member WHERE role_id=? AND principal_id=?;";
private static final String SQL_PENDING_ROLE_MEMBER_EXISTS = "SELECT principal_id FROM pending_role_member WHERE role_id=? AND principal_id=?;";
private static final String SQL_LIST_ROLE_MEMBERS = "SELECT principal.name, role_member.expiration, "
+ "role_member.review_reminder, role_member.active, role_member.audit_ref, role_member.system_disabled FROM principal "
+ "JOIN role_member ON role_member.principal_id=principal.principal_id "
+ "JOIN role ON role.role_id=role_member.role_id WHERE role.role_id=?;";
private static final String SQL_LIST_PENDING_ROLE_MEMBERS = "SELECT principal.name, pending_role_member.expiration, pending_role_member.review_reminder, pending_role_member.req_time, pending_role_member.audit_ref FROM principal "
+ "JOIN pending_role_member ON pending_role_member.principal_id=principal.principal_id "
+ "JOIN role ON role.role_id=pending_role_member.role_id WHERE role.role_id=?;";
private static final String SQL_COUNT_ROLE_MEMBERS = "SELECT COUNT(*) FROM role_member WHERE role_id=?;";
private static final String SQL_GET_PRINCIPAL_ID = "SELECT principal_id FROM principal WHERE name=?;";
private static final String SQL_INSERT_PRINCIPAL = "INSERT INTO principal (name) VALUES (?);";
private static final String SQL_DELETE_PRINCIPAL = "DELETE FROM principal WHERE name=?;";
private static final String SQL_DELETE_SUB_PRINCIPALS = "DELETE FROM principal WHERE name LIKE ?;";
private static final String SQL_LIST_PRINCIPAL = "SELECT * FROM principal;";
private static final String SQL_LIST_PRINCIPAL_DOMAIN = "SELECT * FROM principal WHERE name LIKE ?;";
private static final String SQL_LAST_INSERT_ID = "SELECT LAST_INSERT_ID();";
private static final String SQL_INSERT_ROLE_MEMBER = "INSERT INTO role_member "
+ "(role_id, principal_id, expiration, review_reminder, active, audit_ref, req_principal) VALUES (?,?,?,?,?,?,?);";
private static final String SQL_INSERT_PENDING_ROLE_MEMBER = "INSERT INTO pending_role_member "
+ "(role_id, principal_id, expiration, review_reminder, audit_ref, req_principal) VALUES (?,?,?,?,?,?);";
private static final String SQL_DELETE_ROLE_MEMBER = "DELETE FROM role_member WHERE role_id=? AND principal_id=?;";
private static final String SQL_DELETE_PENDING_ROLE_MEMBER = "DELETE FROM pending_role_member WHERE role_id=? AND principal_id=?;";
private static final String SQL_UPDATE_ROLE_MEMBER = "UPDATE role_member "
+ "SET expiration=?, review_reminder=?, active=?, audit_ref=?, req_principal=? WHERE role_id=? AND principal_id=?;";
private static final String SQL_UPDATE_ROLE_MEMBER_DISABLED_STATE = "UPDATE role_member "
+ "SET system_disabled=?, audit_ref=?, req_principal=? WHERE role_id=? AND principal_id=?;";
private static final String SQL_UPDATE_PENDING_ROLE_MEMBER = "UPDATE pending_role_member "
+ "SET expiration=?, review_reminder=?, audit_ref=?, req_time=CURRENT_TIMESTAMP(3), req_principal=? WHERE role_id=? AND principal_id=?;";
private static final String SQL_INSERT_ROLE_AUDIT_LOG = "INSERT INTO role_audit_log "
+ "(role_id, admin, member, action, audit_ref) VALUES (?,?,?,?,?);";
private static final String SQL_LIST_ROLE_AUDIT_LOGS = "SELECT * FROM role_audit_log WHERE role_id=?;";
private static final String SQL_GET_POLICY = "SELECT * FROM policy "
+ "JOIN domain ON domain.domain_id=policy.domain_id WHERE domain.name=? AND policy.name=?;";
private static final String SQL_INSERT_POLICY = "INSERT INTO policy (name, domain_id) VALUES (?,?);";
private static final String SQL_UPDATE_POLICY = "UPDATE policy SET name=? WHERE policy_id=?;";
private static final String SQL_UPDATE_POLICY_MOD_TIMESTAMP = "UPDATE policy "
+ "SET modified=CURRENT_TIMESTAMP(3) WHERE policy_id=?;";
private static final String SQL_GET_POLICY_ID = "SELECT policy_id FROM policy WHERE domain_id=? AND name=?;";
private static final String SQL_DELETE_POLICY = "DELETE FROM policy WHERE domain_id=? AND name=?;";
private static final String SQL_LIST_POLICY = "SELECT name FROM policy WHERE domain_id=?";
private static final String SQL_COUNT_POLICY = "SELECT COUNT(*) FROM policy WHERE domain_id=?";
private static final String SQL_LIST_ASSERTION = "SELECT * FROM assertion WHERE policy_id=?";
private static final String SQL_COUNT_ASSERTION = "SELECT COUNT(*) FROM assertion WHERE policy_id=?";
private static final String SQL_GET_ASSERTION = "SELECT * FROM assertion "
+ "JOIN policy ON assertion.policy_id=policy.policy_id "
+ "JOIN domain ON policy.domain_id=domain.domain_id "
+ "WHERE assertion.assertion_id=? AND domain.name=? AND policy.name=?;";
private static final String SQL_CHECK_ASSERTION = "SELECT assertion_id FROM assertion "
+ "WHERE policy_id=? AND role=? AND resource=? AND action=? AND effect=?;";
private static final String SQL_INSERT_ASSERTION = "INSERT INTO assertion "
+ "(policy_id, role, resource, action, effect) VALUES (?,?,?,?,?);";
private static final String SQL_DELETE_ASSERTION = "DELETE FROM assertion "
+ "WHERE policy_id=? AND assertion_id=?;";
private static final String SQL_GET_SERVICE = "SELECT * FROM service "
+ "JOIN domain ON domain.domain_id=service.domain_id WHERE domain.name=? AND service.name=?;";
private static final String SQL_INSERT_SERVICE = "INSERT INTO service "
+ "(name, description, provider_endpoint, executable, svc_user, svc_group, domain_id) VALUES (?,?,?,?,?,?,?);";
private static final String SQL_UPDATE_SERVICE = "UPDATE service SET "
+ "description=?, provider_endpoint=?, executable=?, svc_user=?, svc_group=? WHERE service_id=?;";
private static final String SQL_UPDATE_SERVICE_MOD_TIMESTAMP = "UPDATE service "
+ "SET modified=CURRENT_TIMESTAMP(3) WHERE service_id=?;";
private static final String SQL_DELETE_SERVICE = "DELETE FROM service WHERE domain_id=? AND name=?;";
private static final String SQL_GET_SERVICE_ID = "SELECT service_id FROM service WHERE domain_id=? AND name=?;";
private static final String SQL_LIST_SERVICE = "SELECT name FROM service WHERE domain_id=?;";
private static final String SQL_COUNT_SERVICE = "SELECT COUNT(*) FROM service WHERE domain_id=?;";
private static final String SQL_LIST_PUBLIC_KEY = "SELECT * FROM public_key WHERE service_id=?;";
private static final String SQL_COUNT_PUBLIC_KEY = "SELECT COUNT(*) FROM public_key WHERE service_id=?;";
private static final String SQL_GET_PUBLIC_KEY = "SELECT key_value FROM public_key WHERE service_id=? AND key_id=?;";
private static final String SQL_INSERT_PUBLIC_KEY = "INSERT INTO public_key "
+ "(service_id, key_id, key_value) VALUES (?,?,?);";
private static final String SQL_UPDATE_PUBLIC_KEY = "UPDATE public_key SET key_value=? WHERE service_id=? AND key_id=?;";
private static final String SQL_DELETE_PUBLIC_KEY = "DELETE FROM public_key WHERE service_id=? AND key_id=?;";
private static final String SQL_LIST_SERVICE_HOST = "SELECT host.name FROM host "
+ "JOIN service_host ON service_host.host_id=host.host_id "
+ "WHERE service_host.service_id=?;";
private static final String SQL_INSERT_SERVICE_HOST = "INSERT INTO service_host (service_id, host_id) VALUES (?,?);";
private static final String SQL_DELETE_SERVICE_HOST = "DELETE FROM service_host WHERE service_id=? AND host_id=?;";
private static final String SQL_GET_HOST_ID = "SELECT host_id FROM host WHERE name=?;";
private static final String SQL_INSERT_HOST = "INSERT INTO host (name) VALUES (?);";
private static final String SQL_INSERT_ENTITY = "INSERT INTO entity (domain_id, name, value) VALUES (?,?,?);";
private static final String SQL_UPDATE_ENTITY = "UPDATE entity SET value=? WHERE domain_id=? AND name=?;";
private static final String SQL_DELETE_ENTITY = "DELETE FROM entity WHERE domain_id=? AND name=?;";
private static final String SQL_GET_ENTITY = "SELECT value FROM entity WHERE domain_id=? AND name=?;";
private static final String SQL_LIST_ENTITY = "SELECT name FROM entity WHERE domain_id=?;";
private static final String SQL_COUNT_ENTITY = "SELECT COUNT(*) FROM entity WHERE domain_id=?;";
private static final String SQL_INSERT_DOMAIN_TEMPLATE = "INSERT INTO domain_template (domain_id, template) VALUES (?,?);";
private static final String SQL_UPDATE_DOMAIN_TEMPLATE = "UPDATE domain_template SET current_version=? WHERE domain_id=? and template=?;";
private static final String SQL_DELETE_DOMAIN_TEMPLATE = "DELETE FROM domain_template WHERE domain_id=? AND template=?;";
private static final String SQL_LIST_DOMAIN_TEMPLATES = "SELECT * FROM domain_template WHERE domain_id=?;";
private static final String SQL_LIST_DOMAIN_TEMPLATE = "SELECT template FROM domain_template "
+ "JOIN domain ON domain_template.domain_id=domain.domain_id "
+ "WHERE domain.name=?;";
private static final String SQL_GET_DOMAIN_ENTITIES = "SELECT * FROM entity WHERE domain_id=?;";
private static final String SQL_GET_DOMAIN_ROLES = "SELECT * FROM role WHERE domain_id=?;";
private static final String SQL_GET_DOMAIN_ROLE_MEMBERS = "SELECT role.name, principal.name, role_member.expiration, "
+ "role_member.review_reminder, role_member.system_disabled FROM principal "
+ "JOIN role_member ON role_member.principal_id=principal.principal_id "
+ "JOIN role ON role.role_id=role_member.role_id "
+ "WHERE role.domain_id=?;";
private static final String SQL_GET_PRINCIPAL_ROLES = "SELECT role.name, domain.name, role_member.expiration, "
+ "role_member.review_reminder, role_member.system_disabled FROM role_member "
+ "JOIN role ON role.role_id=role_member.role_id "
+ "JOIN domain ON domain.domain_id=role.domain_id "
+ "WHERE role_member.principal_id=?;";
private static final String SQL_GET_PRINCIPAL_ROLES_DOMAIN = "SELECT role.name, domain.name, role_member.expiration, "
+ "role_member.review_reminder, role_member.system_disabled FROM role_member "
+ "JOIN role ON role.role_id=role_member.role_id "
+ "JOIN domain ON domain.domain_id=role.domain_id "
+ "WHERE role_member.principal_id=? AND domain.domain_id=?;";
private static final String SQL_GET_REVIEW_OVERDUE_DOMAIN_ROLE_MEMBERS = "SELECT role.name, principal.name, role_member.expiration, "
+ "role_member.review_reminder, role_member.system_disabled FROM principal "
+ "JOIN role_member ON role_member.principal_id=principal.principal_id "
+ "JOIN role ON role.role_id=role_member.role_id "
+ "WHERE role.domain_id=? AND role_member.review_reminder < CURRENT_TIME;";
private static final String SQL_GET_DOMAIN_POLICIES = "SELECT * FROM policy WHERE domain_id=?;";
private static final String SQL_GET_DOMAIN_POLICY_ASSERTIONS = "SELECT policy.name, "
+ "assertion.effect, assertion.action, assertion.role, assertion.resource, "
+ "assertion.assertion_id FROM assertion "
+ "JOIN policy ON policy.policy_id=assertion.policy_id "
+ "WHERE policy.domain_id=?;";
private static final String SQL_GET_DOMAIN_SERVICES = "SELECT * FROM service WHERE domain_id=?;";
private static final String SQL_GET_DOMAIN_SERVICES_HOSTS = "SELECT service.name, host.name FROM host "
+ "JOIN service_host ON host.host_id=service_host.host_id "
+ "JOIN service ON service.service_id=service_host.service_id "
+ "WHERE service.domain_id=?;";
private static final String SQL_GET_DOMAIN_SERVICES_PUBLIC_KEYS = "SELECT service.name, "
+ "public_key.key_id, public_key.key_value FROM public_key "
+ "JOIN service ON service.service_id=public_key.service_id "
+ "WHERE service.domain_id=?;";
private static final String SQL_LIST_POLICY_REFERENCING_ROLE = "SELECT name FROM policy "
+ "JOIN assertion ON policy.policy_id=assertion.policy_id "
+ "WHERE policy.domain_id=? AND assertion.role=?;";
private static final String SQL_LIST_ROLE_ASSERTIONS = "SELECT assertion.role, assertion.resource, "
+ "assertion.action, assertion.effect, assertion.assertion_id, policy.domain_id, domain.name FROM assertion "
+ "JOIN policy ON assertion.policy_id=policy.policy_id "
+ "JOIN domain ON policy.domain_id=domain.domain_id";
private static final String SQL_LIST_ROLE_ASSERTION_QUERY_ACTION = " WHERE assertion.action=?;";
private static final String SQL_LIST_ROLE_ASSERTION_NO_ACTION = " WHERE assertion.action!='assume_role';";
private static final String SQL_LIST_ROLE_PRINCIPALS = "SELECT role.domain_id, role.name AS role_name FROM principal "
+ "JOIN role_member ON principal.principal_id=role_member.principal_id "
+ "JOIN role ON role_member.role_id=role.role_id WHERE principal.name=? "
+ "AND principal.system_suspended=0 AND role_member.system_disabled=0 "
+ "AND (role_member.expiration IS NULL OR role_member.expiration > CURRENT_TIME);";
private static final String SQL_LIST_ROLE_GROUP_PRINCIPALS = "SELECT principal.name, role.domain_id, "
+ "role.name AS role_name FROM principal "
+ "JOIN role_member ON principal.principal_id=role_member.principal_id "
+ "JOIN role ON role_member.role_id=role.role_id WHERE principal.name LIKE '%:group.%' "
+ "AND principal.system_suspended=0 AND role_member.system_disabled=0 "
+ "AND (role_member.expiration IS NULL OR role_member.expiration > CURRENT_TIME);";
private static final String SQL_LIST_GROUP_FOR_PRINCIPAL = "SELECT principal_group.name, domain.name AS domain_name "
+ "FROM principal_group_member JOIN principal_group ON principal_group.group_id=principal_group_member.group_id "
+ "JOIN domain ON domain.domain_id=principal_group.domain_id JOIN principal ON principal.principal_id=principal_group_member.principal_id "
+ "WHERE principal.name=? AND principal.system_suspended=0 AND principal_group_member.system_disabled=0 "
+ "AND (principal_group_member.expiration IS NULL OR principal_group_member.expiration > CURRENT_TIME);";
private static final String SQL_LIST_TRUSTED_STANDARD_ROLES = "SELECT role.domain_id, role.name, "
+ "policy.domain_id AS assert_domain_id, assertion.role FROM role "
+ "JOIN domain ON domain.domain_id=role.domain_id "
+ "JOIN assertion ON assertion.resource=CONCAT(domain.name, \":role.\", role.name) "
+ "JOIN policy ON policy.policy_id=assertion.policy_id "
+ "WHERE assertion.action='assume_role';";
private static final String SQL_LIST_TRUSTED_WILDCARD_ROLES = "SELECT role.domain_id, role.name, "
+ "policy.domain_id AS assert_domain_id, assertion.role FROM role "
+ "JOIN domain ON domain.domain_id=role.domain_id "
+ "JOIN assertion ON assertion.resource=CONCAT(\"*:role.\", role.name) "
+ "JOIN policy ON policy.policy_id=assertion.policy_id "
+ "WHERE assertion.action='assume_role';";
private static final String SQL_LIST_PRINCIPAL_ROLES = "SELECT domain.name, "
+ "role.name AS role_name FROM role_member "
+ "JOIN role ON role_member.role_id=role.role_id "
+ "JOIN domain ON domain.domain_id=role.domain_id "
+ "WHERE role_member.principal_id=?;";
private static final String SQL_LIST_PRINCIPAL_DOMAIN_ROLES = "SELECT role.name AS role_name FROM role_member "
+ "JOIN role ON role_member.role_id=role.role_id "
+ "JOIN domain ON domain.domain_id=role.domain_id "
+ "WHERE role_member.principal_id=? AND domain.domain_id=?;";
private static final String SQL_GET_QUOTA = "SELECT * FROM quota WHERE domain_id=?;";
private static final String SQL_INSERT_QUOTA = "INSERT INTO quota (domain_id, role, role_member, "
+ "policy, assertion, service, service_host, public_key, entity, subdomain, principal_group, principal_group_member) "
+ "VALUES (?,?,?,?,?,?,?,?,?,?,?,?);";
private static final String SQL_UPDATE_QUOTA = "UPDATE quota SET role=?, role_member=?, "
+ "policy=?, assertion=?, service=?, service_host=?, public_key=?, entity=?, "
+ "subdomain=?, principal_group=?, principal_group_member=? WHERE domain_id=?;";
private static final String SQL_DELETE_QUOTA = "DELETE FROM quota WHERE domain_id=?;";
private static final String SQL_PENDING_ORG_AUDIT_ROLE_MEMBER_LIST = "SELECT do.name AS domain, ro.name AS role, "
+ "principal.name AS member, rmo.expiration, rmo.review_reminder, rmo.audit_ref, rmo.req_time, rmo.req_principal "
+ "FROM principal JOIN pending_role_member rmo "
+ "ON rmo.principal_id=principal.principal_id JOIN role ro ON ro.role_id=rmo.role_id JOIN domain do ON ro.domain_id=do.domain_id "
+ "WHERE ro.audit_enabled=true AND ro.domain_id IN ( select domain_id FROM domain WHERE org IN ( "
+ "SELECT DISTINCT role.name AS org FROM role_member JOIN role ON role.role_id=role_member.role_id "
+ "WHERE role_member.principal_id=? AND role.domain_id=?) ) order by do.name, ro.name, principal.name;";
private static final String SQL_PENDING_DOMAIN_AUDIT_ROLE_MEMBER_LIST = "SELECT do.name AS domain, ro.name AS role, "
+ "principal.name AS member, rmo.expiration, rmo.review_reminder, rmo.audit_ref, rmo.req_time, rmo.req_principal "
+ "FROM principal JOIN pending_role_member rmo "
+ "ON rmo.principal_id=principal.principal_id JOIN role ro ON ro.role_id=rmo.role_id JOIN domain do ON ro.domain_id=do.domain_id "
+ "WHERE ro.audit_enabled=true AND ro.domain_id IN ( select domain_id FROM domain WHERE name IN ( "
+ "SELECT DISTINCT role.name AS domain_name FROM role_member JOIN role ON role.role_id=role_member.role_id "
+ "WHERE role_member.principal_id=? AND role.domain_id=?) ) order by do.name, ro.name, principal.name;";
private static final String SQL_PENDING_DOMAIN_ADMIN_ROLE_MEMBER_LIST = "SELECT do.name AS domain, ro.name AS role, "
+ "principal.name AS member, rmo.expiration, rmo.review_reminder, rmo.audit_ref, rmo.req_time, rmo.req_principal "
+ "FROM principal JOIN pending_role_member rmo "
+ "ON rmo.principal_id=principal.principal_id JOIN role ro ON ro.role_id=rmo.role_id JOIN domain do ON ro.domain_id=do.domain_id "
+ "WHERE (ro.self_serve=true OR ro.review_enabled=true) AND ro.domain_id IN ( SELECT domain.domain_id FROM domain JOIN role "
+ "ON role.domain_id=domain.domain_id JOIN role_member ON role.role_id=role_member.role_id "
+ "WHERE role_member.principal_id=? AND role_member.active=true AND role.name='admin' ) "
+ "order by do.name, ro.name, principal.name;";
private static final String SQL_AUDIT_ENABLED_PENDING_MEMBERSHIP_REMINDER_ENTRIES =
"SELECT distinct d.org, d.name FROM pending_role_member rm " +
"JOIN role r ON r.role_id=rm.role_id JOIN domain d ON r.domain_id=d.domain_id " +
"WHERE r.audit_enabled=true AND rm.last_notified_time=? AND rm.server=?;";
private static final String SQL_ADMIN_PENDING_MEMBERSHIP_REMINDER_DOMAINS =
"SELECT distinct d.name FROM pending_role_member rm " +
"JOIN role r ON r.role_id=rm.role_id " +
"JOIN domain d ON r.domain_id=d.domain_id WHERE (r.self_serve=true OR r.review_enabled=true) AND rm.last_notified_time=? AND rm.server=?;";
private static final String SQL_GET_EXPIRED_PENDING_ROLE_MEMBERS = "SELECT d.name, r.name, p.name, prm.expiration, prm.review_reminder, prm.audit_ref, prm.req_time, prm.req_principal " +
"FROM principal p JOIN pending_role_member prm " +
"ON prm.principal_id=p.principal_id JOIN role r ON prm.role_id=r.role_id JOIN domain d ON d.domain_id=r.domain_id " +
"WHERE prm.req_time < (CURRENT_TIME - INTERVAL ? DAY);";
private static final String SQL_UPDATE_PENDING_ROLE_MEMBERS_NOTIFICATION_TIMESTAMP = "UPDATE pending_role_member SET last_notified_time=?, server=? " +
"WHERE DAYOFWEEK(req_time)=DAYOFWEEK(?) AND (last_notified_time IS NULL || last_notified_time < (CURRENT_TIME - INTERVAL ? DAY));";
private static final String SQL_UPDATE_ROLE_MEMBERS_EXPIRY_NOTIFICATION_TIMESTAMP =
"UPDATE role_member SET last_notified_time=?, server=? " +
"WHERE (" +
// Expiration is set and Review isn't (or after expiration) - start sending a month before expiration
"(expiration > CURRENT_TIME AND (review_reminder is NULL OR review_reminder >= expiration) AND DATEDIFF(expiration, CURRENT_TIME) IN (0,1,7,14,21,28)) OR" +
// Expiration and Review both set and review is before expiration - start sending from review date
"(expiration > CURRENT_TIME AND review_reminder is not NULL AND review_reminder <= CURRENT_TIME AND DATEDIFF(expiration, CURRENT_TIME) IN (0,1,7,14,21,28))" +
") AND " +
"(last_notified_time IS NULL || last_notified_time < (CURRENT_TIME - INTERVAL ? DAY));";
private static final String SQL_LIST_NOTIFY_TEMPORARY_ROLE_MEMBERS = "SELECT domain.name AS domain_name, role.name AS role_name, " +
"principal.name AS principal_name, role_member.expiration, role_member.review_reminder FROM role_member " +
"JOIN role ON role.role_id=role_member.role_id " +
"JOIN principal ON principal.principal_id=role_member.principal_id " +
"JOIN domain ON domain.domain_id=role.domain_id " +
"WHERE role_member.last_notified_time=? AND role_member.server=?;";
private static final String SQL_UPDATE_ROLE_MEMBERS_REVIEW_NOTIFICATION_TIMESTAMP =
"UPDATE role_member SET review_last_notified_time=?, review_server=? " +
"WHERE (" +
"review_reminder > CURRENT_TIME AND (expiration is NULL) AND DATEDIFF(review_reminder, CURRENT_TIME) IN (0,1,7,14,21,28) AND " +
"(review_last_notified_time IS NULL || review_last_notified_time < (CURRENT_TIME - INTERVAL ? DAY)));";
private static final String SQL_LIST_NOTIFY_REVIEW_ROLE_MEMBERS = "SELECT domain.name AS domain_name, role.name AS role_name, " +
"principal.name AS principal_name, role_member.expiration, role_member.review_reminder FROM role_member " +
"JOIN role ON role.role_id=role_member.role_id " +
"JOIN principal ON principal.principal_id=role_member.principal_id " +
"JOIN domain ON domain.domain_id=role.domain_id " +
"WHERE role_member.review_last_notified_time=? AND role_member.review_server=?;";
private static final String SQL_UPDATE_ROLE_REVIEW_TIMESTAMP = "UPDATE role SET last_reviewed_time=CURRENT_TIMESTAMP(3) WHERE role_id=?;";
private static final String SQL_LIST_ROLES_WITH_RESTRICTIONS = "SELECT domain.name as domain_name, "
+ "role.name as role_name, domain.user_authority_filter as domain_user_authority_filter FROM role "
+ "JOIN domain ON role.domain_id=domain.domain_id WHERE role.user_authority_filter!='' "
+ "OR role.user_authority_expiration!='' OR domain.user_authority_filter!='';";
private static final String SQL_GET_GROUP = "SELECT * FROM principal_group "
+ "JOIN domain ON domain.domain_id=principal_group.domain_id "
+ "WHERE domain.name=? AND principal_group.name=?;";
private static final String SQL_INSERT_GROUP = "INSERT INTO principal_group (name, domain_id, audit_enabled, self_serve,"
+ " review_enabled, notify_roles, user_authority_filter, user_authority_expiration, member_expiry_days, service_expiry_days) "
+ "VALUES (?,?,?,?,?,?,?,?,?,?);";
private static final String SQL_UPDATE_GROUP = "UPDATE principal_group SET audit_enabled=?, self_serve=?, "
+ "review_enabled=?, notify_roles=?, user_authority_filter=?, user_authority_expiration=?,"
+ "member_expiry_days=?, service_expiry_days=? WHERE group_id=?;";
private static final String SQL_GET_GROUP_ID = "SELECT group_id FROM principal_group WHERE domain_id=? AND name=?;";
private static final String SQL_DELETE_GROUP = "DELETE FROM principal_group WHERE domain_id=? AND name=?;";
private static final String SQL_UPDATE_GROUP_MOD_TIMESTAMP = "UPDATE principal_group "
+ "SET modified=CURRENT_TIMESTAMP(3) WHERE group_id=?;";
private static final String SQL_COUNT_GROUP = "SELECT COUNT(*) FROM principal_group WHERE domain_id=?;";
private static final String SQL_GET_GROUP_MEMBER = "SELECT principal.principal_id, principal_group_member.expiration, "
+ "principal_group_member.req_principal, principal_group_member.system_disabled FROM principal "
+ "JOIN principal_group_member ON principal_group_member.principal_id=principal.principal_id "
+ "JOIN principal_group ON principal_group.group_id=principal_group_member.group_id "
+ "WHERE principal_group.group_id=? AND principal.name=?;";
private static final String SQL_GET_TEMP_GROUP_MEMBER = "SELECT principal.principal_id, principal_group_member.expiration, "
+ "principal_group_member.req_principal, principal_group_member.system_disabled FROM principal "
+ "JOIN principal_group_member ON principal_group_member.principal_id=principal.principal_id "
+ "JOIN principal_group ON principal_group.group_id=principal_group_member.group_id "
+ "WHERE principal_group.group_id=? AND principal.name=? AND principal_group_member.expiration=?;";
private static final String SQL_GET_PENDING_GROUP_MEMBER = "SELECT principal.principal_id, "
+ "pending_principal_group_member.expiration, pending_principal_group_member.req_principal FROM principal "
+ "JOIN pending_principal_group_member ON pending_principal_group_member.principal_id=principal.principal_id "
+ "JOIN principal_group ON principal_group.group_id=pending_principal_group_member.group_id "
+ "WHERE principal_group.group_id=? AND principal.name=?;";
private static final String SQL_GET_TEMP_PENDING_GROUP_MEMBER = "SELECT principal.principal_id, "
+ "pending_principal_group_member.expiration, pending_principal_group_member.req_principal FROM principal "
+ "JOIN pending_principal_group_member ON pending_principal_group_member.principal_id=principal.principal_id "
+ "JOIN principal_group ON principal_group.group_id=pending_principal_group_member.group_id "
+ "WHERE principal_group.group_id=? AND principal.name=? AND pending_principal_group_member.expiration=?;";
private static final String SQL_LIST_GROUP_AUDIT_LOGS = "SELECT * FROM principal_group_audit_log WHERE group_id=?;";
private static final String SQL_UPDATE_GROUP_REVIEW_TIMESTAMP = "UPDATE principal_group SET last_reviewed_time=CURRENT_TIMESTAMP(3) WHERE group_id=?;";
private static final String SQL_LIST_GROUPS_WITH_RESTRICTIONS = "SELECT domain.name as domain_name, "
+ "principal_group.name as group_name, domain.user_authority_filter as domain_user_authority_filter FROM principal_group "
+ "JOIN domain ON principal_group.domain_id=domain.domain_id WHERE principal_group.user_authority_filter!='' "
+ "OR principal_group.user_authority_expiration!='' OR domain.user_authority_filter!='';";
private static final String SQL_LIST_GROUP_MEMBERS = "SELECT principal.name, principal_group_member.expiration, "
+ "principal_group_member.active, principal_group_member.audit_ref, principal_group_member.system_disabled FROM principal "
+ "JOIN principal_group_member ON principal_group_member.principal_id=principal.principal_id "
+ "JOIN principal_group ON principal_group.group_id=principal_group_member.group_id WHERE principal_group.group_id=?;";
private static final String SQL_LIST_PENDING_GROUP_MEMBERS = "SELECT principal.name, pending_principal_group_member.expiration, "
+ "pending_principal_group_member.req_time, pending_principal_group_member.audit_ref FROM principal "
+ "JOIN pending_principal_group_member ON pending_principal_group_member.principal_id=principal.principal_id "
+ "JOIN principal_group ON principal_group.group_id=pending_principal_group_member.group_id WHERE principal_group.group_id=?;";
private static final String SQL_COUNT_GROUP_MEMBERS = "SELECT COUNT(*) FROM principal_group_member WHERE group_id=?;";
private static final String SQL_STD_GROUP_MEMBER_EXISTS = "SELECT principal_id FROM principal_group_member WHERE group_id=? AND principal_id=?;";
private static final String SQL_PENDING_GROUP_MEMBER_EXISTS = "SELECT principal_id FROM pending_principal_group_member WHERE group_id=? AND principal_id=?;";
private static final String SQL_UPDATE_GROUP_MEMBER = "UPDATE principal_group_member "
+ "SET expiration=?, active=?, audit_ref=?, req_principal=? WHERE group_id=? AND principal_id=?;";
private static final String SQL_UPDATE_GROUP_MEMBER_DISABLED_STATE = "UPDATE principal_group_member "
+ "SET system_disabled=?, audit_ref=?, req_principal=? WHERE group_id=? AND principal_id=?;";
private static final String SQL_UPDATE_PENDING_GROUP_MEMBER = "UPDATE pending_principal_group_member "
+ "SET expiration=?, audit_ref=?, req_time=CURRENT_TIMESTAMP(3), req_principal=? WHERE group_id=? AND principal_id=?;";
private static final String SQL_INSERT_GROUP_MEMBER = "INSERT INTO principal_group_member "
+ "(group_id, principal_id, expiration, active, audit_ref, req_principal) VALUES (?,?,?,?,?,?);";
private static final String SQL_INSERT_PENDING_GROUP_MEMBER = "INSERT INTO pending_principal_group_member "
+ "(group_id, principal_id, expiration, audit_ref, req_principal) VALUES (?,?,?,?,?);";
private static final String SQL_DELETE_GROUP_MEMBER = "DELETE FROM principal_group_member WHERE group_id=? AND principal_id=?;";
private static final String SQL_DELETE_PENDING_GROUP_MEMBER = "DELETE FROM pending_principal_group_member WHERE group_id=? AND principal_id=?;";
private static final String SQL_INSERT_GROUP_AUDIT_LOG = "INSERT INTO principal_group_audit_log "
+ "(group_id, admin, member, action, audit_ref) VALUES (?,?,?,?,?);";
private static final String SQL_GET_PRINCIPAL_GROUPS = "SELECT principal_group.name, domain.name, principal_group_member.expiration, "
+ "principal_group_member.system_disabled FROM principal_group_member "
+ "JOIN principal_group ON principal_group.group_id=principal_group_member.group_id "
+ "JOIN domain ON domain.domain_id=principal_group.domain_id "
+ "WHERE principal_group_member.principal_id=?;";
private static final String SQL_GET_PRINCIPAL_GROUPS_DOMAIN = "SELECT principal_group.name, domain.name, principal_group_member.expiration, "
+ "principal_group_member.system_disabled FROM principal_group_member "
+ "JOIN principal_group ON principal_group.group_id=principal_group_member.group_id "
+ "JOIN domain ON domain.domain_id=principal_group.domain_id "
+ "WHERE principal_group_member.principal_id=? AND domain.domain_id=?;";
private static final String SQL_GET_DOMAIN_GROUPS = "SELECT * FROM principal_group WHERE domain_id=?;";
private static final String SQL_GET_DOMAIN_GROUP_MEMBERS = "SELECT principal_group.name, principal.name, "
+ "principal_group_member.expiration, principal_group_member.system_disabled FROM principal "
+ "JOIN principal_group_member ON principal_group_member.principal_id=principal.principal_id "
+ "JOIN principal_group ON principal_group.group_id=principal_group_member.group_id "
+ "WHERE principal_group.domain_id=?;";
private static final String SQL_PENDING_ORG_AUDIT_GROUP_MEMBER_LIST = "SELECT do.name AS domain, grp.name AS group_name, "
+ "principal.name AS member, pgm.expiration, pgm.audit_ref, pgm.req_time, pgm.req_principal "
+ "FROM principal JOIN pending_principal_group_member pgm "
+ "ON pgm.principal_id=principal.principal_id JOIN principal_group grp ON grp.group_id=pgm.group_id JOIN domain do ON grp.domain_id=do.domain_id "
+ "WHERE grp.audit_enabled=true AND grp.domain_id IN ( select domain_id FROM domain WHERE org IN ( "
+ "SELECT DISTINCT role.name AS org FROM role_member JOIN role ON role.role_id=role_member.role_id "
+ "WHERE role_member.principal_id=? AND role.domain_id=?) ) order by do.name, grp.name, principal.name;";
private static final String SQL_PENDING_DOMAIN_AUDIT_GROUP_MEMBER_LIST = "SELECT do.name AS domain, grp.name AS group_name, "
+ "principal.name AS member, pgm.expiration, pgm.audit_ref, pgm.req_time, pgm.req_principal "
+ "FROM principal JOIN pending_principal_group_member pgm "
+ "ON pgm.principal_id=principal.principal_id JOIN principal_group grp ON grp.group_id=pgm.group_id JOIN domain do ON grp.domain_id=do.domain_id "
+ "WHERE grp.audit_enabled=true AND grp.domain_id IN ( select domain_id FROM domain WHERE name IN ( "
+ "SELECT DISTINCT role.name AS domain_name FROM role_member JOIN role ON role.role_id=role_member.role_id "
+ "WHERE role_member.principal_id=? AND role.domain_id=?) ) order by do.name, grp.name, principal.name;";
private static final String SQL_PENDING_DOMAIN_ADMIN_GROUP_MEMBER_LIST = "SELECT do.name AS domain, grp.name AS group_name, "
+ "principal.name AS member, pgm.expiration, pgm.audit_ref, pgm.req_time, pgm.req_principal "
+ "FROM principal JOIN pending_principal_group_member pgm "
+ "ON pgm.principal_id=principal.principal_id JOIN principal_group grp ON grp.group_id=pgm.group_id JOIN domain do ON grp.domain_id=do.domain_id "
+ "WHERE (grp.self_serve=true OR grp.review_enabled=true) AND grp.domain_id IN ( SELECT domain.domain_id FROM domain JOIN role "
+ "ON role.domain_id=domain.domain_id JOIN role_member ON role.role_id=role_member.role_id "
+ "WHERE role_member.principal_id=? AND role_member.active=true AND role.name='admin' ) "
+ "order by do.name, grp.name, principal.name;";
private static final String SQL_GET_EXPIRED_PENDING_GROUP_MEMBERS = "SELECT d.name, grp.name, p.name, pgm.expiration, pgm.audit_ref, pgm.req_time, pgm.req_principal "
+ "FROM principal p JOIN pending_principal_group_member pgm "
+ "ON pgm.principal_id=p.principal_id JOIN principal_group grp ON pgm.group_id=grp.group_id JOIN domain d ON d.domain_id=grp.domain_id "
+ "WHERE pgm.req_time < (CURRENT_TIME - INTERVAL ? DAY);";
private static final String SQL_AUDIT_ENABLED_PENDING_GROUP_MEMBERSHIP_REMINDER_ENTRIES = "SELECT distinct d.org, d.name FROM pending_principal_group_member pgm "
+ "JOIN principal_group grp ON grp.group_id=pgm.group_id JOIN domain d ON grp.domain_id=d.domain_id "
+ "WHERE grp.audit_enabled=true AND pgm.last_notified_time=? AND pgm.server=?;";
private static final String SQL_UPDATE_PENDING_GROUP_MEMBERS_NOTIFICATION_TIMESTAMP = "UPDATE pending_principal_group_member SET last_notified_time=?, server=? "
+ "WHERE DAYOFWEEK(req_time)=DAYOFWEEK(?) AND (last_notified_time IS NULL || last_notified_time < (CURRENT_TIME - INTERVAL ? DAY));";
private static final String SQL_ADMIN_PENDING_GROUP_MEMBERSHIP_REMINDER_DOMAINS = "SELECT distinct d.name FROM pending_principal_group_member pgm "
+ "JOIN principal_group grp ON grp.group_id=pgm.group_id JOIN domain d ON grp.domain_id=d.domain_id "
+ "WHERE grp.self_serve=true AND pgm.last_notified_time=? AND pgm.server=?;";
private static final String SQL_UPDATE_GROUP_MEMBERS_EXPIRY_NOTIFICATION_TIMESTAMP = "UPDATE principal_group_member SET last_notified_time=?, server=? "
+ "WHERE expiration > CURRENT_TIME AND DATEDIFF(expiration, CURRENT_TIME) IN (0,1,7,14,21,28) "
+ "AND (last_notified_time IS NULL || last_notified_time < (CURRENT_TIME - INTERVAL ? DAY));";
private static final String SQL_LIST_NOTIFY_TEMPORARY_GROUP_MEMBERS = "SELECT domain.name AS domain_name, principal_group.name AS group_name, "
+ "principal.name AS principal_name, principal_group_member.expiration FROM principal_group_member "
+ "JOIN principal_group ON principal_group.group_id=principal_group_member.group_id "
+ "JOIN principal ON principal.principal_id=principal_group_member.principal_id "
+ "JOIN domain ON domain.domain_id=principal_group.domain_id "
+ "WHERE principal_group_member.last_notified_time=? AND principal_group_member.server=?;";
private static final String SQL_UPDATE_PRINCIPAL = "UPDATE principal SET system_suspended=? WHERE name=?;";
private static final String SQL_GET_PRINCIPAL = "SELECT name FROM principal WHERE system_suspended=?;";
private static final String SQL_INSERT_ROLE_TAG = "INSERT INTO role_tags"
+ "(role_id, role_tags.key, role_tags.value) VALUES (?,?,?);";
private static final String SQL_ROLE_TAG_COUNT = "SELECT COUNT(*) FROM role_tags WHERE role_id=?";
private static final String SQL_DELETE_ROLE_TAG = "DELETE FROM role_tags WHERE role_id=? AND role_tags.key=?;";
private static final String SQL_GET_ROLE_TAGS = "SELECT rt.key, rt.value FROM role_tags rt "
+ "JOIN role r ON rt.role_id = r.role_id JOIN domain ON domain.domain_id=r.domain_id "
+ "WHERE domain.name=? AND r.name=?";
private static final String SQL_GET_DOMAIN_ROLE_TAGS = "SELECT r.name, rt.key, rt.value FROM role_tags rt "
+ "JOIN role r ON rt.role_id = r.role_id JOIN domain ON domain.domain_id=r.domain_id "
+ "WHERE domain.name=?";
private static final String SQL_INSERT_DOMAIN_TAG = "INSERT INTO domain_tags"
+ "(domain_id, domain_tags.key, domain_tags.value) VALUES (?,?,?);";
private static final String SQL_DOMAIN_TAG_COUNT = "SELECT COUNT(*) FROM domain_tags WHERE domain_id=?";
private static final String SQL_DELETE_DOMAIN_TAG = "DELETE FROM domain_tags WHERE domain_id=? AND domain_tags.key=?;";
private static final String SQL_GET_DOMAIN_TAGS = "SELECT dt.key, dt.value FROM domain_tags dt "
+ "JOIN domain d ON dt.domain_id = d.domain_id WHERE d.name=?";
private static final String SQL_LOOKUP_DOMAIN_BY_TAG_KEY = "SELECT d.name FROM domain d "
+ "JOIN domain_tags dt ON dt.domain_id = d.domain_id WHERE dt.key=?";
private static final String SQL_LOOKUP_DOMAIN_BY_TAG_KEY_VAL = "SELECT d.name FROM domain d "
+ "JOIN domain_tags dt ON dt.domain_id = d.domain_id WHERE dt.key=? AND dt.value=?";
private static final String CACHE_DOMAIN = "d:";
private static final String CACHE_ROLE = "r:";
private static final String CACHE_GROUP = "g:";
private static final String CACHE_POLICY = "p:";
private static final String CACHE_SERVICE = "s:";
private static final String CACHE_PRINCIPAL = "u:";
private static final String CACHE_HOST = "h:";
private static final String ALL_PRINCIPALS = "*";
private static final String AWS_ARN_PREFIX = "arn:aws:iam::";
private static final String MYSQL_SERVER_TIMEZONE = System.getProperty(ZMSConsts.ZMS_PROP_MYSQL_SERVER_TIMEZONE, "GMT");
private int roleTagsLimit = ZMSConsts.ZMS_DEFAULT_TAG_LIMIT;
private int domainTagsLimit = ZMSConsts.ZMS_DEFAULT_TAG_LIMIT;
Connection con;
boolean transactionCompleted;
int queryTimeout = 60;
Map<String, Integer> objectMap;
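// wrap an already-established JDBC connection. when autoCommit is false
// the connection runs inside an explicit transaction that must be finished
// with commitChanges() or rollbackChanges() before close()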
public JDBCConnection(Connection con, boolean autoCommit) throws SQLException {
this.con = con;
con.setAutoCommit(autoCommit);
transactionCompleted = autoCommit;
objectMap = new HashMap<>();
}
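// a minimal usage sketch (illustrative only), assuming the caller already
// holds a java.sql.Connection named con and handles SQLException from the
// constructor:
//
//     JDBCConnection zmsCon = new JDBCConnection(con, false); // explicit transaction
//     try {
//         zmsCon.insertDomain(domain);
//         zmsCon.commitChanges();
//     } catch (RuntimeException ex) {
//         zmsCon.rollbackChanges();
//         throw ex;
//     } finally {
//         zmsCon.close();
//     }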
@Override
public void setOperationTimeout(int queryTimeout) {
this.queryTimeout = queryTimeout;
}
@Override
public void setTagLimit(int domainLimit, int roleLimit) {
this.domainTagsLimit = domainLimit;
this.roleTagsLimit = roleLimit;
}
@Override
public void close() {
if (con == null) {
return;
}
// the client is always responsible for properly committing
// all changes before closing the connection, but in case
// we missed it, we're going to be safe and commit all
// changes before closing the connection
try {
commitChanges();
} catch (Exception ex) {
// error is already logged but we have to continue
// processing so we can close our connection
}
try {
con.close();
con = null;
} catch (SQLException ex) {
LOG.error("close: state - {}, code - {}, message - {}", ex.getSQLState(),
ex.getErrorCode(), ex.getMessage());
}
}
@Override
public void rollbackChanges() {
if (LOG.isDebugEnabled()) {
LOG.debug("rollback transaction changes...");
}
if (transactionCompleted) {
return;
}
try {
con.rollback();
} catch (SQLException ex) {
LOG.error("rollbackChanges: state - {}, code - {}, message - {}", ex.getSQLState(),
ex.getErrorCode(), ex.getMessage());
}
transactionCompleted = true;
try {
con.setAutoCommit(true);
} catch (SQLException ex) {
LOG.error("rollback auto-commit after failure: state - {}, code - {}, message - {}",
ex.getSQLState(), ex.getErrorCode(), ex.getMessage());
}
}
@Override
public void commitChanges() {
final String caller = "commitChanges";
if (transactionCompleted) {
return;
}
try {
con.commit();
transactionCompleted = true;
con.setAutoCommit(true);
} catch (SQLException ex) {
LOG.error("commitChanges: state - {}, code - {}, message - {}", ex.getSQLState(),
ex.getErrorCode(), ex.getMessage());
transactionCompleted = true;
throw sqlError(ex, caller);
}
}
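// helper that logs the prepared statement at debug level, applies the
// configured query timeout and then runs the update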
int executeUpdate(PreparedStatement ps, String caller) throws SQLException {
if (LOG.isDebugEnabled()) {
LOG.debug("{}: {}", caller, ps.toString());
}
ps.setQueryTimeout(queryTimeout);
return ps.executeUpdate();
}
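// query counterpart of executeUpdate - same debug logging and timeout handling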
ResultSet executeQuery(PreparedStatement ps, String caller) throws SQLException {
if (LOG.isDebugEnabled()) {
LOG.debug("{}: {}", caller, ps.toString());
}
ps.setQueryTimeout(queryTimeout);
return ps.executeQuery();
}
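// map the current result set row into a Domain object; tags are fetched
// with a separate query only when requested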
Domain saveDomainSettings(String domainName, ResultSet rs, boolean fetchTags) throws SQLException {
Domain domain = new Domain().setName(domainName)
.setAuditEnabled(rs.getBoolean(ZMSConsts.DB_COLUMN_AUDIT_ENABLED))
.setEnabled(rs.getBoolean(ZMSConsts.DB_COLUMN_ENABLED))
.setModified(Timestamp.fromMillis(rs.getTimestamp(ZMSConsts.DB_COLUMN_MODIFIED).getTime()))
.setDescription(saveValue(rs.getString(ZMSConsts.DB_COLUMN_DESCRIPTION)))
.setOrg(saveValue(rs.getString(ZMSConsts.DB_COLUMN_ORG)))
.setId(saveUuidValue(rs.getString(ZMSConsts.DB_COLUMN_UUID)))
.setAccount(saveValue(rs.getString(ZMSConsts.DB_COLUMN_ACCOUNT)))
.setAzureSubscription(saveValue(rs.getString(ZMSConsts.DB_COLUMN_AZURE_SUBSCRIPTION)))
.setYpmId(rs.getInt(ZMSConsts.DB_COLUMN_PRODUCT_ID))
.setCertDnsDomain(saveValue(rs.getString(ZMSConsts.DB_COLUMN_CERT_DNS_DOMAIN)))
.setMemberExpiryDays(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_MEMBER_EXPIRY_DAYS), 0))
.setTokenExpiryMins(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_TOKEN_EXPIRY_MINS), 0))
.setRoleCertExpiryMins(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_ROLE_CERT_EXPIRY_MINS), 0))
.setServiceCertExpiryMins(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_SERVICE_CERT_EXPIRY_MINS), 0))
.setApplicationId(saveValue(rs.getString(ZMSConsts.DB_COLUMN_APPLICATION_ID)))
.setSignAlgorithm(saveValue(rs.getString(ZMSConsts.DB_COLUMN_SIGN_ALGORITHM)))
.setServiceExpiryDays(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_SERVICE_EXPIRY_DAYS), 0))
.setGroupExpiryDays(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_GROUP_EXPIRY_DAYS), 0))
.setUserAuthorityFilter(saveValue(rs.getString(ZMSConsts.DB_COLUMN_USER_AUTHORITY_FILTER)))
.setBusinessService(saveValue(rs.getString(ZMSConsts.DB_COLUMN_BUSINESS_SERVICE)));
if (fetchTags) {
domain.setTags(getDomainTags(domainName));
}
return domain;
}
@Override
public Domain getDomain(String domainName) {
final String caller = "getDomain";
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN)) {
ps.setString(1, domainName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
return saveDomainSettings(domainName, rs, true);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return null;
}
@Override
public boolean insertDomain(Domain domain) {
int affectedRows;
final String caller = "insertDomain";
// we need to verify that our account and product ids are unique
// in the store. we can't rely on db uniqueness check since
// some of the domains will not have these attributes set
verifyDomainAccountUniqueness(domain.getName(), domain.getAccount(), caller);
verifyDomainSubscriptionUniqueness(domain.getName(), domain.getAzureSubscription(), caller);
verifyDomainProductIdUniqueness(domain.getName(), domain.getYpmId(), caller);
verifyDomainNameDashUniqueness(domain.getName(), caller);
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_DOMAIN)) {
ps.setString(1, domain.getName());
ps.setString(2, processInsertValue(domain.getDescription()));
ps.setString(3, processInsertValue(domain.getOrg()));
ps.setString(4, processInsertUuidValue(domain.getId()));
ps.setBoolean(5, processInsertValue(domain.getEnabled(), true));
ps.setBoolean(6, processInsertValue(domain.getAuditEnabled(), false));
ps.setString(7, processInsertValue(domain.getAccount()));
ps.setInt(8, processInsertValue(domain.getYpmId()));
ps.setString(9, processInsertValue(domain.getApplicationId()));
ps.setString(10, processInsertValue(domain.getCertDnsDomain()));
ps.setInt(11, processInsertValue(domain.getMemberExpiryDays()));
ps.setInt(12, processInsertValue(domain.getTokenExpiryMins()));
ps.setInt(13, processInsertValue(domain.getServiceCertExpiryMins()));
ps.setInt(14, processInsertValue(domain.getRoleCertExpiryMins()));
ps.setString(15, processInsertValue(domain.getSignAlgorithm()));
ps.setInt(16, processInsertValue(domain.getServiceExpiryDays()));
ps.setString(17, processInsertValue(domain.getUserAuthorityFilter()));
ps.setInt(18, processInsertValue(domain.getGroupExpiryDays()));
ps.setString(19, processInsertValue(domain.getAzureSubscription()));
ps.setString(20, processInsertValue(domain.getBusinessService()));
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
void verifyDomainNameDashUniqueness(final String name, String caller) {
// with our certificates we replace .'s with -'s
// so we need to make sure we don't allow creation
// of domains such as sports.api and sports-api since
// they'll have the same component value
final String domainMatch = name.replace('.', '-');
final String domainQuery = name.replace('.', '_').replace('-', '_');
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAINS_WITH_NAME)) {
ps.setString(1, domainQuery);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
final String domainName = rs.getString(1);
if (domainMatch.equals(domainName.replace('.', '-'))) {
throw requestError(caller, "Domain name conflict: " + domainName);
}
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
void verifyDomainProductIdUniqueness(String name, Integer productId, String caller) {
if (productId == null || productId == 0) {
return;
}
String domainName = lookupDomainById(null, null, productId);
if (domainName != null && !domainName.equals(name)) {
throw requestError(caller, "Product Id: " + productId +
" is already assigned to domain: " + domainName);
}
}
void verifyDomainAccountUniqueness(final String name, final String account, final String caller) {
if (account == null || account.isEmpty()) {
return;
}
String domainName = lookupDomainById(account, null, 0);
if (domainName != null && !domainName.equals(name)) {
throw requestError(caller, "Account Id: " + account +
" is already assigned to domain: " + domainName);
}
}
void verifyDomainSubscriptionUniqueness(final String name, final String subscription, final String caller) {
if (subscription == null || subscription.isEmpty()) {
return;
}
String domainName = lookupDomainById(null, subscription, 0);
if (domainName != null && !domainName.equals(name)) {
throw requestError(caller, "Subscription Id: " + subscription +
" is already assigned to domain: " + domainName);
}
}
@Override
public boolean updateDomain(Domain domain) {
int affectedRows;
final String caller = "updateDomain";
// we need to verify that our account and product ids are unique
// in the store. we can't rely on db uniqueness check since
// some of the domains will not have these attributes set
verifyDomainAccountUniqueness(domain.getName(), domain.getAccount(), caller);
verifyDomainSubscriptionUniqueness(domain.getName(), domain.getAzureSubscription(), caller);
verifyDomainProductIdUniqueness(domain.getName(), domain.getYpmId(), caller);
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_DOMAIN)) {
ps.setString(1, processInsertValue(domain.getDescription()));
ps.setString(2, processInsertValue(domain.getOrg()));
ps.setString(3, processInsertUuidValue(domain.getId()));
ps.setBoolean(4, processInsertValue(domain.getEnabled(), true));
ps.setBoolean(5, processInsertValue(domain.getAuditEnabled(), false));
ps.setString(6, processInsertValue(domain.getAccount()));
ps.setInt(7, processInsertValue(domain.getYpmId()));
ps.setString(8, processInsertValue(domain.getApplicationId()));
ps.setString(9, processInsertValue(domain.getCertDnsDomain()));
ps.setInt(10, processInsertValue(domain.getMemberExpiryDays()));
ps.setInt(11, processInsertValue(domain.getTokenExpiryMins()));
ps.setInt(12, processInsertValue(domain.getServiceCertExpiryMins()));
ps.setInt(13, processInsertValue(domain.getRoleCertExpiryMins()));
ps.setString(14, processInsertValue(domain.getSignAlgorithm()));
ps.setInt(15, processInsertValue(domain.getServiceExpiryDays()));
ps.setString(16, processInsertValue(domain.getUserAuthorityFilter()));
ps.setInt(17, processInsertValue(domain.getGroupExpiryDays()));
ps.setString(18, processInsertValue(domain.getAzureSubscription()));
ps.setString(19, processInsertValue(domain.getBusinessService()));
ps.setString(20, domain.getName());
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
// invalidate the cache domain entry
objectMap.remove(CACHE_DOMAIN + domain.getName());
return (affectedRows > 0);
}
@Override
public boolean updateDomainModTimestamp(String domainName) {
int affectedRows;
final String caller = "updateDomainModTimestamp";
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_DOMAIN_MOD_TIMESTAMP)) {
ps.setString(1, domainName);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public long getDomainModTimestamp(String domainName) {
long modTime = 0;
final String caller = "getDomainModTimestamp";
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN_MOD_TIMESTAMP)) {
ps.setString(1, domainName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
modTime = rs.getTimestamp(1).getTime();
}
}
} catch (SQLException ex) {
// ignore any failures and return default value 0
}
return modTime;
}
@Override
public boolean deleteDomain(String domainName) {
int affectedRows;
final String caller = "deleteDomain";
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_DOMAIN)) {
ps.setString(1, domainName);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
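// pick the list-domains statement based on the optional prefix and
// modifiedSince filters. for prefix scans we compute an exclusive upper
// bound by incrementing the last character of the prefix so the query
// can use a simple range condition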
PreparedStatement prepareDomainScanStatement(String prefix, long modifiedSince)
throws SQLException {
PreparedStatement ps;
if (prefix != null && prefix.length() > 0) {
int len = prefix.length();
char c = (char) (prefix.charAt(len - 1) + 1);
String stop = prefix.substring(0, len - 1) + c;
if (modifiedSince != 0) {
ps = con.prepareStatement(SQL_LIST_DOMAIN_PREFIX_MODIFIED);
ps.setString(1, prefix);
ps.setString(2, stop);
Calendar cal = Calendar.getInstance(TimeZone.getTimeZone(MYSQL_SERVER_TIMEZONE));
ps.setTimestamp(3, new java.sql.Timestamp(modifiedSince), cal);
} else {
ps = con.prepareStatement(SQL_LIST_DOMAIN_PREFIX);
ps.setString(1, prefix);
ps.setString(2, stop);
}
} else if (modifiedSince != 0) {
ps = con.prepareStatement(SQL_LIST_DOMAIN_MODIFIED);
Calendar cal = Calendar.getInstance(TimeZone.getTimeZone(MYSQL_SERVER_TIMEZONE));
ps.setTimestamp(1, new java.sql.Timestamp(modifiedSince), cal);
} else {
ps = con.prepareStatement(SQL_LIST_DOMAIN);
}
return ps;
}
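// choose the domain-by-role statement depending on which of the optional
// role member and role name filters were provided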
PreparedStatement prepareScanByRoleStatement(String roleMember, String roleName)
throws SQLException {
PreparedStatement ps;
boolean memberPresent = (roleMember != null && !roleMember.isEmpty());
boolean rolePresent = (roleName != null && !roleName.isEmpty());
if (memberPresent && rolePresent) {
ps = con.prepareStatement(SQL_LIST_DOMAIN_ROLE_NAME_MEMBER);
ps.setString(1, roleMember);
ps.setString(2, roleName);
} else if (memberPresent) {
ps = con.prepareStatement(SQL_LIST_DOMAIN_ROLE_MEMBER);
ps.setString(1, roleMember);
} else if (rolePresent) {
ps = con.prepareStatement(SQL_LIST_DOMAIN_ROLE_NAME);
ps.setString(1, roleName);
} else {
ps = con.prepareStatement(SQL_LIST_DOMAIN);
}
return ps;
}
@Override
public List<String> lookupDomainByRole(String roleMember, String roleName) {
final String caller = "lookupDomainByRole";
// it's possible that we'll get duplicate domain names returned
// from this result - e.g. when no role name is filtered on - so
// we're going to automatically skip those by using a set
Set<String> uniqueDomains = new HashSet<>();
try (PreparedStatement ps = prepareScanByRoleStatement(roleMember, roleName)) {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
uniqueDomains.add(rs.getString(ZMSConsts.DB_COLUMN_NAME));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
List<String> domains = new ArrayList<>(uniqueDomains);
Collections.sort(domains);
return domains;
}
@Override
public List<String> lookupDomainByBusinessService(String businessService) {
final String caller = "lookupDomainByBusinessService";
List<String> domains = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_DOMAIN_WITH_BUSINESS_SERVICE)) {
ps.setString(1, businessService.trim());
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
domains.add(rs.getString(ZMSConsts.DB_COLUMN_NAME));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return domains;
}
@Override
public String lookupDomainById(String account, String subscription, int productId) {
final String caller = "lookupDomain";
String sqlCmd;
if (account != null) {
sqlCmd = SQL_GET_DOMAIN_WITH_ACCOUNT;
} else if (subscription != null) {
sqlCmd = SQL_GET_DOMAIN_WITH_SUBSCRIPTION;
} else {
sqlCmd = SQL_GET_DOMAIN_WITH_PRODUCT_ID;
}
String domainName = null;
try (PreparedStatement ps = con.prepareStatement(sqlCmd)) {
if (account != null) {
ps.setString(1, account.trim());
} else if (subscription != null) {
ps.setString(1, subscription.trim());
} else {
ps.setInt(1, productId);
}
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
domainName = rs.getString(1);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return domainName;
}
@Override
public List<String> listDomains(String prefix, long modifiedSince) {
final String caller = "listDomains";
List<String> domains = new ArrayList<>();
try (PreparedStatement ps = prepareDomainScanStatement(prefix, modifiedSince)) {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
domains.add(rs.getString(ZMSConsts.DB_COLUMN_NAME));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
Collections.sort(domains);
return domains;
}
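// remove the given tag keys from the domain; returns false if any of the
// deletes did not affect a row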
public boolean deleteDomainTags(String domainName, Set<String> tagsToRemove) {
final String caller = "deleteDomainTags";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
boolean res = true;
for (String tagKey : tagsToRemove) {
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_DOMAIN_TAG)) {
ps.setInt(1, domainId);
ps.setString(2, processInsertValue(tagKey));
res &= (executeUpdate(ps, caller) > 0);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
return res;
}
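// insert the given tag key/value pairs for the domain, stopping once the
// configured per-domain tag limit (domainTagsLimit) has been reached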
public boolean insertDomainTags(String domainName, Map<String, StringList> tags) {
final String caller = "updateDomainTags";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int curTagCount = getDomainTagsCount(domainId);
int remainingTagsToInsert = domainTagsLimit - curTagCount;
boolean res = true;
for (Map.Entry<String, StringList> e : tags.entrySet()) {
for (int i = 0; i < e.getValue().getList().size() && remainingTagsToInsert-- > 0; i++) {
String tagValue = e.getValue().getList().get(i);
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_DOMAIN_TAG)) {
ps.setInt(1, domainId);
ps.setString(2, processInsertValue(e.getKey()));
ps.setString(3, processInsertValue(tagValue));
res &= (executeUpdate(ps, caller) > 0);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
}
if (remainingTagsToInsert < 0) {
LOG.info("Domain tags limit for domain: [{}] has reached", domainName);
}
return res;
}
private int getDomainTagsCount(int domainId) {
final String caller = "getDomainTagsCount";
int count = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_DOMAIN_TAG_COUNT)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
count = rs.getInt(1);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return count;
}
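// fetch all tags for the domain as a key to value-list map; returns null
// when the domain has no tags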
public Map<String, StringList> getDomainTags(String domainName) {
final String caller = "getDomainTags";
Map<String, StringList> domainTag = null;
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN_TAGS)) {
ps.setString(1, domainName);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
String tagKey = rs.getString(1);
String tagValue = rs.getString(2);
if (domainTag == null) {
domainTag = new HashMap<>();
}
StringList tagValues = domainTag.computeIfAbsent(tagKey, k -> new StringList().setList(new ArrayList<>()));
tagValues.getList().add(tagValue);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return domainTag;
}
public List<String> lookupDomainByTags(String tagKey, String tagValue) {
final String caller = "lookupDomainByTags";
// since a domain tag might include multiple values, duplicate
// domain names are possible. use a Set to avoid duplicates
Set<String> uniqueDomains = new HashSet<>();
try (PreparedStatement ps = prepareScanByTags(tagKey, tagValue)) {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
uniqueDomains.add(rs.getString(ZMSConsts.DB_COLUMN_NAME));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
List<String> domains = new ArrayList<>(uniqueDomains);
Collections.sort(domains);
return domains;
}
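// look up domains by tag key only or by key and value, depending on
// whether a tag value was provided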
PreparedStatement prepareScanByTags(String tagKey, String tagValue) throws SQLException {
PreparedStatement ps;
if (!StringUtil.isEmpty(tagValue)) {
ps = con.prepareStatement(SQL_LOOKUP_DOMAIN_BY_TAG_KEY_VAL);
ps.setString(1, tagKey);
ps.setString(2, tagValue);
} else {
ps = con.prepareStatement(SQL_LOOKUP_DOMAIN_BY_TAG_KEY);
ps.setString(1, tagKey);
}
return ps;
}
@Override
public boolean insertDomainTemplate(String domainName, String templateName, String params) {
final String caller = "insertDomainTemplate";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_DOMAIN_TEMPLATE)) {
ps.setInt(1, domainId);
ps.setString(2, templateName);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updateDomainTemplate(String domainName, String templateName, TemplateMetaData templateMetaData) {
final String caller = "updateDomainTemplate";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_DOMAIN_TEMPLATE)) {
ps.setInt(1, templateMetaData.getLatestVersion());
ps.setInt(2, domainId);
ps.setString(3, templateName);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean deleteDomainTemplate(String domainName, String templateName, String params) {
final String caller = "deleteDomainTemplate";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_DOMAIN_TEMPLATE)) {
ps.setInt(1, domainId);
ps.setString(2, templateName);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public List<String> listDomainTemplates(String domainName) {
final String caller = "listDomainTemplates";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
List<String> templates = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_DOMAIN_TEMPLATE)) {
ps.setString(1, domainName);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
templates.add(rs.getString(1));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
Collections.sort(templates);
return templates;
}
@Override
public Map<String, List<String>> getDomainFromTemplateName(Map<String, Integer> templateNameAndLatestVersion) {
final String caller = "getDomainsFromTemplate";
Map<String, List<String>> domainNameTemplateListMap = new HashMap<>();
try (PreparedStatement ps = con.prepareStatement(generateDomainTemplateVersionQuery(templateNameAndLatestVersion))) {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
String domainName = rs.getString(ZMSConsts.DB_COLUMN_NAME);
String templateName = rs.getString(ZMSConsts.DB_COLUMN_TEMPLATE_NAME);
domainNameTemplateListMap.computeIfAbsent(domainName, k -> new ArrayList<>()).add(templateName);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return domainNameTemplateListMap;
}
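// the id lookup helpers below first consult the per-connection cache
// (objectMap) and only query the database on a miss; successful lookups
// are cached for the lifetime of this connection object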
int getDomainId(String domainName) {
return getDomainId(domainName, false);
}
int getDomainId(String domainName, boolean domainStateCheck) {
final String caller = "getDomainId";
// first check to see if our cache contains this value
// otherwise we'll contact the MySQL Server
final String cacheKey = CACHE_DOMAIN + domainName;
Integer value = objectMap.get(cacheKey);
if (value != null) {
return value;
}
int domainId = 0;
final String sqlCommand = domainStateCheck ? SQL_GET_ACTIVE_DOMAIN_ID : SQL_GET_DOMAIN_ID;
try (PreparedStatement ps = con.prepareStatement(sqlCommand)) {
ps.setString(1, domainName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
domainId = rs.getInt(1);
}
}
} catch (SQLException ex) {
LOG.error("unable to get domain id for name: {} error code: {} msg: {}",
domainName, ex.getErrorCode(), ex.getMessage());
}
// before returning the value update our cache
if (domainId != 0) {
objectMap.put(cacheKey, domainId);
}
return domainId;
}
int getPolicyId(int domainId, String policyName) {
final String caller = "getPolicyId";
// first check to see if our cache contains this value
// otherwise we'll contact the MySQL Server
final String cacheKey = CACHE_POLICY + domainId + '.' + policyName;
Integer value = objectMap.get(cacheKey);
if (value != null) {
return value;
}
int policyId = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_GET_POLICY_ID)) {
ps.setInt(1, domainId);
ps.setString(2, policyName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
policyId = rs.getInt(1);
}
}
} catch (SQLException ex) {
LOG.error("unable to get policy id for name: {} error code: {} msg: {}",
policyName, ex.getErrorCode(), ex.getMessage());
}
// before returning the value update our cache
if (policyId != 0) {
objectMap.put(cacheKey, policyId);
}
return policyId;
}
int getRoleId(int domainId, String roleName) {
final String caller = "getRoleId";
// first check to see if our cache contains this value
// otherwise we'll contact the MySQL Server
final String cacheKey = CACHE_ROLE + domainId + '.' + roleName;
Integer value = objectMap.get(cacheKey);
if (value != null) {
return value;
}
int roleId = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_GET_ROLE_ID)) {
ps.setInt(1, domainId);
ps.setString(2, roleName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
roleId = rs.getInt(1);
}
}
} catch (SQLException ex) {
LOG.error("unable to get role id for name: {} error code: {} msg: {}",
roleName, ex.getErrorCode(), ex.getMessage());
}
// before returning the value update our cache
if (roleId != 0) {
objectMap.put(cacheKey, roleId);
}
return roleId;
}
int getGroupId(int domainId, final String groupName) {
final String caller = "getGroupId";
// first check to see if our cache contains this value
// otherwise we'll contact the MySQL Server
final String cacheKey = CACHE_GROUP + domainId + '.' + groupName;
Integer value = objectMap.get(cacheKey);
if (value != null) {
return value;
}
int groupId = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_GET_GROUP_ID)) {
ps.setInt(1, domainId);
ps.setString(2, groupName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
groupId = rs.getInt(1);
}
}
} catch (SQLException ex) {
LOG.error("unable to get group id for name: {} error code: {} msg: {}",
groupName, ex.getErrorCode(), ex.getMessage());
}
// before returning the value update our cache
if (groupId != 0) {
objectMap.put(cacheKey, groupId);
}
return groupId;
}
int getServiceId(int domainId, String serviceName) {
final String caller = "getServiceId";
// first check to see if our cache contains this value
// otherwise we'll contact the MySQL Server
final String cacheKey = CACHE_SERVICE + domainId + '.' + serviceName;
Integer value = objectMap.get(cacheKey);
if (value != null) {
return value;
}
int serviceId = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_GET_SERVICE_ID)) {
ps.setInt(1, domainId);
ps.setString(2, serviceName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
serviceId = rs.getInt(1);
}
}
} catch (SQLException ex) {
LOG.error("unable to get service id for name: {} error code: {} msg: {}",
serviceName, ex.getErrorCode(), ex.getMessage());
}
// before returning the value update our cache
if (serviceId != 0) {
objectMap.put(cacheKey, serviceId);
}
return serviceId;
}
int getPrincipalId(String principal) {
final String caller = "getPrincipalId";
// first check to see if our cache contains this value
// otherwise we'll contact the MySQL Server
final String cacheKey = CACHE_PRINCIPAL + principal;
Integer value = objectMap.get(cacheKey);
if (value != null) {
return value;
}
int principalId = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_GET_PRINCIPAL_ID)) {
ps.setString(1, principal);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
principalId = rs.getInt(1);
}
}
} catch (SQLException ex) {
LOG.error("unable to get principal id for name: {} error code: {} msg: {}",
principal, ex.getErrorCode(), ex.getMessage());
}
// before returning the value update our cache
if (principalId != 0) {
objectMap.put(cacheKey, principalId);
}
return principalId;
}
int getHostId(String hostName) {
final String caller = "getHostId";
// first check to see if our cache contains this value
// otherwise we'll contact the MySQL Server
final String cacheKey = CACHE_HOST + hostName;
Integer value = objectMap.get(cacheKey);
if (value != null) {
return value;
}
int hostId = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_GET_HOST_ID)) {
ps.setString(1, hostName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
hostId = rs.getInt(1);
}
}
} catch (SQLException ex) {
LOG.error("unable to get host id for name: {} error code: {} msg: {}",
hostName, ex.getErrorCode(), ex.getMessage());
}
// before returning the value update our cache
if (hostId != 0) {
objectMap.put(cacheKey, hostId);
}
return hostId;
}
int getLastInsertId() {
int lastInsertId = 0;
final String caller = "getLastInsertId";
try (PreparedStatement ps = con.prepareStatement(SQL_LAST_INSERT_ID)) {
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
lastInsertId = rs.getInt(1);
}
}
} catch (SQLException ex) {
LOG.error("unable to get last insert id - error code: {} msg: {}",
ex.getErrorCode(), ex.getMessage());
}
return lastInsertId;
}
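// prepare the principal scan statement - when a domain name is given we
// only match principals with the "<domain-name>.%" prefix, otherwise we
// scan all principals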
PreparedStatement preparePrincipalScanStatement(String domainName)
throws SQLException {
PreparedStatement ps;
if (domainName != null && domainName.length() > 0) {
final String principalPattern = domainName + ".%";
ps = con.prepareStatement(SQL_LIST_PRINCIPAL_DOMAIN);
ps.setString(1, principalPattern);
} else {
ps = con.prepareStatement(SQL_LIST_PRINCIPAL);
}
return ps;
}
@Override
public List<String> listPrincipals(String domainName) {
final String caller = "listPrincipals";
List<String> principals = new ArrayList<>();
try (PreparedStatement ps = preparePrincipalScanStatement(domainName)) {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
principals.add(rs.getString(ZMSConsts.DB_COLUMN_NAME));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return principals;
}
@Override
public boolean deletePrincipal(String principalName, boolean subDomains) {
final String caller = "deletePrincipal";
// first we're going to delete the principal from the principal table
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_PRINCIPAL)) {
ps.setString(1, principalName);
executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
// next delete any principal that was created in the principal's
// sub-domains. These will be in the format "principal.%"
if (subDomains) {
final String domainPattern = principalName + ".%";
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_SUB_PRINCIPALS)) {
ps.setString(1, domainPattern);
executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
return true;
}
@Override
public Role getRole(String domainName, String roleName) {
final String caller = "getRole";
try (PreparedStatement ps = con.prepareStatement(SQL_GET_ROLE)) {
ps.setString(1, domainName);
ps.setString(2, roleName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
return retrieveRole(rs, domainName, roleName);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return null;
}
@Override
public boolean insertRole(String domainName, Role role) {
int affectedRows;
final String caller = "insertRole";
String roleName = ZMSUtils.extractRoleName(domainName, role.getName());
if (roleName == null) {
throw requestError(caller, "domain name mismatch: " + domainName +
" insert role name: " + role.getName());
}
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_ROLE)) {
ps.setString(1, roleName);
ps.setInt(2, domainId);
ps.setString(3, processInsertValue(role.getTrust()));
ps.setBoolean(4, processInsertValue(role.getAuditEnabled(), false));
ps.setBoolean(5, processInsertValue(role.getSelfServe(), false));
ps.setInt(6, processInsertValue(role.getMemberExpiryDays()));
ps.setInt(7, processInsertValue(role.getTokenExpiryMins()));
ps.setInt(8, processInsertValue(role.getCertExpiryMins()));
ps.setString(9, processInsertValue(role.getSignAlgorithm()));
ps.setInt(10, processInsertValue(role.getServiceExpiryDays()));
ps.setInt(11, processInsertValue(role.getMemberReviewDays()));
ps.setInt(12, processInsertValue(role.getServiceReviewDays()));
ps.setBoolean(13, processInsertValue(role.getReviewEnabled(), false));
ps.setString(14, processInsertValue(role.getNotifyRoles()));
ps.setString(15, processInsertValue(role.getUserAuthorityFilter()));
ps.setString(16, processInsertValue(role.getUserAuthorityExpiration()));
ps.setInt(17, processInsertValue(role.getGroupExpiryDays()));
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updateRole(String domainName, Role role) {
int affectedRows;
final String caller = "updateRole";
String roleName = ZMSUtils.extractRoleName(domainName, role.getName());
if (roleName == null) {
throw requestError(caller, "domain name mismatch: " + domainName +
" update role name: " + role.getName());
}
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ResourceUtils.roleResourceName(domainName, roleName));
}
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_ROLE)) {
ps.setString(1, processInsertValue(role.getTrust()));
ps.setBoolean(2, processInsertValue(role.getAuditEnabled(), false));
ps.setBoolean(3, processInsertValue(role.getSelfServe(), false));
ps.setInt(4, processInsertValue(role.getMemberExpiryDays()));
ps.setInt(5, processInsertValue(role.getTokenExpiryMins()));
ps.setInt(6, processInsertValue(role.getCertExpiryMins()));
ps.setString(7, processInsertValue(role.getSignAlgorithm()));
ps.setInt(8, processInsertValue(role.getServiceExpiryDays()));
ps.setInt(9, processInsertValue(role.getMemberReviewDays()));
ps.setInt(10, processInsertValue(role.getServiceReviewDays()));
ps.setBoolean(11, processInsertValue(role.getReviewEnabled(), false));
ps.setString(12, processInsertValue(role.getNotifyRoles()));
ps.setString(13, processInsertValue(role.getUserAuthorityFilter()));
ps.setString(14, processInsertValue(role.getUserAuthorityExpiration()));
ps.setInt(15, processInsertValue(role.getGroupExpiryDays()));
ps.setInt(16, roleId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updateRoleModTimestamp(String domainName, String roleName) {
int affectedRows;
final String caller = "updateRoleModTimestamp";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ResourceUtils.roleResourceName(domainName, roleName));
}
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_ROLE_MOD_TIMESTAMP)) {
ps.setInt(1, roleId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updateRoleReviewTimestamp(String domainName, String roleName) {
int affectedRows;
final String caller = "updateRoleReviewTimestamp";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ResourceUtils.roleResourceName(domainName, roleName));
}
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_ROLE_REVIEW_TIMESTAMP)) {
ps.setInt(1, roleId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updateServiceIdentityModTimestamp(String domainName, String serviceName) {
int affectedRows;
final String caller = "updateServiceIdentityModTimestamp";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int serviceId = getServiceId(domainId, serviceName);
if (serviceId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_SERVICE, ResourceUtils.serviceResourceName(domainName, serviceName));
}
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_SERVICE_MOD_TIMESTAMP)) {
ps.setInt(1, serviceId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean deleteRole(String domainName, String roleName) {
final String caller = "deleteRole";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_ROLE)) {
ps.setInt(1, domainId);
ps.setString(2, roleName);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public List<String> listRoles(String domainName) {
final String caller = "listRoles";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
List<String> roles = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_ROLE)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
roles.add(rs.getString(1));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
Collections.sort(roles);
return roles;
}
@Override
public int countRoles(String domainName) {
final String caller = "countRoles";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int count = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_COUNT_ROLE)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
count = rs.getInt(1);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return count;
}
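// comparators used to return role and group members sorted by member
// name in a case-insensitive manner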
public static Comparator<RoleMember> RoleMemberComparator = (roleMember1, roleMember2) -> {
String roleMember1Name = roleMember1.getMemberName().toLowerCase();
String roleMember2Name = roleMember2.getMemberName().toLowerCase();
return roleMember1Name.compareTo(roleMember2Name);
};
public static Comparator<GroupMember> GroupMemberComparator = (groupMember1, groupMember2) -> {
String groupMember1Name = groupMember1.getMemberName().toLowerCase();
String groupMember2Name = groupMember2.getMemberName().toLowerCase();
return groupMember1Name.compareTo(groupMember2Name);
};
void getStdRoleMembers(int roleId, List<RoleMember> members, final String caller) {
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_ROLE_MEMBERS)) {
ps.setInt(1, roleId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
RoleMember roleMember = new RoleMember();
roleMember.setMemberName(rs.getString(1));
java.sql.Timestamp expiration = rs.getTimestamp(2);
if (expiration != null) {
roleMember.setExpiration(Timestamp.fromMillis(expiration.getTime()));
}
java.sql.Timestamp reviewReminder = rs.getTimestamp(3);
if (reviewReminder != null) {
roleMember.setReviewReminder(Timestamp.fromMillis(reviewReminder.getTime()));
}
roleMember.setActive(nullIfDefaultValue(rs.getBoolean(4), true));
roleMember.setAuditRef(rs.getString(5));
roleMember.setSystemDisabled(nullIfDefaultValue(rs.getInt(6), 0));
roleMember.setApproved(true);
members.add(roleMember);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
void getPendingRoleMembers(int roleId, List<RoleMember> members, final String caller) {
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_PENDING_ROLE_MEMBERS)) {
ps.setInt(1, roleId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
RoleMember roleMember = new RoleMember();
roleMember.setMemberName(rs.getString(1));
java.sql.Timestamp timestamp = rs.getTimestamp(2);
if (timestamp != null) {
roleMember.setExpiration(Timestamp.fromMillis(timestamp.getTime()));
}
timestamp = rs.getTimestamp(3);
if (timestamp != null) {
roleMember.setReviewReminder(Timestamp.fromMillis(timestamp.getTime()));
}
timestamp = rs.getTimestamp(4);
if (timestamp != null) {
roleMember.setRequestTime(Timestamp.fromMillis(timestamp.getTime()));
}
roleMember.setAuditRef(rs.getString(5));
roleMember.setActive(false);
roleMember.setApproved(false);
members.add(roleMember);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
@Override
public List<RoleMember> listRoleMembers(String domainName, String roleName, Boolean pending) {
final String caller = "listRoleMembers";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ResourceUtils.roleResourceName(domainName, roleName));
}
// first get our standard role members
List<RoleMember> members = new ArrayList<>();
getStdRoleMembers(roleId, members, caller);
// if requested, include pending members as well
if (pending == Boolean.TRUE) {
getPendingRoleMembers(roleId, members, caller);
}
members.sort(RoleMemberComparator);
return members;
}
@Override
public int countRoleMembers(String domainName, String roleName) {
final String caller = "countRoleMembers";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ResourceUtils.roleResourceName(domainName, roleName));
}
int count = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_COUNT_ROLE_MEMBERS)) {
ps.setInt(1, roleId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
count = rs.getInt(1);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return count;
}
@Override
public List<PrincipalRole> listPrincipalRoles(String domainName, String principalName) {
final String caller = "listPrincipalRoles";
if (domainName == null) {
return listPrincipalRolesForAllDomains(principalName, caller);
} else {
return listPrincipalRolesForOneDomain(domainName, principalName, caller);
}
}
List<PrincipalRole> listPrincipalRolesForAllDomains(String principalName, String caller) {
int principalId = getPrincipalId(principalName);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principalName);
}
List<PrincipalRole> roles = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_PRINCIPAL_ROLES)) {
ps.setInt(1, principalId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
PrincipalRole role = new PrincipalRole();
role.setDomainName(rs.getString(ZMSConsts.DB_COLUMN_NAME));
role.setRoleName(rs.getString(ZMSConsts.DB_COLUMN_ROLE_NAME));
roles.add(role);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return roles;
}
List<PrincipalRole> listPrincipalRolesForOneDomain(String domainName, String principalName, String caller) {
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int principalId = getPrincipalId(principalName);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principalName);
}
List<PrincipalRole> roles = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_PRINCIPAL_DOMAIN_ROLES)) {
ps.setInt(1, principalId);
ps.setInt(2, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
PrincipalRole role = new PrincipalRole();
role.setRoleName(rs.getString(ZMSConsts.DB_COLUMN_ROLE_NAME));
roles.add(role);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return roles;
}
@Override
public List<RoleAuditLog> listRoleAuditLogs(String domainName, String roleName) {
final String caller = "listRoleAuditLogs";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ResourceUtils.roleResourceName(domainName, roleName));
}
List<RoleAuditLog> logs = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_ROLE_AUDIT_LOGS)) {
ps.setInt(1, roleId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
RoleAuditLog log = new RoleAuditLog();
log.setAction(rs.getString(ZMSConsts.DB_COLUMN_ACTION));
log.setMember(rs.getString(ZMSConsts.DB_COLUMN_MEMBER));
log.setAdmin(rs.getString(ZMSConsts.DB_COLUMN_ADMIN));
log.setAuditRef(saveValue(rs.getString(ZMSConsts.DB_COLUMN_AUDIT_REF)));
log.setCreated(Timestamp.fromMillis(rs.getTimestamp(ZMSConsts.DB_COLUMN_CREATED).getTime()));
logs.add(log);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return logs;
}
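// split a principal of the form "<domain>.<name>" into its domain and
// local name components; returns false if either part is missing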
boolean parsePrincipal(String principal, StringBuilder domain, StringBuilder name) {
int idx = principal.lastIndexOf('.');
if (idx == -1 || idx == 0 || idx == principal.length() - 1) {
return false;
}
domain.append(principal, 0, idx);
name.append(principal.substring(idx + 1));
return true;
}
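// look up a single role member using the given query and, if found,
// populate the membership object with its expiration, review reminder,
// request principal and, when requested, the system disabled flag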
boolean getRoleMembership(final String query, int roleId, final String member, long expiration,
Membership membership, boolean disabledFlagCheck, final String caller) {
try (PreparedStatement ps = con.prepareStatement(query)) {
ps.setInt(1, roleId);
ps.setString(2, member);
if (expiration != 0) {
ps.setTimestamp(3, new java.sql.Timestamp(expiration));
}
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
membership.setIsMember(true);
java.sql.Timestamp expiry = rs.getTimestamp(ZMSConsts.DB_COLUMN_EXPIRATION);
if (expiry != null) {
membership.setExpiration(Timestamp.fromMillis(expiry.getTime()));
}
java.sql.Timestamp reviewReminder = rs.getTimestamp(ZMSConsts.DB_COLUMN_REVIEW_REMINDER);
if (reviewReminder != null) {
membership.setReviewReminder(Timestamp.fromMillis(reviewReminder.getTime()));
}
membership.setRequestPrincipal(rs.getString(ZMSConsts.DB_COLUMN_REQ_PRINCIPAL));
if (disabledFlagCheck) {
membership.setSystemDisabled(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_SYSTEM_DISABLED), 0));
}
return true;
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return false;
}
@Override
public Membership getRoleMember(String domainName, String roleName, String member,
long expiration, boolean pending) {
final String caller = "getRoleMember";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ResourceUtils.roleResourceName(domainName, roleName));
}
Membership membership = new Membership()
.setMemberName(member)
.setRoleName(ResourceUtils.roleResourceName(domainName, roleName))
.setIsMember(false);
// first we check if we have a standard member with the given details
// before checking for pending members, unless we're specifically asking
// for pending members only, in which case we skip the first check
if (!pending) {
String query = expiration == 0 ? SQL_GET_ROLE_MEMBER : SQL_GET_TEMP_ROLE_MEMBER;
if (getRoleMembership(query, roleId, member, expiration, membership, true, caller)) {
membership.setApproved(true);
}
}
if (!membership.getIsMember()) {
String query = expiration == 0 ? SQL_GET_PENDING_ROLE_MEMBER : SQL_GET_TEMP_PENDING_ROLE_MEMBER;
if (getRoleMembership(query, roleId, member, expiration, membership, false, caller)) {
membership.setApproved(false);
}
}
return membership;
}
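// insert a new principal record and return its generated id; duplicate
// inserts from concurrent threads are resolved by looking up the
// existing id instead of failing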
int insertPrincipal(String principal) {
int affectedRows;
final String caller = "insertPrincipal";
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_PRINCIPAL)) {
ps.setString(1, principal);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
// it's possible that two threads try to add the same principal
// into different roles, so we handle that case specially here - if
// we get back a duplicate entry exception we just look up the
// principal id and return that instead of throwing an exception
if (ex.getErrorCode() == MYSQL_ER_OPTION_DUPLICATE_ENTRY) {
return getPrincipalId(principal);
}
throw sqlError(ex, caller);
}
int principalId = 0;
if (affectedRows == 1) {
principalId = getLastInsertId();
}
return principalId;
}
int insertHost(String hostName) {
int affectedRows;
final String caller = "insertHost";
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_HOST)) {
ps.setString(1, hostName);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
int hostId = 0;
if (affectedRows == 1) {
hostId = getLastInsertId();
}
return hostId;
}
boolean roleMemberExists(int roleId, int principalId, boolean pending, final String caller) {
String statement = pending ? SQL_PENDING_ROLE_MEMBER_EXISTS : SQL_STD_ROLE_MEMBER_EXISTS;
try (PreparedStatement ps = con.prepareStatement(statement)) {
ps.setInt(1, roleId);
ps.setInt(2, principalId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
return true;
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return false;
}
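// add a member to the given role - depending on the approved flag the
// request is stored either as a pending member or as a standard member,
// creating the principal record first if it does not yet exist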
@Override
public boolean insertRoleMember(String domainName, String roleName, RoleMember roleMember,
String admin, String auditRef) {
final String caller = "insertRoleMember";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ResourceUtils.roleResourceName(domainName, roleName));
}
String principal = roleMember.getMemberName();
if (!validatePrincipalDomain(principal)) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, principal);
}
int principalId = getPrincipalId(principal);
if (principalId == 0) {
principalId = insertPrincipal(principal);
if (principalId == 0) {
throw internalServerError(caller, "Unable to insert principal: " + principal);
}
}
// need to check if entry already exists
boolean pendingRequest = (roleMember.getApproved() == Boolean.FALSE);
boolean roleMemberExists = roleMemberExists(roleId, principalId, pendingRequest, caller);
// process the request based on the type of the request
// either pending request or standard insert
boolean result;
if (pendingRequest) {
result = insertPendingRoleMember(roleId, principalId, roleMember, admin,
auditRef, roleMemberExists, caller);
} else {
result = insertStandardRoleMember(roleId, principalId, roleMember, admin,
principal, auditRef, roleMemberExists, false, caller);
}
return result;
}
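// store a pending role member request - update the existing pending
// entry if one is already present, otherwise insert a new one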
boolean insertPendingRoleMember(int roleId, int principalId, RoleMember roleMember,
final String admin, final String auditRef, boolean roleMemberExists, final String caller) {
java.sql.Timestamp expiration = null;
if (roleMember.getExpiration() != null) {
expiration = new java.sql.Timestamp(roleMember.getExpiration().toDate().getTime());
}
java.sql.Timestamp reviewReminder = null;
if (roleMember.getReviewReminder() != null) {
reviewReminder = new java.sql.Timestamp(roleMember.getReviewReminder().toDate().getTime());
}
int affectedRows;
if (roleMemberExists) {
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_PENDING_ROLE_MEMBER)) {
ps.setTimestamp(1, expiration);
ps.setTimestamp(2, reviewReminder);
ps.setString(3, processInsertValue(auditRef));
ps.setString(4, processInsertValue(admin));
ps.setInt(5, roleId);
ps.setInt(6, principalId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
} else {
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_PENDING_ROLE_MEMBER)) {
ps.setInt(1, roleId);
ps.setInt(2, principalId);
ps.setTimestamp(3, expiration);
ps.setTimestamp(4, reviewReminder);
ps.setString(5, processInsertValue(auditRef));
ps.setString(6, processInsertValue(admin));
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
return (affectedRows > 0);
}
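// store a standard (approved) role member - update the entry if the
// member already exists, otherwise insert a new one, and record the
// matching audit log entry (ADD, UPDATE or APPROVE)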
boolean insertStandardRoleMember(int roleId, int principalId, RoleMember roleMember,
final String admin, final String principal, final String auditRef,
boolean roleMemberExists, boolean approveRequest, final String caller) {
java.sql.Timestamp expiration = null;
if (roleMember.getExpiration() != null) {
expiration = new java.sql.Timestamp(roleMember.getExpiration().toDate().getTime());
}
java.sql.Timestamp reviewReminder = null;
if (roleMember.getReviewReminder() != null) {
reviewReminder = new java.sql.Timestamp(roleMember.getReviewReminder().toDate().getTime());
}
boolean result;
String auditOperation;
if (roleMemberExists) {
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_ROLE_MEMBER)) {
ps.setTimestamp(1, expiration);
ps.setTimestamp(2, reviewReminder);
ps.setBoolean(3, processInsertValue(roleMember.getActive(), true));
ps.setString(4, processInsertValue(auditRef));
ps.setString(5, processInsertValue(admin));
ps.setInt(6, roleId);
ps.setInt(7, principalId);
executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
auditOperation = approveRequest ? "APPROVE" : "UPDATE";
result = true;
} else {
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_ROLE_MEMBER)) {
ps.setInt(1, roleId);
ps.setInt(2, principalId);
ps.setTimestamp(3, expiration);
ps.setTimestamp(4, reviewReminder);
ps.setBoolean(5, processInsertValue(roleMember.getActive(), true));
ps.setString(6, processInsertValue(auditRef));
ps.setString(7, processInsertValue(admin));
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
auditOperation = approveRequest ? "APPROVE" : "ADD";
result = (affectedRows > 0);
}
// add audit log entry for this change if the operation was successful
// and return the result of the audit log insert operation
if (result) {
result = insertRoleAuditLog(roleId, admin, principal, auditOperation, auditRef);
}
return result;
}
@Override
public boolean updateRoleMemberDisabledState(String domainName, String roleName, String principal,
String admin, int disabledState, String auditRef) {
final String caller = "updateRoleMemberDisabledState";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ResourceUtils.roleResourceName(domainName, roleName));
}
int principalId = getPrincipalId(principal);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_ROLE_MEMBER_DISABLED_STATE)) {
ps.setInt(1, disabledState);
ps.setString(2, processInsertValue(auditRef));
ps.setString(3, processInsertValue(admin));
ps.setInt(4, roleId);
ps.setInt(5, principalId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
boolean result = (affectedRows > 0);
// add audit log entry for this change if the disable was successful
// and return the result of the audit log insert operation
if (result) {
final String operation = disabledState == 0 ? "ENABLE" : "DISABLE";
result = insertRoleAuditLog(roleId, admin, principal, operation, auditRef);
}
return result;
}
@Override
public boolean deleteRoleMember(String domainName, String roleName, String principal,
String admin, String auditRef) {
final String caller = "deleteRoleMember";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ResourceUtils.roleResourceName(domainName, roleName));
}
int principalId = getPrincipalId(principal);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_ROLE_MEMBER)) {
ps.setInt(1, roleId);
ps.setInt(2, principalId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
boolean result = (affectedRows > 0);
// add audit log entry for this change if the delete was successful
// and return the result of the audit log insert operation
if (result) {
result = insertRoleAuditLog(roleId, admin, principal, "DELETE", auditRef);
}
return result;
}
boolean insertRoleAuditLog(int roleId, String admin, String member,
String action, String auditRef) {
int affectedRows;
final String caller = "insertRoleAuditEntry";
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_ROLE_AUDIT_LOG)) {
ps.setInt(1, roleId);
ps.setString(2, processInsertValue(admin));
ps.setString(3, member);
ps.setString(4, action);
ps.setString(5, processInsertValue(auditRef));
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
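// fetch a single assertion by id, verifying that it belongs to the
// given domain and policy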
@Override
public Assertion getAssertion(String domainName, String policyName, Long assertionId) {
final String caller = "getAssertion";
Assertion assertion = null;
try (PreparedStatement ps = con.prepareStatement(SQL_GET_ASSERTION)) {
ps.setInt(1, assertionId.intValue());
ps.setString(2, domainName);
ps.setString(3, policyName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
assertion = new Assertion();
assertion.setRole(ResourceUtils.roleResourceName(domainName, rs.getString(ZMSConsts.DB_COLUMN_ROLE)));
assertion.setResource(rs.getString(ZMSConsts.DB_COLUMN_RESOURCE));
assertion.setAction(rs.getString(ZMSConsts.DB_COLUMN_ACTION));
assertion.setEffect(AssertionEffect.valueOf(rs.getString(ZMSConsts.DB_COLUMN_EFFECT)));
assertion.setId((long) rs.getInt(ZMSConsts.DB_COLUMN_ASSERT_ID));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return assertion;
}
@Override
public Policy getPolicy(String domainName, String policyName) {
final String caller = "getPolicy";
try (PreparedStatement ps = con.prepareStatement(SQL_GET_POLICY)) {
ps.setString(1, domainName);
ps.setString(2, policyName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
return new Policy().setName(ResourceUtils.policyResourceName(domainName, policyName))
.setModified(Timestamp.fromMillis(rs.getTimestamp(ZMSConsts.DB_COLUMN_MODIFIED).getTime()));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return null;
}
@Override
public boolean insertPolicy(String domainName, Policy policy) {
int affectedRows;
final String caller = "insertPolicy";
String policyName = ZMSUtils.extractPolicyName(domainName, policy.getName());
if (policyName == null) {
throw requestError(caller, "domain name mismatch: " + domainName +
" insert policy name: " + policy.getName());
}
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_POLICY)) {
ps.setString(1, policyName);
ps.setInt(2, domainId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updatePolicy(String domainName, Policy policy) {
int affectedRows;
final String caller = "updatePolicy";
String policyName = ZMSUtils.extractPolicyName(domainName, policy.getName());
if (policyName == null) {
throw requestError(caller, "domain name mismatch: " + domainName +
" update policy name: " + policy.getName());
}
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int policyId = getPolicyId(domainId, policyName);
if (policyId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_POLICY, ResourceUtils.policyResourceName(domainName, policyName));
}
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_POLICY)) {
ps.setString(1, policyName);
ps.setInt(2, policyId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updatePolicyModTimestamp(String domainName, String policyName) {
int affectedRows;
final String caller = "updatePolicyModTimestamp";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int policyId = getPolicyId(domainId, policyName);
if (policyId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_POLICY, ResourceUtils.policyResourceName(domainName, policyName));
}
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_POLICY_MOD_TIMESTAMP)) {
ps.setInt(1, policyId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean deletePolicy(String domainName, String policyName) {
final String caller = "deletePolicy";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_POLICY)) {
ps.setInt(1, domainId);
ps.setString(2, policyName);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public List<String> listPolicies(String domainName, String assertionRoleName) {
final String caller = "listPolicies";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
List<String> policies = new ArrayList<>();
final String sqlStatement = (assertionRoleName == null) ? SQL_LIST_POLICY : SQL_LIST_POLICY_REFERENCING_ROLE;
try (PreparedStatement ps = con.prepareStatement(sqlStatement)) {
ps.setInt(1, domainId);
if (assertionRoleName != null) {
ps.setString(2, assertionRoleName);
}
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
policies.add(rs.getString(1));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
Collections.sort(policies);
return policies;
}
@Override
public int countPolicies(String domainName) {
final String caller = "countPolicies";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int count = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_COUNT_POLICY)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
count = rs.getInt(1);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return count;
}
@Override
public boolean insertAssertion(String domainName, String policyName, Assertion assertion) {
final String caller = "insertAssertion";
String roleName = ZMSUtils.extractRoleName(domainName, assertion.getRole());
if (roleName == null) {
throw requestError(caller, "domain name mismatch: " + domainName +
" assertion role name: " + assertion.getRole());
}
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int policyId = getPolicyId(domainId, policyName);
if (policyId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_POLICY, ResourceUtils.policyResourceName(domainName, policyName));
}
// special handling for assertions since we don't want to have duplicates
// and we don't want to set up a unique key across all values in the row
try (PreparedStatement ps = con.prepareStatement(SQL_CHECK_ASSERTION)) {
ps.setInt(1, policyId);
ps.setString(2, roleName);
ps.setString(3, assertion.getResource());
ps.setString(4, assertion.getAction());
ps.setString(5, processInsertValue(assertion.getEffect()));
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
return true;
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
// at this point we know we don't have another assertion with the same
// values so we'll go ahead and add one
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_ASSERTION)) {
ps.setInt(1, policyId);
ps.setString(2, roleName);
ps.setString(3, assertion.getResource());
ps.setString(4, assertion.getAction());
ps.setString(5, processInsertValue(assertion.getEffect()));
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
boolean result = (affectedRows > 0);
if (result) {
assertion.setId((long) getLastInsertId());
}
return result;
}
@Override
public boolean deleteAssertion(String domainName, String policyName, Long assertionId) {
final String caller = "deleteAssertion";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int policyId = getPolicyId(domainId, policyName);
if (policyId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_POLICY, ResourceUtils.policyResourceName(domainName, policyName));
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_ASSERTION)) {
ps.setInt(1, policyId);
ps.setInt(2, assertionId.intValue());
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public List<Assertion> listAssertions(String domainName, String policyName) {
final String caller = "listAssertions";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int policyId = getPolicyId(domainId, policyName);
if (policyId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_POLICY, ResourceUtils.policyResourceName(domainName, policyName));
}
List<Assertion> assertions = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_ASSERTION)) {
ps.setInt(1, policyId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
Assertion assertion = new Assertion();
assertion.setRole(ResourceUtils.roleResourceName(domainName, rs.getString(ZMSConsts.DB_COLUMN_ROLE)));
assertion.setResource(rs.getString(ZMSConsts.DB_COLUMN_RESOURCE));
assertion.setAction(rs.getString(ZMSConsts.DB_COLUMN_ACTION));
assertion.setEffect(AssertionEffect.valueOf(rs.getString(ZMSConsts.DB_COLUMN_EFFECT)));
assertion.setId((long) rs.getInt(ZMSConsts.DB_COLUMN_ASSERT_ID));
assertions.add(assertion);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return assertions;
}
@Override
public int countAssertions(String domainName, String policyName) {
final String caller = "countAssertions";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int policyId = getPolicyId(domainId, policyName);
if (policyId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_POLICY, ResourceUtils.policyResourceName(domainName, policyName));
}
int count = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_COUNT_ASSERTION)) {
ps.setInt(1, policyId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
count = rs.getInt(1);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return count;
}
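// convert empty values read from the database back into nulls for the
// returned API objects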
String saveValue(String value) {
return (value.isEmpty()) ? null : value;
}
UUID saveUuidValue(String value) {
return (value.isEmpty()) ? null : UUID.fromString(value);
}
@Override
public ServiceIdentity getServiceIdentity(String domainName, String serviceName) {
final String caller = "getServiceIdentity";
try (PreparedStatement ps = con.prepareStatement(SQL_GET_SERVICE)) {
ps.setString(1, domainName);
ps.setString(2, serviceName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
return new ServiceIdentity()
.setName(ResourceUtils.serviceResourceName(domainName, serviceName))
.setDescription(saveValue(rs.getString(ZMSConsts.DB_COLUMN_DESCRIPTION)))
.setModified(Timestamp.fromMillis(rs.getTimestamp(ZMSConsts.DB_COLUMN_MODIFIED).getTime()))
.setProviderEndpoint(saveValue(rs.getString(ZMSConsts.DB_COLUMN_PROVIDER_ENDPOINT)))
.setExecutable(saveValue(rs.getString(ZMSConsts.DB_COLUMN_EXECUTABLE)))
.setUser(saveValue(rs.getString(ZMSConsts.DB_COLUMN_SVC_USER)))
.setGroup(saveValue(rs.getString(ZMSConsts.DB_COLUMN_SVC_GROUP)));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return null;
}
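// convert null values supplied by the caller into the defaults we store
// in the database (0 for integers, empty string for strings, ALLOW for
// assertion effects)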
int processInsertValue(Integer value) {
return (value == null) ? 0 : value;
}
String processInsertValue(String value) {
return (value == null) ? "" : value.trim();
}
boolean processInsertValue(Boolean value, boolean defaultValue) {
return (value == null) ? defaultValue : value;
}
String processInsertValue(AssertionEffect value) {
return (value == null) ? ZMSConsts.ASSERTION_EFFECT_ALLOW : value.toString();
}
String processInsertUuidValue(UUID value) {
return (value == null) ? "" : value.toString();
}
@Override
public boolean insertServiceIdentity(String domainName, ServiceIdentity service) {
int affectedRows;
final String caller = "insertServiceIdentity";
String serviceName = ZMSUtils.extractServiceName(domainName, service.getName());
if (serviceName == null) {
throw requestError(caller, "domain name mismatch: " + domainName +
" insert service name: " + service.getName());
}
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_SERVICE)) {
ps.setString(1, serviceName);
ps.setString(2, processInsertValue(service.getDescription()));
ps.setString(3, processInsertValue(service.getProviderEndpoint()));
ps.setString(4, processInsertValue(service.getExecutable()));
ps.setString(5, processInsertValue(service.getUser()));
ps.setString(6, processInsertValue(service.getGroup()));
ps.setInt(7, domainId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updateServiceIdentity(String domainName, ServiceIdentity service) {
int affectedRows;
final String caller = "updateServiceIdentity";
String serviceName = ZMSUtils.extractServiceName(domainName, service.getName());
if (serviceName == null) {
throw requestError(caller, "domain name mismatch: " + domainName +
" update service name: " + service.getName());
}
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int serviceId = getServiceId(domainId, serviceName);
if (serviceId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_SERVICE, ResourceUtils.serviceResourceName(domainName, serviceName));
}
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_SERVICE)) {
ps.setString(1, processInsertValue(service.getDescription()));
ps.setString(2, processInsertValue(service.getProviderEndpoint()));
ps.setString(3, processInsertValue(service.getExecutable()));
ps.setString(4, processInsertValue(service.getUser()));
ps.setString(5, processInsertValue(service.getGroup()));
ps.setInt(6, serviceId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean deleteServiceIdentity(String domainName, String serviceName) {
final String caller = "deleteServiceIdentity";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_SERVICE)) {
ps.setInt(1, domainId);
ps.setString(2, serviceName);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public List<String> listServiceIdentities(String domainName) {
final String caller = "listServiceIdentities";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
List<String> services = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_SERVICE)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
services.add(rs.getString(1));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
Collections.sort(services);
return services;
}
@Override
public int countServiceIdentities(String domainName) {
final String caller = "countServiceIdentities";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int count = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_COUNT_SERVICE)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
count = rs.getInt(1);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return count;
}
@Override
public List<PublicKeyEntry> listPublicKeys(String domainName, String serviceName) {
final String caller = "listPublicKeys";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int serviceId = getServiceId(domainId, serviceName);
if (serviceId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_SERVICE, ResourceUtils.serviceResourceName(domainName, serviceName));
}
List<PublicKeyEntry> publicKeys = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_PUBLIC_KEY)) {
ps.setInt(1, serviceId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
PublicKeyEntry publicKey = new PublicKeyEntry()
.setId(rs.getString(ZMSConsts.DB_COLUMN_KEY_ID))
.setKey(rs.getString(ZMSConsts.DB_COLUMN_KEY_VALUE));
publicKeys.add(publicKey);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return publicKeys;
}
@Override
public int countPublicKeys(String domainName, String serviceName) {
final String caller = "countPublicKeys";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int serviceId = getServiceId(domainId, serviceName);
if (serviceId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_SERVICE, ResourceUtils.serviceResourceName(domainName, serviceName));
}
int count = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_COUNT_PUBLIC_KEY)) {
ps.setInt(1, serviceId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
count = rs.getInt(1);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return count;
}
@Override
public PublicKeyEntry getPublicKeyEntry(String domainName, String serviceName,
String keyId, boolean domainStateCheck) {
final String caller = "getPublicKeyEntry";
int domainId = getDomainId(domainName, domainStateCheck);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int serviceId = getServiceId(domainId, serviceName);
if (serviceId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_SERVICE, ResourceUtils.serviceResourceName(domainName, serviceName));
}
try (PreparedStatement ps = con.prepareStatement(SQL_GET_PUBLIC_KEY)) {
ps.setInt(1, serviceId);
ps.setString(2, keyId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
return new PublicKeyEntry().setId(keyId)
.setKey(rs.getString(ZMSConsts.DB_COLUMN_KEY_VALUE));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return null;
}
@Override
public boolean insertPublicKeyEntry(String domainName, String serviceName, PublicKeyEntry publicKey) {
final String caller = "insertPublicKeyEntry";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int serviceId = getServiceId(domainId, serviceName);
if (serviceId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_SERVICE, ResourceUtils.serviceResourceName(domainName, serviceName));
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_PUBLIC_KEY)) {
ps.setInt(1, serviceId);
ps.setString(2, publicKey.getId());
ps.setString(3, publicKey.getKey());
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updatePublicKeyEntry(String domainName, String serviceName, PublicKeyEntry publicKey) {
final String caller = "updatePublicKeyEntry";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int serviceId = getServiceId(domainId, serviceName);
if (serviceId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_SERVICE, ResourceUtils.serviceResourceName(domainName, serviceName));
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_PUBLIC_KEY)) {
ps.setString(1, publicKey.getKey());
ps.setInt(2, serviceId);
ps.setString(3, publicKey.getId());
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean deletePublicKeyEntry(String domainName, String serviceName, String keyId) {
final String caller = "deletePublicKeyEntry";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int serviceId = getServiceId(domainId, serviceName);
if (serviceId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_SERVICE, ResourceUtils.serviceResourceName(domainName, serviceName));
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_PUBLIC_KEY)) {
ps.setInt(1, serviceId);
ps.setString(2, keyId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public List<String> listServiceHosts(String domainName, String serviceName) {
final String caller = "listServiceHosts";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int serviceId = getServiceId(domainId, serviceName);
if (serviceId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_SERVICE, ResourceUtils.serviceResourceName(domainName, serviceName));
}
List<String> hosts = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_SERVICE_HOST)) {
ps.setInt(1, serviceId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
hosts.add(rs.getString(ZMSConsts.DB_COLUMN_NAME));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return hosts;
}
@Override
public boolean insertServiceHost(String domainName, String serviceName, String hostName) {
final String caller = "insertServiceHost";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int serviceId = getServiceId(domainId, serviceName);
if (serviceId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_SERVICE, ResourceUtils.serviceResourceName(domainName, serviceName));
}
int hostId = getHostId(hostName);
if (hostId == 0) {
hostId = insertHost(hostName);
if (hostId == 0) {
throw internalServerError(caller, "Unable to insert host: " + hostName);
}
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_SERVICE_HOST)) {
ps.setInt(1, serviceId);
ps.setInt(2, hostId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean deleteServiceHost(String domainName, String serviceName, String hostName) {
final String caller = "deleteServiceHost";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int serviceId = getServiceId(domainId, serviceName);
if (serviceId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_SERVICE, ResourceUtils.serviceResourceName(domainName, serviceName));
}
int hostId = getHostId(hostName);
if (hostId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_HOST, hostName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_SERVICE_HOST)) {
ps.setInt(1, serviceId);
ps.setInt(2, hostId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean insertEntity(String domainName, Entity entity) {
final String caller = "insertEntity";
String entityName = ZMSUtils.extractEntityName(domainName, entity.getName());
if (entityName == null) {
throw requestError(caller, "domain name mismatch: " + domainName +
" insert entity name: " + entity.getName());
}
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_ENTITY)) {
ps.setInt(1, domainId);
ps.setString(2, entityName);
ps.setString(3, JSON.string(entity.getValue()));
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updateEntity(String domainName, Entity entity) {
final String caller = "updateEntity";
String entityName = ZMSUtils.extractEntityName(domainName, entity.getName());
if (entityName == null) {
throw requestError(caller, "domain name mismatch: " + domainName +
" insert entity name: " + entity.getName());
}
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_ENTITY)) {
ps.setString(1, JSON.string(entity.getValue()));
ps.setInt(2, domainId);
ps.setString(3, entityName);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean deleteEntity(String domainName, String entityName) {
final String caller = "deleteEntity";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_ENTITY)) {
ps.setInt(1, domainId);
ps.setString(2, entityName);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public Entity getEntity(String domainName, String entityName) {
final String caller = "getEntity";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
try (PreparedStatement ps = con.prepareStatement(SQL_GET_ENTITY)) {
ps.setInt(1, domainId);
ps.setString(2, entityName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
return new Entity().setName(ResourceUtils.entityResourceName(domainName, entityName))
.setValue(JSON.fromString(rs.getString(ZMSConsts.DB_COLUMN_VALUE), Struct.class));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return null;
}
@Override
public List<String> listEntities(String domainName) {
final String caller = "listEntities";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
List<String> entities = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_ENTITY)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
entities.add(rs.getString(1));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
Collections.sort(entities);
return entities;
}
@Override
public int countEntities(String domainName) {
final String caller = "countEntities";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int count = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_COUNT_ENTITY)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
count = rs.getInt(1);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return count;
}
Role retrieveRole(ResultSet rs, final String domainName, final String roleName) throws SQLException {
Role role = new Role().setName(ResourceUtils.roleResourceName(domainName, roleName))
.setModified(Timestamp.fromMillis(rs.getTimestamp(ZMSConsts.DB_COLUMN_MODIFIED).getTime()))
.setTrust(saveValue(rs.getString(ZMSConsts.DB_COLUMN_TRUST)))
.setAuditEnabled(nullIfDefaultValue(rs.getBoolean(ZMSConsts.DB_COLUMN_AUDIT_ENABLED), false))
.setSelfServe(nullIfDefaultValue(rs.getBoolean(ZMSConsts.DB_COLUMN_SELF_SERVE), false))
.setMemberExpiryDays(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_MEMBER_EXPIRY_DAYS), 0))
.setTokenExpiryMins(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_TOKEN_EXPIRY_MINS), 0))
.setCertExpiryMins(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_CERT_EXPIRY_MINS), 0))
.setSignAlgorithm(saveValue(rs.getString(ZMSConsts.DB_COLUMN_SIGN_ALGORITHM)))
.setServiceExpiryDays(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_SERVICE_EXPIRY_DAYS), 0))
.setGroupExpiryDays(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_GROUP_EXPIRY_DAYS), 0))
.setReviewEnabled(nullIfDefaultValue(rs.getBoolean(ZMSConsts.DB_COLUMN_REVIEW_ENABLED), false))
.setMemberReviewDays(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_MEMBER_REVIEW_DAYS), 0))
.setServiceReviewDays(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_SERVICE_REVIEW_DAYS), 0))
.setNotifyRoles(saveValue(rs.getString(ZMSConsts.DB_COLUMN_NOTIFY_ROLES)))
.setUserAuthorityFilter(saveValue(rs.getString(ZMSConsts.DB_COLUMN_USER_AUTHORITY_FILTER)))
.setUserAuthorityExpiration(saveValue(rs.getString(ZMSConsts.DB_COLUMN_USER_AUTHORITY_EXPIRATION)));
java.sql.Timestamp lastReviewedTime = rs.getTimestamp(ZMSConsts.DB_COLUMN_LAST_REVIEWED_TIME);
if (lastReviewedTime != null) {
role.setLastReviewedDate(Timestamp.fromMillis(lastReviewedTime.getTime()));
}
return role;
}
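// load all roles for the domain in two passes: first the role objects
// themselves, then their members, and finally attach any role tags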
void getAthenzDomainRoles(String domainName, int domainId, AthenzDomain athenzDomain) {
final String caller = "getAthenzDomain";
Map<String, Role> roleMap = new HashMap<>();
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN_ROLES)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
final String roleName = rs.getString(ZMSConsts.DB_COLUMN_NAME);
Role role = retrieveRole(rs, domainName, roleName);
roleMap.put(roleName, role);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN_ROLE_MEMBERS)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
String roleName = rs.getString(1);
Role role = roleMap.get(roleName);
if (role == null) {
continue;
}
List<RoleMember> members = role.getRoleMembers();
if (members == null) {
members = new ArrayList<>();
role.setRoleMembers(members);
}
RoleMember roleMember = new RoleMember();
roleMember.setMemberName(rs.getString(2));
java.sql.Timestamp expiration = rs.getTimestamp(3);
if (expiration != null) {
roleMember.setExpiration(Timestamp.fromMillis(expiration.getTime()));
}
java.sql.Timestamp reviewReminder = rs.getTimestamp(4);
if (reviewReminder != null) {
roleMember.setReviewReminder(Timestamp.fromMillis(reviewReminder.getTime()));
}
roleMember.setSystemDisabled(nullIfDefaultValue(rs.getInt(5), 0));
members.add(roleMember);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
// add role tags
addTagsToRoles(roleMap, athenzDomain.getName());
athenzDomain.getRoles().addAll(roleMap.values());
}
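// load all groups for the domain followed by their members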
void getAthenzDomainGroups(String domainName, int domainId, AthenzDomain athenzDomain) {
final String caller = "getAthenzDomain";
Map<String, Group> groupMap = new HashMap<>();
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN_GROUPS)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
final String groupName = rs.getString(ZMSConsts.DB_COLUMN_NAME);
Group group = retrieveGroup(rs, domainName, groupName);
groupMap.put(groupName, group);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN_GROUP_MEMBERS)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
final String groupName = rs.getString(1);
Group group = groupMap.get(groupName);
if (group == null) {
continue;
}
List<GroupMember> members = group.getGroupMembers();
if (members == null) {
members = new ArrayList<>();
group.setGroupMembers(members);
}
GroupMember groupMember = new GroupMember();
groupMember.setMemberName(rs.getString(2));
groupMember.setGroupName(group.getName());
java.sql.Timestamp expiration = rs.getTimestamp(3);
if (expiration != null) {
groupMember.setExpiration(Timestamp.fromMillis(expiration.getTime()));
}
groupMember.setSystemDisabled(nullIfDefaultValue(rs.getInt(4), 0));
members.add(groupMember);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
athenzDomain.getGroups().addAll(groupMap.values());
}
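// load all policies for the domain followed by their assertions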
void getAthenzDomainPolicies(String domainName, int domainId, AthenzDomain athenzDomain) {
final String caller = "getAthenzDomain";
Map<String, Policy> policyMap = new HashMap<>();
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN_POLICIES)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
String policyName = rs.getString(ZMSConsts.DB_COLUMN_NAME);
Policy policy = new Policy().setName(ResourceUtils.policyResourceName(domainName, policyName))
.setModified(Timestamp.fromMillis(rs.getTimestamp(ZMSConsts.DB_COLUMN_MODIFIED).getTime()));
policyMap.put(policyName, policy);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN_POLICY_ASSERTIONS)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
String policyName = rs.getString(1);
Policy policy = policyMap.get(policyName);
if (policy == null) {
continue;
}
List<Assertion> assertions = policy.getAssertions();
if (assertions == null) {
assertions = new ArrayList<>();
policy.setAssertions(assertions);
}
Assertion assertion = new Assertion();
assertion.setRole(ResourceUtils.roleResourceName(domainName, rs.getString(ZMSConsts.DB_COLUMN_ROLE)));
assertion.setResource(rs.getString(ZMSConsts.DB_COLUMN_RESOURCE));
assertion.setAction(rs.getString(ZMSConsts.DB_COLUMN_ACTION));
assertion.setEffect(AssertionEffect.valueOf(rs.getString(ZMSConsts.DB_COLUMN_EFFECT)));
assertion.setId((long) rs.getInt(ZMSConsts.DB_COLUMN_ASSERT_ID));
assertions.add(assertion);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
athenzDomain.getPolicies().addAll(policyMap.values());
}
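// load all services for the domain along with their hosts and public keys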
void getAthenzDomainServices(String domainName, int domainId, AthenzDomain athenzDomain) {
final String caller = "getAthenzDomain";
Map<String, ServiceIdentity> serviceMap = new HashMap<>();
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN_SERVICES)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
String serviceName = rs.getString(ZMSConsts.DB_COLUMN_NAME);
ServiceIdentity service = new ServiceIdentity()
.setName(ResourceUtils.serviceResourceName(domainName, serviceName))
.setProviderEndpoint(saveValue(rs.getString(ZMSConsts.DB_COLUMN_PROVIDER_ENDPOINT)))
.setExecutable(saveValue(rs.getString(ZMSConsts.DB_COLUMN_EXECUTABLE)))
.setUser(saveValue(rs.getString(ZMSConsts.DB_COLUMN_SVC_USER)))
.setGroup(saveValue(rs.getString(ZMSConsts.DB_COLUMN_SVC_GROUP)))
.setModified(Timestamp.fromMillis(rs.getTimestamp(ZMSConsts.DB_COLUMN_MODIFIED).getTime()));
List<PublicKeyEntry> publicKeys = new ArrayList<>();
service.setPublicKeys(publicKeys);
serviceMap.put(serviceName, service);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN_SERVICES_HOSTS)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
String serviceName = rs.getString(1);
ServiceIdentity service = serviceMap.get(serviceName);
if (service == null) {
continue;
}
List<String> hosts = service.getHosts();
if (hosts == null) {
hosts = new ArrayList<>();
service.setHosts(hosts);
}
hosts.add(rs.getString(2));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN_SERVICES_PUBLIC_KEYS)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
String serviceName = rs.getString(1);
ServiceIdentity service = serviceMap.get(serviceName);
if (service == null) {
continue;
}
PublicKeyEntry publicKey = new PublicKeyEntry()
.setId(rs.getString(ZMSConsts.DB_COLUMN_KEY_ID))
.setKey(rs.getString(ZMSConsts.DB_COLUMN_KEY_VALUE));
service.getPublicKeys().add(publicKey);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
athenzDomain.getServices().addAll(serviceMap.values());
}
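// load all entities registered in the domain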
void getAthenzDomainEntities(String domainName, int domainId, AthenzDomain athenzDomain) {
final String caller = "getAthenzDomain";
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN_ENTITIES)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
athenzDomain.getEntities().add(new Entity()
.setName(ResourceUtils.entityResourceName(domainName, rs.getString(ZMSConsts.DB_COLUMN_NAME)))
.setValue(JSON.fromString(rs.getString(ZMSConsts.DB_COLUMN_VALUE), Struct.class)));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
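// assemble the full AthenzDomain object: domain settings plus all of its
// roles, groups, policies, services and entities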
@Override
public AthenzDomain getAthenzDomain(String domainName) {
final String caller = "getAthenzDomain";
int domainId = 0;
AthenzDomain athenzDomain = new AthenzDomain(domainName);
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN)) {
ps.setString(1, domainName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
athenzDomain.setDomain(saveDomainSettings(domainName, rs, true));
domainId = rs.getInt(ZMSConsts.DB_COLUMN_DOMAIN_ID);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
getAthenzDomainRoles(domainName, domainId, athenzDomain);
getAthenzDomainGroups(domainName, domainId, athenzDomain);
getAthenzDomainPolicies(domainName, domainId, athenzDomain);
getAthenzDomainServices(domainName, domainId, athenzDomain);
getAthenzDomainEntities(domainName, domainId, athenzDomain);
return athenzDomain;
}
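// return meta data for all domains modified since the given timestamp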
@Override
public DomainMetaList listModifiedDomains(long modifiedSince) {
final String caller = "listModifiedDomains";
DomainMetaList domainModifiedList = new DomainMetaList();
List<Domain> nameMods = new ArrayList<>();
try (PreparedStatement ps = prepareDomainScanStatement(null, modifiedSince)) {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
final String domainName = rs.getString(ZMSConsts.DB_COLUMN_NAME);
nameMods.add(saveDomainSettings(domainName, rs, false));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
domainModifiedList.setDomains(nameMods);
return domainModifiedList;
}
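// verify that the principal's domain component is registered in athenz;
// group principals are split on the group separator, all other principals
// on the last dot in the name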
boolean validatePrincipalDomain(String principal) {
// special case for all principals
if (ALL_PRINCIPALS.equals(principal)) {
return true;
}
int idx = principal.indexOf(AuthorityConsts.GROUP_SEP);
if (idx == -1) {
idx = principal.lastIndexOf('.');
if (idx == -1 || idx == 0 || idx == principal.length() - 1) {
return false;
}
}
return getDomainId(principal.substring(0, idx)) != 0;
}
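// roles are keyed in our local maps by "<domain-id>:<role-name>"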
String roleIndex(String domainId, String roleName) {
return domainId + ':' + roleName;
}
PreparedStatement prepareRoleAssertionsStatement(String action)
throws SQLException {
PreparedStatement ps;
if (action != null && action.length() > 0) {
ps = con.prepareStatement(SQL_LIST_ROLE_ASSERTIONS + SQL_LIST_ROLE_ASSERTION_QUERY_ACTION);
ps.setString(1, action);
} else {
ps = con.prepareStatement(SQL_LIST_ROLE_ASSERTIONS + SQL_LIST_ROLE_ASSERTION_NO_ACTION);
}
return ps;
}
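// build a map of role index -> assertions, optionally filtered by action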
Map<String, List<Assertion>> getRoleAssertions(String action, String caller) {
Map<String, List<Assertion>> roleAssertions = new HashMap<>();
try (PreparedStatement ps = prepareRoleAssertionsStatement(action)) {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
Assertion assertion = new Assertion();
String domainName = rs.getString(ZMSConsts.DB_COLUMN_NAME);
String roleName = rs.getString(ZMSConsts.DB_COLUMN_ROLE);
assertion.setRole(ResourceUtils.roleResourceName(domainName, roleName));
assertion.setResource(rs.getString(ZMSConsts.DB_COLUMN_RESOURCE));
assertion.setAction(rs.getString(ZMSConsts.DB_COLUMN_ACTION));
assertion.setEffect(AssertionEffect.valueOf(rs.getString(ZMSConsts.DB_COLUMN_EFFECT)));
assertion.setId((long) rs.getInt(ZMSConsts.DB_COLUMN_ASSERT_ID));
String index = roleIndex(rs.getString(ZMSConsts.DB_COLUMN_DOMAIN_ID), roleName);
List<Assertion> assertions = roleAssertions.computeIfAbsent(index, k -> new ArrayList<>());
if (LOG.isDebugEnabled()) {
LOG.debug("{}: adding assertion {} for {}", caller, assertion, index);
}
assertions.add(assertion);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return roleAssertions;
}
Set<String> getRolePrincipals(final String principalName, final String caller) {
// first let's find out all the roles that given principal is member of
Set<String> rolePrincipals = getRolesForPrincipal(principalName, caller);
// next let's extract all groups that the given principal is member of
// if the group list is not empty then we need to extract all the roles
// where groups are member of and include those roles that match our
// extracted groups in the role principals map
Set<String> groups = getGroupsForPrincipal(principalName, caller);
if (!groups.isEmpty()) {
updatePrincipalRoleGroupMembership(rolePrincipals, groups, principalName, caller);
}
return rolePrincipals;
}
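// for every role that has one of the principal's groups as a member,
// add that role's index to the principal's role set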
void updatePrincipalRoleGroupMembership(Set<String> rolePrincipals, final Set<String> groups,
final String principalName, final String caller) {
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_ROLE_GROUP_PRINCIPALS)) {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
final String groupName = rs.getString(ZMSConsts.DB_COLUMN_NAME);
if (!groups.contains(groupName)) {
continue;
}
final String roleName = rs.getString(ZMSConsts.DB_COLUMN_ROLE_NAME);
final String index = roleIndex(rs.getString(ZMSConsts.DB_COLUMN_DOMAIN_ID), roleName);
if (LOG.isDebugEnabled()) {
LOG.debug("{}: adding principal {} for {}", caller, principalName, index);
}
rolePrincipals.add(index);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
Set<String> getGroupsForPrincipal(final String principalName, final String caller) {
Set<String> groups = new HashSet<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_GROUP_FOR_PRINCIPAL)) {
ps.setString(1, principalName);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
final String groupName = rs.getString(ZMSConsts.DB_COLUMN_NAME);
final String domainName = rs.getString(ZMSConsts.DB_COLUMN_DOMAIN_NAME);
groups.add(ResourceUtils.groupResourceName(domainName, groupName));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return groups;
}
Set<String> getRolesForPrincipal(final String principalName, final String caller) {
Set<String> rolePrincipals = new HashSet<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_ROLE_PRINCIPALS)) {
ps.setString(1, principalName);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
final String roleName = rs.getString(ZMSConsts.DB_COLUMN_ROLE_NAME);
final String index = roleIndex(rs.getString(ZMSConsts.DB_COLUMN_DOMAIN_ID), roleName);
if (LOG.isDebugEnabled()) {
LOG.debug("{}: adding principal {} for {}", caller, principalName, index);
}
rolePrincipals.add(index);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return rolePrincipals;
}
void getTrustedSubTypeRoles(String sqlCommand, Map<String, List<String>> trustedRoles,
String caller) {
try (PreparedStatement ps = con.prepareStatement(sqlCommand)) {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
String trustDomainId = rs.getString(ZMSConsts.DB_COLUMN_DOMAIN_ID);
String trustRoleName = rs.getString(ZMSConsts.DB_COLUMN_NAME);
String assertDomainId = rs.getString(ZMSConsts.DB_COLUMN_ASSERT_DOMAIN_ID);
String assertRoleName = rs.getString(ZMSConsts.DB_COLUMN_ROLE);
String index = roleIndex(assertDomainId, assertRoleName);
List<String> roles = trustedRoles.computeIfAbsent(index, k -> new ArrayList<>());
String tRoleName = roleIndex(trustDomainId, trustRoleName);
roles.add(tRoleName);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
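// combine standard and wildcard trusted role mappings into a single map
// keyed by the assertion role index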
Map<String, List<String>> getTrustedRoles(String caller) {
Map<String, List<String>> trustedRoles = new HashMap<>();
getTrustedSubTypeRoles(SQL_LIST_TRUSTED_STANDARD_ROLES, trustedRoles, caller);
getTrustedSubTypeRoles(SQL_LIST_TRUSTED_WILDCARD_ROLES, trustedRoles, caller);
return trustedRoles;
}
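// map of domain name -> aws account for all domains with an aws account configured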
Map<String, String> getAwsDomains(String caller) {
Map<String, String> awsDomains = new HashMap<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_DOMAIN_AWS)) {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
awsDomains.put(rs.getString(ZMSConsts.DB_COLUMN_NAME), rs.getString(ZMSConsts.DB_COLUMN_ACCOUNT));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return awsDomains;
}
void addRoleAssertions(List<Assertion> principalAssertions, List<Assertion> roleAssertions,
Map<String, String> awsDomains) {
// if the role assertions is empty then we have nothing to do
if (roleAssertions == null || roleAssertions.isEmpty()) {
if (LOG.isDebugEnabled()) {
LOG.debug("addRoleAssertions: role assertion list is empty");
}
return;
}
// if this is not an aws request or the awsDomain list is empty,
// then we're just going to add the role assertions to the
// principal's assertion list as is
if (awsDomains == null || awsDomains.isEmpty()) {
principalAssertions.addAll(roleAssertions);
return;
}
// we're going to update each assertion and generate the
// resource in the expected aws role format. however, we
// going to skip any assertions where we do not have a
// valid syntax or no aws domain
for (Assertion assertion : roleAssertions) {
final String resource = assertion.getResource();
if (LOG.isDebugEnabled()) {
LOG.debug("addRoleAssertions: processing assertion: {}", resource);
}
// first we need to check if the assertion has already
// been processed and as such the resource has been
// rewritten to have aws format
if (resource.startsWith(AWS_ARN_PREFIX)) {
principalAssertions.add(assertion);
continue;
}
// otherwise we're going to look for the domain component
int idx = resource.indexOf(':');
if (idx == -1) {
if (LOG.isDebugEnabled()) {
LOG.debug("addRoleAssertions: resource without domain component: {}", resource);
}
continue;
}
final String resourceDomain = resource.substring(0, idx);
String awsDomain = awsDomains.get(resourceDomain);
if (awsDomain == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("addRoleAssertions: resource without aws domain: {}", resourceDomain);
}
continue;
}
assertion.setResource(AWS_ARN_PREFIX + awsDomain + ":role/" + resource.substring(idx + 1));
principalAssertions.add(assertion);
}
}
ResourceAccess getResourceAccessObject(String principal, List<Assertion> assertions) {
ResourceAccess rsrcAccess = new ResourceAccess();
rsrcAccess.setPrincipal(principal);
rsrcAccess.setAssertions(assertions != null ? assertions : new ArrayList<>());
return rsrcAccess;
}
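// return the list of resources the given principal can access for the
// requested action, expanding trusted roles and, for assume_aws_role
// queries, rewriting resources into aws arn format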
@Override
public ResourceAccessList listResourceAccess(String principal, String action, String userDomain) {
final String caller = "listResourceAccess";
ResourceAccessList rsrcAccessList = new ResourceAccessList();
List<ResourceAccess> resources = new ArrayList<>();
rsrcAccessList.setResources(resources);
// check to see if this an aws request based on
// the action query
boolean awsQuery = (action != null && action.equals(ZMSConsts.ACTION_ASSUME_AWS_ROLE));
// first let's get the principal list that we're asked to check for
// since if we have no matches then we have nothing to do
Set<String> rolePrincipals = getRolePrincipals(principal, caller);
if (rolePrincipals.isEmpty()) {
// so the given principal is not available as a role member
// so before returning an empty response let's make sure
// that it has been registered in Athenz otherwise we'll
// just return 404 - not found exception
if (getPrincipalId(principal) == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
resources.add(getResourceAccessObject(principal, null));
return rsrcAccessList;
}
// now let's get the list of role assertions. if we have
// no matches, then we have nothing to do
Map<String, List<Assertion>> roleAssertions = getRoleAssertions(action, caller);
if (roleAssertions.isEmpty()) {
resources.add(getResourceAccessObject(principal, null));
return rsrcAccessList;
}
// finally we need to get all the trusted role maps
Map<String, List<String>> trustedRoles = getTrustedRoles(caller);
// if we're asked for action assume_aws_role then we're looking
// for role access in AWS. So we're going to retrieve
// the domains that have aws account configured only and update
// the resource to generate aws role resources.
Map<String, String> awsDomains = null;
if (awsQuery) {
awsDomains = getAwsDomains(caller);
}
// now let's go ahead and combine all of our data together
// we're going to go through each principal, lookup
// the assertions for the role and add them to the return object
// if the role has no corresponding assertions, then we're going
// to look at the trust role map in case it's a trusted role
Map<String, List<Assertion>> principalAssertions = new HashMap<>();
for (String roleIndex : rolePrincipals) {
if (LOG.isDebugEnabled()) {
LOG.debug("{}: processing role: {}", caller, roleIndex);
}
List<Assertion> assertions = principalAssertions.computeIfAbsent(principal, k -> new ArrayList<>());
// retrieve the assertions for this role
addRoleAssertions(assertions, roleAssertions.get(roleIndex), awsDomains);
// check to see if this is a trusted role. There might be multiple
// roles all being mapped as trusted, so we need to process them all
List<String> mappedTrustedRoles = trustedRoles.get(roleIndex);
if (mappedTrustedRoles != null) {
for (String mappedTrustedRole : mappedTrustedRoles) {
if (LOG.isDebugEnabled()) {
LOG.debug("{}: processing trusted role: {}", caller, mappedTrustedRole);
}
addRoleAssertions(assertions, roleAssertions.get(mappedTrustedRole), awsDomains);
}
}
}
// finally we need to create resource access list objects and return
for (Map.Entry<String, List<Assertion>> entry : principalAssertions.entrySet()) {
resources.add(getResourceAccessObject(entry.getKey(), entry.getValue()));
}
return rsrcAccessList;
}
@Override
public Quota getQuota(String domainName) {
final String caller = "getQuota";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
Quota quota = null;
try (PreparedStatement ps = con.prepareStatement(SQL_GET_QUOTA)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
quota = new Quota().setName(domainName);
quota.setAssertion(rs.getInt(ZMSConsts.DB_COLUMN_ASSERTION));
quota.setRole(rs.getInt(ZMSConsts.DB_COLUMN_ROLE));
quota.setRoleMember(rs.getInt(ZMSConsts.DB_COLUMN_ROLE_MEMBER));
quota.setPolicy(rs.getInt(ZMSConsts.DB_COLUMN_POLICY));
quota.setService(rs.getInt(ZMSConsts.DB_COLUMN_SERVICE));
quota.setServiceHost(rs.getInt(ZMSConsts.DB_COLUMN_SERVICE_HOST));
quota.setPublicKey(rs.getInt(ZMSConsts.DB_COLUMN_PUBLIC_KEY));
quota.setEntity(rs.getInt(ZMSConsts.DB_COLUMN_ENTITY));
quota.setSubdomain(rs.getInt(ZMSConsts.DB_COLUMN_SUBDOMAIN));
quota.setGroup(rs.getInt(ZMSConsts.DB_COLUMN_PRINCIPAL_GROUP));
quota.setGroupMember(rs.getInt(ZMSConsts.DB_COLUMN_PRINCIPAL_GROUP_MEMBER));
quota.setModified(Timestamp.fromMillis(rs.getTimestamp(ZMSConsts.DB_COLUMN_MODIFIED).getTime()));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return quota;
}
@Override
public boolean insertQuota(String domainName, Quota quota) {
final String caller = "insertQuota";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_QUOTA)) {
ps.setInt(1, domainId);
ps.setInt(2, quota.getRole());
ps.setInt(3, quota.getRoleMember());
ps.setInt(4, quota.getPolicy());
ps.setInt(5, quota.getAssertion());
ps.setInt(6, quota.getService());
ps.setInt(7, quota.getServiceHost());
ps.setInt(8, quota.getPublicKey());
ps.setInt(9, quota.getEntity());
ps.setInt(10, quota.getSubdomain());
ps.setInt(11, quota.getGroup());
ps.setInt(12, quota.getGroupMember());
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updateQuota(String domainName, Quota quota) {
final String caller = "updateQuota";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_QUOTA)) {
ps.setInt(1, quota.getRole());
ps.setInt(2, quota.getRoleMember());
ps.setInt(3, quota.getPolicy());
ps.setInt(4, quota.getAssertion());
ps.setInt(5, quota.getService());
ps.setInt(6, quota.getServiceHost());
ps.setInt(7, quota.getPublicKey());
ps.setInt(8, quota.getEntity());
ps.setInt(9, quota.getSubdomain());
ps.setInt(10, quota.getGroup());
ps.setInt(11, quota.getGroupMember());
ps.setInt(12, domainId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean deleteQuota(String domainName) {
final String caller = "deleteQuota";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_QUOTA)) {
ps.setInt(1, domainId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public DomainRoleMembers listDomainRoleMembers(String domainName) {
return listDomainRoleMembersWithQuery(domainName, SQL_GET_DOMAIN_ROLE_MEMBERS, "listDomainRoleMembers");
}
@Override
public DomainRoleMember getPrincipalRoles(String principal, String domainName) {
final String caller = "getPrincipalRoles";
int principalId = getPrincipalId(principal);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
DomainRoleMember roleMember = new DomainRoleMember();
roleMember.setMemberRoles(new ArrayList<>());
roleMember.setMemberName(principal);
if (StringUtil.isEmpty(domainName)) {
try (PreparedStatement ps = con.prepareStatement(SQL_GET_PRINCIPAL_ROLES)) {
ps.setInt(1, principalId);
return getRolesForPrincipal(caller, roleMember, ps);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
} else {
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
try (PreparedStatement ps = con.prepareStatement(SQL_GET_PRINCIPAL_ROLES_DOMAIN)) {
ps.setInt(1, principalId);
ps.setInt(2, domainId);
return getRolesForPrincipal(caller, roleMember, ps);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
}
private DomainRoleMember getRolesForPrincipal(String caller, DomainRoleMember roleMember, PreparedStatement ps) throws SQLException {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
final String roleName = rs.getString(1);
final String domain = rs.getString(2);
MemberRole memberRole = new MemberRole();
memberRole.setRoleName(roleName);
memberRole.setDomainName(domain);
java.sql.Timestamp expiration = rs.getTimestamp(3);
if (expiration != null) {
memberRole.setExpiration(Timestamp.fromMillis(expiration.getTime()));
}
java.sql.Timestamp reviewReminder = rs.getTimestamp(4);
if (reviewReminder != null) {
memberRole.setReviewReminder(Timestamp.fromMillis(reviewReminder.getTime()));
}
memberRole.setSystemDisabled(nullIfDefaultValue(rs.getInt(5), 0));
roleMember.getMemberRoles().add(memberRole);
}
return roleMember;
}
}
@Override
public DomainRoleMembers listOverdueReviewRoleMembers(String domainName) {
return listDomainRoleMembersWithQuery(domainName, SQL_GET_REVIEW_OVERDUE_DOMAIN_ROLE_MEMBERS, "listDomainRoleMembersWithQuery");
}
@Override
public Map<String, List<DomainGroupMember>> getPendingDomainGroupMembers(String principal) {
final String caller = "getPendingDomainGroupMembersList";
int principalId = getPrincipalId(principal);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
Map<String, List<DomainGroupMember>> domainGroupMembersMap = new LinkedHashMap<>();
// first we're going to retrieve all the members that are waiting
// for approval based on their domain org values
processPendingGroupMembers(ZMSConsts.SYS_AUTH_AUDIT_BY_ORG, SQL_PENDING_ORG_AUDIT_GROUP_MEMBER_LIST,
principalId, domainGroupMembersMap, caller);
// then we're going to retrieve all the members that are waiting
// for approval based on their domain name values
processPendingGroupMembers(ZMSConsts.SYS_AUTH_AUDIT_BY_DOMAIN, SQL_PENDING_DOMAIN_AUDIT_GROUP_MEMBER_LIST,
principalId, domainGroupMembersMap, caller);
// finally retrieve the self serve groups
try (PreparedStatement ps = con.prepareStatement(SQL_PENDING_DOMAIN_ADMIN_GROUP_MEMBER_LIST)) {
ps.setInt(1, principalId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
populateDomainGroupMembersMapFromResultSet(domainGroupMembersMap, rs);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return domainGroupMembersMap;
}
@Override
public Map<String, List<DomainGroupMember>> getExpiredPendingDomainGroupMembers(int pendingGroupMemberLifespan) {
final String caller = "getExpiredPendingDomainGroupMembers";
//update audit log with details before deleting
Map<String, List<DomainGroupMember>> domainGroupMembersMap = new LinkedHashMap<>();
try (PreparedStatement ps = con.prepareStatement(SQL_GET_EXPIRED_PENDING_GROUP_MEMBERS)) {
ps.setInt(1, pendingGroupMemberLifespan);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
populateDomainGroupMembersMapFromResultSet(domainGroupMembersMap, rs);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return domainGroupMembersMap;
}
@Override
public Set<String> getPendingGroupMembershipApproverRoles(String server, long timestamp) {
final String caller = "getPendingGroupMembershipApproverGroups";
Set<String> targetRoles = new HashSet<>();
int orgDomainId = getDomainId(ZMSConsts.SYS_AUTH_AUDIT_BY_ORG);
int domDomainId = getDomainId(ZMSConsts.SYS_AUTH_AUDIT_BY_DOMAIN);
java.sql.Timestamp ts = new java.sql.Timestamp(timestamp);
//Get orgs and domains for audit enabled groups with pending membership
try (PreparedStatement ps = con.prepareStatement(SQL_AUDIT_ENABLED_PENDING_GROUP_MEMBERSHIP_REMINDER_ENTRIES)) {
ps.setTimestamp(1, ts);
ps.setString(2, server);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
// first process the org value
final String org = rs.getString(1);
if (org != null && !org.isEmpty()) {
int roleId = getRoleId(orgDomainId, org);
if (roleId != 0) {
targetRoles.add(ResourceUtils.roleResourceName(ZMSConsts.SYS_AUTH_AUDIT_BY_ORG, org));
}
}
// then process the domain value
final String domain = rs.getString(2);
int roleId = getRoleId(domDomainId, domain);
if (roleId != 0) {
targetRoles.add(ResourceUtils.roleResourceName(ZMSConsts.SYS_AUTH_AUDIT_BY_DOMAIN, domain));
}
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
// get admin groups of pending self-serve and review-enabled requests
getRecipientRoleForAdminGroupMembershipApproval(caller, targetRoles, ts, server);
return targetRoles;
}
@Override
public boolean updatePendingGroupMembersNotificationTimestamp(String server, long timestamp, int delayDays) {
final String caller = "updatePendingGroupMembersNotificationTimestamp";
int affectedRows;
java.sql.Timestamp ts = new java.sql.Timestamp(timestamp);
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_PENDING_GROUP_MEMBERS_NOTIFICATION_TIMESTAMP)) {
ps.setTimestamp(1, ts);
ps.setString(2, server);
ps.setTimestamp(3, ts);
ps.setInt(4, delayDays);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
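// shared helper for listing role members in a domain using the given query
// (all members or only those overdue for review)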
private DomainRoleMembers listDomainRoleMembersWithQuery(String domainName, String query, String caller) {
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
DomainRoleMembers domainRoleMembers = new DomainRoleMembers();
domainRoleMembers.setDomainName(domainName);
Map<String, DomainRoleMember> memberMap = new HashMap<>();
try (PreparedStatement ps = con.prepareStatement(query)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
final String roleName = rs.getString(1);
final String memberName = rs.getString(2);
DomainRoleMember domainRoleMember = memberMap.get(memberName);
if (domainRoleMember == null) {
domainRoleMember = new DomainRoleMember();
domainRoleMember.setMemberName(memberName);
memberMap.put(memberName, domainRoleMember);
}
List<MemberRole> memberRoles = domainRoleMember.getMemberRoles();
if (memberRoles == null) {
memberRoles = new ArrayList<>();
domainRoleMember.setMemberRoles(memberRoles);
}
MemberRole memberRole = new MemberRole();
memberRole.setRoleName(roleName);
java.sql.Timestamp expiration = rs.getTimestamp(3);
if (expiration != null) {
memberRole.setExpiration(Timestamp.fromMillis(expiration.getTime()));
}
java.sql.Timestamp reviewReminder = rs.getTimestamp(4);
if (reviewReminder != null) {
memberRole.setReviewReminder(Timestamp.fromMillis(reviewReminder.getTime()));
}
memberRole.setSystemDisabled(nullIfDefaultValue(rs.getInt(5), 0));
memberRoles.add(memberRole);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
if (!memberMap.isEmpty()) {
domainRoleMembers.setMembers(new ArrayList<>(memberMap.values()));
}
return domainRoleMembers;
}
@Override
public boolean deletePendingRoleMember(String domainName, String roleName, String principal,
String admin, String auditRef) {
final String caller = "deletePendingRoleMember";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ResourceUtils.roleResourceName(domainName, roleName));
}
int principalId = getPrincipalId(principal);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
return executeDeletePendingRoleMember(roleId, principalId, admin, principal, auditRef, true, caller);
}
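// delete a pending role member entry and optionally record a REJECT
// entry in the role audit log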
public boolean executeDeletePendingRoleMember(int roleId, int principalId, final String admin,
final String principal, final String auditRef, boolean auditLog, final String caller) {
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_PENDING_ROLE_MEMBER)) {
ps.setInt(1, roleId);
ps.setInt(2, principalId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
boolean result = (affectedRows > 0);
if (result && auditLog) {
result = insertRoleAuditLog(roleId, admin, principal, "REJECT", auditRef);
}
return result;
}
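// approve or reject a pending role member: approval moves the entry into
// the standard member table, rejection simply removes the pending entry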
@Override
public boolean confirmRoleMember(String domainName, String roleName, RoleMember roleMember,
String admin, String auditRef) {
final String caller = "confirmRoleMember";
String principal = roleMember.getMemberName();
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ResourceUtils.roleResourceName(domainName, roleName));
}
int principalId = getPrincipalId(principal);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
// need to check if the pending entry already exists
// before doing any work
boolean roleMemberExists = roleMemberExists(roleId, principalId, true, caller);
if (!roleMemberExists) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
boolean result;
if (roleMember.getApproved() == Boolean.TRUE) {
roleMemberExists = roleMemberExists(roleId, principalId, false, caller);
result = insertStandardRoleMember(roleId, principalId, roleMember, admin,
principal, auditRef, roleMemberExists, true, caller);
if (result) {
executeDeletePendingRoleMember(roleId, principalId, admin, principal,
auditRef, false, caller);
}
} else {
result = executeDeletePendingRoleMember(roleId, principalId, admin,
principal, auditRef, true, caller);
}
return result;
}
void processPendingMembers(final String domainName, final String query, int principalId,
Map<String, List<DomainRoleMember>> domainRoleMembersMap, final String caller) {
int auditDomId = getDomainId(domainName);
try (PreparedStatement ps = con.prepareStatement(query)) {
ps.setInt(1, principalId);
ps.setInt(2, auditDomId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
populateDomainRoleMembersMapFromResultSet(domainRoleMembersMap, rs);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
void processPendingGroupMembers(final String domainName, final String query, int principalId,
Map<String, List<DomainGroupMember>> domainGroupMembersMap, final String caller) {
int auditDomId = getDomainId(domainName);
try (PreparedStatement ps = con.prepareStatement(query)) {
ps.setInt(1, principalId);
ps.setInt(2, auditDomId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
populateDomainGroupMembersMapFromResultSet(domainGroupMembersMap, rs);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
@Override
public Map<String, List<DomainRoleMember>> getPendingDomainRoleMembers(String principal) {
final String caller = "getPendingDomainRoleMembersList";
int principalId = getPrincipalId(principal);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
Map<String, List<DomainRoleMember>> domainRoleMembersMap = new LinkedHashMap<>();
// first we're going to retrieve all the members that are waiting
// for approval based on their domain org values
processPendingMembers(ZMSConsts.SYS_AUTH_AUDIT_BY_ORG, SQL_PENDING_ORG_AUDIT_ROLE_MEMBER_LIST,
principalId, domainRoleMembersMap, caller);
// then we're going to retrieve all the members that are waiting
// for approval based on their domain name values
processPendingMembers(ZMSConsts.SYS_AUTH_AUDIT_BY_DOMAIN, SQL_PENDING_DOMAIN_AUDIT_ROLE_MEMBER_LIST,
principalId, domainRoleMembersMap, caller);
// finally retrieve the self serve roles
try (PreparedStatement ps = con.prepareStatement(SQL_PENDING_DOMAIN_ADMIN_ROLE_MEMBER_LIST)) {
ps.setInt(1, principalId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
populateDomainRoleMembersMapFromResultSet(domainRoleMembersMap, rs);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return domainRoleMembersMap;
}
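// convert a pending role member result set row into the per-domain map structure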
private void populateDomainRoleMembersMapFromResultSet(Map<String, List<DomainRoleMember>> domainRoleMembersMap, ResultSet rs) throws SQLException {
List<DomainRoleMember> domainRoleMembers;
final String domain = rs.getString(1);
if (!domainRoleMembersMap.containsKey(domain)) {
domainRoleMembers = new ArrayList<>();
domainRoleMembersMap.put(domain, domainRoleMembers);
}
domainRoleMembers = domainRoleMembersMap.get(domain);
DomainRoleMember domainRoleMember = new DomainRoleMember();
domainRoleMember.setMemberName(rs.getString(3));
List<MemberRole> memberRoles = new ArrayList<>();
MemberRole memberRole = new MemberRole();
memberRole.setRoleName(rs.getString(2));
java.sql.Timestamp expiration = rs.getTimestamp(4);
if (expiration != null) {
memberRole.setExpiration(Timestamp.fromMillis(expiration.getTime()));
}
java.sql.Timestamp reviewReminder = rs.getTimestamp(5);
if (reviewReminder != null) {
memberRole.setReviewReminder(Timestamp.fromMillis(reviewReminder.getTime()));
}
memberRole.setActive(false);
memberRole.setAuditRef(rs.getString(6));
expiration = rs.getTimestamp(7);
if (expiration != null) {
memberRole.setRequestTime(Timestamp.fromMillis(expiration.getTime()));
}
memberRole.setRequestPrincipal(rs.getString(8));
memberRoles.add(memberRole);
domainRoleMember.setMemberRoles(memberRoles);
if (!domainRoleMembers.contains(domainRoleMember)) {
domainRoleMembers.add(domainRoleMember);
}
}
private void populateDomainGroupMembersMapFromResultSet(Map<String, List<DomainGroupMember>> domainGroupMembersMap, ResultSet rs) throws SQLException {
List<DomainGroupMember> domainGroupMembers;
final String domain = rs.getString(1);
if (!domainGroupMembersMap.containsKey(domain)) {
domainGroupMembers = new ArrayList<>();
domainGroupMembersMap.put(domain, domainGroupMembers);
}
domainGroupMembers = domainGroupMembersMap.get(domain);
DomainGroupMember domainGroupMember = new DomainGroupMember();
domainGroupMember.setMemberName(rs.getString(3));
List<GroupMember> memberGroups = new ArrayList<>();
GroupMember memberGroup = new GroupMember();
memberGroup.setGroupName(rs.getString(2));
java.sql.Timestamp expiration = rs.getTimestamp(4);
if (expiration != null) {
memberGroup.setExpiration(Timestamp.fromMillis(expiration.getTime()));
}
memberGroup.setActive(false);
memberGroup.setAuditRef(rs.getString(5));
expiration = rs.getTimestamp(6);
if (expiration != null) {
memberGroup.setRequestTime(Timestamp.fromMillis(expiration.getTime()));
}
memberGroup.setRequestPrincipal(rs.getString(7));
memberGroups.add(memberGroup);
domainGroupMember.setMemberGroups(memberGroups);
if (!domainGroupMembers.contains(domainGroupMember)) {
domainGroupMembers.add(domainGroupMember);
}
}
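// determine which roles should be notified about pending role membership
// approvals: audit-enabled org/domain roles plus domain admin roles for
// self-serve and review-enabled requests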
@Override
public Set<String> getPendingMembershipApproverRoles(String server, long timestamp) {
final String caller = "getPendingMembershipApproverRoles";
Set<String> targetRoles = new HashSet<>();
int orgDomainId = getDomainId(ZMSConsts.SYS_AUTH_AUDIT_BY_ORG);
int domDomainId = getDomainId(ZMSConsts.SYS_AUTH_AUDIT_BY_DOMAIN);
java.sql.Timestamp ts = new java.sql.Timestamp(timestamp);
//Get orgs and domains for audit enabled roles with pending membership
try (PreparedStatement ps = con.prepareStatement(SQL_AUDIT_ENABLED_PENDING_MEMBERSHIP_REMINDER_ENTRIES)) {
ps.setTimestamp(1, ts);
ps.setString(2, server);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
// first process the org value
final String org = rs.getString(1);
if (org != null && !org.isEmpty()) {
int roleId = getRoleId(orgDomainId, org);
if (roleId != 0) {
targetRoles.add(ResourceUtils.roleResourceName(ZMSConsts.SYS_AUTH_AUDIT_BY_ORG, org));
}
}
// then process the domain value
final String domain = rs.getString(2);
int roleId = getRoleId(domDomainId, domain);
if (roleId != 0) {
targetRoles.add(ResourceUtils.roleResourceName(ZMSConsts.SYS_AUTH_AUDIT_BY_DOMAIN, domain));
}
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
// get admin roles of pending self-serve and review-enabled requests
getRecipientRoleForAdminMembershipApproval(caller, targetRoles, ts, server);
return targetRoles;
}
@Override
public Map<String, List<DomainRoleMember>> getExpiredPendingDomainRoleMembers(int pendingRoleMemberLifespan) {
final String caller = "getExpiredPendingMembers";
//update audit log with details before deleting
Map<String, List<DomainRoleMember>> domainRoleMembersMap = new LinkedHashMap<>();
try (PreparedStatement ps = con.prepareStatement(SQL_GET_EXPIRED_PENDING_ROLE_MEMBERS)) {
ps.setInt(1, pendingRoleMemberLifespan);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
populateDomainRoleMembersMapFromResultSet(domainRoleMembersMap, rs);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return domainRoleMembersMap;
}
@Override
public boolean updatePendingRoleMembersNotificationTimestamp(String server, long timestamp, int delayDays) {
final String caller = "updatePendingRoleMembersNotificationTimestamp";
int affectedRows;
java.sql.Timestamp ts = new java.sql.Timestamp(timestamp);
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_PENDING_ROLE_MEMBERS_NOTIFICATION_TIMESTAMP)) {
ps.setTimestamp(1, ts);
ps.setString(2, server);
ps.setTimestamp(3, ts);
ps.setInt(4, delayDays);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
private void getRecipientRoleForAdminMembershipApproval(String caller, Set<String> targetRoles,
java.sql.Timestamp timestamp, String server) {
try (PreparedStatement ps = con.prepareStatement(SQL_ADMIN_PENDING_MEMBERSHIP_REMINDER_DOMAINS)) {
ps.setTimestamp(1, timestamp);
ps.setString(2, server);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
targetRoles.add(ResourceUtils.roleResourceName(rs.getString(1), ZMSConsts.ADMIN_ROLE_NAME));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
private void getRecipientRoleForAdminGroupMembershipApproval(String caller, Set<String> targetRoles,
java.sql.Timestamp timestamp, String server) {
try (PreparedStatement ps = con.prepareStatement(SQL_ADMIN_PENDING_GROUP_MEMBERSHIP_REMINDER_DOMAINS)) {
ps.setTimestamp(1, timestamp);
ps.setString(2, server);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
targetRoles.add(ResourceUtils.roleResourceName(rs.getString(1), ZMSConsts.ADMIN_ROLE_NAME));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
@Override
public Map<String, DomainRoleMember> getNotifyTemporaryRoleMembers(String server, long timestamp) {
return getNotifyRoleMembers(server, timestamp, SQL_LIST_NOTIFY_TEMPORARY_ROLE_MEMBERS, "listNotifyTemporaryRoleMembers");
}
@Override
public boolean updateRoleMemberExpirationNotificationTimestamp(String server, long timestamp, int delayDays) {
return updateMemberNotificationTimestamp(server, timestamp, delayDays,
SQL_UPDATE_ROLE_MEMBERS_EXPIRY_NOTIFICATION_TIMESTAMP, "updateRoleMemberExpirationNotificationTimestamp");
}
@Override
public Map<String, DomainGroupMember> getNotifyTemporaryGroupMembers(String server, long timestamp) {
final String caller = "getNotifyTemporaryGroupMembers";
Map<String, DomainGroupMember> memberMap = new HashMap<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_NOTIFY_TEMPORARY_GROUP_MEMBERS)) {
ps.setTimestamp(1, new java.sql.Timestamp(timestamp));
ps.setString(2, server);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
final String memberName = rs.getString(ZMSConsts.DB_COLUMN_PRINCIPAL_NAME);
java.sql.Timestamp expiration = rs.getTimestamp(ZMSConsts.DB_COLUMN_EXPIRATION);
DomainGroupMember domainGroupMember = memberMap.get(memberName);
if (domainGroupMember == null) {
domainGroupMember = new DomainGroupMember();
domainGroupMember.setMemberName(memberName);
memberMap.put(memberName, domainGroupMember);
}
List<GroupMember> memberGroups = domainGroupMember.getMemberGroups();
if (memberGroups == null) {
memberGroups = new ArrayList<>();
domainGroupMember.setMemberGroups(memberGroups);
}
GroupMember memberGroup = new GroupMember();
memberGroup.setMemberName(memberName);
memberGroup.setGroupName(rs.getString(ZMSConsts.DB_COLUMN_AS_GROUP_NAME));
memberGroup.setDomainName(rs.getString(ZMSConsts.DB_COLUMN_DOMAIN_NAME));
if (expiration != null) {
memberGroup.setExpiration(Timestamp.fromMillis(expiration.getTime()));
}
memberGroups.add(memberGroup);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return memberMap;
}
@Override
public boolean updateGroupMemberExpirationNotificationTimestamp(String server, long timestamp, int delayDays) {
return updateMemberNotificationTimestamp(server, timestamp, delayDays,
SQL_UPDATE_GROUP_MEMBERS_EXPIRY_NOTIFICATION_TIMESTAMP, "updateGroupMemberExpirationNotificationTimestamp");
}
@Override
public Map<String, DomainRoleMember> getNotifyReviewRoleMembers(String server, long timestamp) {
return getNotifyRoleMembers(server, timestamp, SQL_LIST_NOTIFY_REVIEW_ROLE_MEMBERS, "listNotifyReviewRoleMembers");
}
@Override
public boolean updateRoleMemberReviewNotificationTimestamp(String server, long timestamp, int delayDays) {
return updateMemberNotificationTimestamp(server, timestamp, delayDays,
SQL_UPDATE_ROLE_MEMBERS_REVIEW_NOTIFICATION_TIMESTAMP, "updateRoleMemberReviewNotificationTimestamp");
}
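// common helper to stamp the notification server and timestamp on member
// rows that are due for expiry or review reminders within the given delay window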
private boolean updateMemberNotificationTimestamp(final String server, long timestamp, int delayDays,
final String query, final String caller) {
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(query)) {
ps.setTimestamp(1, new java.sql.Timestamp(timestamp));
ps.setString(2, server);
ps.setInt(3, delayDays);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
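// shared helper that returns role members, keyed by member name, selected
// by the given notification server/timestamp query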
private Map<String, DomainRoleMember> getNotifyRoleMembers(final String server, long timestamp, final String query,
final String caller) {
Map<String, DomainRoleMember> memberMap = new HashMap<>();
try (PreparedStatement ps = con.prepareStatement(query)) {
ps.setTimestamp(1, new java.sql.Timestamp(timestamp));
ps.setString(2, server);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
final String memberName = rs.getString(ZMSConsts.DB_COLUMN_PRINCIPAL_NAME);
java.sql.Timestamp expiration = rs.getTimestamp(ZMSConsts.DB_COLUMN_EXPIRATION);
java.sql.Timestamp reviewReminder = rs.getTimestamp(ZMSConsts.DB_COLUMN_REVIEW_REMINDER);
DomainRoleMember domainRoleMember = memberMap.get(memberName);
if (domainRoleMember == null) {
domainRoleMember = new DomainRoleMember();
domainRoleMember.setMemberName(memberName);
memberMap.put(memberName, domainRoleMember);
}
List<MemberRole> memberRoles = domainRoleMember.getMemberRoles();
if (memberRoles == null) {
memberRoles = new ArrayList<>();
domainRoleMember.setMemberRoles(memberRoles);
}
MemberRole memberRole = new MemberRole();
memberRole.setMemberName(memberName);
memberRole.setRoleName(rs.getString(ZMSConsts.DB_COLUMN_ROLE_NAME));
memberRole.setDomainName(rs.getString(ZMSConsts.DB_COLUMN_DOMAIN_NAME));
if (expiration != null) {
memberRole.setExpiration(Timestamp.fromMillis(expiration.getTime()));
}
if (reviewReminder != null) {
memberRole.setReviewReminder(Timestamp.fromMillis(reviewReminder.getTime()));
}
memberRoles.add(memberRole);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return memberMap;
}
@Override
public List<TemplateMetaData> getDomainTemplates(String domainName) {
TemplateMetaData templateDomainMapping;
List<TemplateMetaData> templateDomainMappingList = new ArrayList<>();
final String caller = "getDomainTemplates";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_DOMAIN_TEMPLATES)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
templateDomainMapping = new TemplateMetaData();
templateDomainMapping.setTemplateName(rs.getString(ZMSConsts.DB_COLUMN_TEMPLATE_NAME));
templateDomainMapping.setCurrentVersion(rs.getInt(ZMSConsts.DB_COLUMN_TEMPLATE_VERSION));
templateDomainMappingList.add(templateDomainMapping);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return templateDomainMappingList;
}
@Override
public List<PrincipalRole> listRolesWithUserAuthorityRestrictions() {
final String caller = "listRolesWithUserAuthorityRestrictions";
List<PrincipalRole> roles = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_ROLES_WITH_RESTRICTIONS)) {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
PrincipalRole prRole = new PrincipalRole();
prRole.setDomainName(rs.getString(ZMSConsts.DB_COLUMN_AS_DOMAIN_NAME));
prRole.setRoleName(rs.getString(ZMSConsts.DB_COLUMN_AS_ROLE_NAME));
prRole.setDomainUserAuthorityFilter(rs.getString(ZMSConsts.DB_COLUMN_AS_DOMAIN_USER_AUTHORITY_FILTER));
roles.add(prRole);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return roles;
}
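// construct a Group object from the current result set row, converting
// database default values to nulls so they are not serialized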
Group retrieveGroup(ResultSet rs, final String domainName, final String groupName) throws SQLException {
Group group = new Group().setName(ResourceUtils.groupResourceName(domainName, groupName))
.setModified(Timestamp.fromMillis(rs.getTimestamp(ZMSConsts.DB_COLUMN_MODIFIED).getTime()))
.setAuditEnabled(nullIfDefaultValue(rs.getBoolean(ZMSConsts.DB_COLUMN_AUDIT_ENABLED), false))
.setSelfServe(nullIfDefaultValue(rs.getBoolean(ZMSConsts.DB_COLUMN_SELF_SERVE), false))
.setReviewEnabled(nullIfDefaultValue(rs.getBoolean(ZMSConsts.DB_COLUMN_REVIEW_ENABLED), false))
.setNotifyRoles(saveValue(rs.getString(ZMSConsts.DB_COLUMN_NOTIFY_ROLES)))
.setUserAuthorityFilter(saveValue(rs.getString(ZMSConsts.DB_COLUMN_USER_AUTHORITY_FILTER)))
.setUserAuthorityExpiration(saveValue(rs.getString(ZMSConsts.DB_COLUMN_USER_AUTHORITY_EXPIRATION)))
.setMemberExpiryDays(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_MEMBER_EXPIRY_DAYS), 0))
.setServiceExpiryDays(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_SERVICE_EXPIRY_DAYS), 0));
java.sql.Timestamp lastReviewedTime = rs.getTimestamp(ZMSConsts.DB_COLUMN_LAST_REVIEWED_TIME);
if (lastReviewedTime != null) {
group.setLastReviewedDate(Timestamp.fromMillis(lastReviewedTime.getTime()));
}
return group;
}
@Override
public Group getGroup(String domainName, String groupName) {
final String caller = "getGroup";
try (PreparedStatement ps = con.prepareStatement(SQL_GET_GROUP)) {
ps.setString(1, domainName);
ps.setString(2, groupName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
return retrieveGroup(rs, domainName, groupName);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return null;
}
@Override
public boolean insertGroup(String domainName, Group group) {
int affectedRows;
final String caller = "insertGroup";
String groupName = ZMSUtils.extractGroupName(domainName, group.getName());
if (groupName == null) {
throw requestError(caller, "domain name mismatch: " + domainName +
" insert group name: " + group.getName());
}
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_GROUP)) {
ps.setString(1, groupName);
ps.setInt(2, domainId);
ps.setBoolean(3, processInsertValue(group.getAuditEnabled(), false));
ps.setBoolean(4, processInsertValue(group.getSelfServe(), false));
ps.setBoolean(5, processInsertValue(group.getReviewEnabled(), false));
ps.setString(6, processInsertValue(group.getNotifyRoles()));
ps.setString(7, processInsertValue(group.getUserAuthorityFilter()));
ps.setString(8, processInsertValue(group.getUserAuthorityExpiration()));
ps.setInt(9, processInsertValue(group.getMemberExpiryDays()));
ps.setInt(10, processInsertValue(group.getServiceExpiryDays()));
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updateGroup(String domainName, Group group) {
int affectedRows;
final String caller = "updateGroup";
String groupName = ZMSUtils.extractGroupName(domainName, group.getName());
if (groupName == null) {
throw requestError(caller, "domain name mismatch: " + domainName +
" update group name: " + group.getName());
}
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int groupId = getGroupId(domainId, groupName);
if (groupId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_GROUP, ResourceUtils.groupResourceName(domainName, groupName));
}
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_GROUP)) {
ps.setBoolean(1, processInsertValue(group.getAuditEnabled(), false));
ps.setBoolean(2, processInsertValue(group.getSelfServe(), false));
ps.setBoolean(3, processInsertValue(group.getReviewEnabled(), false));
ps.setString(4, processInsertValue(group.getNotifyRoles()));
ps.setString(5, processInsertValue(group.getUserAuthorityFilter()));
ps.setString(6, processInsertValue(group.getUserAuthorityExpiration()));
ps.setInt(7, processInsertValue(group.getMemberExpiryDays()));
ps.setInt(8, processInsertValue(group.getServiceExpiryDays()));
ps.setInt(9, groupId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean deleteGroup(String domainName, String groupName) {
final String caller = "deleteGroup";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_GROUP)) {
ps.setInt(1, domainId);
ps.setString(2, groupName);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updateGroupModTimestamp(String domainName, String groupName) {
int affectedRows;
final String caller = "updateGroupModTimestamp";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int groupId = getGroupId(domainId, groupName);
if (groupId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_GROUP, ResourceUtils.groupResourceName(domainName, groupName));
}
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_GROUP_MOD_TIMESTAMP)) {
ps.setInt(1, groupId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public int countGroups(String domainName) {
final String caller = "countGroups";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int count = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_COUNT_GROUP)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
count = rs.getInt(1);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return count;
}
@Override
public List<GroupAuditLog> listGroupAuditLogs(String domainName, String groupName) {
final String caller = "listGroupAuditLogs";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int groupId = getGroupId(domainId, groupName);
if (groupId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_GROUP, ResourceUtils.groupResourceName(domainName, groupName));
}
List<GroupAuditLog> logs = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_GROUP_AUDIT_LOGS)) {
ps.setInt(1, groupId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
GroupAuditLog log = new GroupAuditLog();
log.setAction(rs.getString(ZMSConsts.DB_COLUMN_ACTION));
log.setMember(rs.getString(ZMSConsts.DB_COLUMN_MEMBER));
log.setAdmin(rs.getString(ZMSConsts.DB_COLUMN_ADMIN));
log.setAuditRef(saveValue(rs.getString(ZMSConsts.DB_COLUMN_AUDIT_REF)));
log.setCreated(Timestamp.fromMillis(rs.getTimestamp(ZMSConsts.DB_COLUMN_CREATED).getTime()));
logs.add(log);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return logs;
}
@Override
public boolean updateGroupReviewTimestamp(String domainName, String groupName) {
int affectedRows;
final String caller = "updateGroupReviewTimestamp";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int groupId = getGroupId(domainId, groupName);
if (groupId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_GROUP, ResourceUtils.groupResourceName(domainName, groupName));
}
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_GROUP_REVIEW_TIMESTAMP)) {
ps.setInt(1, groupId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
void getStdGroupMembers(int groupId, List<GroupMember> members, final String caller) {
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_GROUP_MEMBERS)) {
ps.setInt(1, groupId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
GroupMember groupMember = new GroupMember();
groupMember.setMemberName(rs.getString(1));
java.sql.Timestamp expiration = rs.getTimestamp(2);
if (expiration != null) {
groupMember.setExpiration(Timestamp.fromMillis(expiration.getTime()));
}
groupMember.setActive(nullIfDefaultValue(rs.getBoolean(3), true));
groupMember.setAuditRef(rs.getString(4));
groupMember.setSystemDisabled(nullIfDefaultValue(rs.getInt(5), 0));
groupMember.setApproved(true);
members.add(groupMember);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
void getPendingGroupMembers(int groupId, List<GroupMember> members, final String caller) {
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_PENDING_GROUP_MEMBERS)) {
ps.setInt(1, groupId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
GroupMember groupMember = new GroupMember();
groupMember.setMemberName(rs.getString(1));
java.sql.Timestamp timestamp = rs.getTimestamp(2);
if (timestamp != null) {
groupMember.setExpiration(Timestamp.fromMillis(timestamp.getTime()));
}
timestamp = rs.getTimestamp(3);
if (timestamp != null) {
groupMember.setRequestTime(Timestamp.fromMillis(timestamp.getTime()));
}
groupMember.setAuditRef(rs.getString(4));
groupMember.setActive(false);
groupMember.setApproved(false);
members.add(groupMember);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
@Override
public List<GroupMember> listGroupMembers(String domainName, String groupName, Boolean pending) {
final String caller = "listGroupMembers";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int groupId = getGroupId(domainId, groupName);
if (groupId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_GROUP, ResourceUtils.groupResourceName(domainName, groupName));
}
// first get our standard group members
List<GroupMember> members = new ArrayList<>();
getStdGroupMembers(groupId, members, caller);
// if requested, include pending members as well
if (pending == Boolean.TRUE) {
getPendingGroupMembers(groupId, members, caller);
}
members.sort(GroupMemberComparator);
return members;
}
@Override
public int countGroupMembers(String domainName, String groupName) {
final String caller = "countGroupMembers";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int groupId = getGroupId(domainId, groupName);
if (groupId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_GROUP, ResourceUtils.groupResourceName(domainName, groupName));
}
int count = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_COUNT_GROUP_MEMBERS)) {
ps.setInt(1, groupId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
count = rs.getInt(1);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return count;
}
boolean getGroupMembership(final String query, int groupId, final String member, long expiration,
GroupMembership membership, boolean disabledFlagCheck, final String caller) {
try (PreparedStatement ps = con.prepareStatement(query)) {
ps.setInt(1, groupId);
ps.setString(2, member);
if (expiration != 0) {
ps.setTimestamp(3, new java.sql.Timestamp(expiration));
}
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
membership.setIsMember(true);
java.sql.Timestamp expiry = rs.getTimestamp(ZMSConsts.DB_COLUMN_EXPIRATION);
if (expiry != null) {
membership.setExpiration(Timestamp.fromMillis(expiry.getTime()));
}
membership.setRequestPrincipal(rs.getString(ZMSConsts.DB_COLUMN_REQ_PRINCIPAL));
if (disabledFlagCheck) {
membership.setSystemDisabled(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_SYSTEM_DISABLED), 0));
}
return true;
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return false;
}
@Override
public GroupMembership getGroupMember(String domainName, String groupName, String member, long expiration, boolean pending) {
final String caller = "getGroupMember";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int groupId = getGroupId(domainId, groupName);
if (groupId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_GROUP, ResourceUtils.groupResourceName(domainName, groupName));
}
GroupMembership membership = new GroupMembership()
.setMemberName(member)
.setGroupName(ResourceUtils.groupResourceName(domainName, groupName))
.setIsMember(false);
        // first we're going to check if we have a standard member with the given
        // details before checking for pending members, unless we're specifically
        // asking for pending members only, in which case we'll skip the first check
if (!pending) {
String query = expiration == 0 ? SQL_GET_GROUP_MEMBER : SQL_GET_TEMP_GROUP_MEMBER;
if (getGroupMembership(query, groupId, member, expiration, membership, true, caller)) {
membership.setApproved(true);
}
}
if (!membership.getIsMember()) {
String query = expiration == 0 ? SQL_GET_PENDING_GROUP_MEMBER : SQL_GET_TEMP_PENDING_GROUP_MEMBER;
if (getGroupMembership(query, groupId, member, expiration, membership, false, caller)) {
membership.setApproved(false);
}
}
return membership;
}
boolean groupMemberExists(int groupId, int principalId, boolean pending, final String caller) {
String statement = pending ? SQL_PENDING_GROUP_MEMBER_EXISTS : SQL_STD_GROUP_MEMBER_EXISTS;
try (PreparedStatement ps = con.prepareStatement(statement)) {
ps.setInt(1, groupId);
ps.setInt(2, principalId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
return true;
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return false;
}
boolean insertGroupAuditLog(int groupId, String admin, String member,
String action, String auditRef) {
int affectedRows;
final String caller = "insertGroupAuditEntry";
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_GROUP_AUDIT_LOG)) {
ps.setInt(1, groupId);
ps.setString(2, processInsertValue(admin));
ps.setString(3, member);
ps.setString(4, action);
ps.setString(5, processInsertValue(auditRef));
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
boolean insertPendingGroupMember(int groupId, int principalId, GroupMember groupMember,
final String admin, final String auditRef, boolean groupMemberExists, final String caller) {
java.sql.Timestamp expiration = null;
if (groupMember.getExpiration() != null) {
expiration = new java.sql.Timestamp(groupMember.getExpiration().toDate().getTime());
}
int affectedRows;
if (groupMemberExists) {
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_PENDING_GROUP_MEMBER)) {
ps.setTimestamp(1, expiration);
ps.setString(2, processInsertValue(auditRef));
ps.setString(3, processInsertValue(admin));
ps.setInt(4, groupId);
ps.setInt(5, principalId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
} else {
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_PENDING_GROUP_MEMBER)) {
ps.setInt(1, groupId);
ps.setInt(2, principalId);
ps.setTimestamp(3, expiration);
ps.setString(4, processInsertValue(auditRef));
ps.setString(5, processInsertValue(admin));
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
return (affectedRows > 0);
}
boolean insertStandardGroupMember(int groupId, int principalId, GroupMember groupMember,
final String admin, final String principal, final String auditRef,
boolean groupMemberExists, boolean approveRequest, final String caller) {
java.sql.Timestamp expiration = null;
if (groupMember.getExpiration() != null) {
expiration = new java.sql.Timestamp(groupMember.getExpiration().toDate().getTime());
}
boolean result;
String auditOperation;
if (groupMemberExists) {
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_GROUP_MEMBER)) {
ps.setTimestamp(1, expiration);
ps.setBoolean(2, processInsertValue(groupMember.getActive(), true));
ps.setString(3, processInsertValue(auditRef));
ps.setString(4, processInsertValue(admin));
ps.setInt(5, groupId);
ps.setInt(6, principalId);
executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
auditOperation = approveRequest ? "APPROVE" : "UPDATE";
result = true;
} else {
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_GROUP_MEMBER)) {
ps.setInt(1, groupId);
ps.setInt(2, principalId);
ps.setTimestamp(3, expiration);
ps.setBoolean(4, processInsertValue(groupMember.getActive(), true));
ps.setString(5, processInsertValue(auditRef));
ps.setString(6, processInsertValue(admin));
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
auditOperation = approveRequest ? "APPROVE" : "ADD";
result = (affectedRows > 0);
}
// add audit log entry for this change if the operation was successful
        // and return the result of the audit log insert operation
if (result) {
result = insertGroupAuditLog(groupId, admin, principal, auditOperation, auditRef);
}
return result;
}
@Override
public boolean insertGroupMember(String domainName, String groupName, GroupMember groupMember,
String admin, String auditRef) {
final String caller = "insertGroupMember";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int groupId = getGroupId(domainId, groupName);
if (groupId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_GROUP, ResourceUtils.groupResourceName(domainName, groupName));
}
String principal = groupMember.getMemberName();
if (!validatePrincipalDomain(principal)) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, principal);
}
int principalId = getPrincipalId(principal);
if (principalId == 0) {
principalId = insertPrincipal(principal);
if (principalId == 0) {
throw internalServerError(caller, "Unable to insert principal: " + principal);
}
}
// need to check if entry already exists
boolean pendingRequest = (groupMember.getApproved() == Boolean.FALSE);
boolean groupMemberExists = groupMemberExists(groupId, principalId, pendingRequest, caller);
// process the request based on the type of the request
// either pending request or standard insert
boolean result;
if (pendingRequest) {
result = insertPendingGroupMember(groupId, principalId, groupMember, admin,
auditRef, groupMemberExists, caller);
} else {
result = insertStandardGroupMember(groupId, principalId, groupMember, admin,
principal, auditRef, groupMemberExists, false, caller);
}
return result;
}
@Override
public boolean deleteGroupMember(String domainName, String groupName, String principal, String admin, String auditRef) {
final String caller = "deleteGroupMember";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int groupId = getGroupId(domainId, groupName);
if (groupId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_GROUP, ResourceUtils.groupResourceName(domainName, groupName));
}
int principalId = getPrincipalId(principal);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_GROUP_MEMBER)) {
ps.setInt(1, groupId);
ps.setInt(2, principalId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
boolean result = (affectedRows > 0);
// add audit log entry for this change if the delete was successful
        // and return the result of the audit log insert operation
if (result) {
result = insertGroupAuditLog(groupId, admin, principal, "DELETE", auditRef);
}
return result;
}
@Override
public boolean updateGroupMemberDisabledState(String domainName, String groupName, String principal, String admin,
int disabledState, String auditRef) {
final String caller = "updateGroupMemberDisabledState";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int groupId = getGroupId(domainId, groupName);
if (groupId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_GROUP, ResourceUtils.groupResourceName(domainName, groupName));
}
int principalId = getPrincipalId(principal);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_GROUP_MEMBER_DISABLED_STATE)) {
ps.setInt(1, disabledState);
ps.setString(2, processInsertValue(auditRef));
ps.setString(3, processInsertValue(admin));
ps.setInt(4, groupId);
ps.setInt(5, principalId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
boolean result = (affectedRows > 0);
// add audit log entry for this change if the disable was successful
        // and return the result of the audit log insert operation
if (result) {
final String operation = disabledState == 0 ? "ENABLE" : "DISABLE";
result = insertGroupAuditLog(groupId, admin, principal, operation, auditRef);
}
return result;
}
@Override
public boolean deletePendingGroupMember(String domainName, String groupName, String principal,
String admin, String auditRef) {
final String caller = "deletePendingGroupMember";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int groupId = getGroupId(domainId, groupName);
if (groupId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_GROUP, ResourceUtils.groupResourceName(domainName, groupName));
}
int principalId = getPrincipalId(principal);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
return executeDeletePendingGroupMember(groupId, principalId, admin, principal, auditRef, true, caller);
}
public boolean executeDeletePendingGroupMember(int groupId, int principalId, final String admin,
final String principal, final String auditRef, boolean auditLog, final String caller) {
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_PENDING_GROUP_MEMBER)) {
ps.setInt(1, groupId);
ps.setInt(2, principalId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
boolean result = (affectedRows > 0);
if (result && auditLog) {
result = insertGroupAuditLog(groupId, admin, principal, "REJECT", auditRef);
}
return result;
}
@Override
public boolean confirmGroupMember(String domainName, String groupName, GroupMember groupMember,
String admin, String auditRef) {
final String caller = "confirmGroupMember";
String principal = groupMember.getMemberName();
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int groupId = getGroupId(domainId, groupName);
if (groupId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_GROUP, ResourceUtils.groupResourceName(domainName, groupName));
}
int principalId = getPrincipalId(principal);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
// need to check if the pending entry already exists
// before doing any work
boolean groupMemberExists = groupMemberExists(groupId, principalId, true, caller);
if (!groupMemberExists) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
boolean result;
if (groupMember.getApproved() == Boolean.TRUE) {
groupMemberExists = groupMemberExists(groupId, principalId, false, caller);
result = insertStandardGroupMember(groupId, principalId, groupMember, admin,
principal, auditRef, groupMemberExists, true, caller);
if (result) {
executeDeletePendingGroupMember(groupId, principalId, admin, principal,
auditRef, false, caller);
}
} else {
result = executeDeletePendingGroupMember(groupId, principalId, admin,
principal, auditRef, true, caller);
}
return result;
}
private DomainGroupMember getGroupsForPrincipal(String caller, DomainGroupMember domainGroupMember, PreparedStatement ps) throws SQLException {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
final String groupName = rs.getString(1);
final String domain = rs.getString(2);
GroupMember groupMember = new GroupMember();
groupMember.setGroupName(groupName);
groupMember.setDomainName(domain);
java.sql.Timestamp expiration = rs.getTimestamp(3);
if (expiration != null) {
groupMember.setExpiration(Timestamp.fromMillis(expiration.getTime()));
}
groupMember.setSystemDisabled(nullIfDefaultValue(rs.getInt(4), 0));
domainGroupMember.getMemberGroups().add(groupMember);
}
return domainGroupMember;
}
}
@Override
public DomainGroupMember getPrincipalGroups(String principal, String domainName) {
final String caller = "getPrincipalGroups";
int principalId = getPrincipalId(principal);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
DomainGroupMember domainGroupMember = new DomainGroupMember();
domainGroupMember.setMemberGroups(new ArrayList<>());
domainGroupMember.setMemberName(principal);
if (StringUtil.isEmpty(domainName)) {
try (PreparedStatement ps = con.prepareStatement(SQL_GET_PRINCIPAL_GROUPS)) {
ps.setInt(1, principalId);
return getGroupsForPrincipal(caller, domainGroupMember, ps);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
} else {
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
try (PreparedStatement ps = con.prepareStatement(SQL_GET_PRINCIPAL_GROUPS_DOMAIN)) {
ps.setInt(1, principalId);
ps.setInt(2, domainId);
return getGroupsForPrincipal(caller, domainGroupMember, ps);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
}
@Override
public List<PrincipalGroup> listGroupsWithUserAuthorityRestrictions() {
final String caller = "listGroupsWithUserAuthorityRestrictions";
List<PrincipalGroup> groups = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_GROUPS_WITH_RESTRICTIONS)) {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
PrincipalGroup group = new PrincipalGroup();
group.setDomainName(rs.getString(ZMSConsts.DB_COLUMN_AS_DOMAIN_NAME));
group.setGroupName(rs.getString(ZMSConsts.DB_COLUMN_AS_GROUP_NAME));
group.setDomainUserAuthorityFilter(rs.getString(ZMSConsts.DB_COLUMN_AS_DOMAIN_USER_AUTHORITY_FILTER));
groups.add(group);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return groups;
}
@Override
public boolean updatePrincipal(String principal, int newState) {
final String caller = "updatePrincipal";
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_PRINCIPAL)) {
ps.setInt(1, newState);
ps.setString(2, principal);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public List<String> getPrincipals(int queriedState) {
final String caller = "getPrincipals";
List<String> principals = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_GET_PRINCIPAL)) {
ps.setInt(1, queriedState);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
principals.add(rs.getString(ZMSConsts.DB_COLUMN_NAME));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return principals;
}
    // To avoid firing multiple queries against the DB, this function generates one consolidated query for all domain->template combinations
public String generateDomainTemplateVersionQuery(Map<String, Integer> templateNameAndLatestVersion) {
StringBuilder query = new StringBuilder();
query.append("SELECT domain.name, domain_template.template FROM domain_template " +
"JOIN domain ON domain_template.domain_id=domain.domain_id WHERE ");
for (String templateName : templateNameAndLatestVersion.keySet()) {
query.append("(domain_template.template = '").append(templateName).append("' and current_version < ")
.append(templateNameAndLatestVersion.get(templateName)).append(") OR ");
}
        // To remove the last occurrence of "OR" from the generated query
query.delete(query.lastIndexOf(") OR"), query.lastIndexOf("OR") + 3).append(");");
return query.toString();
}
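    // Illustrative note (added; the example template names and versions are hypothetical,
    // not from the source): given a map such as {"vipng" -> 10, "athenz" -> 3}, the method
    // above builds a single consolidated statement roughly of the form
    //   SELECT domain.name, domain_template.template FROM domain_template
    //   JOIN domain ON domain_template.domain_id=domain.domain_id
    //   WHERE (domain_template.template = 'vipng' and current_version < 10)
    //      OR (domain_template.template = 'athenz' and current_version < 3);
    // so finding every domain that is behind on any template costs one round trip
    // instead of one query per template.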
RuntimeException notFoundError(String caller, String objectType, String objectName) {
rollbackChanges();
String message = "unknown " + objectType + " - " + objectName;
return ZMSUtils.notFoundError(message, caller);
}
RuntimeException requestError(String caller, String message) {
rollbackChanges();
return ZMSUtils.requestError(message, caller);
}
RuntimeException internalServerError(String caller, String message) {
rollbackChanges();
return ZMSUtils.internalServerError(message, caller);
}
RuntimeException sqlError(SQLException ex, String caller) {
        // check to see if this is a conflict error, in which case
        // we're going to let the caller retry the operation.
        // The two SQL states that are 'retry-able' are 08S01
        // for a communications error, and 40001 for deadlock.
        // also check for the error code where the mysql server is
        // in read-only mode, which could happen if we had a failover
        // and the connections are still going to the old master
String sqlState = ex.getSQLState();
int code = ResourceException.INTERNAL_SERVER_ERROR;
String msg;
if ("08S01".equals(sqlState) || "40001".equals(sqlState)) {
code = ResourceException.CONFLICT;
msg = "Concurrent update conflict, please retry your operation later.";
} else if (ex.getErrorCode() == MYSQL_ER_OPTION_PREVENTS_STATEMENT) {
code = ResourceException.GONE;
msg = "MySQL Database running in read-only mode";
} else if (ex.getErrorCode() == MYSQL_ER_OPTION_DUPLICATE_ENTRY) {
code = ResourceException.BAD_REQUEST;
msg = "Entry already exists";
} else if (ex instanceof SQLTimeoutException) {
code = ResourceException.SERVICE_UNAVAILABLE;
msg = "Statement cancelled due to timeout";
} else {
msg = ex.getMessage() + ", state: " + sqlState + ", code: " + ex.getErrorCode();
}
rollbackChanges();
return ZMSUtils.error(code, msg, caller);
}
Boolean nullIfDefaultValue(boolean flag, boolean defaultValue) {
return flag == defaultValue ? null : flag;
}
Integer nullIfDefaultValue(int value, int defaultValue) {
return value == defaultValue ? null : value;
}
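    // Descriptive note (added): these helpers let the retrieve* methods above leave a
    // struct field unset when the column still holds its default value - e.g.
    // nullIfDefaultValue(rs.getInt(DB_COLUMN_MEMBER_EXPIRY_DAYS), 0) returns null for 0,
    // so the returned Role/Group objects carry null instead of the default.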
private void addTagsToRoles(Map<String, Role> roleMap, String domainName) {
Map<String, Map<String, StringList>> domainRoleTags = getDomainRoleTags(domainName);
if (domainRoleTags != null) {
for (Map.Entry<String, Role> roleEntry : roleMap.entrySet()) {
Map<String, StringList> roleTag = domainRoleTags.get(roleEntry.getKey());
if (roleTag != null) {
roleEntry.getValue().setTags(roleTag);
}
}
}
}
Map<String, Map<String, StringList>> getDomainRoleTags(String domainName) {
final String caller = "getDomainRoleTags";
Map<String, Map<String, StringList>> domainRoleTags = null;
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN_ROLE_TAGS)) {
ps.setString(1, domainName);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
String roleName = rs.getString(1);
String tagKey = rs.getString(2);
String tagValue = rs.getString(3);
if (domainRoleTags == null) {
domainRoleTags = new HashMap<>();
}
Map<String, StringList> roleTag = domainRoleTags.computeIfAbsent(roleName, tags -> new HashMap<>());
StringList tagValues = roleTag.computeIfAbsent(tagKey, k -> new StringList().setList(new ArrayList<>()));
tagValues.getList().add(tagValue);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return domainRoleTags;
}
@Override
public Map<String, StringList> getRoleTags(String domainName, String roleName) {
final String caller = "getRoleTags";
Map<String, StringList> roleTag = null;
try (PreparedStatement ps = con.prepareStatement(SQL_GET_ROLE_TAGS)) {
ps.setString(1, domainName);
ps.setString(2, roleName);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
String tagKey = rs.getString(1);
String tagValue = rs.getString(2);
if (roleTag == null) {
roleTag = new HashMap<>();
}
StringList tagValues = roleTag.computeIfAbsent(tagKey, k -> new StringList().setList(new ArrayList<>()));
tagValues.getList().add(tagValue);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return roleTag;
}
@Override
public boolean insertRoleTags(String roleName, String domainName, Map<String, StringList> roleTags) {
final String caller = "insertRoleTags";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ResourceUtils.roleResourceName(domainName, roleName));
}
int curTagCount = getRoleTagsCount(roleId);
int remainingTagsToInsert = roleTagsLimit - curTagCount;
boolean res = true;
for (Map.Entry<String, StringList> e : roleTags.entrySet()) {
for (int i = 0; i < e.getValue().getList().size() && remainingTagsToInsert-- > 0; i++) {
String tagValue = e.getValue().getList().get(i);
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_ROLE_TAG)) {
ps.setInt(1, roleId);
ps.setString(2, processInsertValue(e.getKey()));
ps.setString(3, processInsertValue(tagValue));
res &= (executeUpdate(ps, caller) > 0);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
}
if (remainingTagsToInsert < 0) {
LOG.info("Role tags limit for role: [{}], domain: [{}] has reached", roleName, domainName);
}
return res;
}
private int getRoleTagsCount(int roleId) {
final String caller = "getRoleTagsCount";
int count = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_ROLE_TAG_COUNT)) {
ps.setInt(1, roleId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
count = rs.getInt(1);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return count;
}
@Override
public boolean deleteRoleTags(String roleName, String domainName, Set<String> tagKeys) {
final String caller = "deleteRoleTags";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ResourceUtils.roleResourceName(domainName, roleName));
}
boolean res = true;
for (String tagKey : tagKeys) {
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_ROLE_TAG)) {
ps.setInt(1, roleId);
ps.setString(2, processInsertValue(tagKey));
res &= (executeUpdate(ps, caller) > 0);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
return res;
}
}
| 1 | 5,644 | why are we adding the current tag count to new tag count ? | AthenZ-athenz | java |
@@ -12,7 +12,7 @@ func (c *ServerConfig) SetServerMode(port int, network, netmask string) {
func (c *ServerConfig) SetTLSServer() {
c.setFlag("tls-server")
- c.AddOptions(OptionFile("dh", "none"))
+ c.AddOptions(OptionFile("dh", "none", "none"))
}
func (c *ServerConfig) SetProtocol(protocol string) { | 1 | package openvpn
type ServerConfig struct {
*Config
}
func (c *ServerConfig) SetServerMode(port int, network, netmask string) {
c.SetPort(port)
c.setParam("server", network+" "+netmask)
c.setParam("topology", "subnet")
}
func (c *ServerConfig) SetTLSServer() {
c.setFlag("tls-server")
c.AddOptions(OptionFile("dh", "none"))
}
func (c *ServerConfig) SetProtocol(protocol string) {
if protocol == "tcp" {
c.setParam("proto", "tcp-server")
} else if protocol == "udp" {
c.setFlag("explicit-exit-notify")
}
}
| 1 | 11,024 | Are you writing "none" to file content? no good | mysteriumnetwork-node | go |
@@ -21,10 +21,10 @@ class SnippetWidget extends BaseWidget
string $target = Target::NOWHERE,
string $zone = RequestZone::NOWHERE
) {
- $this->template = $snippet;
- $this->name = $name;
- $this->target = $target;
- $this->zone = $zone;
+ $this->setTemplate($snippet);
+ $this->setName($name);
+ $this->setTargets([$target]);
+ $this->setZone($zone);
}
protected function run(array $params = []): ?string | 1 | <?php
declare(strict_types=1);
namespace Bolt\Widget;
use Bolt\Widget\Injector\RequestZone;
use Bolt\Widget\Injector\Target;
class SnippetWidget extends BaseWidget
{
protected $name;
protected $type;
protected $target;
protected $zone;
protected $priority;
public function __construct(
string $snippet = '<!-- snippet -->',
string $name = 'Nameless Snippet',
string $target = Target::NOWHERE,
string $zone = RequestZone::NOWHERE
) {
$this->template = $snippet;
$this->name = $name;
$this->target = $target;
$this->zone = $zone;
}
protected function run(array $params = []): ?string
{
return $this->getTemplate();
}
}
| 1 | 12,710 | Should we allow `string|string[]` here? | bolt-core | php |
@@ -546,7 +546,17 @@ module.exports = class Dashboard extends Plugin {
})
// 4. Add all dropped files
- getDroppedFiles(event.dataTransfer)
+ let executedDropErrorOnce = false
+ const logDropError = (error) => {
+ this.uppy.log(error.message, 'error')
+
+ // In practice all drop errors are most likely the same, so let's just show one to avoid overwhelming the user
+ if (!executedDropErrorOnce) {
+ this.uppy.info(error.message, 'error')
+ executedDropErrorOnce = true
+ }
+ }
+ getDroppedFiles(event.dataTransfer, { logDropError })
.then((files) => {
if (files.length > 0) {
this.uppy.log('[Dashboard] Files were dropped') | 1 | const { Plugin } = require('@uppy/core')
const Translator = require('@uppy/utils/lib/Translator')
const DashboardUI = require('./components/Dashboard')
const StatusBar = require('@uppy/status-bar')
const Informer = require('@uppy/informer')
const ThumbnailGenerator = require('@uppy/thumbnail-generator')
const findAllDOMElements = require('@uppy/utils/lib/findAllDOMElements')
const toArray = require('@uppy/utils/lib/toArray')
const getDroppedFiles = require('@uppy/utils/lib/getDroppedFiles')
const trapFocus = require('./utils/trapFocus')
const cuid = require('cuid')
const ResizeObserver = require('resize-observer-polyfill').default || require('resize-observer-polyfill')
const { defaultPickerIcon } = require('./components/icons')
const createSuperFocus = require('./utils/createSuperFocus')
const memoize = require('memoize-one').default || require('memoize-one')
const TAB_KEY = 9
const ESC_KEY = 27
function createPromise () {
const o = {}
o.promise = new Promise((resolve, reject) => {
o.resolve = resolve
o.reject = reject
})
return o
}
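// Usage sketch (added note; not part of the original file): createPromise() is the
// classic "deferred" pattern - the promise can be resolved from outside its executor.
// openModal()/closeModal() below use it to expose the end of the open/close animation:
//
//   const { promise, resolve } = createPromise()
//   el.addEventListener('animationend', () => { resolve() }, false)
//   return promise // callers can await the animation finishing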
/**
* Dashboard UI with previews, metadata editing, tabs for various services and more
*/
module.exports = class Dashboard extends Plugin {
static VERSION = require('../package.json').version
constructor (uppy, opts) {
super(uppy, opts)
this.id = this.opts.id || 'Dashboard'
this.title = 'Dashboard'
this.type = 'orchestrator'
this.modalName = `uppy-Dashboard-${cuid()}`
this.defaultLocale = {
strings: {
closeModal: 'Close Modal',
importFrom: 'Import from %{name}',
addingMoreFiles: 'Adding more files',
addMoreFiles: 'Add more files',
dashboardWindowTitle: 'File Uploader Window (Press escape to close)',
dashboardTitle: 'File Uploader',
copyLinkToClipboardSuccess: 'Link copied to clipboard',
copyLinkToClipboardFallback: 'Copy the URL below',
copyLink: 'Copy link',
link: 'Link',
fileSource: 'File source: %{name}',
done: 'Done',
back: 'Back',
addMore: 'Add more',
removeFile: 'Remove file',
editFile: 'Edit file',
editing: 'Editing %{file}',
edit: 'Edit',
finishEditingFile: 'Finish editing file',
saveChanges: 'Save changes',
cancel: 'Cancel',
myDevice: 'My Device',
dropPasteImport: 'Drop files here, paste, %{browse} or import from',
dropPaste: 'Drop files here, paste or %{browse}',
dropHint: 'Drop your files here',
browse: 'browse',
uploadComplete: 'Upload complete',
uploadPaused: 'Upload paused',
resumeUpload: 'Resume upload',
pauseUpload: 'Pause upload',
retryUpload: 'Retry upload',
cancelUpload: 'Cancel upload',
xFilesSelected: {
0: '%{smart_count} file selected',
1: '%{smart_count} files selected',
2: '%{smart_count} files selected'
},
uploadingXFiles: {
0: 'Uploading %{smart_count} file',
1: 'Uploading %{smart_count} files',
2: 'Uploading %{smart_count} files'
},
processingXFiles: {
0: 'Processing %{smart_count} file',
1: 'Processing %{smart_count} files',
2: 'Processing %{smart_count} files'
},
poweredBy: 'Powered by'
}
}
// set default options
const defaultOptions = {
target: 'body',
metaFields: [],
trigger: '#uppy-select-files',
inline: false,
width: 750,
height: 550,
thumbnailWidth: 280,
defaultPickerIcon,
showLinkToFileUploadResult: true,
showProgressDetails: false,
hideUploadButton: false,
hideRetryButton: false,
hidePauseResumeCancelButtons: false,
hideProgressAfterFinish: false,
note: null,
closeModalOnClickOutside: false,
closeAfterFinish: false,
disableStatusBar: false,
disableInformer: false,
disableThumbnailGenerator: false,
disablePageScrollWhenModalOpen: true,
animateOpenClose: true,
proudlyDisplayPoweredByUppy: true,
onRequestCloseModal: () => this.closeModal(),
showSelectedFiles: true,
browserBackButtonClose: false
}
// merge default options with the ones set by user
this.opts = { ...defaultOptions, ...opts }
// i18n
this.translator = new Translator([this.defaultLocale, this.uppy.locale, this.opts.locale])
this.i18n = this.translator.translate.bind(this.translator)
this.i18nArray = this.translator.translateArray.bind(this.translator)
this.openModal = this.openModal.bind(this)
this.closeModal = this.closeModal.bind(this)
this.requestCloseModal = this.requestCloseModal.bind(this)
this.isModalOpen = this.isModalOpen.bind(this)
this.addTarget = this.addTarget.bind(this)
this.removeTarget = this.removeTarget.bind(this)
this.hideAllPanels = this.hideAllPanels.bind(this)
this.showPanel = this.showPanel.bind(this)
this.toggleFileCard = this.toggleFileCard.bind(this)
this.toggleAddFilesPanel = this.toggleAddFilesPanel.bind(this)
this.initEvents = this.initEvents.bind(this)
this.handlePopState = this.handlePopState.bind(this)
this.handleKeyDownInModal = this.handleKeyDownInModal.bind(this)
this.handleKeyDownInInline = this.handleKeyDownInInline.bind(this)
this.handleComplete = this.handleComplete.bind(this)
this.handleClickOutside = this.handleClickOutside.bind(this)
this.handlePaste = this.handlePaste.bind(this)
this.handlePasteOnBody = this.handlePasteOnBody.bind(this)
this.handleInputChange = this.handleInputChange.bind(this)
this.handleDragOver = this.handleDragOver.bind(this)
this.handleDragLeave = this.handleDragLeave.bind(this)
this.handleDrop = this.handleDrop.bind(this)
this.superFocusOnEachUpdate = this.superFocusOnEachUpdate.bind(this)
this.recordIfFocusedOnUppyRecently = this.recordIfFocusedOnUppyRecently.bind(this)
this.render = this.render.bind(this)
this.install = this.install.bind(this)
this.superFocus = createSuperFocus()
this.ifFocusedOnUppyRecently = false
// Timeouts
this.makeDashboardInsidesVisibleAnywayTimeout = null
this.removeDragOverClassTimeout = null
}
removeTarget (plugin) {
const pluginState = this.getPluginState()
// filter out the one we want to remove
const newTargets = pluginState.targets.filter(target => target.id !== plugin.id)
this.setPluginState({
targets: newTargets
})
}
addTarget (plugin) {
const callerPluginId = plugin.id || plugin.constructor.name
const callerPluginName = plugin.title || callerPluginId
const callerPluginType = plugin.type
if (callerPluginType !== 'acquirer' &&
callerPluginType !== 'progressindicator' &&
callerPluginType !== 'presenter') {
const msg = 'Dashboard: Modal can only be used by plugins of types: acquirer, progressindicator, presenter'
this.uppy.log(msg)
return
}
const target = {
id: callerPluginId,
name: callerPluginName,
type: callerPluginType
}
const state = this.getPluginState()
const newTargets = state.targets.slice()
newTargets.push(target)
this.setPluginState({
targets: newTargets
})
return this.el
}
hideAllPanels () {
this.setPluginState({
activePickerPanel: false,
showAddFilesPanel: false,
activeOverlayType: null
})
}
showPanel (id) {
const { targets } = this.getPluginState()
const activePickerPanel = targets.filter((target) => {
return target.type === 'acquirer' && target.id === id
})[0]
this.setPluginState({
activePickerPanel: activePickerPanel,
activeOverlayType: 'PickerPanel'
})
}
openModal () {
const { promise, resolve } = createPromise()
// save scroll position
this.savedScrollPosition = window.pageYOffset
// save active element, so we can restore focus when modal is closed
this.savedActiveElement = document.activeElement
if (this.opts.disablePageScrollWhenModalOpen) {
document.body.classList.add('uppy-Dashboard-isFixed')
}
if (this.opts.animateOpenClose && this.getPluginState().isClosing) {
const handler = () => {
this.setPluginState({
isHidden: false
})
this.el.removeEventListener('animationend', handler, false)
resolve()
}
this.el.addEventListener('animationend', handler, false)
} else {
this.setPluginState({
isHidden: false
})
resolve()
}
if (this.opts.browserBackButtonClose) {
this.updateBrowserHistory()
}
// handle ESC and TAB keys in modal dialog
document.addEventListener('keydown', this.handleKeyDownInModal)
this.uppy.emit('dashboard:modal-open')
return promise
}
closeModal (opts = {}) {
const {
manualClose = true // Whether the modal is being closed by the user (`true`) or by other means (e.g. browser back button)
} = opts
const { isHidden, isClosing } = this.getPluginState()
if (isHidden || isClosing) {
// short-circuit if animation is ongoing
return
}
const { promise, resolve } = createPromise()
if (this.opts.disablePageScrollWhenModalOpen) {
document.body.classList.remove('uppy-Dashboard-isFixed')
}
if (this.opts.animateOpenClose) {
this.setPluginState({
isClosing: true
})
const handler = () => {
this.setPluginState({
isHidden: true,
isClosing: false
})
this.superFocus.cancel()
this.savedActiveElement.focus()
this.el.removeEventListener('animationend', handler, false)
resolve()
}
this.el.addEventListener('animationend', handler, false)
} else {
this.setPluginState({
isHidden: true
})
this.superFocus.cancel()
this.savedActiveElement.focus()
resolve()
}
// handle ESC and TAB keys in modal dialog
document.removeEventListener('keydown', this.handleKeyDownInModal)
if (manualClose) {
if (this.opts.browserBackButtonClose) {
// Make sure that the latest entry in the history state is our modal name
if (history.state && history.state[this.modalName]) {
// Go back in history to clear out the entry we created (ultimately closing the modal)
history.go(-1)
}
}
}
this.uppy.emit('dashboard:modal-closed')
return promise
}
isModalOpen () {
return !this.getPluginState().isHidden || false
}
requestCloseModal () {
if (this.opts.onRequestCloseModal) {
return this.opts.onRequestCloseModal()
}
return this.closeModal()
}
toggleFileCard (fileId) {
if (fileId) {
this.uppy.emit('dashboard:file-edit-start')
} else {
this.uppy.emit('dashboard:file-edit-complete')
}
this.setPluginState({
fileCardFor: fileId || null,
activeOverlayType: fileId ? 'FileCard' : null
})
}
toggleAddFilesPanel (show) {
this.setPluginState({
showAddFilesPanel: show,
activeOverlayType: show ? 'AddFiles' : null
})
}
addFile (file) {
try {
this.uppy.addFile({
source: this.id,
name: file.name,
type: file.type,
data: file,
meta: {
// path of the file relative to the ancestor directory the user selected.
// e.g. 'docs/Old Prague/airbnb.pdf'
relativePath: file.relativePath || null
}
})
} catch (err) {
if (!err.isRestriction) {
this.uppy.log(err)
}
}
}
// ___Why make insides of Dashboard invisible until first ResizeObserver event is emitted?
  // ResizeObserver doesn't emit the first resize event fast enough, so users can see the jump from one .uppy-size-- class to another (e.g. in Safari)
// ___Why not apply visibility property to .uppy-Dashboard-inner?
  // Because ideally, according to the spec, ResizeObserver should treat invisible elements as having width 0. So even though hiding .uppy-Dashboard-inner via the visibility property works now, it may not work in the future.
startListeningToResize () {
// Watch for Dashboard container (`.uppy-Dashboard-inner`) resize
// and update containerWidth/containerHeight in plugin state accordingly.
// Emits first event on initialization.
this.resizeObserver = new ResizeObserver((entries, observer) => {
const uppyDashboardInnerEl = entries[0]
const { width, height } = uppyDashboardInnerEl.contentRect
this.uppy.log(`[Dashboard] resized: ${width} / ${height}`, 'debug')
this.setPluginState({
containerWidth: width,
containerHeight: height,
areInsidesReadyToBeVisible: true
})
})
this.resizeObserver.observe(this.el.querySelector('.uppy-Dashboard-inner'))
// If ResizeObserver fails to emit an event telling us what size to use - default to the mobile view
this.makeDashboardInsidesVisibleAnywayTimeout = setTimeout(() => {
const pluginState = this.getPluginState()
const isModalAndClosed = !this.opts.inline && pluginState.isHidden
if (
// if ResizeObserver hasn't yet fired,
!pluginState.areInsidesReadyToBeVisible &&
// and it's not due to the modal being closed
!isModalAndClosed
) {
this.uppy.log("[Dashboard] resize event didn't fire on time: defaulted to mobile layout", 'debug')
this.setPluginState({
areInsidesReadyToBeVisible: true
})
}
}, 1000)
}
stopListeningToResize () {
this.resizeObserver.disconnect()
clearTimeout(this.makeDashboardInsidesVisibleAnywayTimeout)
}
  // Records whether we have recently been interacting with Uppy, which is then used to determine whether state updates should trigger a refocus.
recordIfFocusedOnUppyRecently (event) {
if (this.el.contains(event.target)) {
this.ifFocusedOnUppyRecently = true
} else {
this.ifFocusedOnUppyRecently = false
// ___Why run this.superFocus.cancel here when it already runs in superFocusOnEachUpdate?
// Because superFocus is debounced, when we move from Uppy to some other element on the page,
// previously run superFocus sometimes hits and moves focus back to Uppy.
this.superFocus.cancel()
}
}
updateBrowserHistory () {
// Ensure history state does not already contain our modal name to avoid double-pushing
if (!history.state || !history.state[this.modalName]) {
// Push to history so that the page is not lost on browser back button press
history.pushState({
...history.state,
[this.modalName]: true
}, '')
}
// Listen for back button presses
window.addEventListener('popstate', this.handlePopState, false)
}
handlePopState (event) {
// Close the modal if the history state no longer contains our modal name
if (this.isModalOpen() && (!event.state || !event.state[this.modalName])) {
this.closeModal({ manualClose: false })
}
// When the browser back button is pressed and uppy is now the latest entry in the history but the modal is closed, fix the history by removing the uppy history entry
// This occurs when another entry is added into the history state while the modal is open, and then the modal gets manually closed
// Solves PR #575 (https://github.com/transloadit/uppy/pull/575)
if (!this.isModalOpen() && event.state && event.state[this.modalName]) {
history.go(-1)
}
}
handleKeyDownInModal (event) {
// close modal on esc key press
if (event.keyCode === ESC_KEY) this.requestCloseModal(event)
// trap focus on tab key press
if (event.keyCode === TAB_KEY) trapFocus.forModal(event, this.getPluginState().activeOverlayType, this.el)
}
handleClickOutside () {
if (this.opts.closeModalOnClickOutside) this.requestCloseModal()
}
handlePaste (event) {
// 1. Let any acquirer plugin (Url/Webcam/etc.) handle pastes to the root
this.uppy.iteratePlugins((plugin) => {
if (plugin.type === 'acquirer') {
// Every Plugin with .type acquirer can define handleRootPaste(event)
plugin.handleRootPaste && plugin.handleRootPaste(event)
}
})
// 2. Add all dropped files
const files = toArray(event.clipboardData.files)
files.forEach((file) => {
this.uppy.log('[Dashboard] File pasted')
this.addFile(file)
})
}
handleInputChange (event) {
event.preventDefault()
const files = toArray(event.target.files)
files.forEach((file) =>
this.addFile(file)
)
}
handleDragOver (event) {
event.preventDefault()
event.stopPropagation()
clearTimeout(this.removeDragOverClassTimeout)
this.setPluginState({ isDraggingOver: true })
}
handleDragLeave (event) {
event.preventDefault()
event.stopPropagation()
clearTimeout(this.removeDragOverClassTimeout)
    // Timeout against flickering; this solution is taken from the drag-drop library. A solution with 'pointer-events: none' didn't work across browsers.
this.removeDragOverClassTimeout = setTimeout(() => {
this.setPluginState({ isDraggingOver: false })
}, 50)
}
handleDrop (event, dropCategory) {
event.preventDefault()
event.stopPropagation()
clearTimeout(this.removeDragOverClassTimeout)
// 1. Add a small (+) icon on drop
event.dataTransfer.dropEffect = 'copy'
// 2. Remove dragover class
this.setPluginState({ isDraggingOver: false })
// 3. Let any acquirer plugin (Url/Webcam/etc.) handle drops to the root
this.uppy.iteratePlugins((plugin) => {
if (plugin.type === 'acquirer') {
// Every Plugin with .type acquirer can define handleRootDrop(event)
plugin.handleRootDrop && plugin.handleRootDrop(event)
}
})
// 4. Add all dropped files
getDroppedFiles(event.dataTransfer)
.then((files) => {
if (files.length > 0) {
this.uppy.log('[Dashboard] Files were dropped')
files.forEach((file) =>
this.addFile(file)
)
}
})
}
handleKeyDownInInline (event) {
// Trap focus on tab key press.
if (event.keyCode === TAB_KEY) trapFocus.forInline(event, this.getPluginState().activeOverlayType, this.el)
}
// ___Why do we listen to the 'paste' event on a document instead of onPaste={props.handlePaste} prop, or this.el.addEventListener('paste')?
// Because (at least) Chrome doesn't handle paste if focus is on some button, e.g. 'My Device'.
// => Therefore, the best option is to listen to all 'paste' events, and only react to them when we are focused on our particular Uppy instance.
// ___Why do we still need onPaste={props.handlePaste} for the DashboardUi?
// Because if we click on the 'Drop files here' caption e.g., `document.activeElement` will be 'body'. Which means our standard determination of whether we're pasting into our Uppy instance won't work.
// => Therefore, we need a traditional onPaste={props.handlePaste} handler too.
handlePasteOnBody (event) {
const isFocusInOverlay = this.el.contains(document.activeElement)
if (isFocusInOverlay) {
this.handlePaste(event)
}
}
handleComplete ({ failed, uploadID }) {
if (this.opts.closeAfterFinish && failed.length === 0) {
// All uploads are done
this.requestCloseModal()
}
}
initEvents () {
// Modal open button
const showModalTrigger = findAllDOMElements(this.opts.trigger)
if (!this.opts.inline && showModalTrigger) {
showModalTrigger.forEach(trigger => trigger.addEventListener('click', this.openModal))
}
if (!this.opts.inline && !showModalTrigger) {
this.uppy.log('Dashboard modal trigger not found. Make sure `trigger` is set in Dashboard options unless you are planning to call openModal() method yourself', 'error')
}
this.startListeningToResize()
document.addEventListener('paste', this.handlePasteOnBody)
this.uppy.on('plugin-remove', this.removeTarget)
this.uppy.on('file-added', this.hideAllPanels)
this.uppy.on('dashboard:modal-closed', this.hideAllPanels)
this.uppy.on('complete', this.handleComplete)
// ___Why fire on capture?
// Because this.ifFocusedOnUppyRecently needs to change before onUpdate() fires.
document.addEventListener('focus', this.recordIfFocusedOnUppyRecently, true)
document.addEventListener('click', this.recordIfFocusedOnUppyRecently, true)
if (this.opts.inline) {
this.el.addEventListener('keydown', this.handleKeyDownInInline)
}
}
removeEvents () {
const showModalTrigger = findAllDOMElements(this.opts.trigger)
if (!this.opts.inline && showModalTrigger) {
showModalTrigger.forEach(trigger => trigger.removeEventListener('click', this.openModal))
}
this.stopListeningToResize()
document.removeEventListener('paste', this.handlePasteOnBody)
window.removeEventListener('popstate', this.handlePopState, false)
this.uppy.off('plugin-remove', this.removeTarget)
this.uppy.off('file-added', this.hideAllPanels)
this.uppy.off('dashboard:modal-closed', this.hideAllPanels)
this.uppy.off('complete', this.handleComplete)
document.removeEventListener('focus', this.recordIfFocusedOnUppyRecently)
document.removeEventListener('click', this.recordIfFocusedOnUppyRecently)
if (this.opts.inline) {
this.el.removeEventListener('keydown', this.handleKeyDownInInline)
}
}
superFocusOnEachUpdate () {
const isFocusInUppy = this.el.contains(document.activeElement)
// When focus is lost on the page (== focus is on body for most browsers, or focus is null for IE11)
const isFocusNowhere = document.activeElement === document.querySelector('body') || document.activeElement === null
const isInformerHidden = this.uppy.getState().info.isHidden
const isModal = !this.opts.inline
if (
// If update is connected to showing the Informer - let the screen reader calmly read it.
isInformerHidden &&
(
// If we are in a modal - always superfocus without concern for other elements on the page (user is unlikely to want to interact with the rest of the page)
isModal ||
// If we are already inside of Uppy, or
isFocusInUppy ||
// If we are not focused on anything BUT we have already, at least once, focused on uppy
// 1. We focus when isFocusNowhere, because when the element we were focused on disappears (e.g. an overlay), - focus gets lost. If user is typing something somewhere else on the page, - focus won't be 'nowhere'.
// 2. We only focus when focus is nowhere AND this.ifFocusedOnUppyRecently, to avoid focus jumps if we do something else on the page.
// [Practical check] Without '&& this.ifFocusedOnUppyRecently', in Safari, in inline mode, when file is uploading, - navigate via tab to the checkbox, try to press space multiple times. Focus will jump to Uppy.
(isFocusNowhere && this.ifFocusedOnUppyRecently)
)
) {
this.superFocus(this.el, this.getPluginState().activeOverlayType)
} else {
this.superFocus.cancel()
}
}
afterUpdate () {
this.superFocusOnEachUpdate()
}
cancelUpload = (fileID) => {
this.uppy.removeFile(fileID)
}
saveFileCard = (meta, fileID) => {
this.uppy.setFileMeta(fileID, meta)
this.toggleFileCard()
}
_attachRenderFunctionToTarget = (target) => {
const plugin = this.uppy.getPlugin(target.id)
return {
...target,
icon: plugin.icon || this.opts.defaultPickerIcon,
render: plugin.render
}
}
_isTargetSupported = (target) => {
const plugin = this.uppy.getPlugin(target.id)
// If the plugin does not provide a `supported` check, assume the plugin works everywhere.
if (typeof plugin.isSupported !== 'function') {
return true
}
return plugin.isSupported()
}
_getAcquirers = memoize((targets) => {
return targets
.filter(target => target.type === 'acquirer' && this._isTargetSupported(target))
.map(this._attachRenderFunctionToTarget)
})
_getProgressIndicators = memoize((targets) => {
return targets
.filter(target => target.type === 'progressindicator')
.map(this._attachRenderFunctionToTarget)
})
render (state) {
const pluginState = this.getPluginState()
const { files, capabilities, allowNewUpload } = state
// TODO: move this to Core, to share between Status Bar and Dashboard
// (and any other plugin that might need it, too)
const newFiles = Object.keys(files).filter((file) => {
return !files[file].progress.uploadStarted
})
const uploadStartedFiles = Object.keys(files).filter((file) => {
return files[file].progress.uploadStarted
})
const pausedFiles = Object.keys(files).filter((file) => {
return files[file].isPaused
})
const completeFiles = Object.keys(files).filter((file) => {
return files[file].progress.uploadComplete
})
const erroredFiles = Object.keys(files).filter((file) => {
return files[file].error
})
const inProgressFiles = Object.keys(files).filter((file) => {
return !files[file].progress.uploadComplete &&
files[file].progress.uploadStarted
})
const inProgressNotPausedFiles = inProgressFiles.filter((file) => {
return !files[file].isPaused
})
const processingFiles = Object.keys(files).filter((file) => {
return files[file].progress.preprocess || files[file].progress.postprocess
})
const isUploadStarted = uploadStartedFiles.length > 0
const isAllComplete = state.totalProgress === 100 &&
completeFiles.length === Object.keys(files).length &&
processingFiles.length === 0
const isAllErrored = isUploadStarted &&
erroredFiles.length === uploadStartedFiles.length
const isAllPaused = inProgressFiles.length !== 0 &&
pausedFiles.length === inProgressFiles.length
const acquirers = this._getAcquirers(pluginState.targets)
const progressindicators = this._getProgressIndicators(pluginState.targets)
return DashboardUI({
state,
isHidden: pluginState.isHidden,
files,
newFiles,
uploadStartedFiles,
completeFiles,
erroredFiles,
inProgressFiles,
inProgressNotPausedFiles,
processingFiles,
isUploadStarted,
isAllComplete,
isAllErrored,
isAllPaused,
totalFileCount: Object.keys(files).length,
totalProgress: state.totalProgress,
allowNewUpload,
acquirers,
activePickerPanel: pluginState.activePickerPanel,
animateOpenClose: this.opts.animateOpenClose,
isClosing: pluginState.isClosing,
getPlugin: this.uppy.getPlugin,
progressindicators: progressindicators,
autoProceed: this.uppy.opts.autoProceed,
id: this.id,
closeModal: this.requestCloseModal,
handleClickOutside: this.handleClickOutside,
handleInputChange: this.handleInputChange,
handlePaste: this.handlePaste,
inline: this.opts.inline,
showPanel: this.showPanel,
hideAllPanels: this.hideAllPanels,
log: this.uppy.log,
i18n: this.i18n,
i18nArray: this.i18nArray,
addFile: this.uppy.addFile,
removeFile: this.uppy.removeFile,
info: this.uppy.info,
note: this.opts.note,
metaFields: pluginState.metaFields,
resumableUploads: capabilities.resumableUploads || false,
individualCancellation: capabilities.individualCancellation,
pauseUpload: this.uppy.pauseResume,
retryUpload: this.uppy.retryUpload,
cancelUpload: this.cancelUpload,
cancelAll: this.uppy.cancelAll,
fileCardFor: pluginState.fileCardFor,
toggleFileCard: this.toggleFileCard,
toggleAddFilesPanel: this.toggleAddFilesPanel,
showAddFilesPanel: pluginState.showAddFilesPanel,
saveFileCard: this.saveFileCard,
width: this.opts.width,
height: this.opts.height,
showLinkToFileUploadResult: this.opts.showLinkToFileUploadResult,
proudlyDisplayPoweredByUppy: this.opts.proudlyDisplayPoweredByUppy,
containerWidth: pluginState.containerWidth,
areInsidesReadyToBeVisible: pluginState.areInsidesReadyToBeVisible,
isTargetDOMEl: this.isTargetDOMEl,
parentElement: this.el,
allowedFileTypes: this.uppy.opts.restrictions.allowedFileTypes,
maxNumberOfFiles: this.uppy.opts.restrictions.maxNumberOfFiles,
showSelectedFiles: this.opts.showSelectedFiles,
// drag props
isDraggingOver: pluginState.isDraggingOver,
handleDragOver: this.handleDragOver,
handleDragLeave: this.handleDragLeave,
handleDrop: this.handleDrop
})
}
discoverProviderPlugins () {
this.uppy.iteratePlugins((plugin) => {
if (plugin && !plugin.target && plugin.opts && plugin.opts.target === this.constructor) {
this.addTarget(plugin)
}
})
}
install () {
// Set default state for Dashboard
this.setPluginState({
isHidden: true,
fileCardFor: null,
activeOverlayType: null,
showAddFilesPanel: false,
activePickerPanel: false,
metaFields: this.opts.metaFields,
targets: [],
// We'll make them visible once .containerWidth is determined
areInsidesReadyToBeVisible: false,
isDraggingOver: false
})
const { inline, closeAfterFinish } = this.opts
if (inline && closeAfterFinish) {
throw new Error('[Dashboard] `closeAfterFinish: true` cannot be used on an inline Dashboard, because an inline Dashboard cannot be closed at all. Either set `inline: false`, or disable the `closeAfterFinish` option.')
}
const { allowMultipleUploads } = this.uppy.opts
if (allowMultipleUploads && closeAfterFinish) {
this.uppy.log('[Dashboard] When using `closeAfterFinish`, we recommend setting the `allowMultipleUploads` option to `false` in the Uppy constructor. See https://uppy.io/docs/uppy/#allowMultipleUploads-true', 'warning')
}
const { target } = this.opts
if (target) {
this.mount(target, this)
}
const plugins = this.opts.plugins || []
plugins.forEach((pluginID) => {
const plugin = this.uppy.getPlugin(pluginID)
if (plugin) {
plugin.mount(this, plugin)
}
})
if (!this.opts.disableStatusBar) {
this.uppy.use(StatusBar, {
id: `${this.id}:StatusBar`,
target: this,
hideUploadButton: this.opts.hideUploadButton,
hideRetryButton: this.opts.hideRetryButton,
hidePauseResumeButton: this.opts.hidePauseResumeButton,
hideCancelButton: this.opts.hideCancelButton,
showProgressDetails: this.opts.showProgressDetails,
hideAfterFinish: this.opts.hideProgressAfterFinish,
locale: this.opts.locale
})
}
if (!this.opts.disableInformer) {
this.uppy.use(Informer, {
id: `${this.id}:Informer`,
target: this
})
}
if (!this.opts.disableThumbnailGenerator) {
this.uppy.use(ThumbnailGenerator, {
id: `${this.id}:ThumbnailGenerator`,
thumbnailWidth: this.opts.thumbnailWidth
})
}
this.discoverProviderPlugins()
this.initEvents()
}
uninstall () {
if (!this.opts.disableInformer) {
const informer = this.uppy.getPlugin(`${this.id}:Informer`)
// Checking if this plugin exists, in case it was removed by uppy-core
// before the Dashboard was.
if (informer) this.uppy.removePlugin(informer)
}
if (!this.opts.disableStatusBar) {
const statusBar = this.uppy.getPlugin(`${this.id}:StatusBar`)
if (statusBar) this.uppy.removePlugin(statusBar)
}
if (!this.opts.disableThumbnailGenerator) {
const thumbnail = this.uppy.getPlugin(`${this.id}:ThumbnailGenerator`)
if (thumbnail) this.uppy.removePlugin(thumbnail)
}
const plugins = this.opts.plugins || []
plugins.forEach((pluginID) => {
const plugin = this.uppy.getPlugin(pluginID)
if (plugin) plugin.unmount()
})
this.unmount()
this.removeEvents()
}
}
| 1 | 12,399 | Is there a reason for logging `error.message` specifically? Maybe log the whole error object. | transloadit-uppy | js |
@@ -42,7 +42,9 @@ namespace OpenTelemetry.Trace
var aspnetOptions = new AspNetInstrumentationOptions();
configureAspNetInstrumentationOptions?.Invoke(aspnetOptions);
- builder.AddDiagnosticSourceInstrumentation((activitySource) => new AspNetInstrumentation(activitySource, aspnetOptions));
+ builder.AddInstrumentation(() => new AspNetInstrumentation(aspnetOptions));
+ builder.AddSource(typeof(AspNetInstrumentation).Assembly.GetName().Name);
+ builder.AddLegacyActivityOperationName("Microsoft.AspNet.HttpReqIn");
return builder;
} | 1 | // <copyright file="TracerProviderBuilderExtensions.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using OpenTelemetry.Instrumentation.AspNet;
namespace OpenTelemetry.Trace
{
/// <summary>
/// Extension methods to simplify registering of ASP.NET request instrumentation.
/// </summary>
public static class TracerProviderBuilderExtensions
{
/// <summary>
/// Enables the incoming requests automatic data collection for ASP.NET.
/// </summary>
/// <param name="builder"><see cref="TracerProviderBuilder"/> being configured.</param>
/// <param name="configureAspNetInstrumentationOptions">ASP.NET Request configuration options.</param>
/// <returns>The instance of <see cref="TracerProviderBuilder"/> to chain the calls.</returns>
public static TracerProviderBuilder AddAspNetInstrumentation(
this TracerProviderBuilder builder,
Action<AspNetInstrumentationOptions> configureAspNetInstrumentationOptions = null)
{
if (builder == null)
{
throw new ArgumentNullException(nameof(builder));
}
var aspnetOptions = new AspNetInstrumentationOptions();
configureAspNetInstrumentationOptions?.Invoke(aspnetOptions);
builder.AddDiagnosticSourceInstrumentation((activitySource) => new AspNetInstrumentation(activitySource, aspnetOptions));
return builder;
}
}
}
| 1 | 19,267 | Why is this needed? | open-telemetry-opentelemetry-dotnet | .cs |
@@ -404,6 +404,14 @@ func (proxy *Proxy) ContainerStarted(ident string) {
proxy.notifyWaiters(ident, err)
}
+func (proxy *Proxy) ContainerConnected(ident string) {
+ err := proxy.attach(ident)
+ // if err != nil {
+ // TODO: Not sure what is needed here.
+ // }
+ proxy.notifyWaiters(ident, err)
+}
+
func containerShouldAttach(container *docker.Container) bool {
return len(container.Config.Entrypoint) > 0 && container.Config.Entrypoint[0] == weaveWaitEntrypoint[0]
} | 1 | package proxy
import (
"bytes"
"crypto/tls"
"errors"
"fmt"
"net"
"net/http"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
"syscall"
"time"
docker "github.com/fsouza/go-dockerclient"
weaveapi "github.com/weaveworks/weave/api"
weavedocker "github.com/weaveworks/weave/common/docker"
weavenet "github.com/weaveworks/weave/net"
"github.com/weaveworks/weave/net/address"
)
const (
defaultCaFile = "ca.pem"
defaultKeyFile = "key.pem"
defaultCertFile = "cert.pem"
weaveSock = "/var/run/weave/weave.sock"
weaveSockUnix = "unix://" + weaveSock
initialInterval = 2 * time.Second
maxInterval = 1 * time.Minute
)
var (
containerCreateRegexp = dockerAPIEndpoint("containers/create")
containerStartRegexp = dockerAPIEndpoint("containers/[^/]*/(re)?start")
containerInspectRegexp = dockerAPIEndpoint("containers/[^/]*/json")
execCreateRegexp = dockerAPIEndpoint("containers/[^/]*/exec")
execInspectRegexp = dockerAPIEndpoint("exec/[^/]*/json")
ErrWeaveCIDRNone = errors.New("the container was created with the '-e WEAVE_CIDR=none' option")
ErrNoDefaultIPAM = errors.New("the container was created without specifying an IP address with '-e WEAVE_CIDR=...' and the proxy was started with the '--no-default-ipalloc' option")
)
func dockerAPIEndpoint(endpoint string) *regexp.Regexp {
return regexp.MustCompile("^(/v[0-9\\.]*)?/" + endpoint + "$")
}
type Config struct {
HostnameFromLabel string
HostnameMatch string
HostnameReplacement string
Image string
ListenAddrs []string
RewriteInspect bool
NoDefaultIPAM bool
NoRewriteHosts bool
TLSConfig TLSConfig
Version string
WithoutDNS bool
NoMulticastRoute bool
DockerBridge string
DockerHost string
}
type wait struct {
ident string
ch chan error
done bool
}
type Proxy struct {
sync.Mutex
Config
client *docker.Client
dockerBridgeIP string
hostnameMatchRegexp *regexp.Regexp
weaveWaitVolume string
weaveWaitNoopVolume string
weaveWaitNomcastVolume string
normalisedAddrs []string
waiters map[*http.Request]*wait
attachJobs map[string]*attachJob
quit chan struct{}
}
type attachJob struct {
id string
tryInterval time.Duration // retry delay on next failure
timer *time.Timer
}
func (proxy *Proxy) attachWithRetry(id string) {
proxy.Lock()
defer proxy.Unlock()
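// If a retry job already exists for this container, just trigger it to run again immediately.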
if j, ok := proxy.attachJobs[id]; ok {
j.timer.Reset(time.Duration(0))
return
}
j := &attachJob{id: id, tryInterval: initialInterval}
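// The first attach attempt fires immediately; on failure the callback below reschedules itself with a growing delay.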
j.timer = time.AfterFunc(time.Duration(0), func() {
if err := proxy.attach(id); err != nil {
// The delay before the nth retry is initialInterval * 1.5^(n-1), capped at maxInterval.
j.timer.Reset(j.tryInterval)
j.tryInterval = j.tryInterval * 3 / 2
if j.tryInterval > maxInterval {
j.tryInterval = maxInterval
}
return
}
proxy.notifyWaiters(id, nil)
})
proxy.attachJobs[id] = j
}
func (j attachJob) Stop() {
j.timer.Stop()
}
func NewProxy(c Config) (*Proxy, error) {
p := &Proxy{
Config: c,
waiters: make(map[*http.Request]*wait),
attachJobs: make(map[string]*attachJob),
quit: make(chan struct{}),
}
if err := p.TLSConfig.LoadCerts(); err != nil {
Log.Fatalf("Could not configure tls for proxy: %s", err)
}
// We pin the protocol version to 1.18 (which corresponds to
// Docker 1.6.x; the earliest version supported by weave) in order
// to insulate ourselves from breaking changes to the API, as
// happened in 1.20 (Docker 1.8.0) when the presentation of
// volumes changed in `inspect`.
client, err := weavedocker.NewVersionedClient(c.DockerHost, "1.18")
if err != nil {
return nil, err
}
Log.Info(client.Info())
p.client = client.Client
if !p.WithoutDNS {
ip, err := weavenet.FindBridgeIP(c.DockerBridge, nil)
if err != nil {
return nil, err
}
p.dockerBridgeIP = ip.String()
Log.Infof("Using docker bridge IP for DNS: %v", p.dockerBridgeIP)
}
p.hostnameMatchRegexp, err = regexp.Compile(c.HostnameMatch)
if err != nil {
err := fmt.Errorf("Incorrect hostname match '%s': %s", c.HostnameMatch, err.Error())
return nil, err
}
if err = p.findWeaveWaitVolumes(); err != nil {
return nil, err
}
client.AddObserver(p)
return p, nil
}
func (proxy *Proxy) AttachExistingContainers() {
containers, _ := proxy.client.ListContainers(docker.ListContainersOptions{})
for _, c := range containers {
proxy.attachWithRetry(c.ID)
}
}
func (proxy *Proxy) Dial() (net.Conn, error) {
proto := "tcp"
addr := proxy.Config.DockerHost
switch {
case strings.HasPrefix(addr, "unix://"):
proto = "unix"
addr = strings.TrimPrefix(addr, "unix://")
case strings.HasPrefix(addr, "tcp://"):
addr = strings.TrimPrefix(addr, "tcp://")
}
return net.Dial(proto, addr)
}
func (proxy *Proxy) findWeaveWaitVolumes() error {
var err error
if proxy.weaveWaitVolume, err = proxy.findVolume("/w"); err != nil {
return err
}
if proxy.weaveWaitNoopVolume, err = proxy.findVolume("/w-noop"); err != nil {
return err
}
proxy.weaveWaitNomcastVolume, err = proxy.findVolume("/w-nomcast")
return err
}
func (proxy *Proxy) findVolume(v string) (string, error) {
container, err := proxy.client.InspectContainer("weaveproxy")
if err != nil {
return "", fmt.Errorf("Could not find the weavewait volume: %s", err)
}
if container.Volumes == nil {
return "", fmt.Errorf("Could not find the weavewait volume")
}
volume, ok := container.Volumes[v]
if !ok {
return "", fmt.Errorf("Could not find the weavewait volume")
}
return volume, nil
}
func (proxy *Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
Log.Infof("%s %s", r.Method, r.URL)
path := r.URL.Path
var i interceptor
switch {
case containerCreateRegexp.MatchString(path):
i = &createContainerInterceptor{proxy}
case containerStartRegexp.MatchString(path):
i = &startContainerInterceptor{proxy}
case containerInspectRegexp.MatchString(path):
i = &inspectContainerInterceptor{proxy}
case execCreateRegexp.MatchString(path):
i = &createExecInterceptor{proxy}
case execInspectRegexp.MatchString(path):
i = &inspectExecInterceptor{proxy}
default:
i = &nullInterceptor{}
}
proxy.Intercept(i, w, r)
}
func (proxy *Proxy) Listen() []net.Listener {
listeners := []net.Listener{}
proxy.normalisedAddrs = []string{}
unixAddrs := []string{}
for _, addr := range proxy.ListenAddrs {
if strings.HasPrefix(addr, "unix://") || strings.HasPrefix(addr, "/") {
unixAddrs = append(unixAddrs, addr)
continue
}
listener, normalisedAddr, err := proxy.listen(addr)
if err != nil {
Log.Fatalf("Cannot listen on %s: %s", addr, err)
}
listeners = append(listeners, listener)
proxy.normalisedAddrs = append(proxy.normalisedAddrs, normalisedAddr)
}
if len(unixAddrs) > 0 {
listener, _, err := proxy.listen(weaveSockUnix)
if err != nil {
Log.Fatalf("Cannot listen on %s: %s", weaveSockUnix, err)
}
listeners = append(listeners, listener)
if err := proxy.symlink(unixAddrs); err != nil {
Log.Fatalf("Cannot listen on unix sockets: %s", err)
}
proxy.normalisedAddrs = append(proxy.normalisedAddrs, weaveSockUnix)
}
for _, addr := range proxy.normalisedAddrs {
Log.Infoln("proxy listening on", addr)
}
return listeners
}
func (proxy *Proxy) Serve(listeners []net.Listener) {
errs := make(chan error)
for _, listener := range listeners {
go func(listener net.Listener) {
errs <- (&http.Server{Handler: proxy}).Serve(listener)
}(listener)
}
for range listeners {
err := <-errs
if err != nil {
Log.Fatalf("Serve failed: %s", err)
}
}
}
func (proxy *Proxy) ListenAndServeStatus(socket string) {
listener, err := weavenet.ListenUnixSocket(socket)
if err != nil {
Log.Fatalf("ListenAndServeStatus failed: %s", err)
}
handler := http.HandlerFunc(proxy.StatusHTTP)
if err := (&http.Server{Handler: handler}).Serve(listener); err != nil {
Log.Fatalf("ListenAndServeStatus failed: %s", err)
}
}
func (proxy *Proxy) StatusHTTP(w http.ResponseWriter, r *http.Request) {
for _, addr := range proxy.normalisedAddrs {
fmt.Fprintln(w, addr)
}
}
func copyOwnerAndPermissions(from, to string) error {
stat, err := os.Stat(from)
if err != nil {
return err
}
if err = os.Chmod(to, stat.Mode()); err != nil {
return err
}
moreStat, ok := stat.Sys().(*syscall.Stat_t)
if !ok {
return nil
}
if err = os.Chown(to, int(moreStat.Uid), int(moreStat.Gid)); err != nil {
return err
}
return nil
}
func (proxy *Proxy) listen(protoAndAddr string) (net.Listener, string, error) {
var (
listener net.Listener
err error
proto, addr string
)
if protoAddrParts := strings.SplitN(protoAndAddr, "://", 2); len(protoAddrParts) == 2 {
proto, addr = protoAddrParts[0], protoAddrParts[1]
} else if strings.HasPrefix(protoAndAddr, "/") {
proto, addr = "unix", protoAndAddr
} else {
proto, addr = "tcp", protoAndAddr
}
switch proto {
case "tcp":
listener, err = net.Listen(proto, addr)
if err != nil {
return nil, "", err
}
if proxy.TLSConfig.IsEnabled() {
listener = tls.NewListener(listener, proxy.TLSConfig.Config)
}
case "unix":
// remove socket from last invocation
if err := os.Remove(addr); err != nil && !os.IsNotExist(err) {
return nil, "", err
}
listener, err = net.Listen(proto, addr)
if err != nil {
return nil, "", err
}
if strings.HasPrefix(proxy.Config.DockerHost, "unix://") {
if err = copyOwnerAndPermissions(strings.TrimPrefix(proxy.Config.DockerHost, "unix://"), addr); err != nil {
return nil, "", err
}
}
default:
Log.Fatalf("Invalid protocol format: %q", proto)
}
return &MalformedHostHeaderOverride{listener}, fmt.Sprintf("%s://%s", proto, addr), nil
}
// weavedocker.ContainerObserver interface
func (proxy *Proxy) ContainerStarted(ident string) {
err := proxy.attach(ident)
if err != nil {
var e error
// attach failed: if we have a request waiting on the start, kill the container,
// otherwise assume it is a Docker-initiated restart and kill the process inside.
if proxy.waitChan(ident) != nil {
e = proxy.client.KillContainer(docker.KillContainerOptions{ID: ident})
} else {
var c *docker.Container
if c, e = proxy.client.InspectContainer(ident); e == nil {
var process *os.Process
if process, e = os.FindProcess(c.State.Pid); e == nil {
e = process.Kill()
}
}
}
if e != nil {
Log.Warningf("Error killing %s: %s", ident, e)
}
}
proxy.notifyWaiters(ident, err)
}
func containerShouldAttach(container *docker.Container) bool {
return len(container.Config.Entrypoint) > 0 && container.Config.Entrypoint[0] == weaveWaitEntrypoint[0]
}
func containerIsWeaveRouter(container *docker.Container) bool {
return container.Name == weaveContainerName &&
len(container.Config.Entrypoint) > 0 && container.Config.Entrypoint[0] == weaveEntrypoint
}
func (proxy *Proxy) createWait(r *http.Request, ident string) {
proxy.Lock()
proxy.waiters[r] = &wait{ident: ident, ch: make(chan error, 1)}
proxy.Unlock()
}
func (proxy *Proxy) removeWait(r *http.Request) {
proxy.Lock()
delete(proxy.waiters, r)
proxy.Unlock()
}
func (proxy *Proxy) notifyWaiters(ident string, err error) {
proxy.Lock()
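// Cancel any pending attach retry job for this container before notifying the waiters.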
if j, ok := proxy.attachJobs[ident]; ok {
j.Stop()
delete(proxy.attachJobs, ident)
}
for _, wait := range proxy.waiters {
if ident == wait.ident && !wait.done {
wait.ch <- err
close(wait.ch)
wait.done = true
}
}
proxy.Unlock()
}
func (proxy *Proxy) waitForStart(r *http.Request) error {
var ch chan error
proxy.Lock()
wait, found := proxy.waiters[r]
if found {
ch = wait.ch
}
proxy.Unlock()
if ch != nil {
Log.Debugf("Wait for start of container %s", wait.ident)
return <-ch
}
return nil
}
func (proxy *Proxy) waitChan(ident string) chan error {
proxy.Lock()
defer proxy.Unlock()
for _, wait := range proxy.waiters {
if ident == wait.ident && !wait.done {
return wait.ch
}
}
return nil
}
// If some other operation is waiting for a container to start, join in the wait
func (proxy *Proxy) waitForStartByIdent(ident string) error {
if ch := proxy.waitChan(ident); ch != nil {
Log.Debugf("Wait for start of container %s", ident)
return <-ch
}
return nil
}
func (proxy *Proxy) ContainerDied(ident string) {}
func (proxy *Proxy) ContainerDestroyed(ident string) {}
// Check if this container needs to be attached; if so, attach it.
// Returns nil on success or when no attachment is needed.
func (proxy *Proxy) attach(containerID string) error {
container, err := proxy.client.InspectContainer(containerID)
if err != nil {
if _, ok := err.(*docker.NoSuchContainer); !ok {
Log.Warningf("unable to attach existing container %s since inspecting it failed: %v", containerID, err)
}
return nil
}
if containerIsWeaveRouter(container) {
Log.Infof("Attaching weave router container: %s", container.ID)
return callWeaveAttach(container, []string{"attach-router"})
}
if !containerShouldAttach(container) || !container.State.Running {
return nil
}
cidrs, err := proxy.weaveCIDRs(container.HostConfig.NetworkMode, container.Config.Env)
if err != nil {
Log.Infof("Leaving container %s alone because %s", containerID, err)
return nil
}
Log.Infof("Attaching container %s with WEAVE_CIDR \"%s\" to weave network", container.ID, strings.Join(cidrs, " "))
if err := validateCIDRs(cidrs); err != nil {
return err
}
args := []string{"attach"}
args = append(args, cidrs...)
if !proxy.NoRewriteHosts {
args = append(args, "--rewrite-hosts")
if container.HostConfig != nil {
for _, eh := range container.HostConfig.ExtraHosts {
args = append(args, fmt.Sprintf("--add-host=%s", eh))
}
}
}
if proxy.NoMulticastRoute {
args = append(args, "--no-multicast-route")
}
args = append(args, container.ID)
return callWeaveAttach(container, args)
}
func callWeaveAttach(container *docker.Container, args []string) error {
if _, stderr, err := callWeave(args...); err != nil {
Log.Warningf("Attaching container %s to weave network failed: %s", container.ID, string(stderr))
return errors.New(string(stderr))
} else if len(stderr) > 0 {
Log.Warningf("Attaching container %s to weave network: %s", container.ID, string(stderr))
}
return nil
}
func validateCIDRs(cidrs []string) error {
for _, cidr := range cidrs {
if cidr == "net:default" {
continue
}
for _, prefix := range []string{"ip:", "net:", ""} {
if strings.HasPrefix(cidr, prefix) {
if _, err := address.ParseCIDR(strings.TrimPrefix(cidr, prefix)); err == nil {
break
}
return fmt.Errorf("invalid WEAVE_CIDR: %s", cidr)
}
}
}
return nil
}
func (proxy *Proxy) weaveCIDRs(networkMode string, env []string) ([]string, error) {
if networkMode == "host" || strings.HasPrefix(networkMode, "container:") ||
// Anything else, other than blank/none/default/bridge, is some sort of network plugin
(networkMode != "" && networkMode != "none" && networkMode != "default" && networkMode != "bridge") {
return nil, fmt.Errorf("the container has '--net=%s'", networkMode)
}
for _, e := range env {
if strings.HasPrefix(e, "WEAVE_CIDR=") {
if e[11:] == "none" {
return nil, ErrWeaveCIDRNone
}
return strings.Fields(e[11:]), nil
}
}
if proxy.NoDefaultIPAM {
return nil, ErrNoDefaultIPAM
}
return nil, nil
}
func (proxy *Proxy) setWeaveDNS(hostConfig jsonObject, hostname, dnsDomain string) error {
dns, err := hostConfig.StringArray("Dns")
if err != nil {
return err
}
hostConfig["Dns"] = append(dns, proxy.dockerBridgeIP)
dnsSearch, err := hostConfig.StringArray("DnsSearch")
if err != nil {
return err
}
if len(dnsSearch) == 0 {
if hostname == "" {
hostConfig["DnsSearch"] = []string{dnsDomain}
} else {
hostConfig["DnsSearch"] = []string{"."}
}
}
return nil
}
func (proxy *Proxy) getDNSDomain() string {
if proxy.WithoutDNS {
return ""
}
weave := weaveapi.NewClient(os.Getenv("WEAVE_HTTP_ADDR"), Log)
domain, _ := weave.DNSDomain()
return domain
}
func (proxy *Proxy) updateContainerNetworkSettings(container jsonObject) error {
containerID, err := container.String("Id")
if err != nil {
return err
}
state, err := container.Object("State")
if err != nil {
return err
}
pid, err := state.Int("Pid")
if err != nil {
return err
}
if err := proxy.waitForStartByIdent(containerID); err != nil {
return err
}
netDevs, err := weavenet.GetWeaveNetDevs(pid)
if err != nil || len(netDevs) == 0 || len(netDevs[0].CIDRs) == 0 {
return err
}
networkSettings, err := container.Object("NetworkSettings")
if err != nil {
return err
}
networkSettings["MacAddress"] = netDevs[0].MAC.String()
networkSettings["IPAddress"] = netDevs[0].CIDRs[0].IP.String()
networkSettings["IPPrefixLen"], _ = netDevs[0].CIDRs[0].Mask.Size()
return nil
}
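// symlink runs a short-lived helper container that symlinks the extra unix socket paths to the canonical weave socket.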
func (proxy *Proxy) symlink(unixAddrs []string) (err error) {
var container *docker.Container
binds := []string{"/var/run/weave:/var/run/weave"}
froms := []string{}
for _, addr := range unixAddrs {
from := strings.TrimPrefix(addr, "unix://")
if from == weaveSock {
continue
}
dir := filepath.Dir(from)
binds = append(binds, dir+":"+filepath.Join("/host", dir))
froms = append(froms, filepath.Join("/host", from))
proxy.normalisedAddrs = append(proxy.normalisedAddrs, addr)
}
if len(froms) == 0 {
return
}
env := []string{
"PATH=/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
}
if val := os.Getenv("WEAVE_DEBUG"); val != "" {
env = append(env, fmt.Sprintf("%s=%s", "WEAVE_DEBUG", val))
}
container, err = proxy.client.CreateContainer(docker.CreateContainerOptions{
Config: &docker.Config{
Image: proxy.Image,
Entrypoint: []string{"/home/weave/symlink", weaveSock},
Cmd: froms,
Env: env,
},
HostConfig: &docker.HostConfig{Binds: binds},
})
if err != nil {
return
}
defer func() {
err2 := proxy.client.RemoveContainer(docker.RemoveContainerOptions{ID: container.ID})
if err == nil {
err = err2
}
}()
err = proxy.client.StartContainer(container.ID, nil)
if err != nil {
return
}
var buf bytes.Buffer
err = proxy.client.AttachToContainer(docker.AttachToContainerOptions{
Container: container.ID,
ErrorStream: &buf,
Logs: true,
Stderr: true,
})
if err != nil {
return
}
var rc int
rc, err = proxy.client.WaitContainer(container.ID)
if err != nil {
return
}
if rc != 0 {
err = errors.New(buf.String())
}
return
}
func (proxy *Proxy) Stop() {
close(proxy.quit)
proxy.Lock()
defer proxy.Unlock()
for _, j := range proxy.attachJobs {
j.Stop()
}
}
| 1 | 14,516 | Nothing. We only expect container-connected events when going via the plugin, not the proxy. | weaveworks-weave | go |
@@ -11,6 +11,8 @@ import { mount } from './diff/mount';
import { patch } from './diff/patch';
import { createInternal } from './tree';
+let rootId = 0;
+
/**
*
* @param {import('./internal').PreactElement} parentDom The DOM element to | 1 | import {
MODE_HYDRATE,
MODE_MUTATIVE_HYDRATE,
MODE_SVG,
UNDEFINED
} from './constants';
import { commitRoot } from './diff/commit';
import { createElement, Fragment } from './create-element';
import options from './options';
import { mount } from './diff/mount';
import { patch } from './diff/patch';
import { createInternal } from './tree';
/**
*
* @param {import('./internal').PreactElement} parentDom The DOM element to
* @returns {import('./internal').Root}
*/
export function createRoot(parentDom) {
let rootInternal,
commitQueue,
firstChild,
flags = 0;
function render(vnode) {
if (options._root) options._root(vnode, parentDom);
vnode = createElement(Fragment, { _parentDom: parentDom }, [vnode]);
firstChild =
/** @type {import('./internal').PreactElement} */ (parentDom.firstChild);
// List of effects that need to be called after diffing:
commitQueue = [];
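// After the first render, subsequent render() calls patch the existing root instead of mounting a new tree.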
if (rootInternal) {
patch(parentDom, vnode, rootInternal, commitQueue);
} else {
rootInternal = createInternal(vnode);
// Store the VDOM tree root on the DOM element in a (minified) property:
parentDom._children = rootInternal;
// Calling createRoot().render() on an Element with existing children triggers mutative hydrate mode:
if (firstChild) {
flags = flags || MODE_MUTATIVE_HYDRATE;
}
// If the parent of this tree is within an inline SVG, the tree should start off in SVG mode:
if (parentDom.ownerSVGElement !== UNDEFINED) {
flags |= MODE_SVG;
}
rootInternal.flags |= flags;
rootInternal._context = {};
mount(parentDom, vnode, rootInternal, commitQueue, firstChild);
}
// Flush all queued effects
commitRoot(commitQueue, rootInternal);
}
return {
hydrate(vnode) {
flags |= MODE_HYDRATE;
render(vnode);
},
render
};
}
| 1 | 17,561 | We might want to introduce some randomness here in case there are multiple completely separate Preact installations running, i.e. 2 widgets, as those will be two roots with a resetting `_domDepth`. | preactjs-preact | js |
@@ -488,7 +488,7 @@ func TestPushChunkToNextClosest(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if ta2.Get(tags.StateSent) != 2 {
+ if ta2.Get(tags.StateSent) != 1 {
t.Fatalf("tags error")
}
| 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pushsync_test
import (
"bytes"
"context"
"errors"
"io/ioutil"
"sync"
"testing"
"time"
"github.com/ethersphere/bee/pkg/accounting"
accountingmock "github.com/ethersphere/bee/pkg/accounting/mock"
"github.com/ethersphere/bee/pkg/crypto"
cryptomock "github.com/ethersphere/bee/pkg/crypto/mock"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/p2p/protobuf"
"github.com/ethersphere/bee/pkg/p2p/streamtest"
"github.com/ethersphere/bee/pkg/postage"
pricermock "github.com/ethersphere/bee/pkg/pricer/mock"
"github.com/ethersphere/bee/pkg/pushsync"
"github.com/ethersphere/bee/pkg/pushsync/pb"
statestore "github.com/ethersphere/bee/pkg/statestore/mock"
"github.com/ethersphere/bee/pkg/storage"
mocks "github.com/ethersphere/bee/pkg/storage/mock"
testingc "github.com/ethersphere/bee/pkg/storage/testing"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tags"
"github.com/ethersphere/bee/pkg/topology"
"github.com/ethersphere/bee/pkg/topology/mock"
)
const (
fixedPrice = uint64(10)
)
type pricerParameters struct {
price uint64
peerPrice uint64
}
var (
defaultPrices = pricerParameters{price: fixedPrice, peerPrice: fixedPrice}
defaultSigner = cryptomock.New(cryptomock.WithSignFunc(func([]byte) ([]byte, error) {
return nil, nil
}))
)
// TestPushClosest inserts a chunk as uploaded chunk in db. This triggers sending a chunk to the closest node
// and expects a receipt. The messages are intercepted in the outgoing stream to check for correctness.
func TestPushClosest(t *testing.T) {
// chunk data to upload
chunk := testingc.FixtureChunk("7000")
// create a pivot node and a mocked closest node
pivotNode := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000") // base is 0000
closestPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000") // binary 0110 -> po 1
// peer is the node responding to the chunk receipt message
// mock should return ErrWantSelf since there's no one to forward to
psPeer, storerPeer, _, peerAccounting := createPushSyncNode(t, closestPeer, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer storerPeer.Close()
recorder := streamtest.New(streamtest.WithProtocols(psPeer.Protocol()), streamtest.WithBaseAddr(pivotNode))
// pivot node needs the streamer since the chunk is intercepted by
// the chunk worker, then gets sent by opening a new stream
psPivot, storerPivot, _, pivotAccounting := createPushSyncNode(t, pivotNode, defaultPrices, recorder, nil, defaultSigner, mock.WithClosestPeer(closestPeer))
defer storerPivot.Close()
// Trigger the sending of chunk to the closest node
receipt, err := psPivot.PushChunkToClosest(context.Background(), chunk)
if err != nil {
t.Fatal(err)
}
if !chunk.Address().Equal(receipt.Address) {
t.Fatal("invalid receipt")
}
// this intercepts the outgoing delivery message
waitOnRecordAndTest(t, closestPeer, recorder, chunk.Address(), chunk.Data())
// this intercepts the incoming receipt message
waitOnRecordAndTest(t, closestPeer, recorder, chunk.Address(), nil)
balance, err := pivotAccounting.Balance(closestPeer)
if err != nil {
t.Fatal(err)
}
if balance.Int64() != -int64(fixedPrice) {
t.Fatalf("unexpected balance on pivot. want %d got %d", -int64(fixedPrice), balance)
}
balance, err = peerAccounting.Balance(pivotNode)
if err != nil {
t.Fatal(err)
}
if balance.Int64() != int64(fixedPrice) {
t.Fatalf("unexpected balance on peer. want %d got %d", int64(fixedPrice), balance)
}
}
// TestReplicateBeforeReceipt tests that a chunk is pushed and a receipt is received.
// Also the storer node initiates a pushsync to N closest nodes of the chunk as it's sending back the receipt.
// The second storer should only store it and not forward it. The balance of all nodes is tested.
func TestReplicateBeforeReceipt(t *testing.T) {
// chunk data to upload
chunk := testingc.FixtureChunk("7000") // base 0111
// create a pivot node and a mocked closest node
pivotNode := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000") // base is 0000
closestPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000") // binary 0110
secondPeer := swarm.MustParseHexAddress("4000000000000000000000000000000000000000000000000000000000000000") // binary 0100
emptyPeer := swarm.MustParseHexAddress("5000000000000000000000000000000000000000000000000000000000000000") // binary 0101, this peer should not get the chunk
// node that is connected to secondPeer
// its address is closer to the chunk than secondPeer but it will not receive the chunk
psEmpty, storerEmpty, _, _ := createPushSyncNode(t, emptyPeer, defaultPrices, nil, nil, defaultSigner)
defer storerEmpty.Close()
emptyRecorder := streamtest.New(streamtest.WithProtocols(psEmpty.Protocol()), streamtest.WithBaseAddr(secondPeer))
wFunc := func(addr swarm.Address) bool {
return true
}
// node that is connected to closestPeer
// will receive the chunk from closestPeer
psSecond, storerSecond, _, secondAccounting := createPushSyncNode(t, secondPeer, defaultPrices, emptyRecorder, nil, defaultSigner, mock.WithPeers(emptyPeer), mock.WithIsWithinFunc(wFunc))
defer storerSecond.Close()
secondRecorder := streamtest.New(streamtest.WithProtocols(psSecond.Protocol()), streamtest.WithBaseAddr(closestPeer))
psStorer, storerPeer, _, storerAccounting := createPushSyncNode(t, closestPeer, defaultPrices, secondRecorder, nil, defaultSigner, mock.WithPeers(secondPeer), mock.WithClosestPeerErr(topology.ErrWantSelf))
defer storerPeer.Close()
recorder := streamtest.New(streamtest.WithProtocols(psStorer.Protocol()), streamtest.WithBaseAddr(pivotNode))
// pivot node needs the streamer since the chunk is intercepted by
// the chunk worker, then gets sent by opening a new stream
psPivot, storerPivot, _, pivotAccounting := createPushSyncNode(t, pivotNode, defaultPrices, recorder, nil, defaultSigner, mock.WithClosestPeer(closestPeer))
defer storerPivot.Close()
// Trigger the sending of chunk to the closest node
receipt, err := psPivot.PushChunkToClosest(context.Background(), chunk)
if err != nil {
t.Fatal(err)
}
if !chunk.Address().Equal(receipt.Address) {
t.Fatal("invalid receipt")
}
// this intercepts the outgoing delivery message
waitOnRecordAndTest(t, closestPeer, recorder, chunk.Address(), chunk.Data())
// this intercepts the incoming receipt message
waitOnRecordAndTest(t, closestPeer, recorder, chunk.Address(), nil)
// sleep for a bit to allow the second peer to store the replicated chunk
time.Sleep(time.Millisecond * 500)
// this intercepts the outgoing delivery message from storer node to second storer node
waitOnRecordAndTest(t, secondPeer, secondRecorder, chunk.Address(), chunk.Data())
// this intercepts the incoming receipt message
waitOnRecordAndTest(t, secondPeer, secondRecorder, chunk.Address(), nil)
_, err = storerEmpty.Get(context.Background(), storage.ModeGetSync, chunk.Address())
if !errors.Is(err, storage.ErrNotFound) {
t.Fatal(err)
}
balance, err := pivotAccounting.Balance(closestPeer)
if err != nil {
t.Fatal(err)
}
if balance.Int64() != -int64(fixedPrice) {
t.Fatalf("unexpected balance on storer node. want %d got %d", int64(fixedPrice), balance)
}
balance, err = storerAccounting.Balance(pivotNode)
if err != nil {
t.Fatal(err)
}
if balance.Int64() != int64(fixedPrice) {
t.Fatalf("unexpected balance on storer node. want %d got %d", int64(fixedPrice), balance)
}
balance, err = secondAccounting.Balance(closestPeer)
if err != nil {
t.Fatal(err)
}
if balance.Int64() != int64(fixedPrice) {
t.Fatalf("unexpected balance on second storer. want %d got %d", int64(fixedPrice), balance)
}
balance, err = storerAccounting.Balance(secondPeer)
if err != nil {
t.Fatal(err)
}
if balance.Int64() != -int64(fixedPrice) {
t.Fatalf("unexpected balance on storer node. want %d got %d", -int64(fixedPrice), balance)
}
}
func TestFailToReplicateBeforeReceipt(t *testing.T) {
// chunk data to upload
chunk := testingc.FixtureChunk("7000") // base 0111
// create a pivot node and a mocked closest node
pivotNode := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000") // base is 0000
closestPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000") // binary 0110
secondPeer := swarm.MustParseHexAddress("4000000000000000000000000000000000000000000000000000000000000000") // binary 0100
emptyPeer := swarm.MustParseHexAddress("5000000000000000000000000000000000000000000000000000000000000000") // binary 0101, this peer should not get the chunk
// node that is connected to secondPeer
// its address is closer to the chunk than secondPeer but it will not receive the chunk
_, storerEmpty, _, _ := createPushSyncNode(t, emptyPeer, defaultPrices, nil, nil, defaultSigner)
defer storerEmpty.Close()
wFunc := func(addr swarm.Address) bool {
return false
}
// node that is connected to closestPeer
// will receive the chunk from closestPeer
psSecond, storerSecond, _, secondAccounting := createPushSyncNode(t, secondPeer, defaultPrices, nil, nil, defaultSigner, mock.WithPeers(emptyPeer), mock.WithIsWithinFunc(wFunc))
defer storerSecond.Close()
secondRecorder := streamtest.New(streamtest.WithProtocols(psSecond.Protocol()), streamtest.WithBaseAddr(closestPeer))
psStorer, storerPeer, _, storerAccounting := createPushSyncNode(t, closestPeer, defaultPrices, secondRecorder, nil, defaultSigner, mock.WithPeers(secondPeer), mock.WithClosestPeerErr(topology.ErrWantSelf))
defer storerPeer.Close()
recorder := streamtest.New(streamtest.WithProtocols(psStorer.Protocol()), streamtest.WithBaseAddr(pivotNode))
// pivot node needs the streamer since the chunk is intercepted by
// the chunk worker, then gets sent by opening a new stream
psPivot, storerPivot, _, pivotAccounting := createPushSyncNode(t, pivotNode, defaultPrices, recorder, nil, defaultSigner, mock.WithClosestPeer(closestPeer))
defer storerPivot.Close()
// Trigger the sending of chunk to the closest node
receipt, err := psPivot.PushChunkToClosest(context.Background(), chunk)
if err != nil {
t.Fatal(err)
}
if !chunk.Address().Equal(receipt.Address) {
t.Fatal("invalid receipt")
}
// this intercepts the outgoing delivery message
waitOnRecordAndTest(t, closestPeer, recorder, chunk.Address(), chunk.Data())
// this intercepts the incoming receipt message
waitOnRecordAndTest(t, closestPeer, recorder, chunk.Address(), nil)
// sleep for a bit to allow the second peer to store the replicated chunk
time.Sleep(time.Millisecond * 500)
// this intercepts the outgoing delivery message from storer node to second storer node
waitOnRecordAndTest(t, secondPeer, secondRecorder, chunk.Address(), chunk.Data())
// this intercepts the incoming receipt message
waitOnRecordAndTest(t, secondPeer, secondRecorder, chunk.Address(), nil)
_, err = storerEmpty.Get(context.Background(), storage.ModeGetSync, chunk.Address())
if !errors.Is(err, storage.ErrNotFound) {
t.Fatal(err)
}
balance, err := pivotAccounting.Balance(closestPeer)
if err != nil {
t.Fatal(err)
}
if balance.Int64() != -int64(fixedPrice) {
t.Fatalf("unexpected balance on storer node. want %d got %d", int64(fixedPrice), balance)
}
balance, err = storerAccounting.Balance(pivotNode)
if err != nil {
t.Fatal(err)
}
if balance.Int64() != int64(fixedPrice) {
t.Fatalf("unexpected balance on storer node. want %d got %d", int64(fixedPrice), balance)
}
balance, err = secondAccounting.Balance(closestPeer)
if err != nil {
t.Fatal(err)
}
if balance.Int64() != int64(0) {
t.Fatalf("unexpected balance on second storer. want %d got %d", int64(0), balance)
}
balance, err = storerAccounting.Balance(secondPeer)
if err != nil {
t.Fatal(err)
}
if balance.Int64() != -int64(0) {
t.Fatalf("unexpected balance on storer node. want %d got %d", -int64(0), balance)
}
}
// PushChunkToClosest tests the sending of chunk to closest peer from the origination source perspective.
// It also checks whether the tags are incremented properly if they are present
func TestPushChunkToClosest(t *testing.T) {
// chunk data to upload
chunk := testingc.FixtureChunk("7000")
// create a pivot node and a mocked closest node
pivotNode := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000") // base is 0000
closestPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000") // binary 0110 -> po 1
callbackC := make(chan struct{}, 1)
// peer is the node responding to the chunk receipt message
// mock should return ErrWantSelf since there's no one to forward to
psPeer, storerPeer, _, peerAccounting := createPushSyncNode(t, closestPeer, defaultPrices, nil, chanFunc(callbackC), defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer storerPeer.Close()
recorder := streamtest.New(streamtest.WithProtocols(psPeer.Protocol()), streamtest.WithBaseAddr(pivotNode))
// pivot node needs the streamer since the chunk is intercepted by
// the chunk worker, then gets sent by opening a new stream
psPivot, storerPivot, pivotTags, pivotAccounting := createPushSyncNode(t, pivotNode, defaultPrices, recorder, nil, defaultSigner, mock.WithClosestPeer(closestPeer))
defer storerPivot.Close()
ta, err := pivotTags.Create(1)
if err != nil {
t.Fatal(err)
}
chunk = chunk.WithTagID(ta.Uid)
ta1, err := pivotTags.Get(ta.Uid)
if err != nil {
t.Fatal(err)
}
if ta1.Get(tags.StateSent) != 0 || ta1.Get(tags.StateSynced) != 0 {
t.Fatalf("tags initialization error")
}
// Trigger the sending of chunk to the closest node
receipt, err := psPivot.PushChunkToClosest(context.Background(), chunk)
if err != nil {
t.Fatal(err)
}
if !chunk.Address().Equal(receipt.Address) {
t.Fatal("invalid receipt")
}
// this intercepts the outgoing delivery message
waitOnRecordAndTest(t, closestPeer, recorder, chunk.Address(), chunk.Data())
// this intercepts the incoming receipt message
waitOnRecordAndTest(t, closestPeer, recorder, chunk.Address(), nil)
ta2, err := pivotTags.Get(ta.Uid)
if err != nil {
t.Fatal(err)
}
if ta2.Get(tags.StateSent) != 1 {
t.Fatalf("tags error")
}
balance, err := pivotAccounting.Balance(closestPeer)
if err != nil {
t.Fatal(err)
}
if balance.Int64() != -int64(fixedPrice) {
t.Fatalf("unexpected balance on pivot. want %d got %d", -int64(fixedPrice), balance)
}
balance, err = peerAccounting.Balance(pivotNode)
if err != nil {
t.Fatal(err)
}
if balance.Int64() != int64(fixedPrice) {
t.Fatalf("unexpected balance on peer. want %d got %d", int64(fixedPrice), balance)
}
// check if the pss delivery hook is called
select {
case <-callbackC:
case <-time.After(100 * time.Millisecond):
t.Fatalf("delivery hook was not called")
}
}
func TestPushChunkToNextClosest(t *testing.T) {
// chunk data to upload
chunk := testingc.FixtureChunk("7000")
// create a pivot node and a mocked closest node
pivotNode := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000") // base is 0000
peer1 := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000")
peer2 := swarm.MustParseHexAddress("5000000000000000000000000000000000000000000000000000000000000000")
// peer is the node responding to the chunk receipt message
// mock should return ErrWantSelf since there's no one to forward to
psPeer1, storerPeer1, _, peerAccounting1 := createPushSyncNode(t, peer1, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer storerPeer1.Close()
psPeer2, storerPeer2, _, peerAccounting2 := createPushSyncNode(t, peer2, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer storerPeer2.Close()
var fail = true
var lock sync.Mutex
recorder := streamtest.New(
streamtest.WithProtocols(
psPeer1.Protocol(),
psPeer2.Protocol(),
),
streamtest.WithMiddlewares(
func(h p2p.HandlerFunc) p2p.HandlerFunc {
return func(ctx context.Context, peer p2p.Peer, stream p2p.Stream) error {
// this hack is required to simulate the first storer node failing
lock.Lock()
defer lock.Unlock()
if fail {
fail = false
stream.Close()
return errors.New("peer not reachable")
}
if err := h(ctx, peer, stream); err != nil {
return err
}
// close stream after all previous middlewares wrote to it
// so that the receiving peer can get all the post messages
return stream.Close()
}
},
),
streamtest.WithBaseAddr(pivotNode),
)
// pivot node needs the streamer since the chunk is intercepted by
// the chunk worker, then gets sent by opening a new stream
psPivot, storerPivot, pivotTags, pivotAccounting := createPushSyncNode(t, pivotNode, defaultPrices, recorder, nil, defaultSigner, mock.WithPeers(peer1, peer2))
defer storerPivot.Close()
ta, err := pivotTags.Create(1)
if err != nil {
t.Fatal(err)
}
chunk = chunk.WithTagID(ta.Uid)
ta1, err := pivotTags.Get(ta.Uid)
if err != nil {
t.Fatal(err)
}
if ta1.Get(tags.StateSent) != 0 || ta1.Get(tags.StateSynced) != 0 {
t.Fatalf("tags initialization error")
}
// Trigger the sending of chunk to the closest node
receipt, err := psPivot.PushChunkToClosest(context.Background(), chunk)
if err != nil {
t.Fatal(err)
}
if !chunk.Address().Equal(receipt.Address) {
t.Fatal("invalid receipt")
}
// this intercepts the outgoing delivery message
waitOnRecordAndTest(t, peer2, recorder, chunk.Address(), chunk.Data())
// this intercepts the incoming receipt message
waitOnRecordAndTest(t, peer2, recorder, chunk.Address(), nil)
ta2, err := pivotTags.Get(ta.Uid)
if err != nil {
t.Fatal(err)
}
if ta2.Get(tags.StateSent) != 2 {
t.Fatalf("tags error")
}
balance, err := pivotAccounting.Balance(peer2)
if err != nil {
t.Fatal(err)
}
if balance.Int64() != -int64(fixedPrice) {
t.Fatalf("unexpected balance on pivot. want %d got %d", -int64(fixedPrice), balance)
}
balance2, err := peerAccounting2.Balance(pivotNode)
if err != nil {
t.Fatal(err)
}
if balance2.Int64() != int64(fixedPrice) {
t.Fatalf("unexpected balance on peer2. want %d got %d", int64(fixedPrice), balance2)
}
balance1, err := peerAccounting1.Balance(peer1)
if err != nil {
t.Fatal(err)
}
if balance1.Int64() != 0 {
t.Fatalf("unexpected balance on peer1. want %d got %d", 0, balance1)
}
}
func TestPushChunkToClosestFailedAttemptRetry(t *testing.T) {
// chunk data to upload
chunk := testingc.FixtureChunk("7000")
// create a pivot node and a mocked closest node
pivotNode := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000") // base is 0000
peer1 := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000")
peer2 := swarm.MustParseHexAddress("5000000000000000000000000000000000000000000000000000000000000000")
peer3 := swarm.MustParseHexAddress("9000000000000000000000000000000000000000000000000000000000000000")
peer4 := swarm.MustParseHexAddress("4000000000000000000000000000000000000000000000000000000000000000")
// peer is the node responding to the chunk receipt message
// mock should return ErrWantSelf since there's no one to forward to
psPeer1, storerPeer1, _, peerAccounting1 := createPushSyncNode(t, peer1, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer storerPeer1.Close()
psPeer2, storerPeer2, _, peerAccounting2 := createPushSyncNode(t, peer2, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer storerPeer2.Close()
psPeer3, storerPeer3, _, peerAccounting3 := createPushSyncNode(t, peer3, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer storerPeer3.Close()
psPeer4, storerPeer4, _, peerAccounting4 := createPushSyncNode(t, peer4, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer storerPeer4.Close()
recorder := streamtest.New(
streamtest.WithProtocols(
psPeer1.Protocol(),
psPeer2.Protocol(),
psPeer3.Protocol(),
psPeer4.Protocol(),
),
streamtest.WithBaseAddr(pivotNode),
)
pivotAccounting := accountingmock.NewAccounting(
accountingmock.WithReserveFunc(func(ctx context.Context, peer swarm.Address, price uint64) error {
if peer.String() == peer4.String() {
return nil
}
return errors.New("unable to reserve")
}),
)
psPivot, storerPivot, pivotTags := createPushSyncNodeWithAccounting(t, pivotNode, defaultPrices, recorder, nil, defaultSigner, pivotAccounting, mock.WithPeers(peer1, peer2, peer3, peer4))
defer storerPivot.Close()
ta, err := pivotTags.Create(1)
if err != nil {
t.Fatal(err)
}
chunk = chunk.WithTagID(ta.Uid)
ta1, err := pivotTags.Get(ta.Uid)
if err != nil {
t.Fatal(err)
}
if ta1.Get(tags.StateSent) != 0 || ta1.Get(tags.StateSynced) != 0 {
t.Fatalf("tags initialization error")
}
// Trigger the sending of chunk to the closest node
receipt, err := psPivot.PushChunkToClosest(context.Background(), chunk)
if err != nil {
t.Fatal(err)
}
if !chunk.Address().Equal(receipt.Address) {
t.Fatal("invalid receipt")
}
// this intercepts the outgoing delivery message
waitOnRecordAndTest(t, peer4, recorder, chunk.Address(), chunk.Data())
// this intercepts the incoming receipt message
waitOnRecordAndTest(t, peer4, recorder, chunk.Address(), nil)
ta2, err := pivotTags.Get(ta.Uid)
if err != nil {
t.Fatal(err)
}
// out of 4, 3 peers should return an accounting error, so we should have effectively
// sent only 1 msg
if ta2.Get(tags.StateSent) != 1 {
t.Fatalf("tags error")
}
balance, err := pivotAccounting.Balance(peer4)
if err != nil {
t.Fatal(err)
}
if balance.Int64() != -int64(fixedPrice) {
t.Fatalf("unexpected balance on pivot. want %d got %d", -int64(fixedPrice), balance)
}
balance4, err := peerAccounting4.Balance(pivotNode)
if err != nil {
t.Fatal(err)
}
if balance4.Int64() != int64(fixedPrice) {
t.Fatalf("unexpected balance on peer4. want %d got %d", int64(fixedPrice), balance4)
}
for _, p := range []struct {
addr swarm.Address
acct accounting.Interface
}{
{peer1, peerAccounting1},
{peer2, peerAccounting2},
{peer3, peerAccounting3},
} {
bal, err := p.acct.Balance(p.addr)
if err != nil {
t.Fatal(err)
}
if bal.Int64() != 0 {
t.Fatalf("unexpected balance on %s. want %d got %d", p.addr, 0, bal)
}
}
}
// TestHandler expect a chunk from a node on a stream. It then stores the chunk in the local store and
// sends back a receipt. This is tested by intercepting the incoming stream for proper messages.
// It also sends the chunk to the closest peer and receives a receipt.
//
// Chunk moves from TriggerPeer -> PivotPeer -> ClosestPeer
//
func TestHandler(t *testing.T) {
// chunk data to upload
chunk := testingc.FixtureChunk("7000")
// create a pivot node and a mocked closest node
triggerPeer := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000")
pivotPeer := swarm.MustParseHexAddress("5000000000000000000000000000000000000000000000000000000000000000")
closestPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000")
// Create the closest peer
psClosestPeer, closestStorerPeerDB, _, closestAccounting := createPushSyncNode(t, closestPeer, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer closestStorerPeerDB.Close()
closestRecorder := streamtest.New(streamtest.WithProtocols(psClosestPeer.Protocol()), streamtest.WithBaseAddr(pivotPeer))
// creating the pivot peer
psPivot, storerPivotDB, _, pivotAccounting := createPushSyncNode(t, pivotPeer, defaultPrices, closestRecorder, nil, defaultSigner, mock.WithClosestPeer(closestPeer))
defer storerPivotDB.Close()
pivotRecorder := streamtest.New(streamtest.WithProtocols(psPivot.Protocol()), streamtest.WithBaseAddr(triggerPeer))
// Creating the trigger peer
psTriggerPeer, triggerStorerDB, _, triggerAccounting := createPushSyncNode(t, triggerPeer, defaultPrices, pivotRecorder, nil, defaultSigner, mock.WithClosestPeer(pivotPeer))
defer triggerStorerDB.Close()
receipt, err := psTriggerPeer.PushChunkToClosest(context.Background(), chunk)
if err != nil {
t.Fatal(err)
}
if !chunk.Address().Equal(receipt.Address) {
t.Fatal("invalid receipt")
}
// In pivot peer, intercept the incoming delivery chunk from the trigger peer and check for correctness
waitOnRecordAndTest(t, pivotPeer, pivotRecorder, chunk.Address(), chunk.Data())
// Pivot peer will forward the chunk to its closest peer. Intercept the incoming stream from pivot node and check
// for the correctness of the chunk
waitOnRecordAndTest(t, closestPeer, closestRecorder, chunk.Address(), chunk.Data())
// Similarly intercept the same incoming stream to see if the closest peer is sending a proper receipt
waitOnRecordAndTest(t, closestPeer, closestRecorder, chunk.Address(), nil)
// In the received stream, check if a receipt is sent from pivot peer and check for its correctness.
waitOnRecordAndTest(t, pivotPeer, pivotRecorder, chunk.Address(), nil)
balance, err := triggerAccounting.Balance(pivotPeer)
if err != nil {
t.Fatal(err)
}
if balance.Int64() != -int64(fixedPrice) {
t.Fatalf("unexpected balance on trigger. want %d got %d", -int64(fixedPrice), balance)
}
balance, err = pivotAccounting.Balance(triggerPeer)
if err != nil {
t.Fatal(err)
}
if balance.Int64() != int64(fixedPrice) {
t.Fatalf("unexpected balance on pivot. want %d got %d", int64(fixedPrice), balance)
}
balance, err = pivotAccounting.Balance(closestPeer)
if err != nil {
t.Fatal(err)
}
if balance.Int64() != -int64(fixedPrice) {
t.Fatalf("unexpected balance on pivot. want %d got %d", -int64(fixedPrice), balance)
}
balance, err = closestAccounting.Balance(pivotPeer)
if err != nil {
t.Fatal(err)
}
if balance.Int64() != int64(fixedPrice) {
t.Fatalf("unexpected balance on closest. want %d got %d", int64(fixedPrice), balance)
}
}
func TestSignsReceipt(t *testing.T) {
// chunk data to upload
chunk := testingc.FixtureChunk("7000")
signer := cryptomock.New(cryptomock.WithSignFunc(func([]byte) ([]byte, error) {
return []byte{1}, nil
}))
// create a pivot node and a mocked closest node
pivotPeer := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000")
closestPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000")
// Create the closest peer
psClosestPeer, closestStorerPeerDB, _, _ := createPushSyncNode(t, closestPeer, defaultPrices, nil, nil, signer, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer closestStorerPeerDB.Close()
closestRecorder := streamtest.New(streamtest.WithProtocols(psClosestPeer.Protocol()), streamtest.WithBaseAddr(pivotPeer))
// creating the pivot peer which will act as a forwarder node
psPivot, storerPivotDB, _, _ := createPushSyncNode(t, pivotPeer, defaultPrices, closestRecorder, nil, signer, mock.WithClosestPeer(closestPeer))
defer storerPivotDB.Close()
receipt, err := psPivot.PushChunkToClosest(context.Background(), chunk)
if err != nil {
t.Fatal(err)
}
if !chunk.Address().Equal(receipt.Address) {
t.Fatal("invalid receipt")
}
if !bytes.Equal(chunk.Address().Bytes(), receipt.Address.Bytes()) {
t.Fatal("chunk address do not match")
}
if !bytes.Equal([]byte{1}, receipt.Signature) {
t.Fatal("receipt signature is not present")
}
}
func createPushSyncNode(t *testing.T, addr swarm.Address, prices pricerParameters, recorder *streamtest.Recorder, unwrap func(swarm.Chunk), signer crypto.Signer, mockOpts ...mock.Option) (*pushsync.PushSync, *mocks.MockStorer, *tags.Tags, accounting.Interface) {
t.Helper()
mockAccounting := accountingmock.NewAccounting()
ps, mstorer, ts := createPushSyncNodeWithAccounting(t, addr, prices, recorder, unwrap, signer, mockAccounting, mockOpts...)
return ps, mstorer, ts, mockAccounting
}
func createPushSyncNodeWithAccounting(t *testing.T, addr swarm.Address, prices pricerParameters, recorder *streamtest.Recorder, unwrap func(swarm.Chunk), signer crypto.Signer, acct accounting.Interface, mockOpts ...mock.Option) (*pushsync.PushSync, *mocks.MockStorer, *tags.Tags) {
t.Helper()
logger := logging.New(ioutil.Discard, 0)
storer := mocks.NewStorer()
mockTopology := mock.NewTopologyDriver(mockOpts...)
mockStatestore := statestore.NewStateStore()
mtag := tags.NewTags(mockStatestore, logger)
mockPricer := pricermock.NewMockService(prices.price, prices.peerPrice)
recorderDisconnecter := streamtest.NewRecorderDisconnecter(recorder)
if unwrap == nil {
unwrap = func(swarm.Chunk) {}
}
validStamp := func(ch swarm.Chunk, stamp []byte) (swarm.Chunk, error) {
return ch.WithStamp(postage.NewStamp(nil, nil, nil, nil)), nil
}
return pushsync.New(addr, recorderDisconnecter, storer, mockTopology, mtag, true, unwrap, validStamp, logger, acct, mockPricer, signer, nil, 0), storer, mtag
}
func waitOnRecordAndTest(t *testing.T, peer swarm.Address, recorder *streamtest.Recorder, add swarm.Address, data []byte) {
t.Helper()
records := recorder.WaitRecords(t, peer, pushsync.ProtocolName, pushsync.ProtocolVersion, pushsync.StreamName, 1, 5)
if data != nil {
messages, err := protobuf.ReadMessages(
bytes.NewReader(records[0].In()),
func() protobuf.Message { return new(pb.Delivery) },
)
if err != nil {
t.Fatal(err)
}
if messages == nil {
t.Fatal("nil rcvd. for message")
}
if len(messages) > 1 {
t.Fatal("too many messages")
}
delivery := messages[0].(*pb.Delivery)
if !bytes.Equal(delivery.Address, add.Bytes()) {
t.Fatalf("chunk address mismatch")
}
if !bytes.Equal(delivery.Data, data) {
t.Fatalf("chunk data mismatch")
}
} else {
messages, err := protobuf.ReadMessages(
bytes.NewReader(records[0].In()),
func() protobuf.Message { return new(pb.Receipt) },
)
if err != nil {
t.Fatal(err)
}
if messages == nil {
t.Fatal("nil rcvd. for message")
}
if len(messages) > 1 {
t.Fatal("too many messages")
}
receipt := messages[0].(*pb.Receipt)
receiptAddress := swarm.NewAddress(receipt.Address)
if !receiptAddress.Equal(add) {
t.Fatalf("receipt address mismatch")
}
}
}
func TestFailureRequestCache(t *testing.T) {
cache := pushsync.FailedRequestCache()
peer := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000")
chunk := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000")
t.Run("not useful after threshold", func(t *testing.T) {
if !cache.Useful(peer, chunk) {
t.Fatal("incorrect initial cache state")
}
cache.RecordFailure(peer, chunk)
if !cache.Useful(peer, chunk) {
t.Fatal("incorrect cache state after 1st failure")
}
cache.RecordFailure(peer, chunk)
if !cache.Useful(peer, chunk) {
t.Fatal("incorrect cache state after 2nd failure")
}
cache.RecordFailure(peer, chunk)
if cache.Useful(peer, chunk) {
t.Fatal("peer should no longer be useful")
}
})
t.Run("reset after success", func(t *testing.T) {
cache.RecordSuccess(peer, chunk)
if !cache.Useful(peer, chunk) {
t.Fatal("incorrect cache state after success")
}
cache.RecordFailure(peer, chunk)
if !cache.Useful(peer, chunk) {
t.Fatal("incorrect cache state after first failure")
}
cache.RecordSuccess(peer, chunk)
// success should remove the peer from failed cache. We should have swallowed
// the previous failed request and the peer should still be useful after
// more failures
cache.RecordFailure(peer, chunk)
cache.RecordFailure(peer, chunk)
if !cache.Useful(peer, chunk) {
t.Fatal("peer should still be useful after intermittent success")
}
})
}
func TestPushChunkToClosestSkipFailed(t *testing.T) {
// chunk data to upload
chunk := testingc.FixtureChunk("7000")
// create a pivot node and a mocked closest node
pivotNode := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000") // base is 0000
peer1 := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000")
peer2 := swarm.MustParseHexAddress("5000000000000000000000000000000000000000000000000000000000000000")
peer3 := swarm.MustParseHexAddress("4000000000000000000000000000000000000000000000000000000000000000")
peer4 := swarm.MustParseHexAddress("9000000000000000000000000000000000000000000000000000000000000000")
// peer is the node responding to the chunk receipt message
// mock should return ErrWantSelf since there's no one to forward to
psPeer1, storerPeer1, _, peerAccounting1 := createPushSyncNode(t, peer1, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer storerPeer1.Close()
psPeer2, storerPeer2, _, peerAccounting2 := createPushSyncNode(t, peer2, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer storerPeer2.Close()
psPeer3, storerPeer3, _, peerAccounting3 := createPushSyncNode(t, peer3, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer storerPeer3.Close()
psPeer4, storerPeer4, _, peerAccounting4 := createPushSyncNode(
t, peer4, defaultPrices, nil, nil, defaultSigner,
mock.WithClosestPeerErr(topology.ErrWantSelf),
mock.WithIsWithinFunc(func(_ swarm.Address) bool { return true }),
)
defer storerPeer4.Close()
var (
fail = true
lock sync.Mutex
)
recorder := streamtest.New(
streamtest.WithPeerProtocols(
map[string]p2p.ProtocolSpec{
peer1.String(): psPeer1.Protocol(),
peer2.String(): psPeer2.Protocol(),
peer3.String(): psPeer3.Protocol(),
peer4.String(): psPeer4.Protocol(),
},
),
streamtest.WithStreamError(
func(addr swarm.Address, _, _, _ string) error {
lock.Lock()
defer lock.Unlock()
if fail && addr.String() != peer4.String() {
return errors.New("peer not reachable")
}
return nil
},
),
streamtest.WithBaseAddr(pivotNode),
)
psPivot, storerPivot, pivotTags, pivotAccounting := createPushSyncNode(t, pivotNode, defaultPrices, recorder, nil, defaultSigner, mock.WithPeers(peer1, peer2, peer3, peer4))
defer storerPivot.Close()
ta, err := pivotTags.Create(1)
if err != nil {
t.Fatal(err)
}
chunk = chunk.WithTagID(ta.Uid)
ta1, err := pivotTags.Get(ta.Uid)
if err != nil {
t.Fatal(err)
}
if ta1.Get(tags.StateSent) != 0 || ta1.Get(tags.StateSynced) != 0 {
t.Fatalf("tags initialization error")
}
for i := 0; i < 3; i++ {
_, err := psPivot.PushChunkToClosest(context.Background(), chunk)
if err == nil {
t.Fatal("expected error while pushing")
}
}
// Trigger the sending of chunk to the closest node
receipt, err := psPivot.PushChunkToClosest(context.Background(), chunk)
if err != nil {
t.Fatal(err)
}
if !chunk.Address().Equal(receipt.Address) {
t.Fatal("invalid receipt")
}
// this intercepts the outgoing delivery message
waitOnRecordAndTest(t, peer4, recorder, chunk.Address(), chunk.Data())
// this intercepts the incoming receipt message
waitOnRecordAndTest(t, peer4, recorder, chunk.Address(), nil)
ta2, err := pivotTags.Get(ta.Uid)
if err != nil {
t.Fatal(err)
}
	// out of 4, 3 peers should return an accounting error. So we should have effectively
	// sent only 1 msg
if ta2.Get(tags.StateSent) != 1 {
t.Fatalf("tags error")
}
balance, err := pivotAccounting.Balance(peer4)
if err != nil {
t.Fatal(err)
}
if balance.Int64() != -int64(fixedPrice) {
t.Fatalf("unexpected balance on pivot. want %d got %d", -int64(fixedPrice), balance)
}
balance4, err := peerAccounting4.Balance(pivotNode)
if err != nil {
t.Fatal(err)
}
if balance4.Int64() != int64(fixedPrice) {
t.Fatalf("unexpected balance on peer4. want %d got %d", int64(fixedPrice), balance4)
}
for _, p := range []struct {
addr swarm.Address
acct accounting.Interface
}{
{peer1, peerAccounting1},
{peer2, peerAccounting2},
{peer3, peerAccounting3},
} {
bal, err := p.acct.Balance(p.addr)
if err != nil {
t.Fatal(err)
}
if bal.Int64() != 0 {
t.Fatalf("unexpected balance on %s. want %d got %d", p.addr, 0, bal)
}
}
}
func chanFunc(c chan<- struct{}) func(swarm.Chunk) {
return func(_ swarm.Chunk) {
c <- struct{}{}
}
}
| 1 | 14,935 | this seems a bit wrong no? why is this change needed? | ethersphere-bee | go |
@@ -145,7 +145,7 @@ Transaction.prototype._validateFees = function() {
return 'Fee is more than ' + Transaction.FEE_SECURITY_MARGIN + ' times the suggested amount';
}
if (this._getUnspentValue() < this._estimateFee() / Transaction.FEE_SECURITY_MARGIN) {
- return 'Fee is less than ' + Transaction.FEE_SECURITY_MARGIN + ' times the suggested amount';
+ return 'Fee is ' + Transaction.FEE_SECURITY_MARGIN + ' times less than the suggested amount';
}
};
| 1 | 'use strict';
var _ = require('lodash');
var $ = require('../util/preconditions');
var buffer = require('buffer');
var errors = require('../errors');
var BufferUtil = require('../util/buffer');
var JSUtil = require('../util/js');
var BufferReader = require('../encoding/bufferreader');
var BufferWriter = require('../encoding/bufferwriter');
var Hash = require('../crypto/hash');
var Signature = require('../crypto/signature');
var Sighash = require('./sighash');
var Address = require('../address');
var UnspentOutput = require('./unspentoutput');
var Input = require('./input');
var PublicKeyHashInput = Input.PublicKeyHash;
var MultiSigScriptHashInput = Input.MultiSigScriptHash;
var Output = require('./output');
var Script = require('../script');
var PrivateKey = require('../privatekey');
var Block = require('../block');
var BN = require('../crypto/bn');
var CURRENT_VERSION = 1;
var DEFAULT_NLOCKTIME = 0;
var DEFAULT_SEQNUMBER = 0xFFFFFFFF;
/**
* Represents a transaction, a set of inputs and outputs to change ownership of tokens
*
* @param {*} serialized
* @constructor
*/
function Transaction(serialized) {
if (!(this instanceof Transaction)) {
return new Transaction(serialized);
}
this.inputs = [];
this.outputs = [];
this._inputAmount = 0;
this._outputAmount = 0;
if (serialized) {
if (serialized instanceof Transaction) {
return Transaction.shallowCopy(serialized);
} else if (JSUtil.isHexa(serialized)) {
this.fromString(serialized);
} else if (JSUtil.isValidJSON(serialized)) {
this.fromJSON(serialized);
} else if (BufferUtil.isBuffer(serialized)) {
this.fromBuffer(serialized);
} else if (_.isObject(serialized)) {
this.fromObject(serialized);
} else {
throw new errors.InvalidArgument('Must provide an object or string to deserialize a transaction');
}
} else {
this._newTransaction();
}
}
// max amount of satoshis in circulation
Transaction.MAX_MONEY = 21000000 * 1e8;
// nlocktime limit to be considered block height rather than a timestamp
Transaction.NLOCKTIME_BLOCKHEIGHT_LIMIT = 5e8;
// Max value for an unsigned 32 bit value
Transaction.NLOCKTIME_MAX_VALUE = 4294967295;
/* Constructors and Serialization */
/**
* Create a 'shallow' copy of the transaction, by serializing and deserializing
* it dropping any additional information that inputs and outputs may have hold
*
* @param {Transaction} transaction
* @return {Transaction}
*/
Transaction.shallowCopy = function(transaction) {
var copy = new Transaction(transaction.toBuffer());
return copy;
};
var hashProperty = {
configurable: false,
writeable: false,
enumerable: true,
get: function() {
return new BufferReader(this._getHash()).readReverse().toString('hex');
}
};
Object.defineProperty(Transaction.prototype, 'hash', hashProperty);
Object.defineProperty(Transaction.prototype, 'id', hashProperty);
/**
* Retrieve the little endian hash of the transaction (used for serialization)
* @return {Buffer}
*/
Transaction.prototype._getHash = function() {
return Hash.sha256sha256(this.toBuffer());
};
/**
* Retrieve a hexa string that can be used with bitcoind's CLI interface
* (decoderawtransaction, sendrawtransaction)
*
* @param {boolean=} unsafe if true, skip testing for fees that are too high
* @return {string}
*/
Transaction.prototype.serialize = function(unsafe) {
if (unsafe) {
return this.uncheckedSerialize();
} else {
return this.checkedSerialize();
}
};
Transaction.prototype.uncheckedSerialize = Transaction.prototype.toString = function() {
return this.toBuffer().toString('hex');
};
Transaction.prototype.checkedSerialize = function() {
var feeError = this._validateFees();
var missingChange = this._missingChange();
if (feeError && missingChange) {
throw new errors.Transaction.ChangeAddressMissing();
}
if (feeError && !missingChange) {
throw new errors.Transaction.FeeError(feeError);
}
if (this._hasDustOutputs()) {
throw new errors.Transaction.DustOutputs();
}
return this.uncheckedSerialize();
};
Transaction.FEE_SECURITY_MARGIN = 15;
Transaction.prototype._validateFees = function() {
if (this._getUnspentValue() > Transaction.FEE_SECURITY_MARGIN * this._estimateFee()) {
return 'Fee is more than ' + Transaction.FEE_SECURITY_MARGIN + ' times the suggested amount';
}
if (this._getUnspentValue() < this._estimateFee() / Transaction.FEE_SECURITY_MARGIN) {
return 'Fee is less than ' + Transaction.FEE_SECURITY_MARGIN + ' times the suggested amount';
}
};
Transaction.prototype._missingChange = function() {
return !this._changeScript;
};
Transaction.DUST_AMOUNT = 5460;
Transaction.prototype._hasDustOutputs = function() {
var index, output;
for (index in this.outputs) {
output = this.outputs[index];
if (output.satoshis < Transaction.DUST_AMOUNT && !output.script.isDataOut()) {
return true;
}
}
return false;
};
Transaction.prototype.inspect = function() {
return '<Transaction: ' + this.uncheckedSerialize() + '>';
};
Transaction.prototype.toBuffer = function() {
var writer = new BufferWriter();
return this.toBufferWriter(writer).toBuffer();
};
Transaction.prototype.toBufferWriter = function(writer) {
writer.writeUInt32LE(this.version);
writer.writeVarintNum(this.inputs.length);
_.each(this.inputs, function(input) {
input.toBufferWriter(writer);
});
writer.writeVarintNum(this.outputs.length);
_.each(this.outputs, function(output) {
output.toBufferWriter(writer);
});
writer.writeUInt32LE(this.nLockTime);
return writer;
};
Transaction.prototype.fromBuffer = function(buffer) {
var reader = new BufferReader(buffer);
return this.fromBufferReader(reader);
};
Transaction.prototype.fromBufferReader = function(reader) {
$.checkArgument(!reader.finished(), 'No transaction data received');
var i, sizeTxIns, sizeTxOuts;
this.version = reader.readUInt32LE();
sizeTxIns = reader.readVarintNum();
for (i = 0; i < sizeTxIns; i++) {
var input = Input.fromBufferReader(reader);
this.inputs.push(input);
}
sizeTxOuts = reader.readVarintNum();
for (i = 0; i < sizeTxOuts; i++) {
this.outputs.push(Output.fromBufferReader(reader));
}
this.nLockTime = reader.readUInt32LE();
return this;
};
Transaction.prototype.fromJSON = function(json) {
if (JSUtil.isValidJSON(json)) {
json = JSON.parse(json);
}
return this.fromObject(json);
};
Transaction.prototype.toObject = function toObject() {
var inputs = [];
this.inputs.forEach(function(input) {
inputs.push(input.toObject());
});
var outputs = [];
this.outputs.forEach(function(output) {
outputs.push(output.toObject());
});
var obj = {
version: this.version,
inputs: inputs,
outputs: outputs,
nLockTime: this.nLockTime
};
if (this._changeScript) {
obj.changeScript = this._changeScript.toString();
}
if (!_.isUndefined(this._changeIndex)) {
obj.changeIndex = this._changeIndex;
}
if (!_.isUndefined(this._fee)) {
obj.fee = this._fee;
}
return obj;
};
Transaction.prototype.fromObject = function(transaction) {
var self = this;
_.each(transaction.inputs, function(input) {
if (!input.output || !input.output.script) {
self.uncheckedAddInput(new Input(input));
return;
}
input.output.script = new Script(input.output.script);
var txin;
if (input.output.script.isPublicKeyHashOut()) {
txin = new Input.PublicKeyHash(input);
} else if (input.output.script.isScriptHashOut() && input.publicKeys && input.threshold) {
txin = new Input.MultiSigScriptHash(
input, input.publicKeys, input.threshold, input.signatures
);
} else {
throw new errors.Transaction.Input.UnsupportedScript(input.output.script);
}
self.addInput(txin);
});
_.each(transaction.outputs, function(output) {
self.addOutput(new Output(output));
});
if (transaction.changeIndex) {
this._changeIndex = transaction.changeIndex;
}
if (transaction.changeScript) {
this._changeScript = new Script(transaction.changeScript);
}
if (transaction.fee) {
this.fee(transaction.fee);
}
this.nLockTime = transaction.nLockTime;
this.version = transaction.version;
this._checkConsistency();
return this;
};
Transaction.prototype._checkConsistency = function() {
if (!_.isUndefined(this._changeIndex)) {
$.checkState(this._changeScript);
$.checkState(this.outputs[this._changeIndex]);
$.checkState(this.outputs[this._changeIndex].script.toString() ===
this._changeScript.toString());
}
// TODO: add other checks
};
/**
* Sets nLockTime so that transaction is not valid until the desired date(a
* timestamp in seconds since UNIX epoch is also accepted)
*
* @param {Date | Number} time
* @return {Transaction} this
*/
Transaction.prototype.lockUntilDate = function(time) {
$.checkArgument(time);
if (_.isNumber(time) && time < Transaction.NLOCKTIME_BLOCKHEIGHT_LIMIT) {
throw new errors.Transaction.LockTimeTooEarly();
}
if (_.isDate(time)) {
time = time.getTime() / 1000;
}
this.nLockTime = time;
return this;
};
/**
* Sets nLockTime so that transaction is not valid until the desired block
* height.
*
* @param {Number} height
* @return {Transaction} this
*/
Transaction.prototype.lockUntilBlockHeight = function(height) {
$.checkArgument(_.isNumber(height));
if (height >= Transaction.NLOCKTIME_BLOCKHEIGHT_LIMIT) {
throw new errors.Transaction.BlockHeightTooHigh();
}
if (height < 0) {
throw new errors.Transaction.NLockTimeOutOfRange();
}
this.nLockTime = height;
return this;
};
/**
* Returns a semantic version of the transaction's nLockTime.
* @return {Number|Date}
* If nLockTime is 0, it returns null,
* if it is < 500000000, it returns a block height (number)
* else it returns a Date object.
*/
Transaction.prototype.getLockTime = function() {
if (!this.nLockTime) {
return null;
}
if (this.nLockTime < Transaction.NLOCKTIME_BLOCKHEIGHT_LIMIT) {
return this.nLockTime;
}
return new Date(1000 * this.nLockTime);
};
Transaction.prototype.toJSON = function toJSON() {
return JSON.stringify(this.toObject());
};
Transaction.prototype.fromString = function(string) {
this.fromBuffer(new buffer.Buffer(string, 'hex'));
};
Transaction.prototype._newTransaction = function() {
this.version = CURRENT_VERSION;
this.nLockTime = DEFAULT_NLOCKTIME;
};
/* Transaction creation interface */
/**
* Add an input to this transaction. This is a high level interface
* to add an input, for more control, use @{link Transaction#addInput}.
*
* Can receive, as output information, the output of bitcoind's `listunspent` command,
* and a slightly fancier format recognized by bitcore:
*
* ```
* {
* address: 'mszYqVnqKoQx4jcTdJXxwKAissE3Jbrrc1',
* txId: 'a477af6b2667c29670467e4e0728b685ee07b240235771862318e29ddbe58458',
* outputIndex: 0,
* script: Script.empty(),
* satoshis: 1020000
* }
* ```
* Where `address` can be either a string or a bitcore Address object. The
* same is true for `script`, which can be a string or a bitcore Script.
*
* Beware that this resets all the signatures for inputs (in further versions,
* SIGHASH_SINGLE or SIGHASH_NONE signatures will not be reset).
*
* @example
* ```javascript
* var transaction = new Transaction();
*
* // From a pay to public key hash output from bitcoind's listunspent
* transaction.from({'txid': '0000...', vout: 0, amount: 0.1, scriptPubKey: 'OP_DUP ...'});
*
* // From a pay to public key hash output
* transaction.from({'txId': '0000...', outputIndex: 0, satoshis: 1000, script: 'OP_DUP ...'});
*
* // From a multisig P2SH output
* transaction.from({'txId': '0000...', inputIndex: 0, satoshis: 1000, script: '... OP_HASH'},
* ['03000...', '02000...'], 2);
* ```
*
* @param {Object} utxo
* @param {Array=} pubkeys
* @param {number=} threshold
*/
Transaction.prototype.from = function(utxo, pubkeys, threshold) {
if (_.isArray(utxo)) {
var self = this;
_.each(utxo, function(utxo) {
self.from(utxo, pubkeys, threshold);
});
return this;
}
var exists = _.any(this.inputs, function(input) {
// TODO: Maybe prevTxId should be a string? Or defined as read only property?
return input.prevTxId.toString('hex') === utxo.txId && input.outputIndex === utxo.outputIndex;
});
if (exists) {
return;
}
if (pubkeys && threshold) {
this._fromMultisigUtxo(utxo, pubkeys, threshold);
} else {
this._fromNonP2SH(utxo);
}
return this;
};
Transaction.prototype._fromNonP2SH = function(utxo) {
var clazz;
utxo = new UnspentOutput(utxo);
if (utxo.script.isPublicKeyHashOut()) {
clazz = PublicKeyHashInput;
} else {
clazz = Input;
}
this.addInput(new clazz({
output: new Output({
script: utxo.script,
satoshis: utxo.satoshis
}),
prevTxId: utxo.txId,
outputIndex: utxo.outputIndex,
sequenceNumber: DEFAULT_SEQNUMBER,
script: Script.empty()
}));
};
Transaction.prototype._fromMultisigUtxo = function(utxo, pubkeys, threshold) {
$.checkArgument(threshold <= pubkeys.length,
    'Number of required signatures must be less than or equal to the number of public keys');
utxo = new UnspentOutput(utxo);
this.addInput(new MultiSigScriptHashInput({
output: new Output({
script: utxo.script,
satoshis: utxo.satoshis
}),
prevTxId: utxo.txId,
outputIndex: utxo.outputIndex,
sequenceNumber: DEFAULT_SEQNUMBER,
script: Script.empty()
}, pubkeys, threshold));
};
/**
* Add an input to this transaction. The input must be an instance of the `Input` class.
* It should have information about the Output that it's spending, but if it's not already
* set, two additional parameters, `outputScript` and `satoshis` can be provided.
*
* @param {Input} input
* @param {String|Script} outputScript
* @param {number} satoshis
* @return Transaction this, for chaining
*/
Transaction.prototype.addInput = function(input, outputScript, satoshis) {
$.checkArgumentType(input, Input, 'input');
if (!input.output && (_.isUndefined(outputScript) || _.isUndefined(satoshis))) {
throw new errors.Transaction.NeedMoreInfo('Need information about the UTXO script and satoshis');
}
if (!input.output && outputScript && !_.isUndefined(satoshis)) {
outputScript = outputScript instanceof Script ? outputScript : new Script(outputScript);
$.checkArgumentType(satoshis, 'number', 'satoshis');
input.output = new Output({
script: outputScript,
satoshis: satoshis
});
}
return this.uncheckedAddInput(input);
};
/**
* Add an input to this transaction, without checking that the input has information about
* the output that it's spending.
*
* @param {Input} input
* @return Transaction this, for chaining
*/
Transaction.prototype.uncheckedAddInput = function(input) {
$.checkArgumentType(input, Input, 'input');
this.inputs.push(input);
if (input.output) {
this._inputAmount += input.output.satoshis;
}
this._updateChangeOutput();
return this;
};
/**
* Returns true if the transaction has enough info on all inputs to be correctly validated
*
* @return {boolean}
*/
Transaction.prototype.hasAllUtxoInfo = function() {
return _.all(this.inputs.map(function(input) {
return !!input.output;
}));
};
/**
* Manually set the fee for this transaction. Beware that this resets all the signatures
* for inputs (in further versions, SIGHASH_SINGLE or SIGHASH_NONE signatures will not
* be reset).
*
* @param {number} amount satoshis to be sent
* @return {Transaction} this, for chaining
*/
Transaction.prototype.fee = function(amount) {
this._fee = amount;
this._updateChangeOutput();
return this;
};
/* Output management */
/**
* Set the change address for this transaction
*
* Beware that this resets all the signatures for inputs (in further versions,
* SIGHASH_SINGLE or SIGHASH_NONE signatures will not be reset).
*
* @param {address} An address for change to be sent to.
* @return {Transaction} this, for chaining
*/
Transaction.prototype.change = function(address) {
this._changeScript = Script.fromAddress(address);
this._updateChangeOutput();
return this;
};
/**
* @return {Output} change output, if it exists
*/
Transaction.prototype.getChangeOutput = function() {
if (!_.isUndefined(this._changeIndex)) {
return this.outputs[this._changeIndex];
}
return null;
};
/**
* Add an output to the transaction.
*
* Beware that this resets all the signatures for inputs (in further versions,
* SIGHASH_SINGLE or SIGHASH_NONE signatures will not be reset).
*
* @param {string|Address} address
* @param {number} amount in satoshis
* @return {Transaction} this, for chaining
*/
Transaction.prototype.to = function(address, amount) {
this.addOutput(new Output({
script: Script(new Address(address)),
satoshis: amount
}));
return this;
};
/**
* Add an OP_RETURN output to the transaction.
*
* Beware that this resets all the signatures for inputs (in further versions,
* SIGHASH_SINGLE or SIGHASH_NONE signatures will not be reset).
*
* @param {Buffer|string} value the data to be stored in the OP_RETURN output.
* In case of a string, the UTF-8 representation will be stored
* @return {Transaction} this, for chaining
*/
Transaction.prototype.addData = function(value) {
this.addOutput(new Output({
script: Script.buildDataOut(value),
satoshis: 0
}));
return this;
};
Transaction.prototype.addOutput = function(output) {
$.checkArgumentType(output, Output, 'output');
this._addOutput(output);
this._updateChangeOutput();
};
Transaction.prototype._addOutput = function(output) {
this.outputs.push(output);
this._outputAmount += output.satoshis;
};
Transaction.prototype._updateChangeOutput = function() {
if (!this._changeScript) {
return;
}
this._clearSignatures();
if (!_.isUndefined(this._changeIndex)) {
this._removeOutput(this._changeIndex);
}
var available = this._getUnspentValue();
var fee = this.getFee();
var changeAmount = available - fee;
if (changeAmount > 0) {
this._changeIndex = this.outputs.length;
this._addOutput(new Output({
script: this._changeScript,
satoshis: changeAmount
}));
} else {
this._changeIndex = undefined;
}
};
/**
* Calculates the fees for the transaction.
*
* If there is no change output set, the fee will be the
* output amount minus the input amount.
* If there's a fixed fee set, return that
* If there's no fee set, estimate it based on size
* @return {Number} miner fee for this transaction in satoshis
*/
Transaction.prototype.getFee = function() {
// if no change output is set, fees should equal all the unspent amount
if (!this._changeScript) {
return this._getUnspentValue();
}
return _.isUndefined(this._fee) ? this._estimateFee() : this._fee;
};
/**
* Estimates fee from serialized transaction size in bytes.
*/
Transaction.prototype._estimateFee = function() {
var estimatedSize = this._estimateSize();
var available = this._getUnspentValue();
return Transaction._estimateFee(estimatedSize, available);
};
Transaction.prototype._getUnspentValue = function() {
return this._inputAmount - this._outputAmount;
};
Transaction.prototype._clearSignatures = function() {
_.each(this.inputs, function(input) {
input.clearSignatures();
});
};
Transaction.FEE_PER_KB = 10000;
// Safe upper bound for change address script
Transaction.CHANGE_OUTPUT_MAX_SIZE = 20 + 4 + 34 + 4;
Transaction._estimateFee = function(size, amountAvailable) {
var fee = Math.ceil(size / Transaction.FEE_PER_KB);
if (amountAvailable > fee) {
size += Transaction.CHANGE_OUTPUT_MAX_SIZE;
}
return Math.ceil(size / 1000) * Transaction.FEE_PER_KB;
};
Transaction.MAXIMUM_EXTRA_SIZE = 4 + 9 + 9 + 4;
Transaction.prototype._estimateSize = function() {
var result = Transaction.MAXIMUM_EXTRA_SIZE;
_.each(this.inputs, function(input) {
result += input._estimateSize();
});
_.each(this.outputs, function(output) {
result += output.script.toBuffer().length + 9;
});
return result;
};
Transaction.prototype._removeOutput = function(index) {
var output = this.outputs[index];
this._outputAmount -= output.satoshis;
this.outputs = _.without(this.outputs, output);
};
Transaction.prototype.removeOutput = function(index) {
this._removeOutput(index);
this._updateChangeOutput();
};
Transaction.prototype.removeInput = function(txId, outputIndex) {
var index;
if (!outputIndex && _.isNumber(txId)) {
index = txId;
} else {
index = _.findIndex(this.inputs, function(input) {
return input.prevTxId.toString('hex') === txId && input.outputIndex === outputIndex;
});
}
if (index < 0 || index >= this.inputs.length) {
throw new errors.Transaction.InvalidIndex(index, this.inputs.length);
}
var input = this.inputs[index];
this._inputAmount -= input.output.satoshis;
this.inputs = _.without(this.inputs, input);
this._updateChangeOutput();
};
/* Signature handling */
/**
* Sign the transaction using one or more private keys.
*
* It tries to sign each input, verifying that the signature will be valid
* (matches a public key).
*
* @param {Array|String|PrivateKey} privateKey
* @param {number} sigtype
* @return {Transaction} this, for chaining
*/
Transaction.prototype.sign = function(privateKey, sigtype) {
$.checkState(this.hasAllUtxoInfo());
var self = this;
if (_.isArray(privateKey)) {
_.each(privateKey, function(privateKey) {
self.sign(privateKey, sigtype);
});
return this;
}
_.each(this.getSignatures(privateKey, sigtype), function(signature) {
self.applySignature(signature);
});
return this;
};
Transaction.prototype.getSignatures = function(privKey, sigtype) {
privKey = new PrivateKey(privKey);
sigtype = sigtype || Signature.SIGHASH_ALL;
var transaction = this;
var results = [];
var hashData = Hash.sha256ripemd160(privKey.publicKey.toBuffer());
_.each(this.inputs, function forEachInput(input, index) {
_.each(input.getSignatures(transaction, privKey, index, sigtype, hashData), function(signature) {
results.push(signature);
});
});
return results;
};
/**
* Add a signature to the transaction
*
* @param {Object} signature
* @param {number} signature.inputIndex
* @param {number} signature.sigtype
* @param {PublicKey} signature.publicKey
* @param {Signature} signature.signature
* @return {Transaction} this, for chaining
*/
Transaction.prototype.applySignature = function(signature) {
this.inputs[signature.inputIndex].addSignature(this, signature);
return this;
};
Transaction.prototype.isFullySigned = function() {
_.each(this.inputs, function(input) {
if (input.isFullySigned === Input.prototype.isFullySigned) {
throw new errors.Transaction.UnableToVerifySignature(
'Unrecognized script kind, or not enough information to execute script.' +
'This usually happens when creating a transaction from a serialized transaction'
);
}
});
return _.all(_.map(this.inputs, function(input) {
return input.isFullySigned();
}));
};
Transaction.prototype.isValidSignature = function(signature) {
var self = this;
if (this.inputs[signature.inputIndex].isValidSignature === Input.prototype.isValidSignature) {
throw new errors.Transaction.UnableToVerifySignature(
'Unrecognized script kind, or not enough information to execute script.' +
'This usually happens when creating a transaction from a serialized transaction'
);
}
return this.inputs[signature.inputIndex].isValidSignature(self, signature);
};
/**
* @returns {bool} whether the signature is valid for this transaction input
*/
Transaction.prototype.verifySignature = function(sig, pubkey, nin, subscript) {
return Sighash.verify(this, sig, pubkey, nin, subscript);
};
/**
* Check that a transaction passes basic sanity tests. If not, return a string
* describing the error. This function contains the same logic as
* CheckTransaction in bitcoin core.
*/
Transaction.prototype.verify = function() {
// Basic checks that don't depend on any context
if (this.inputs.length === 0) {
return 'transaction txins empty';
}
if (this.outputs.length === 0) {
return 'transaction txouts empty';
}
// Size limits
if (this.toBuffer().length > Block.MAX_BLOCK_SIZE) {
return 'transaction over the maximum block size';
}
// Check for negative or overflow output values
var valueoutbn = new BN(0);
for (var i = 0; i < this.outputs.length; i++) {
var txout = this.outputs[i];
var valuebn = txout._satoshisBN;
if (valuebn.lt(BN.Zero)) {
return 'transaction txout ' + i + ' negative';
}
if (valuebn.gt(new BN(Transaction.MAX_MONEY, 10))) {
return 'transaction txout ' + i + ' greater than MAX_MONEY';
}
valueoutbn = valueoutbn.add(valuebn);
if (valueoutbn.gt(new BN(Transaction.MAX_MONEY))) {
return 'transaction txout ' + i + ' total output greater than MAX_MONEY';
}
}
// Check for duplicate inputs
var txinmap = {};
for (i = 0; i < this.inputs.length; i++) {
var txin = this.inputs[i];
var inputid = txin.prevTxId + ':' + txin.outputIndex;
if (!_.isUndefined(txinmap[inputid])) {
return 'transaction input ' + i + ' duplicate input';
}
txinmap[inputid] = true;
}
var isCoinbase = this.isCoinbase();
if (isCoinbase) {
var buf = this.inputs[0]._script.toBuffer();
if (buf.length < 2 || buf.length > 100) {
      return 'coinbase transaction script size invalid';
}
} else {
for (i = 0; i < this.inputs.length; i++) {
if (this.inputs[i].isNull()) {
        return 'transaction input ' + i + ' has null input';
}
}
}
return true;
};
/**
 * Analogous to bitcoind's IsCoinBase function in transaction.h
*/
Transaction.prototype.isCoinbase = function() {
return (this.inputs.length === 1 && this.inputs[0].isNull());
};
module.exports = Transaction;
| 1 | 14,102 | Still confusing. Why don't we return something like `'Fee too low: expected X but found Y'`? | bitpay-bitcore | js |
@@ -81,14 +81,16 @@ type ProviderModeConfig struct {
type ConsumerConfig struct {
PublicKey string `json:"PublicKey"`
// IP is needed when provider is behind NAT. In such case provider parses this IP and tries to ping consumer.
- IP string `json:"IP,omitempty"`
+ IP string `json:"IP,omitempty"`
+ Ports []int `json:"Ports"`
}
// ServiceConfig represent a Wireguard service provider configuration that will be passed to the consumer for establishing a connection.
type ServiceConfig struct {
// LocalPort and RemotePort are needed for NAT hole punching only.
- LocalPort int `json:"-"`
- RemotePort int `json:"-"`
+ LocalPort int `json:"-"`
+ RemotePort int `json:"-"`
+ Ports []int `json:"Ports"`
Provider struct {
PublicKey string | 1 | /*
* Copyright (C) 2018 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package wireguard
import (
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"net"
"strings"
"time"
"github.com/mysteriumnetwork/node/market"
)
// ServiceType indicates "wireguard" service type
const ServiceType = "wireguard"
// ServiceDefinition structure represents "wireguard" service parameters
type ServiceDefinition struct {
// Approximate information on location where the service is provided from
Location market.Location `json:"location"`
// Approximate information on location where the actual tunnelled traffic will originate from.
// This is used by providers having their own means of setting tunnels to other remote exit points.
LocationOriginate market.Location `json:"location_originate"`
}
// GetLocation returns geographic location of service definition provider
func (service ServiceDefinition) GetLocation() market.Location {
return service.Location
}
// EndpointFactory creates new connection endpoint.
type EndpointFactory func() (ConnectionEndpoint, error)
// ConnectionEndpoint represents a Wireguard network instance; it provides the information
// required for establishing a connection between the service provider and the consumer.
type ConnectionEndpoint interface {
StartConsumerMode(config ConsumerModeConfig) error
StartProviderMode(config ProviderModeConfig) error
AddPeer(iface string, peer Peer) error
PeerStats() (*Stats, error)
ConfigureRoutes(ip net.IP) error
Config() (ServiceConfig, error)
InterfaceName() string
Stop() error
}
// ConsumerModeConfig is consumer endpoint startup configuration.
type ConsumerModeConfig struct {
PrivateKey string
IPAddress net.IPNet
ListenPort int
}
// ProviderModeConfig is provider endpoint startup configuration.
type ProviderModeConfig struct {
Network net.IPNet
ListenPort int
PublicIP string
}
// ConsumerConfig is used for sending the public key and IP from consumer to provider
type ConsumerConfig struct {
PublicKey string `json:"PublicKey"`
// IP is needed when provider is behind NAT. In such case provider parses this IP and tries to ping consumer.
IP string `json:"IP,omitempty"`
}
// ServiceConfig represent a Wireguard service provider configuration that will be passed to the consumer for establishing a connection.
type ServiceConfig struct {
// LocalPort and RemotePort are needed for NAT hole punching only.
LocalPort int `json:"-"`
RemotePort int `json:"-"`
Provider struct {
PublicKey string
Endpoint net.UDPAddr
}
Consumer struct {
IPAddress net.IPNet
DNSIPs string
ConnectDelay int
}
}
// MarshalJSON implements json.Marshaler interface to provide human readable configuration.
func (s ServiceConfig) MarshalJSON() ([]byte, error) {
type provider struct {
PublicKey string `json:"public_key"`
Endpoint string `json:"endpoint"`
}
type consumer struct {
IPAddress string `json:"ip_address"`
DNSIPs string `json:"dns_ips"`
ConnectDelay int `json:"connect_delay"`
}
return json.Marshal(&struct {
LocalPort int `json:"local_port"`
RemotePort int `json:"remote_port"`
Provider provider `json:"provider"`
Consumer consumer `json:"consumer"`
}{
LocalPort: s.LocalPort,
RemotePort: s.RemotePort,
Provider: provider{
PublicKey: s.Provider.PublicKey,
Endpoint: s.Provider.Endpoint.String(),
},
Consumer: consumer{
IPAddress: s.Consumer.IPAddress.String(),
ConnectDelay: s.Consumer.ConnectDelay,
DNSIPs: s.Consumer.DNSIPs,
},
})
}
// UnmarshalJSON implements json.Unmarshaler interface to receive human readable configuration.
func (s *ServiceConfig) UnmarshalJSON(data []byte) error {
type provider struct {
PublicKey string `json:"public_key"`
Endpoint string `json:"endpoint"`
}
type consumer struct {
IPAddress string `json:"ip_address"`
DNSIPs string `json:"dns_ips"`
ConnectDelay int `json:"connect_delay"`
}
var config struct {
LocalPort int `json:"local_port"`
RemotePort int `json:"remote_port"`
Provider provider `json:"provider"`
Consumer consumer `json:"consumer"`
}
if err := json.Unmarshal(data, &config); err != nil {
return err
}
endpoint, err := net.ResolveUDPAddr("udp", config.Provider.Endpoint)
if err != nil {
return err
}
ip, ipnet, err := net.ParseCIDR(config.Consumer.IPAddress)
if err != nil {
return err
}
s.LocalPort = config.LocalPort
s.RemotePort = config.RemotePort
s.Provider.Endpoint = *endpoint
s.Provider.PublicKey = config.Provider.PublicKey
s.Consumer.DNSIPs = config.Consumer.DNSIPs
s.Consumer.IPAddress = *ipnet
s.Consumer.IPAddress.IP = ip
s.Consumer.ConnectDelay = config.Consumer.ConnectDelay
return nil
}
// DeviceConfig describes wireguard device configuration.
type DeviceConfig struct {
IfaceName string
Subnet net.IPNet
PrivateKey string
ListenPort int
}
// Encode encodes device config into string representation which is used for
// userspace and kernel space wireguard configuration.
func (dc *DeviceConfig) Encode() string {
var res strings.Builder
keyBytes, err := base64.StdEncoding.DecodeString(dc.PrivateKey)
if err != nil {
return ""
}
hexKey := hex.EncodeToString(keyBytes)
res.WriteString(fmt.Sprintf("private_key=%s\n", hexKey))
res.WriteString(fmt.Sprintf("listen_port=%d\n", dc.ListenPort))
return res.String()
}
// Peer represents wireguard peer.
type Peer struct {
PublicKey string
Endpoint *net.UDPAddr
AllowedIPs []string
KeepAlivePeriodSeconds int
}
// Encode encodes device peer config into string representation which is used for
// userspace and kernel space wireguard configuration.
func (p *Peer) Encode() string {
var res strings.Builder
keyBytes, err := base64.StdEncoding.DecodeString(p.PublicKey)
if err != nil {
return ""
}
hexKey := hex.EncodeToString(keyBytes)
res.WriteString(fmt.Sprintf("public_key=%s\n", hexKey))
res.WriteString(fmt.Sprintf("persistent_keepalive_interval=%d\n", p.KeepAlivePeriodSeconds))
if p.Endpoint != nil {
res.WriteString(fmt.Sprintf("endpoint=%s\n", p.Endpoint.String()))
}
for _, ip := range p.AllowedIPs {
res.WriteString(fmt.Sprintf("allowed_ip=%s\n", ip))
}
return res.String()
}
// Stats represents wireguard peer statistics information.
type Stats struct {
BytesSent uint64
BytesReceived uint64
LastHandshake time.Time
}
// ParseDevicePeerStats parses current active consumer stats.
func ParseDevicePeerStats(d *UserspaceDevice) (*Stats, error) {
if len(d.Peers) != 1 {
return nil, fmt.Errorf("exactly 1 peer expected, got %d", len(d.Peers))
}
p := d.Peers[0]
return &Stats{
BytesSent: uint64(p.TransmitBytes),
BytesReceived: uint64(p.ReceiveBytes),
LastHandshake: p.LastHandshakeTime,
}, nil
}
| 1 | 15,766 | Should be from lowercase `json:"ports"` the same is defined in MarshalJSON | mysteriumnetwork-node | go |
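For illustration only (not part of the record above): a minimal Go sketch of the change the reviewer appears to be asking for, i.e. tagging the newly added Ports field with a lowercase `ports` key so it matches the lowercase keys that ServiceConfig.MarshalJSON already emits ("local_port", "remote_port"). The struct is trimmed from the file above; the demo values in main are assumptions.
package main
import (
	"encoding/json"
	"fmt"
)
// ConsumerConfig is a trimmed, hypothetical copy of the struct from the patch,
// with the Ports field tagged the way the reviewer suggests.
type ConsumerConfig struct {
	PublicKey string `json:"PublicKey"`
	// IP is needed when provider is behind NAT. In such case provider parses this IP and tries to ping consumer.
	IP    string `json:"IP,omitempty"`
	Ports []int  `json:"ports"`
}
func main() {
	b, _ := json.Marshal(ConsumerConfig{PublicKey: "pk", Ports: []int{51820}})
	// Prints {"PublicKey":"pk","ports":[51820]}: the new field now serialises
	// under a lowercase key, consistent with the style used by MarshalJSON.
	fmt.Println(string(b))
}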
@@ -3,7 +3,7 @@
# Purpose:
# sns-ruby-example-create-subscription.rb demonstrates how to create an Amazon Simple Notification Services (SNS) subscription using
-# the AWS SDK for JavaScript (v3).
+# the AWS SDK for Ruby.
# Inputs:
# - REGION | 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Purpose:
# sns-ruby-example-create-subscription.rb demonstrates how to create an Amazon Simple Notification Services (SNS) subscription using
# the AWS SDK for JavaScript (v3).
# Inputs:
# - REGION
# - SNS_TOPIC_ARN
# - EMAIL_ADDRESS
# snippet-start:[sns.Ruby.createSubscription]
require 'aws-sdk-sns' # v2: require 'aws-sdk'
def subscription_created?(sns_client, topic_arn, protocol, endpoint)
sns_client.subscribe(topic_arn: topic_arn, protocol: protocol, endpoint: endpoint)
rescue StandardError => e
puts "Error while creating the subscription: #{e.message}"
end
# Full example call:
def run_me
protocol = 'email'
endpoint = 'EMAIL_ADDRESS'
topic_arn = 'TOPIC_ARN'
region = 'REGION'
sns_client = Aws::SNS::Client.new(region: region)
puts "Creating the subscription."
if subscription_created?(sns_client, topic_arn, protocol, endpoint)
    puts 'The subscription was created.'
else
puts 'The subscription was not created. Stopping program.'
exit 1
end
end
run_me if $PROGRAM_NAME == __FILE__
# snippet-end:[sns.Ruby.createSubscription]
| 1 | 20,564 | Simple Notification **Service** (singular) | awsdocs-aws-doc-sdk-examples | rb |
@@ -32,7 +32,9 @@ public class NashornScriptInterpreter implements ScriptInterpreter {
@Override
public void runScript(String scriptContent, Consumer<Exception> errorCallback) {
- nashornEngineFactory.createEngine().eval(scriptContent, errorCallback);
+ final NashornEngine engine = nashornEngineFactory.createEngine();
+ engine.eval("var tr = Packages.org.phoenicis.configuration.localisation.Localisation.tr;", errorCallback);
+ engine.eval(scriptContent, errorCallback);
}
@Override | 1 | /*
* Copyright (C) 2015-2017 PÂRIS Quentin
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
package org.phoenicis.scripts.nashorn;
import org.phoenicis.scripts.interpreter.InteractiveScriptSession;
import org.phoenicis.scripts.interpreter.ScriptInterpreter;
import java.util.function.Consumer;
public class NashornScriptInterpreter implements ScriptInterpreter {
private final NashornEngineFactory nashornEngineFactory;
public NashornScriptInterpreter(NashornEngineFactory nashornEngineFactory) {
this.nashornEngineFactory = nashornEngineFactory;
}
@Override
public void runScript(String scriptContent, Consumer<Exception> errorCallback) {
nashornEngineFactory.createEngine().eval(scriptContent, errorCallback);
}
@Override
public InteractiveScriptSession createInteractiveSession() {
return new NashornInteractiveSession(nashornEngineFactory);
}
}
| 1 | 10,273 | This code should be executed in NashornEngineFactory | PhoenicisOrg-phoenicis | java |
@@ -53,6 +53,7 @@ type Tags struct {
// NewTags creates a tags object
func NewTags(stateStore storage.StateStorer, logger logging.Logger) *Tags {
+ rand.Seed(time.Now().UnixNano())
return &Tags{
tags: &sync.Map{},
stateStore: stateStore, | 1 | // Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package tags provides the implementation for
// upload progress tracking.
package tags
import (
"context"
"encoding/json"
"errors"
"fmt"
"math/rand"
"sort"
"strconv"
"sync"
"time"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
)
const (
maxPage = 1000 // hard limit of page size
tagKeyPrefix = "tags_"
)
var (
TagUidFunc = rand.Uint32
ErrNotFound = errors.New("tag not found")
)
// Tags hold tag information indexed by a unique random uint32
type Tags struct {
tags *sync.Map
stateStore storage.StateStorer
logger logging.Logger
}
// NewTags creates a tags object
func NewTags(stateStore storage.StateStorer, logger logging.Logger) *Tags {
return &Tags{
tags: &sync.Map{},
stateStore: stateStore,
logger: logger,
}
}
// Create creates a new tag, stores it by the UID and returns it
// it returns an error if the tag with this UID already exists
func (ts *Tags) Create(total int64) (*Tag, error) {
t := NewTag(context.Background(), TagUidFunc(), total, nil, ts.stateStore, ts.logger)
if _, loaded := ts.tags.LoadOrStore(t.Uid, t); loaded {
return nil, errExists
}
return t, nil
}
// All returns all existing tags in Tags' sync.Map
// Note that tags are returned in no particular order
func (ts *Tags) All() (t []*Tag) {
ts.tags.Range(func(k, v interface{}) bool {
t = append(t, v.(*Tag))
return true
})
return t
}
// Get returns the underlying tag for the uid or an error if not found
func (ts *Tags) Get(uid uint32) (*Tag, error) {
t, ok := ts.tags.Load(uid)
if !ok {
// see if the tag is present in the store
// if yes, load it in to the memory
ta, err := ts.getTagFromStore(uid)
if err != nil {
return nil, ErrNotFound
}
ts.tags.LoadOrStore(ta.Uid, ta)
return ta, nil
}
return t.(*Tag), nil
}
// GetByAddress returns the latest underlying tag for the address or an error if not found
func (ts *Tags) GetByAddress(address swarm.Address) (*Tag, error) {
var t *Tag
var lastTime time.Time
ts.tags.Range(func(key interface{}, value interface{}) bool {
rcvdTag := value.(*Tag)
if rcvdTag.Address.Equal(address) && rcvdTag.StartedAt.After(lastTime) {
t = rcvdTag
lastTime = rcvdTag.StartedAt
}
return true
})
if t == nil {
return nil, ErrNotFound
}
return t, nil
}
// Range exposes sync.Map's iterator
func (ts *Tags) Range(fn func(k, v interface{}) bool) {
ts.tags.Range(fn)
}
func (ts *Tags) Delete(k interface{}) {
ts.tags.Delete(k)
// k is a uint32, try to create the tag key and remove
// from statestore
if uid, ok := k.(uint32); ok && uid != 0 {
key := tagKey(uid)
_ = ts.stateStore.Delete(key)
}
}
func (ts *Tags) MarshalJSON() (out []byte, err error) {
m := make(map[string]*Tag)
ts.Range(func(k, v interface{}) bool {
key := fmt.Sprintf("%d", k)
val := v.(*Tag)
// don't persist tags which were already done
if !val.Done(StateSynced) {
m[key] = val
}
return true
})
return json.Marshal(m)
}
func (ts *Tags) UnmarshalJSON(value []byte) error {
m := make(map[string]*Tag)
err := json.Unmarshal(value, &m)
if err != nil {
return err
}
for k, v := range m {
key, err := strconv.ParseUint(k, 10, 32)
if err != nil {
return err
}
// prevent a condition where a chunk was sent before shutdown
// and the node was turned off before the receipt was received
v.Sent = v.Synced
ts.tags.Store(key, v)
}
return err
}
func (ts *Tags) ListAll(ctx context.Context, offset, limit int) (t []*Tag, err error) {
if limit > maxPage {
limit = maxPage
}
// range sync.Map first
allTags := ts.All()
sort.Slice(allTags, func(i, j int) bool { return allTags[i].Uid < allTags[j].Uid })
for _, tag := range allTags {
if offset > 0 {
offset--
continue
}
t = append(t, tag)
limit--
if limit == 0 {
break
}
}
if limit == 0 {
return
}
// and then from statestore
err = ts.stateStore.Iterate(tagKeyPrefix, func(key, value []byte) (stop bool, err error) {
if offset > 0 {
offset--
return false, nil
}
var ta *Tag
ta, err = decodeTagValueFromStore(value)
if err != nil {
return true, err
}
if _, ok := ts.tags.Load(ta.Uid); ok {
// tag was already returned from sync.Map
return false, nil
}
t = append(t, ta)
limit--
if limit == 0 {
return true, nil
}
return false, nil
})
return t, err
}
func decodeTagValueFromStore(value []byte) (*Tag, error) {
var data []byte
err := json.Unmarshal(value, &data)
if err != nil {
return nil, err
}
var ta Tag
err = ta.UnmarshalBinary(data)
if err != nil {
return nil, err
}
return &ta, nil
}
// getTagFromStore gets a given tag from the state store.
func (ts *Tags) getTagFromStore(uid uint32) (*Tag, error) {
key := tagKey(uid)
var data []byte
err := ts.stateStore.Get(key, &data)
if err != nil {
return nil, err
}
var ta Tag
err = ta.UnmarshalBinary(data)
if err != nil {
return nil, err
}
return &ta, nil
}
// Close is called when the node goes down. This is when all the tags in memory are persisted.
func (ts *Tags) Close() (err error) {
// store all the tags in memory
tags := ts.All()
for _, t := range tags {
ts.logger.Trace("updating tag: ", t.Uid)
err := t.saveTag()
if err != nil {
return err
}
}
return nil
}
func tagKey(uid uint32) string {
return tagKeyPrefix + strconv.Itoa(int(uid))
}
| 1 | 15,349 | this is not so great as it sets the global rand seed. in tests that invoke this method multiple times for whatever reason, the global seed would be overridden every time the constructor would be called and the instances would have a different seed after every constructor call. it would be better to just pass a new source here as a parameter and do the same within the test. it should simplify things and clean this up | ethersphere-bee | go |
@@ -831,6 +831,14 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
arity = "1")
private final Wei txFeeCap = DEFAULT_RPC_TX_FEE_CAP;
+ @Option(
+ names = {"--rpc-require-chainid-in-txs"},
+ description =
+ "Allow for unprotected (non EIP155 signed) transactions to be submitted via RPC (default: ${DEFAULT-VALUE})",
+ arity = "1")
+ // TODO: set default to false for next major release
+ private final Boolean unprotectedTransactionsAllowed = true;
+
@Option(
names = {"--min-block-occupancy-ratio"},
description = "Minimum occupancy ratio for a mined block (default: ${DEFAULT-VALUE})", | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.cli;
import static com.google.common.base.Preconditions.checkNotNull;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Arrays.asList;
import static java.util.Collections.singletonList;
import static org.hyperledger.besu.cli.DefaultCommandValues.getDefaultBesuDataPath;
import static org.hyperledger.besu.cli.config.NetworkName.MAINNET;
import static org.hyperledger.besu.cli.util.CommandLineUtils.DEPENDENCY_WARNING_MSG;
import static org.hyperledger.besu.controller.BesuController.DATABASE_PATH;
import static org.hyperledger.besu.ethereum.api.graphql.GraphQLConfiguration.DEFAULT_GRAPHQL_HTTP_PORT;
import static org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcConfiguration.DEFAULT_JSON_RPC_PORT;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.DEFAULT_JSON_RPC_APIS;
import static org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketConfiguration.DEFAULT_WEBSOCKET_PORT;
import static org.hyperledger.besu.ethereum.permissioning.GoQuorumPermissioningConfiguration.QIP714_DEFAULT_BLOCK;
import static org.hyperledger.besu.metrics.BesuMetricCategory.DEFAULT_METRIC_CATEGORIES;
import static org.hyperledger.besu.metrics.MetricsProtocol.PROMETHEUS;
import static org.hyperledger.besu.metrics.prometheus.MetricsConfiguration.DEFAULT_METRICS_PORT;
import static org.hyperledger.besu.metrics.prometheus.MetricsConfiguration.DEFAULT_METRICS_PUSH_PORT;
import static org.hyperledger.besu.nat.kubernetes.KubernetesNatManager.DEFAULT_BESU_SERVICE_NAME_FILTER;
import org.hyperledger.besu.BesuInfo;
import org.hyperledger.besu.Runner;
import org.hyperledger.besu.RunnerBuilder;
import org.hyperledger.besu.chainexport.RlpBlockExporter;
import org.hyperledger.besu.chainimport.JsonBlockImporter;
import org.hyperledger.besu.chainimport.RlpBlockImporter;
import org.hyperledger.besu.cli.config.EthNetworkConfig;
import org.hyperledger.besu.cli.config.NetworkName;
import org.hyperledger.besu.cli.converter.MetricCategoryConverter;
import org.hyperledger.besu.cli.converter.PercentageConverter;
import org.hyperledger.besu.cli.converter.RpcApisConverter;
import org.hyperledger.besu.cli.custom.CorsAllowedOriginsProperty;
import org.hyperledger.besu.cli.custom.JsonRPCAllowlistHostsProperty;
import org.hyperledger.besu.cli.custom.RpcAuthFileValidator;
import org.hyperledger.besu.cli.error.BesuExceptionHandler;
import org.hyperledger.besu.cli.options.unstable.DataStorageOptions;
import org.hyperledger.besu.cli.options.unstable.DnsOptions;
import org.hyperledger.besu.cli.options.unstable.EthProtocolOptions;
import org.hyperledger.besu.cli.options.unstable.EthstatsOptions;
import org.hyperledger.besu.cli.options.unstable.LauncherOptions;
import org.hyperledger.besu.cli.options.unstable.MetricsCLIOptions;
import org.hyperledger.besu.cli.options.unstable.MiningOptions;
import org.hyperledger.besu.cli.options.unstable.NatOptions;
import org.hyperledger.besu.cli.options.unstable.NativeLibraryOptions;
import org.hyperledger.besu.cli.options.unstable.NetworkingOptions;
import org.hyperledger.besu.cli.options.unstable.RPCOptions;
import org.hyperledger.besu.cli.options.unstable.SynchronizerOptions;
import org.hyperledger.besu.cli.options.unstable.TransactionPoolOptions;
import org.hyperledger.besu.cli.presynctasks.PreSynchronizationTaskRunner;
import org.hyperledger.besu.cli.presynctasks.PrivateDatabaseMigrationPreSyncTask;
import org.hyperledger.besu.cli.subcommands.PasswordSubCommand;
import org.hyperledger.besu.cli.subcommands.PublicKeySubCommand;
import org.hyperledger.besu.cli.subcommands.RetestethSubCommand;
import org.hyperledger.besu.cli.subcommands.blocks.BlocksSubCommand;
import org.hyperledger.besu.cli.subcommands.operator.OperatorSubCommand;
import org.hyperledger.besu.cli.subcommands.rlp.RLPSubCommand;
import org.hyperledger.besu.cli.util.BesuCommandCustomFactory;
import org.hyperledger.besu.cli.util.CommandLineUtils;
import org.hyperledger.besu.cli.util.ConfigOptionSearchAndRunHandler;
import org.hyperledger.besu.cli.util.VersionProvider;
import org.hyperledger.besu.config.GenesisConfigFile;
import org.hyperledger.besu.config.GenesisConfigOptions;
import org.hyperledger.besu.config.GoQuorumOptions;
import org.hyperledger.besu.config.experimental.ExperimentalEIPs;
import org.hyperledger.besu.controller.BesuController;
import org.hyperledger.besu.controller.BesuControllerBuilder;
import org.hyperledger.besu.controller.TargetingGasLimitCalculator;
import org.hyperledger.besu.crypto.KeyPair;
import org.hyperledger.besu.crypto.KeyPairSecurityModule;
import org.hyperledger.besu.crypto.KeyPairUtil;
import org.hyperledger.besu.crypto.NodeKey;
import org.hyperledger.besu.crypto.SignatureAlgorithmFactory;
import org.hyperledger.besu.enclave.EnclaveFactory;
import org.hyperledger.besu.enclave.GoQuorumEnclave;
import org.hyperledger.besu.ethereum.api.ApiConfiguration;
import org.hyperledger.besu.ethereum.api.ImmutableApiConfiguration;
import org.hyperledger.besu.ethereum.api.graphql.GraphQLConfiguration;
import org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcConfiguration;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApi;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis;
import org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketConfiguration;
import org.hyperledger.besu.ethereum.api.tls.FileBasedPasswordProvider;
import org.hyperledger.besu.ethereum.api.tls.TlsClientAuthConfiguration;
import org.hyperledger.besu.ethereum.api.tls.TlsConfiguration;
import org.hyperledger.besu.ethereum.blockcreation.GasLimitCalculator;
import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.GoQuorumPrivacyParameters;
import org.hyperledger.besu.ethereum.core.Hash;
import org.hyperledger.besu.ethereum.core.MiningParameters;
import org.hyperledger.besu.ethereum.core.PrivacyParameters;
import org.hyperledger.besu.ethereum.core.Wei;
import org.hyperledger.besu.ethereum.eth.sync.SyncMode;
import org.hyperledger.besu.ethereum.eth.sync.SynchronizerConfiguration;
import org.hyperledger.besu.ethereum.eth.transactions.TransactionPoolConfiguration;
import org.hyperledger.besu.ethereum.mainnet.precompiles.AbstractAltBnPrecompiledContract;
import org.hyperledger.besu.ethereum.p2p.config.DiscoveryConfiguration;
import org.hyperledger.besu.ethereum.p2p.peers.EnodeDnsConfiguration;
import org.hyperledger.besu.ethereum.p2p.peers.EnodeURL;
import org.hyperledger.besu.ethereum.p2p.peers.StaticNodesParser;
import org.hyperledger.besu.ethereum.permissioning.GoQuorumPermissioningConfiguration;
import org.hyperledger.besu.ethereum.permissioning.LocalPermissioningConfiguration;
import org.hyperledger.besu.ethereum.permissioning.PermissioningConfiguration;
import org.hyperledger.besu.ethereum.permissioning.PermissioningConfigurationBuilder;
import org.hyperledger.besu.ethereum.permissioning.SmartContractPermissioningConfiguration;
import org.hyperledger.besu.ethereum.privacy.storage.keyvalue.PrivacyKeyValueStorageProvider;
import org.hyperledger.besu.ethereum.privacy.storage.keyvalue.PrivacyKeyValueStorageProviderBuilder;
import org.hyperledger.besu.ethereum.storage.StorageProvider;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStorageProvider;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStorageProviderBuilder;
import org.hyperledger.besu.ethereum.worldstate.DefaultWorldStateArchive;
import org.hyperledger.besu.ethereum.worldstate.PrunerConfiguration;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import org.hyperledger.besu.ethereum.worldstate.WorldStatePreimageStorage;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.metrics.MetricCategoryRegistryImpl;
import org.hyperledger.besu.metrics.MetricsProtocol;
import org.hyperledger.besu.metrics.MetricsSystemFactory;
import org.hyperledger.besu.metrics.ObservableMetricsSystem;
import org.hyperledger.besu.metrics.StandardMetricCategory;
import org.hyperledger.besu.metrics.prometheus.MetricsConfiguration;
import org.hyperledger.besu.metrics.vertx.VertxMetricsAdapterFactory;
import org.hyperledger.besu.nat.NatMethod;
import org.hyperledger.besu.plugin.services.BesuConfiguration;
import org.hyperledger.besu.plugin.services.BesuEvents;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.PicoCLIOptions;
import org.hyperledger.besu.plugin.services.SecurityModuleService;
import org.hyperledger.besu.plugin.services.StorageService;
import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.metrics.MetricCategory;
import org.hyperledger.besu.plugin.services.metrics.MetricCategoryRegistry;
import org.hyperledger.besu.plugin.services.securitymodule.SecurityModule;
import org.hyperledger.besu.plugin.services.storage.PrivacyKeyValueStorageFactory;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBPlugin;
import org.hyperledger.besu.services.BesuEventsImpl;
import org.hyperledger.besu.services.BesuPluginContextImpl;
import org.hyperledger.besu.services.PicoCLIOptionsImpl;
import org.hyperledger.besu.services.SecurityModuleServiceImpl;
import org.hyperledger.besu.services.StorageServiceImpl;
import org.hyperledger.besu.services.kvstore.InMemoryStoragePlugin;
import org.hyperledger.besu.util.NetworkUtility;
import org.hyperledger.besu.util.PermissioningConfigurationValidator;
import org.hyperledger.besu.util.number.Fraction;
import org.hyperledger.besu.util.number.Percentage;
import org.hyperledger.besu.util.number.PositiveNumber;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.math.BigInteger;
import java.net.InetAddress;
import java.net.SocketException;
import java.net.URI;
import java.net.UnknownHostException;
import java.nio.file.Path;
import java.time.Clock;
import java.util.ArrayList;
import java.util.Base64;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.Set;
import java.util.TreeMap;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableMap;
import com.google.common.io.Files;
import com.google.common.io.Resources;
import io.vertx.core.Vertx;
import io.vertx.core.VertxOptions;
import io.vertx.core.json.DecodeException;
import io.vertx.core.metrics.MetricsOptions;
import net.consensys.quorum.mainnet.launcher.LauncherManager;
import net.consensys.quorum.mainnet.launcher.config.ImmutableLauncherConfig;
import net.consensys.quorum.mainnet.launcher.exception.LauncherException;
import net.consensys.quorum.mainnet.launcher.util.ParseArgsHelper;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.units.bigints.UInt256;
import picocli.CommandLine;
import picocli.CommandLine.AbstractParseResultHandler;
import picocli.CommandLine.Command;
import picocli.CommandLine.ExecutionException;
import picocli.CommandLine.Option;
import picocli.CommandLine.ParameterException;
@SuppressWarnings("FieldCanBeLocal") // because Picocli injected fields report false positives
@Command(
description = "This command runs the Besu Ethereum client full node.",
abbreviateSynopsis = true,
name = "besu",
mixinStandardHelpOptions = true,
versionProvider = VersionProvider.class,
header = "Usage:",
synopsisHeading = "%n",
descriptionHeading = "%nDescription:%n%n",
optionListHeading = "%nOptions:%n",
footerHeading = "%n",
footer = "Besu is licensed under the Apache License 2.0")
public class BesuCommand implements DefaultCommandValues, Runnable {
@SuppressWarnings("PrivateStaticFinalLoggers")
// non-static for testing
private final Logger logger;
private CommandLine commandLine;
private final Supplier<RlpBlockImporter> rlpBlockImporter;
private final Function<BesuController, JsonBlockImporter> jsonBlockImporterFactory;
private final Function<Blockchain, RlpBlockExporter> rlpBlockExporterFactory;
// Unstable CLI options
final NetworkingOptions unstableNetworkingOptions = NetworkingOptions.create();
final SynchronizerOptions unstableSynchronizerOptions = SynchronizerOptions.create();
final EthProtocolOptions unstableEthProtocolOptions = EthProtocolOptions.create();
final MetricsCLIOptions unstableMetricsCLIOptions = MetricsCLIOptions.create();
final TransactionPoolOptions unstableTransactionPoolOptions = TransactionPoolOptions.create();
private final EthstatsOptions unstableEthstatsOptions = EthstatsOptions.create();
private final DataStorageOptions unstableDataStorageOptions = DataStorageOptions.create();
private final DnsOptions unstableDnsOptions = DnsOptions.create();
private final MiningOptions unstableMiningOptions = MiningOptions.create();
private final NatOptions unstableNatOptions = NatOptions.create();
private final NativeLibraryOptions unstableNativeLibraryOptions = NativeLibraryOptions.create();
private final RPCOptions unstableRPCOptions = RPCOptions.create();
final LauncherOptions unstableLauncherOptions = LauncherOptions.create();
private final RunnerBuilder runnerBuilder;
private final BesuController.Builder controllerBuilderFactory;
private final BesuPluginContextImpl besuPluginContext;
private final StorageServiceImpl storageService;
private final SecurityModuleServiceImpl securityModuleService;
private final Map<String, String> environment;
private final MetricCategoryRegistryImpl metricCategoryRegistry =
new MetricCategoryRegistryImpl();
private final MetricCategoryConverter metricCategoryConverter = new MetricCategoryConverter();
// Public IP stored to avoid having to rediscover it each time we need it.
private InetAddress autoDiscoveredDefaultIP = null;
private final PreSynchronizationTaskRunner preSynchronizationTaskRunner =
new PreSynchronizationTaskRunner();
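// Ports claimed by enabled services; populated and checked at startup to detect
// two services being configured on the same port.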
private final Set<Integer> allocatedPorts = new HashSet<>();
// CLI options defined by user at runtime.
// Options parsing is done with CLI library Picocli https://picocli.info/
// While this variable is never read, it is needed for PicoCLI to create
// the config file option that is read elsewhere.
@SuppressWarnings("UnusedVariable")
@CommandLine.Option(
names = {CONFIG_FILE_OPTION_NAME},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "TOML config file (default: none)")
private final File configFile = null;
@CommandLine.Option(
names = {"--data-path"},
paramLabel = MANDATORY_PATH_FORMAT_HELP,
description = "The path to Besu data directory (default: ${DEFAULT-VALUE})")
final Path dataPath = getDefaultBesuDataPath(this);
// Genesis file path defaults to null when the option is not defined on the
// command line, because the default is handled by the Runner, which uses the
// mainnet JSON file from resources as indicated by the default network option.
// We therefore have no control over the genesis default value here.
@CommandLine.Option(
names = {"--genesis-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Genesis file. Setting this option makes --network option ignored and requires --network-id to be set.")
private final File genesisFile = null;
@CommandLine.Option(
names = {"--node-private-key-file"},
paramLabel = MANDATORY_PATH_FORMAT_HELP,
description =
"The node's private key file (default: a file named \"key\" in the Besu data folder)")
private final File nodePrivateKeyFile = null;
@Option(
names = "--identity",
paramLabel = "<String>",
description = "Identification for this node in the Client ID",
arity = "1")
private final Optional<String> identityString = Optional.empty();
// Completely disables P2P within Besu.
@Option(
names = {"--p2p-enabled"},
description = "Enable P2P functionality (default: ${DEFAULT-VALUE})",
arity = "1")
private final Boolean p2pEnabled = true;
// Boolean option to indicate if peers should NOT be discovered; defaulting to
// false indicates that peers should be discovered by default.
//
// This negative option is required because of the nature of an option that is
// true when added on the command line. You can't do --option=false, so false is
// set as the default and you simply don't set the option at all if you want it
// to stay false.
// This seems to be the only way it works with Picocli.
// Also, many other programs use the same negative-option scheme for false
// defaults, meaning that it's probably the right way to handle disabling options.
@Option(
names = {"--discovery-enabled"},
description = "Enable P2P discovery (default: ${DEFAULT-VALUE})",
arity = "1")
private final Boolean peerDiscoveryEnabled = true;
// A list of bootstrap nodes can be passed
// and a hardcoded list will be used otherwise by the Runner.
// NOTE: we have no control over default value here.
@Option(
names = {"--bootnodes"},
paramLabel = "<enode://id@host:port>",
description =
"Comma separated enode URLs for P2P discovery bootstrap. "
+ "Default is a predefined list.",
split = ",",
arity = "0..*")
private final List<String> bootNodes = null;
@Option(
names = {"--max-peers"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description = "Maximum P2P connections that can be established (default: ${DEFAULT-VALUE})")
private final Integer maxPeers = DEFAULT_MAX_PEERS;
@Option(
names = {"--remote-connections-limit-enabled"},
description =
"Whether to limit the number of P2P connections initiated remotely. (default: ${DEFAULT-VALUE})")
private final Boolean isLimitRemoteWireConnectionsEnabled = true;
@Option(
names = {"--remote-connections-max-percentage"},
paramLabel = MANDATORY_DOUBLE_FORMAT_HELP,
description =
"The maximum percentage of P2P connections that can be initiated remotely. Must be between 0 and 100 inclusive. (default: ${DEFAULT-VALUE})",
arity = "1",
converter = PercentageConverter.class)
private final Integer maxRemoteConnectionsPercentage =
Fraction.fromFloat(DEFAULT_FRACTION_REMOTE_WIRE_CONNECTIONS_ALLOWED)
.toPercentage()
.getValue();
@Option(
names = {"--random-peer-priority-enabled"},
description =
"Allow for incoming connections to be prioritized randomly. This will prevent (typically small, stable) networks from forming impenetrable peer cliques. (default: ${DEFAULT-VALUE})")
private final Boolean randomPeerPriority = false;
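// Banned node IDs are parsed eagerly via the setter below so that malformed
// values fail fast with a ParameterException.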
@Option(
names = {"--banned-node-ids", "--banned-node-id"},
paramLabel = MANDATORY_NODE_ID_FORMAT_HELP,
description = "A list of node IDs to ban from the P2P network.",
split = ",",
arity = "1..*")
void setBannedNodeIds(final List<String> values) {
try {
bannedNodeIds =
values.stream()
.filter(value -> !value.isEmpty())
.map(EnodeURL::parseNodeId)
.collect(Collectors.toList());
} catch (final IllegalArgumentException e) {
throw new ParameterException(
commandLine, "Invalid ids supplied to '--banned-node-ids'. " + e.getMessage());
}
}
private Collection<Bytes> bannedNodeIds = new ArrayList<>();
@Option(
names = {"--sync-mode"},
paramLabel = MANDATORY_MODE_FORMAT_HELP,
description =
"Synchronization mode, possible values are ${COMPLETION-CANDIDATES} (default: FAST if a --network is supplied and privacy isn't enabled. FULL otherwise.)")
private SyncMode syncMode = null;
@Option(
names = {"--fast-sync-min-peers"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Minimum number of peers required before starting fast sync. (default: ${DEFAULT-VALUE})")
private final Integer fastSyncMinPeerCount = FAST_SYNC_MIN_PEER_COUNT;
@Option(
names = {"--network"},
paramLabel = MANDATORY_NETWORK_FORMAT_HELP,
description =
"Synchronize against the indicated network, possible values are ${COMPLETION-CANDIDATES}."
+ " (default: MAINNET)")
private final NetworkName network = null;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--p2p-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Ip address this node advertises to its peers (default: ${DEFAULT-VALUE})",
arity = "1")
private String p2pHost = autoDiscoverDefaultIP().getHostAddress();
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--p2p-interface"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description =
"The network interface address on which this node listens for P2P communication (default: ${DEFAULT-VALUE})",
arity = "1")
private String p2pInterface = NetworkUtility.INADDR_ANY;
@Option(
names = {"--p2p-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port on which to listen for P2P communication (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer p2pPort = EnodeURL.DEFAULT_LISTENING_PORT;
@Option(
names = {"--nat-method"},
description =
"Specify the NAT circumvention method to be used, possible values are ${COMPLETION-CANDIDATES}."
+ " NONE disables NAT functionality. (default: ${DEFAULT-VALUE})")
private final NatMethod natMethod = DEFAULT_NAT_METHOD;
@Option(
names = {"--network-id"},
paramLabel = "<BIG INTEGER>",
description =
"P2P network identifier. (default: the selected network chain ID or custom genesis chain ID)",
arity = "1")
private final BigInteger networkId = null;
@Option(
names = {"--graphql-http-enabled"},
description = "Set to start the GraphQL HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isGraphQLHttpEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--graphql-http-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host for GraphQL HTTP to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String graphQLHttpHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--graphql-http-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port for GraphQL HTTP to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer graphQLHttpPort = DEFAULT_GRAPHQL_HTTP_PORT;
@Option(
names = {"--graphql-http-cors-origins"},
description = "Comma separated origin domain URLs for CORS validation (default: none)")
private final CorsAllowedOriginsProperty graphQLHttpCorsAllowedOrigins =
new CorsAllowedOriginsProperty();
@Option(
names = {"--rpc-http-enabled"},
description = "Set to start the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--rpc-http-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host for JSON-RPC HTTP to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcHttpHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--rpc-http-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port for JSON-RPC HTTP to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer rpcHttpPort = DEFAULT_JSON_RPC_PORT;
@Option(
names = {"--rpc-http-max-active-connections"},
description =
"Maximum number of HTTP connections allowed for JSON-RPC (default: ${DEFAULT-VALUE}). Once this limit is reached, incoming connections will be rejected.",
arity = "1")
private final Integer rpcHttpMaxConnections = DEFAULT_HTTP_MAX_CONNECTIONS;
// A list of origins URLs that are accepted by the JsonRpcHttpServer (CORS)
@Option(
names = {"--rpc-http-cors-origins"},
description = "Comma separated origin domain URLs for CORS validation (default: none)")
private final CorsAllowedOriginsProperty rpcHttpCorsAllowedOrigins =
new CorsAllowedOriginsProperty();
@Option(
names = {"--rpc-http-api", "--rpc-http-apis"},
paramLabel = "<api name>",
split = ",",
arity = "1..*",
converter = RpcApisConverter.class,
description =
"Comma separated list of APIs to enable on JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Collection<RpcApi> rpcHttpApis = DEFAULT_JSON_RPC_APIS;
@Option(
names = {"--rpc-http-authentication-enabled"},
description =
"Require authentication for the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpAuthenticationEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@CommandLine.Option(
names = {"--rpc-http-authentication-credentials-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Storage file for JSON-RPC HTTP authentication credentials (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcHttpAuthenticationCredentialsFile = null;
@CommandLine.Option(
names = {"--rpc-http-authentication-jwt-public-key-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "JWT public key file for JSON-RPC HTTP authentication",
arity = "1")
private final File rpcHttpAuthenticationPublicKeyFile = null;
@Option(
names = {"--rpc-http-tls-enabled"},
description = "Enable TLS for the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpTlsEnabled = false;
@Option(
names = {"--rpc-http-tls-keystore-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Keystore (PKCS#12) containing key/certificate for the JSON-RPC HTTP service. Required if TLS is enabled.")
private final Path rpcHttpTlsKeyStoreFile = null;
@Option(
names = {"--rpc-http-tls-keystore-password-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"File containing password to unlock keystore for the JSON-RPC HTTP service. Required if TLS is enabled.")
private final Path rpcHttpTlsKeyStorePasswordFile = null;
@Option(
names = {"--rpc-http-tls-client-auth-enabled"},
description =
"Enable TLS client authentication for the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpTlsClientAuthEnabled = false;
@Option(
names = {"--rpc-http-tls-known-clients-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Path to file containing clients certificate common name and fingerprint for client authentication")
private final Path rpcHttpTlsKnownClientsFile = null;
@Option(
names = {"--rpc-http-tls-ca-clients-enabled"},
description =
"Enable to accept clients certificate signed by a valid CA for client authentication (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpTlsCAClientsEnabled = false;
@Option(
names = {"--rpc-ws-enabled"},
description = "Set to start the JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcWsEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--rpc-ws-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host for JSON-RPC WebSocket service to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcWsHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--rpc-ws-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port for JSON-RPC WebSocket service to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer rpcWsPort = DEFAULT_WEBSOCKET_PORT;
@Option(
names = {"--rpc-ws-max-active-connections"},
description =
"Maximum number of WebSocket connections allowed for JSON-RPC (default: ${DEFAULT-VALUE}). Once this limit is reached, incoming connections will be rejected.",
arity = "1")
private final Integer rpcWsMaxConnections = DEFAULT_WS_MAX_CONNECTIONS;
@Option(
names = {"--rpc-ws-api", "--rpc-ws-apis"},
paramLabel = "<api name>",
split = ",",
arity = "1..*",
converter = RpcApisConverter.class,
description =
"Comma separated list of APIs to enable on JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
private final List<RpcApi> rpcWsApis = DEFAULT_JSON_RPC_APIS;
@Option(
names = {"--rpc-ws-authentication-enabled"},
description =
"Require authentication for the JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcWsAuthenticationEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@CommandLine.Option(
names = {"--rpc-ws-authentication-credentials-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Storage file for JSON-RPC WebSocket authentication credentials (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcWsAuthenticationCredentialsFile = null;
@CommandLine.Option(
names = {"--rpc-ws-authentication-jwt-public-key-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "JWT public key file for JSON-RPC WebSocket authentication",
arity = "1")
private final File rpcWsAuthenticationPublicKeyFile = null;
@Option(
names = {"--privacy-tls-enabled"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "Enable TLS for connecting to privacy enclave (default: ${DEFAULT-VALUE})")
private final Boolean isPrivacyTlsEnabled = false;
@Option(
names = "--privacy-tls-keystore-file",
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Path to a PKCS#12 formatted keystore; used to enable TLS on inbound connections.")
private final Path privacyKeyStoreFile = null;
@Option(
names = "--privacy-tls-keystore-password-file",
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "Path to a file containing the password used to decrypt the keystore.")
private final Path privacyKeyStorePasswordFile = null;
@Option(
names = "--privacy-tls-known-enclave-file",
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "Path to a file containing the fingerprints of the authorized privacy enclave.")
private final Path privacyTlsKnownEnclaveFile = null;
@Option(
names = {"--metrics-enabled"},
description = "Set to start the metrics exporter (default: ${DEFAULT-VALUE})")
private final Boolean isMetricsEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--metrics-protocol"},
description =
"Metrics protocol, one of PROMETHEUS, OPENTELEMETRY or NONE. (default: ${DEFAULT-VALUE})")
private MetricsProtocol metricsProtocol = PROMETHEUS;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--metrics-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host for the metrics exporter to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String metricsHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--metrics-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port for the metrics exporter to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer metricsPort = DEFAULT_METRICS_PORT;
@Option(
names = {"--metrics-category", "--metrics-categories"},
paramLabel = "<category name>",
split = ",",
arity = "1..*",
description =
"Comma separated list of categories to track metrics for (default: ${DEFAULT-VALUE})")
private final Set<MetricCategory> metricCategories = DEFAULT_METRIC_CATEGORIES;
@Option(
names = {"--metrics-push-enabled"},
description = "Enable the metrics push gateway integration (default: ${DEFAULT-VALUE})")
private final Boolean isMetricsPushEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--metrics-push-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host of the Prometheus Push Gateway for push mode (default: ${DEFAULT-VALUE})",
arity = "1")
private String metricsPushHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--metrics-push-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port of the Prometheus Push Gateway for push mode (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer metricsPushPort = DEFAULT_METRICS_PUSH_PORT;
@Option(
names = {"--metrics-push-interval"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Interval in seconds to push metrics when in push mode (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer metricsPushInterval = 15;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--metrics-push-prometheus-job"},
description = "Job name to use when in push mode (default: ${DEFAULT-VALUE})",
arity = "1")
private String metricsPrometheusJob = "besu-client";
@Option(
names = {"--host-allowlist"},
paramLabel = "<hostname>[,<hostname>...]... or * or all",
description =
"Comma separated list of hostnames to allow for RPC access, or * to accept any host (default: ${DEFAULT-VALUE})",
defaultValue = "localhost,127.0.0.1")
private final JsonRPCAllowlistHostsProperty hostsAllowlist = new JsonRPCAllowlistHostsProperty();
@Option(
names = {"--host-whitelist"},
hidden = true,
paramLabel = "<hostname>[,<hostname>...]... or * or all",
description =
"Deprecated in favor of --host-allowlist. Comma separated list of hostnames to allow for RPC access, or * to accept any host (default: ${DEFAULT-VALUE})")
private final JsonRPCAllowlistHostsProperty hostsWhitelist = new JsonRPCAllowlistHostsProperty();
@Option(
names = {"--logging", "-l"},
paramLabel = "<LOG VERBOSITY LEVEL>",
description = "Logging verbosity levels: OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE, ALL")
private final Level logLevel = null;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"})
@Option(
names = {"--color-enabled"},
description =
"Force color output to be enabled/disabled (default: colorized only if printing to console)")
private static Boolean colorEnabled = null;
@Option(
names = {"--reorg-logging-threshold"},
description =
"How deep a chain reorganization must be in order for it to be logged (default: ${DEFAULT-VALUE})")
private final Long reorgLoggingThreshold = 6L;
@Option(
names = {"--miner-enabled"},
description = "Set if node will perform mining (default: ${DEFAULT-VALUE})")
private final Boolean isMiningEnabled = false;
@Option(
names = {"--miner-stratum-enabled"},
description = "Set if node will perform Stratum mining (default: ${DEFAULT-VALUE})")
private final Boolean iStratumMiningEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--miner-stratum-host"},
description = "Host for Stratum network mining service (default: ${DEFAULT-VALUE})")
private String stratumNetworkInterface = "0.0.0.0";
@Option(
names = {"--miner-stratum-port"},
description = "Stratum port binding (default: ${DEFAULT-VALUE})")
private final Integer stratumPort = 8008;
@Option(
names = {"--miner-coinbase"},
description =
"Account to which mining rewards are paid. You must specify a valid coinbase if "
+ "mining is enabled using --miner-enabled option",
arity = "1")
private final Address coinbase = null;
@Option(
names = {"--min-gas-price"},
description =
"Minimum price (in Wei) offered by a transaction for it to be included in a mined "
+ "block (default: ${DEFAULT-VALUE})",
arity = "1")
private final Wei minTransactionGasPrice = DEFAULT_MIN_TRANSACTION_GAS_PRICE;
@Option(
names = {"--rpc-tx-feecap"},
description =
"Maximum transaction fees (in Wei) accepted for transaction submitted through RPC (default: ${DEFAULT-VALUE})",
arity = "1")
private final Wei txFeeCap = DEFAULT_RPC_TX_FEE_CAP;
@Option(
names = {"--min-block-occupancy-ratio"},
description = "Minimum occupancy ratio for a mined block (default: ${DEFAULT-VALUE})",
arity = "1")
private final Double minBlockOccupancyRatio = DEFAULT_MIN_BLOCK_OCCUPANCY_RATIO;
@Option(
names = {"--miner-extra-data"},
description =
"A hex string representing the (32) bytes to be included in the extra data "
+ "field of a mined block (default: ${DEFAULT-VALUE})",
arity = "1")
private final Bytes extraData = DEFAULT_EXTRA_DATA;
@Option(
names = {"--pruning-enabled"},
description =
"Enable disk-space saving optimization that removes old state that is unlikely to be required (default: ${DEFAULT-VALUE})")
private final Boolean pruningEnabled = false;
@Option(
names = {"--permissions-nodes-config-file-enabled"},
description = "Enable node level permissions (default: ${DEFAULT-VALUE})")
private final Boolean permissionsNodesEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@CommandLine.Option(
names = {"--permissions-nodes-config-file"},
description =
"Node permissioning config TOML file (default: a file named \"permissions_config.toml\" in the Besu data folder)")
private String nodePermissionsConfigFile = null;
@Option(
names = {"--permissions-accounts-config-file-enabled"},
description = "Enable account level permissions (default: ${DEFAULT-VALUE})")
private final Boolean permissionsAccountsEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@CommandLine.Option(
names = {"--permissions-accounts-config-file"},
description =
"Account permissioning config TOML file (default: a file named \"permissions_config.toml\" in the Besu data folder)")
private String accountPermissionsConfigFile = null;
@Option(
names = {"--permissions-nodes-contract-address"},
description = "Address of the node permissioning smart contract",
arity = "1")
private final Address permissionsNodesContractAddress = null;
@Option(
names = {"--permissions-nodes-contract-version"},
description = "Version of the EEA Node Permissioning interface (default: ${DEFAULT-VALUE})")
private final Integer permissionsNodesContractVersion = 1;
@Option(
names = {"--permissions-nodes-contract-enabled"},
description = "Enable node level permissions via smart contract (default: ${DEFAULT-VALUE})")
private final Boolean permissionsNodesContractEnabled = false;
@Option(
names = {"--permissions-accounts-contract-address"},
description = "Address of the account permissioning smart contract",
arity = "1")
private final Address permissionsAccountsContractAddress = null;
@Option(
names = {"--permissions-accounts-contract-enabled"},
description =
"Enable account level permissions via smart contract (default: ${DEFAULT-VALUE})")
private final Boolean permissionsAccountsContractEnabled = false;
@Option(
names = {"--privacy-enabled"},
description = "Enable private transactions (default: ${DEFAULT-VALUE})")
private final Boolean isPrivacyEnabled = false;
@Option(
names = {"--privacy-multi-tenancy-enabled"},
description = "Enable multi-tenant private transactions (default: ${DEFAULT-VALUE})")
private final Boolean isPrivacyMultiTenancyEnabled = false;
@Option(
names = {"--revert-reason-enabled"},
description =
"Enable passing the revert reason back through TransactionReceipts (default: ${DEFAULT-VALUE})")
private final Boolean isRevertReasonEnabled = false;
@Option(
names = {"--required-blocks", "--required-block"},
paramLabel = "BLOCK=HASH",
description = "Block number and hash peers are required to have.",
arity = "*",
split = ",")
private final Map<Long, Hash> requiredBlocks = new HashMap<>();
@Option(
names = {"--privacy-url"},
description = "The URL on which the enclave is running")
private final URI privacyUrl = PrivacyParameters.DEFAULT_ENCLAVE_URL;
@Option(
names = {"--privacy-public-key-file"},
description = "The enclave's public key file")
private final File privacyPublicKeyFile = null;
@Option(
names = {"--privacy-precompiled-address"},
description =
"The address to which the privacy pre-compiled contract will be mapped (default: ${DEFAULT-VALUE})",
hidden = true)
private final Integer privacyPrecompiledAddress = Address.PRIVACY;
@Option(
names = {"--privacy-marker-transaction-signing-key-file"},
description =
"The name of a file containing the private key used to sign privacy marker transactions. If unset, each will be signed with a random key.")
private final Path privacyMarkerTransactionSigningKeyPath = null;
@Option(
names = {"--privacy-enable-database-migration"},
description = "Enable private database metadata migration (default: ${DEFAULT-VALUE})")
private final Boolean migratePrivateDatabase = false;
@Option(
names = {"--privacy-flexible-groups-enabled", "--privacy-onchain-groups-enabled"},
description = "Enable flexible (onchain) privacy groups (default: ${DEFAULT-VALUE})")
private final Boolean isFlexiblePrivacyGroupsEnabled = false;
@Option(
names = {"--target-gas-limit"},
description =
"Sets target gas limit per block. If set each block's gas limit will approach this setting over time if the current gas limit is different.")
private final Long targetGasLimit = null;
@Option(
names = {"--tx-pool-max-size"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Maximum number of pending transactions that will be kept in the transaction pool (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer txPoolMaxSize = TransactionPoolConfiguration.MAX_PENDING_TRANSACTIONS;
@Option(
names = {"--tx-pool-hashes-max-size"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Maximum number of pending transaction hashes that will be kept in the transaction pool (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer pooledTransactionHashesSize =
TransactionPoolConfiguration.MAX_PENDING_TRANSACTIONS_HASHES;
@Option(
names = {"--tx-pool-retention-hours"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Maximum retention period of pending transactions in hours (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer pendingTxRetentionPeriod =
TransactionPoolConfiguration.DEFAULT_TX_RETENTION_HOURS;
@Option(
names = {"--tx-pool-price-bump"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
converter = PercentageConverter.class,
description =
"Price bump percentage to replace an already existing transaction (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer priceBump = TransactionPoolConfiguration.DEFAULT_PRICE_BUMP.getValue();
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--key-value-storage"},
description = "Identity for the key-value storage to be used.",
arity = "1")
private String keyValueStorageName = DEFAULT_KEY_VALUE_STORAGE_NAME;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"})
@Option(
names = {"--security-module"},
paramLabel = "<NAME>",
description = "Identity for the Security Module to be used.",
arity = "1")
private String securityModuleName = DEFAULT_SECURITY_MODULE;
@Option(
names = {"--auto-log-bloom-caching-enabled"},
description = "Enable automatic log bloom caching (default: ${DEFAULT-VALUE})",
arity = "1")
private final Boolean autoLogBloomCachingEnabled = true;
@Option(
names = {"--override-genesis-config"},
paramLabel = "NAME=VALUE",
description = "Overrides configuration values in the genesis file. Use with care.",
arity = "*",
hidden = true,
split = ",")
private final Map<String, String> genesisConfigOverrides =
new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
@Option(
names = {"--pruning-blocks-retained"},
defaultValue = "1024",
paramLabel = "<INTEGER>",
description =
"Minimum number of recent blocks for which to keep entire world state (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer pruningBlocksRetained = PrunerConfiguration.DEFAULT_PRUNING_BLOCKS_RETAINED;
@Option(
names = {"--pruning-block-confirmations"},
defaultValue = "10",
paramLabel = "<INTEGER>",
description =
"Minimum number of confirmations on a block before marking begins (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer pruningBlockConfirmations =
PrunerConfiguration.DEFAULT_PRUNING_BLOCK_CONFIRMATIONS;
@CommandLine.Option(
names = {"--pid-path"},
paramLabel = MANDATORY_PATH_FORMAT_HELP,
description = "Path to PID file (optional)")
private final Path pidPath = null;
@CommandLine.Option(
names = {"--api-gas-price-blocks"},
paramLabel = MANDATORY_PATH_FORMAT_HELP,
description = "Number of blocks to consider for eth_gasPrice (default: ${DEFAULT-VALUE})")
private final Long apiGasPriceBlocks = 100L;
@CommandLine.Option(
names = {"--api-gas-price-percentile"},
paramLabel = MANDATORY_PATH_FORMAT_HELP,
description = "Percentile value to measure for eth_gasPrice (default: ${DEFAULT-VALUE})")
private final Double apiGasPricePercentile = 50.0;
@CommandLine.Option(
names = {"--api-gas-price-max"},
paramLabel = MANDATORY_PATH_FORMAT_HELP,
description = "Maximum gas price for eth_gasPrice (default: ${DEFAULT-VALUE})")
private final Long apiGasPriceMax = 500_000_000_000L;
@Option(
names = {"--goquorum-compatibility-enabled"},
hidden = true,
description = "Start Besu in GoQuorum compatibility mode (default: ${DEFAULT-VALUE})")
private final Boolean isGoQuorumCompatibilityMode = false;
@CommandLine.Option(
names = {"--static-nodes-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Specifies the static node file containing the static nodes for this node to connect to")
private final Path staticNodesFile = null;
private EthNetworkConfig ethNetworkConfig;
private JsonRpcConfiguration jsonRpcConfiguration;
private GraphQLConfiguration graphQLConfiguration;
private WebSocketConfiguration webSocketConfiguration;
private ApiConfiguration apiConfiguration;
private MetricsConfiguration metricsConfiguration;
private Optional<PermissioningConfiguration> permissioningConfiguration;
private Collection<EnodeURL> staticNodes;
private BesuController besuController;
private BesuConfiguration pluginCommonConfiguration;
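// Created lazily so that the metrics configuration parsed from the command line
// is available before the metrics system is built.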
private final Supplier<ObservableMetricsSystem> metricsSystem =
Suppliers.memoize(() -> MetricsSystemFactory.create(metricsConfiguration()));
private Vertx vertx;
private EnodeDnsConfiguration enodeDnsConfiguration;
private KeyValueStorageProvider keyValueStorageProvider;
public BesuCommand(
final Logger logger,
final Supplier<RlpBlockImporter> rlpBlockImporter,
final Function<BesuController, JsonBlockImporter> jsonBlockImporterFactory,
final Function<Blockchain, RlpBlockExporter> rlpBlockExporterFactory,
final RunnerBuilder runnerBuilder,
final BesuController.Builder controllerBuilderFactory,
final BesuPluginContextImpl besuPluginContext,
final Map<String, String> environment) {
this(
logger,
rlpBlockImporter,
jsonBlockImporterFactory,
rlpBlockExporterFactory,
runnerBuilder,
controllerBuilderFactory,
besuPluginContext,
environment,
new StorageServiceImpl(),
new SecurityModuleServiceImpl());
}
@VisibleForTesting
protected BesuCommand(
final Logger logger,
final Supplier<RlpBlockImporter> rlpBlockImporter,
final Function<BesuController, JsonBlockImporter> jsonBlockImporterFactory,
final Function<Blockchain, RlpBlockExporter> rlpBlockExporterFactory,
final RunnerBuilder runnerBuilder,
final BesuController.Builder controllerBuilderFactory,
final BesuPluginContextImpl besuPluginContext,
final Map<String, String> environment,
final StorageServiceImpl storageService,
final SecurityModuleServiceImpl securityModuleService) {
this.logger = logger;
this.rlpBlockImporter = rlpBlockImporter;
this.rlpBlockExporterFactory = rlpBlockExporterFactory;
this.jsonBlockImporterFactory = jsonBlockImporterFactory;
this.runnerBuilder = runnerBuilder;
this.controllerBuilderFactory = controllerBuilderFactory;
this.besuPluginContext = besuPluginContext;
this.environment = environment;
this.storageService = storageService;
this.securityModuleService = securityModuleService;
pluginCommonConfiguration = new BesuCommandConfigurationService();
besuPluginContext.addService(BesuConfiguration.class, pluginCommonConfiguration);
}
public void parse(
final AbstractParseResultHandler<List<Object>> resultHandler,
final BesuExceptionHandler exceptionHandler,
final InputStream in,
final String... args) {
commandLine =
new CommandLine(this, new BesuCommandCustomFactory(besuPluginContext))
.setCaseInsensitiveEnumValuesAllowed(true);
enableExperimentalEIPs();
addSubCommands(resultHandler, in);
registerConverters();
handleUnstableOptions();
preparePlugins();
parse(resultHandler, exceptionHandler, args);
}
@Override
public void run() {
try {
configureLogging(true);
configureNativeLibs();
logger.info("Starting Besu version: {}", BesuInfo.nodeName(identityString));
// Need to create vertx after cmdline has been parsed, such that metricsSystem is configurable
vertx = createVertx(createVertxOptions(metricsSystem.get()));
final BesuCommand controller = validateOptions().configure().controller();
preSynchronizationTaskRunner.runTasks(controller.besuController);
controller.startPlugins().startSynchronization();
} catch (final Exception e) {
throw new ParameterException(this.commandLine, e.getMessage(), e);
}
}
@VisibleForTesting
void setBesuConfiguration(final BesuConfiguration pluginCommonConfiguration) {
this.pluginCommonConfiguration = pluginCommonConfiguration;
}
private void enableExperimentalEIPs() {
// Usage of static command line flags is strictly reserved for experimental EIPs
commandLine.addMixin("experimentalEIPs", ExperimentalEIPs.class);
}
private void addSubCommands(
final AbstractParseResultHandler<List<Object>> resultHandler, final InputStream in) {
commandLine.addSubcommand(
BlocksSubCommand.COMMAND_NAME,
new BlocksSubCommand(
rlpBlockImporter,
jsonBlockImporterFactory,
rlpBlockExporterFactory,
resultHandler.out()));
commandLine.addSubcommand(
PublicKeySubCommand.COMMAND_NAME,
new PublicKeySubCommand(resultHandler.out(), this::buildNodeKey));
commandLine.addSubcommand(
PasswordSubCommand.COMMAND_NAME, new PasswordSubCommand(resultHandler.out()));
commandLine.addSubcommand(RetestethSubCommand.COMMAND_NAME, new RetestethSubCommand());
commandLine.addSubcommand(
RLPSubCommand.COMMAND_NAME, new RLPSubCommand(resultHandler.out(), in));
commandLine.addSubcommand(
OperatorSubCommand.COMMAND_NAME, new OperatorSubCommand(resultHandler.out()));
}
private void registerConverters() {
commandLine.registerConverter(Address.class, Address::fromHexStringStrict);
commandLine.registerConverter(Bytes.class, Bytes::fromHexString);
commandLine.registerConverter(Level.class, Level::valueOf);
commandLine.registerConverter(SyncMode.class, SyncMode::fromString);
commandLine.registerConverter(MetricsProtocol.class, MetricsProtocol::fromString);
commandLine.registerConverter(UInt256.class, (arg) -> UInt256.valueOf(new BigInteger(arg)));
commandLine.registerConverter(Wei.class, (arg) -> Wei.of(Long.parseUnsignedLong(arg)));
commandLine.registerConverter(PositiveNumber.class, PositiveNumber::fromString);
commandLine.registerConverter(Hash.class, Hash::fromHexString);
commandLine.registerConverter(Optional.class, Optional::of);
commandLine.registerConverter(Double.class, Double::parseDouble);
metricCategoryConverter.addCategories(BesuMetricCategory.class);
metricCategoryConverter.addCategories(StandardMetricCategory.class);
commandLine.registerConverter(MetricCategory.class, metricCategoryConverter);
}
private void handleUnstableOptions() {
// Add unstable options
final ImmutableMap.Builder<String, Object> unstableOptionsBuild = ImmutableMap.builder();
final ImmutableMap<String, Object> unstableOptions =
unstableOptionsBuild
.put("Ethereum Wire Protocol", unstableEthProtocolOptions)
.put("Metrics", unstableMetricsCLIOptions)
.put("P2P Network", unstableNetworkingOptions)
.put("RPC", unstableRPCOptions)
.put("DNS Configuration", unstableDnsOptions)
.put("NAT Configuration", unstableNatOptions)
.put("Synchronizer", unstableSynchronizerOptions)
.put("TransactionPool", unstableTransactionPoolOptions)
.put("Ethstats", unstableEthstatsOptions)
.put("Mining", unstableMiningOptions)
.put("Native Library", unstableNativeLibraryOptions)
.put("Data Storage Options", unstableDataStorageOptions)
.put("Launcher", unstableLauncherOptions)
.build();
UnstableOptionsSubCommand.createUnstableOptions(commandLine, unstableOptions);
}
private void preparePlugins() {
besuPluginContext.addService(PicoCLIOptions.class, new PicoCLIOptionsImpl(commandLine));
besuPluginContext.addService(SecurityModuleService.class, securityModuleService);
besuPluginContext.addService(StorageService.class, storageService);
besuPluginContext.addService(MetricCategoryRegistry.class, metricCategoryRegistry);
// register built-in plugins
new RocksDBPlugin().register(besuPluginContext);
new InMemoryStoragePlugin().register(besuPluginContext);
besuPluginContext.registerPlugins(pluginsDir());
metricCategoryRegistry
.getMetricCategories()
.forEach(metricCategoryConverter::addRegistryCategory);
// register default security module
securityModuleService.register(
DEFAULT_SECURITY_MODULE, Suppliers.memoize(this::defaultSecurityModule));
}
private SecurityModule defaultSecurityModule() {
return new KeyPairSecurityModule(loadKeyPair());
}
@VisibleForTesting
KeyPair loadKeyPair() {
return KeyPairUtil.loadKeyPair(nodePrivateKeyFile());
}
private void parse(
final AbstractParseResultHandler<List<Object>> resultHandler,
final BesuExceptionHandler exceptionHandler,
final String... args) {
// Create a handler that will search for a config file option and use it for
// default values
// and eventually it will run regular parsing of the remaining options.
final ConfigOptionSearchAndRunHandler configParsingHandler =
new ConfigOptionSearchAndRunHandler(
resultHandler, exceptionHandler, CONFIG_FILE_OPTION_NAME, environment);
ParseArgsHelper.getLauncherOptions(unstableLauncherOptions, args);
if (unstableLauncherOptions.isLauncherMode()
|| unstableLauncherOptions.isLauncherModeForced()) {
try {
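// The interactive launcher generates a config file; its path is then fed back
// through the regular --config-file parsing path below.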
final ImmutableLauncherConfig launcherConfig =
ImmutableLauncherConfig.builder()
.launcherScript(BesuCommand.class.getResourceAsStream("launcher.json"))
.addCommandClasses(
this, unstableNatOptions, unstableEthstatsOptions, unstableMiningOptions)
.isLauncherForced(unstableLauncherOptions.isLauncherModeForced())
.build();
final File file = new LauncherManager(launcherConfig).run();
logger.info("Config file location : {}", file.getAbsolutePath());
commandLine.parseWithHandlers(
configParsingHandler,
exceptionHandler,
String.format("%s=%s", CONFIG_FILE_OPTION_NAME, file.getAbsolutePath()));
} catch (LauncherException e) {
logger.warn("Unable to run the launcher {}", e.getMessage());
}
} else {
commandLine.parseWithHandlers(configParsingHandler, exceptionHandler, args);
}
}
private void startSynchronization() {
synchronize(
besuController,
p2pEnabled,
peerDiscoveryEnabled,
ethNetworkConfig,
maxPeers,
p2pHost,
p2pInterface,
p2pPort,
graphQLConfiguration,
jsonRpcConfiguration,
webSocketConfiguration,
apiConfiguration,
metricsConfiguration,
permissioningConfiguration,
staticNodes,
pidPath);
}
private BesuCommand startPlugins() {
besuPluginContext.addService(
BesuEvents.class,
new BesuEventsImpl(
besuController.getProtocolContext().getBlockchain(),
besuController.getProtocolManager().getBlockBroadcaster(),
besuController.getTransactionPool(),
besuController.getSyncState()));
besuPluginContext.addService(MetricsSystem.class, getMetricsSystem());
besuController.getAdditionalPluginServices().appendPluginServices(besuPluginContext);
besuPluginContext.startPlugins();
return this;
}
public void configureLogging(final boolean announce) {
// To change the configuration if color was enabled/disabled
Configurator.reconfigure();
// set log level per CLI flags
if (logLevel != null) {
if (announce) {
System.out.println("Setting logging level to " + logLevel.name());
}
Configurator.setAllLevels("", logLevel);
}
}
public static Optional<Boolean> getColorEnabled() {
return Optional.ofNullable(colorEnabled);
}
private void configureNativeLibs() {
if (unstableNativeLibraryOptions.getNativeAltbn128()) {
AbstractAltBnPrecompiledContract.enableNative();
}
if (unstableNativeLibraryOptions.getNativeSecp256k1()) {
SignatureAlgorithmFactory.getInstance().enableNative();
}
}
private BesuCommand validateOptions() {
issueOptionWarnings();
validateP2PInterface(p2pInterface);
validateMiningParams();
validateNatParams();
validateNetStatsParams();
validateDnsOptionsParams();
return this;
}
@SuppressWarnings("ConstantConditions")
private void validateMiningParams() {
if (isMiningEnabled && coinbase == null) {
throw new ParameterException(
this.commandLine,
"Unable to mine without a valid coinbase. Either disable mining (remove --miner-enabled) "
+ "or specify the beneficiary of mining (via --miner-coinbase <Address>)");
}
if (!isMiningEnabled && iStratumMiningEnabled) {
throw new ParameterException(
this.commandLine,
"Unable to mine with Stratum if mining is disabled. Either disable Stratum mining (remove --miner-stratum-enabled) "
+ "or specify mining is enabled (--miner-enabled)");
}
}
protected void validateP2PInterface(final String p2pInterface) {
final String failMessage = "The provided --p2p-interface is not available: " + p2pInterface;
try {
if (!NetworkUtility.isNetworkInterfaceAvailable(p2pInterface)) {
throw new ParameterException(commandLine, failMessage);
}
} catch (final UnknownHostException | SocketException e) {
throw new ParameterException(commandLine, failMessage, e);
}
}
@SuppressWarnings("ConstantConditions")
private void validateNatParams() {
if (!(natMethod.equals(NatMethod.AUTO) || natMethod.equals(NatMethod.KUBERNETES))
&& !unstableNatOptions
.getNatManagerServiceName()
.equals(DEFAULT_BESU_SERVICE_NAME_FILTER)) {
throw new ParameterException(
this.commandLine,
"The `--Xnat-kube-service-name` parameter is only used in kubernetes mode. Either remove --Xnat-kube-service-name"
+ " or select the KUBERNETES mode (via --nat--method=KUBERNETES)");
}
if (natMethod.equals(NatMethod.AUTO) && !unstableNatOptions.getNatMethodFallbackEnabled()) {
throw new ParameterException(
this.commandLine,
"The `--Xnat-method-fallback-enabled` parameter cannot be used in AUTO mode. Either remove --Xnat-method-fallback-enabled"
+ " or select another mode (via --nat--method=XXXX)");
}
}
private void validateNetStatsParams() {
if (Strings.isNullOrEmpty(unstableEthstatsOptions.getEthstatsUrl())
&& !unstableEthstatsOptions.getEthstatsContact().isEmpty()) {
throw new ParameterException(
this.commandLine,
"The `--Xethstats-contact` requires ethstats server URL to be provided. Either remove --Xethstats-contact"
+ " or provide an url (via --Xethstats=nodename:secret@host:port)");
}
}
private void validateDnsOptionsParams() {
if (!unstableDnsOptions.getDnsEnabled() && unstableDnsOptions.getDnsUpdateEnabled()) {
throw new ParameterException(
this.commandLine,
"The `--Xdns-update-enabled` requires dns to be enabled. Either remove --Xdns-update-enabled"
+ " or specify dns is enabled (--Xdns-enabled)");
}
}
private GenesisConfigOptions readGenesisConfigOptions() {
final GenesisConfigOptions genesisConfigOptions;
try {
final GenesisConfigFile genesisConfigFile = GenesisConfigFile.fromConfig(genesisConfig());
genesisConfigOptions = genesisConfigFile.getConfigOptions(genesisConfigOverrides);
} catch (final Exception e) {
throw new IllegalStateException("Unable to read genesis file for GoQuorum options", e);
}
return genesisConfigOptions;
}
private void issueOptionWarnings() {
// Check that P2P options are able to work
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--p2p-enabled",
!p2pEnabled,
asList(
"--bootnodes",
"--discovery-enabled",
"--max-peers",
"--banned-node-id",
"--banned-node-ids",
"--p2p-host",
"--p2p-interface",
"--p2p-port",
"--remote-connections-max-percentage"));
// Check that mining options are able to work
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--miner-enabled",
!isMiningEnabled,
asList(
"--miner-coinbase",
"--min-gas-price",
"--min-block-occupancy-ratio",
"--miner-extra-data",
"--miner-stratum-enabled",
"--Xminer-remote-sealers-limit",
"--Xminer-remote-sealers-hashrate-ttl"));
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--sync-mode",
!SyncMode.FAST.equals(syncMode),
singletonList("--fast-sync-min-peers"));
if (!securityModuleName.equals(DEFAULT_SECURITY_MODULE) && nodePrivateKeyFile != null) {
logger.warn(
DEPENDENCY_WARNING_MSG,
"--node-private-key-file",
"--security-module=" + DEFAULT_SECURITY_MODULE);
}
}
private BesuCommand configure() throws Exception {
checkPortClash();
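// When no sync mode is given, default to FAST sync unless a custom genesis file,
// privacy, or the DEV network is in use, in which case fall back to FULL sync.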
syncMode =
Optional.ofNullable(syncMode)
.orElse(
genesisFile == null && !isPrivacyEnabled && network != NetworkName.DEV
? SyncMode.FAST
: SyncMode.FULL);
ethNetworkConfig = updateNetworkConfig(getNetwork());
if (isGoQuorumCompatibilityMode) {
checkGoQuorumCompatibilityConfig(ethNetworkConfig);
}
jsonRpcConfiguration = jsonRpcConfiguration();
graphQLConfiguration = graphQLConfiguration();
webSocketConfiguration = webSocketConfiguration();
apiConfiguration = apiConfiguration();
// hostsWhitelist is a hidden option. If it is specified, add the list to hostAllowlist
if (!hostsWhitelist.isEmpty()) {
// if allowlist == default values, remove the default values
if (hostsAllowlist.size() == 2
&& hostsAllowlist.containsAll(List.of("localhost", "127.0.0.1"))) {
hostsAllowlist.removeAll(List.of("localhost", "127.0.0.1"));
}
hostsAllowlist.addAll(hostsWhitelist);
}
permissioningConfiguration = permissioningConfiguration();
staticNodes = loadStaticNodes();
logger.info("Connecting to {} static nodes.", staticNodes.size());
logger.trace("Static Nodes = {}", staticNodes);
final List<EnodeURL> enodeURIs = ethNetworkConfig.getBootNodes();
permissioningConfiguration
.flatMap(PermissioningConfiguration::getLocalConfig)
.ifPresent(p -> ensureAllNodesAreInAllowlist(enodeURIs, p));
permissioningConfiguration
.flatMap(PermissioningConfiguration::getLocalConfig)
.ifPresent(p -> ensureAllNodesAreInAllowlist(staticNodes, p));
metricsConfiguration = metricsConfiguration();
logger.info("Security Module: {}", securityModuleName);
return this;
}
private GoQuorumPrivacyParameters configureGoQuorumPrivacy(
final KeyValueStorageProvider storageProvider) {
return new GoQuorumPrivacyParameters(
createGoQuorumEnclave(),
readEnclaveKey(),
storageProvider.createGoQuorumPrivateStorage(),
createPrivateWorldStateArchive(storageProvider));
}
private GoQuorumEnclave createGoQuorumEnclave() {
final EnclaveFactory enclaveFactory = new EnclaveFactory(Vertx.vertx());
if (privacyKeyStoreFile != null) {
return enclaveFactory.createGoQuorumEnclave(
privacyUrl, privacyKeyStoreFile, privacyKeyStorePasswordFile, privacyTlsKnownEnclaveFile);
} else {
return enclaveFactory.createGoQuorumEnclave(privacyUrl);
}
}
private String readEnclaveKey() {
final String key;
try {
key = Files.asCharSource(privacyPublicKeyFile, UTF_8).read();
} catch (final Exception e) {
throw new ParameterException(
this.commandLine,
"--privacy-public-key-file must be set when --goquorum-compatibility-enabled is set to true.",
e);
}
if (key.length() != 44) {
throw new IllegalArgumentException(
"Contents of enclave public key file needs to be 44 characters long to decode to a valid 32 byte public key.");
}
// throws exception if invalid base 64
Base64.getDecoder().decode(key);
return key;
}
private NetworkName getNetwork() {
// noinspection ConstantConditions network is not always null but injected by
// PicoCLI if used
return network == null ? MAINNET : network;
}
private void ensureAllNodesAreInAllowlist(
final Collection<EnodeURL> enodeAddresses,
final LocalPermissioningConfiguration permissioningConfiguration) {
try {
PermissioningConfigurationValidator.areAllNodesAreInAllowlist(
enodeAddresses, permissioningConfiguration);
} catch (final Exception e) {
throw new ParameterException(this.commandLine, e.getMessage());
}
}
private BesuCommand controller() {
besuController = buildController();
return this;
}
public BesuController buildController() {
try {
return getControllerBuilder().build();
} catch (final Exception e) {
throw new ExecutionException(this.commandLine, e.getMessage(), e);
}
}
public BesuControllerBuilder getControllerBuilder() {
final KeyValueStorageProvider storageProvider = keyValueStorageProvider(keyValueStorageName);
return controllerBuilderFactory
.fromEthNetworkConfig(updateNetworkConfig(getNetwork()), genesisConfigOverrides)
.synchronizerConfiguration(buildSyncConfig())
.ethProtocolConfiguration(unstableEthProtocolOptions.toDomainObject())
.dataDirectory(dataDir())
.miningParameters(
new MiningParameters(
coinbase,
minTransactionGasPrice,
extraData,
isMiningEnabled,
iStratumMiningEnabled,
stratumNetworkInterface,
stratumPort,
unstableMiningOptions.getStratumExtranonce(),
Optional.empty(),
minBlockOccupancyRatio,
unstableMiningOptions.getRemoteSealersLimit(),
unstableMiningOptions.getRemoteSealersTimeToLive()))
.transactionPoolConfiguration(buildTransactionPoolConfiguration())
.nodeKey(buildNodeKey())
.metricsSystem(metricsSystem.get())
.privacyParameters(privacyParameters(storageProvider))
.clock(Clock.systemUTC())
.isRevertReasonEnabled(isRevertReasonEnabled)
.storageProvider(storageProvider)
.isPruningEnabled(isPruningEnabled())
.pruningConfiguration(
new PrunerConfiguration(pruningBlockConfirmations, pruningBlocksRetained))
.genesisConfigOverrides(genesisConfigOverrides)
.gasLimitCalculator(
Optional.ofNullable(targetGasLimit)
.<GasLimitCalculator>map(TargetingGasLimitCalculator::new)
.orElse(GasLimitCalculator.constant()))
.requiredBlocks(requiredBlocks)
.reorgLoggingThreshold(reorgLoggingThreshold)
.dataStorageConfiguration(unstableDataStorageOptions.toDomainObject());
}
private GraphQLConfiguration graphQLConfiguration() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--graphql-http-enabled",
!isGraphQLHttpEnabled,
asList("--graphql-http-cors-origins", "--graphql-http-host", "--graphql-http-port"));
final GraphQLConfiguration graphQLConfiguration = GraphQLConfiguration.createDefault();
graphQLConfiguration.setEnabled(isGraphQLHttpEnabled);
graphQLConfiguration.setHost(graphQLHttpHost);
graphQLConfiguration.setPort(graphQLHttpPort);
graphQLConfiguration.setHostsAllowlist(hostsAllowlist);
graphQLConfiguration.setCorsAllowedDomains(graphQLHttpCorsAllowedOrigins);
graphQLConfiguration.setHttpTimeoutSec(unstableRPCOptions.getHttpTimeoutSec());
return graphQLConfiguration;
}
private JsonRpcConfiguration jsonRpcConfiguration() {
checkRpcTlsClientAuthOptionsDependencies();
checkRpcTlsOptionsDependencies();
checkRpcHttpOptionsDependencies();
if (isRpcHttpAuthenticationEnabled
&& rpcHttpAuthenticationCredentialsFile() == null
&& rpcHttpAuthenticationPublicKeyFile == null) {
throw new ParameterException(
commandLine,
"Unable to authenticate JSON-RPC HTTP endpoint without a supplied credentials file or authentication public key file");
}
final JsonRpcConfiguration jsonRpcConfiguration = JsonRpcConfiguration.createDefault();
jsonRpcConfiguration.setEnabled(isRpcHttpEnabled);
jsonRpcConfiguration.setHost(rpcHttpHost);
jsonRpcConfiguration.setPort(rpcHttpPort);
jsonRpcConfiguration.setMaxActiveConnections(rpcHttpMaxConnections);
jsonRpcConfiguration.setCorsAllowedDomains(rpcHttpCorsAllowedOrigins);
jsonRpcConfiguration.setRpcApis(rpcHttpApis.stream().distinct().collect(Collectors.toList()));
jsonRpcConfiguration.setHostsAllowlist(hostsAllowlist);
jsonRpcConfiguration.setAuthenticationEnabled(isRpcHttpAuthenticationEnabled);
jsonRpcConfiguration.setAuthenticationCredentialsFile(rpcHttpAuthenticationCredentialsFile());
jsonRpcConfiguration.setAuthenticationPublicKeyFile(rpcHttpAuthenticationPublicKeyFile);
jsonRpcConfiguration.setTlsConfiguration(rpcHttpTlsConfiguration());
jsonRpcConfiguration.setHttpTimeoutSec(unstableRPCOptions.getHttpTimeoutSec());
return jsonRpcConfiguration;
}
private void checkRpcHttpOptionsDependencies() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-http-enabled",
!isRpcHttpEnabled,
asList(
"--rpc-http-api",
"--rpc-http-apis",
"--rpc-http-cors-origins",
"--rpc-http-host",
"--rpc-http-port",
"--rpc-http-max-active-connections",
"--rpc-http-authentication-enabled",
"--rpc-http-authentication-credentials-file",
"--rpc-http-authentication-public-key-file",
"--rpc-http-tls-enabled",
"--rpc-http-tls-keystore-file",
"--rpc-http-tls-keystore-password-file",
"--rpc-http-tls-client-auth-enabled",
"--rpc-http-tls-known-clients-file",
"--rpc-http-tls-ca-clients-enabled"));
}
private void checkRpcTlsOptionsDependencies() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-http-tls-enabled",
!isRpcHttpTlsEnabled,
asList(
"--rpc-http-tls-keystore-file",
"--rpc-http-tls-keystore-password-file",
"--rpc-http-tls-client-auth-enabled",
"--rpc-http-tls-known-clients-file",
"--rpc-http-tls-ca-clients-enabled"));
}
private void checkRpcTlsClientAuthOptionsDependencies() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-http-tls-client-auth-enabled",
!isRpcHttpTlsClientAuthEnabled,
asList("--rpc-http-tls-known-clients-file", "--rpc-http-tls-ca-clients-enabled"));
}
private void checkPrivacyTlsOptionsDependencies() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--privacy-tls-enabled",
!isPrivacyTlsEnabled,
asList(
"--privacy-tls-keystore-file",
"--privacy-tls-keystore-password-file",
"--privacy-tls-known-enclave-file"));
}
private Optional<TlsConfiguration> rpcHttpTlsConfiguration() {
if (!isRpcTlsConfigurationRequired()) {
return Optional.empty();
}
if (rpcHttpTlsKeyStoreFile == null) {
throw new ParameterException(
commandLine, "Keystore file is required when TLS is enabled for JSON-RPC HTTP endpoint");
}
if (rpcHttpTlsKeyStorePasswordFile == null) {
throw new ParameterException(
commandLine,
"File containing password to unlock keystore is required when TLS is enabled for JSON-RPC HTTP endpoint");
}
if (isRpcHttpTlsClientAuthEnabled
&& !isRpcHttpTlsCAClientsEnabled
&& rpcHttpTlsKnownClientsFile == null) {
throw new ParameterException(
commandLine,
"Known-clients file must be specified or CA clients must be enabled when TLS client authentication is enabled for JSON-RPC HTTP endpoint");
}
return Optional.of(
TlsConfiguration.Builder.aTlsConfiguration()
.withKeyStorePath(rpcHttpTlsKeyStoreFile)
.withKeyStorePasswordSupplier(
new FileBasedPasswordProvider(rpcHttpTlsKeyStorePasswordFile))
.withClientAuthConfiguration(rpcHttpTlsClientAuthConfiguration())
.build());
}
private TlsClientAuthConfiguration rpcHttpTlsClientAuthConfiguration() {
if (isRpcHttpTlsClientAuthEnabled) {
return TlsClientAuthConfiguration.Builder.aTlsClientAuthConfiguration()
.withKnownClientsFile(rpcHttpTlsKnownClientsFile)
.withCaClientsEnabled(isRpcHttpTlsCAClientsEnabled)
.build();
}
return null;
}
private boolean isRpcTlsConfigurationRequired() {
return isRpcHttpEnabled && isRpcHttpTlsEnabled;
}
private WebSocketConfiguration webSocketConfiguration() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-ws-enabled",
!isRpcWsEnabled,
asList(
"--rpc-ws-api",
"--rpc-ws-apis",
"--rpc-ws-host",
"--rpc-ws-port",
"--rpc-ws-max-active-connections",
"--rpc-ws-authentication-enabled",
"--rpc-ws-authentication-credentials-file",
"--rpc-ws-authentication-public-key-file"));
if (isRpcWsAuthenticationEnabled
&& rpcWsAuthenticationCredentialsFile() == null
&& rpcWsAuthenticationPublicKeyFile == null) {
throw new ParameterException(
commandLine,
"Unable to authenticate JSON-RPC WebSocket endpoint without a supplied credentials file or authentication public key file");
}
final WebSocketConfiguration webSocketConfiguration = WebSocketConfiguration.createDefault();
webSocketConfiguration.setEnabled(isRpcWsEnabled);
webSocketConfiguration.setHost(rpcWsHost);
webSocketConfiguration.setPort(rpcWsPort);
webSocketConfiguration.setMaxActiveConnections(rpcWsMaxConnections);
webSocketConfiguration.setRpcApis(rpcWsApis);
webSocketConfiguration.setAuthenticationEnabled(isRpcWsAuthenticationEnabled);
webSocketConfiguration.setAuthenticationCredentialsFile(rpcWsAuthenticationCredentialsFile());
webSocketConfiguration.setHostsAllowlist(hostsAllowlist);
webSocketConfiguration.setAuthenticationPublicKeyFile(rpcWsAuthenticationPublicKeyFile);
webSocketConfiguration.setTimeoutSec(unstableRPCOptions.getWsTimeoutSec());
return webSocketConfiguration;
}
private ApiConfiguration apiConfiguration() {
return ImmutableApiConfiguration.builder()
.gasPriceBlocks(apiGasPriceBlocks)
.gasPricePercentile(apiGasPricePercentile)
.gasPriceMin(minTransactionGasPrice.toLong())
.gasPriceMax(apiGasPriceMax)
.build();
}
public MetricsConfiguration metricsConfiguration() {
if (isMetricsEnabled && isMetricsPushEnabled) {
throw new ParameterException(
this.commandLine,
"--metrics-enabled option and --metrics-push-enabled option can't be used at the same "
+ "time. Please refer to CLI reference for more details about this constraint.");
}
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--metrics-enabled",
!isMetricsEnabled,
asList("--metrics-host", "--metrics-port"));
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--metrics-push-enabled",
!isMetricsPushEnabled,
asList(
"--metrics-push-host",
"--metrics-push-port",
"--metrics-push-interval",
"--metrics-push-prometheus-job"));
return unstableMetricsCLIOptions
.toDomainObject()
.enabled(isMetricsEnabled)
.host(metricsHost)
.port(metricsPort)
.protocol(metricsProtocol)
.metricCategories(metricCategories)
.pushEnabled(isMetricsPushEnabled)
.pushHost(metricsPushHost)
.pushPort(metricsPushPort)
.pushInterval(metricsPushInterval)
.hostsAllowlist(hostsAllowlist)
.prometheusJob(metricsPrometheusJob)
.build();
}
private Optional<PermissioningConfiguration> permissioningConfiguration() throws Exception {
if (!(localPermissionsEnabled() || contractPermissionsEnabled())) {
if (rpcHttpApis.contains(RpcApis.PERM) || rpcWsApis.contains(RpcApis.PERM)) {
logger.warn(
"Permissions are disabled. Cannot enable PERM APIs when not using Permissions.");
}
return Optional.empty();
}
final Optional<LocalPermissioningConfiguration> localPermissioningConfigurationOptional;
if (localPermissionsEnabled()) {
final Optional<String> nodePermissioningConfigFile =
Optional.ofNullable(nodePermissionsConfigFile);
final Optional<String> accountPermissioningConfigFile =
Optional.ofNullable(accountPermissionsConfigFile);
final LocalPermissioningConfiguration localPermissioningConfiguration =
PermissioningConfigurationBuilder.permissioningConfiguration(
permissionsNodesEnabled,
getEnodeDnsConfiguration(),
nodePermissioningConfigFile.orElse(getDefaultPermissioningFilePath()),
permissionsAccountsEnabled,
accountPermissioningConfigFile.orElse(getDefaultPermissioningFilePath()));
localPermissioningConfigurationOptional = Optional.of(localPermissioningConfiguration);
} else {
if (nodePermissionsConfigFile != null && !permissionsNodesEnabled) {
logger.warn(
"Node permissioning config file set {} but no permissions enabled",
nodePermissionsConfigFile);
}
if (accountPermissionsConfigFile != null && !permissionsAccountsEnabled) {
logger.warn(
"Account permissioning config file set {} but no permissions enabled",
accountPermissionsConfigFile);
}
localPermissioningConfigurationOptional = Optional.empty();
}
final SmartContractPermissioningConfiguration smartContractPermissioningConfiguration =
SmartContractPermissioningConfiguration.createDefault();
if (permissionsNodesContractEnabled) {
if (permissionsNodesContractAddress == null) {
throw new ParameterException(
this.commandLine,
"No node permissioning contract address specified. Cannot enable smart contract based node permissioning.");
} else {
smartContractPermissioningConfiguration.setSmartContractNodeAllowlistEnabled(
permissionsNodesContractEnabled);
smartContractPermissioningConfiguration.setNodeSmartContractAddress(
permissionsNodesContractAddress);
smartContractPermissioningConfiguration.setNodeSmartContractInterfaceVersion(
permissionsNodesContractVersion);
}
} else if (permissionsNodesContractAddress != null) {
logger.warn(
"Node permissioning smart contract address set {} but smart contract node permissioning is disabled.",
permissionsNodesContractAddress);
}
if (permissionsAccountsContractEnabled) {
if (permissionsAccountsContractAddress == null) {
throw new ParameterException(
this.commandLine,
"No account permissioning contract address specified. Cannot enable smart contract based account permissioning.");
} else {
smartContractPermissioningConfiguration.setSmartContractAccountAllowlistEnabled(
permissionsAccountsContractEnabled);
smartContractPermissioningConfiguration.setAccountSmartContractAddress(
permissionsAccountsContractAddress);
}
} else if (permissionsAccountsContractAddress != null) {
logger.warn(
"Account permissioning smart contract address set {} but smart contract account permissioning is disabled.",
permissionsAccountsContractAddress);
}
final PermissioningConfiguration permissioningConfiguration =
new PermissioningConfiguration(
localPermissioningConfigurationOptional,
Optional.of(smartContractPermissioningConfiguration),
quorumPermissioningConfig());
return Optional.of(permissioningConfiguration);
}
private Optional<GoQuorumPermissioningConfiguration> quorumPermissioningConfig() {
if (!isGoQuorumCompatibilityMode) {
return Optional.empty();
}
try {
final GenesisConfigOptions genesisConfigOptions = readGenesisConfigOptions();
final OptionalLong qip714BlockNumber = genesisConfigOptions.getQip714BlockNumber();
return Optional.of(
GoQuorumPermissioningConfiguration.enabled(
qip714BlockNumber.orElse(QIP714_DEFAULT_BLOCK)));
} catch (final Exception e) {
throw new IllegalStateException("Error reading GoQuorum permissioning options", e);
}
}
private boolean localPermissionsEnabled() {
return permissionsAccountsEnabled || permissionsNodesEnabled;
}
private boolean contractPermissionsEnabled() {
return permissionsNodesContractEnabled || permissionsAccountsContractEnabled;
}
private PrivacyParameters privacyParameters(final KeyValueStorageProvider storageProvider) {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--privacy-enabled",
!isPrivacyEnabled,
asList(
"--privacy-url",
"--privacy-public-key-file",
"--privacy-multi-tenancy-enabled",
"--privacy-tls-enabled"));
checkPrivacyTlsOptionsDependencies();
final PrivacyParameters.Builder privacyParametersBuilder = new PrivacyParameters.Builder();
if (isPrivacyEnabled) {
final String errorSuffix = "cannot be enabled with privacy.";
if (syncMode == SyncMode.FAST) {
throw new ParameterException(commandLine, String.format("%s %s", "Fast sync", errorSuffix));
}
if (isPruningEnabled()) {
throw new ParameterException(commandLine, String.format("%s %s", "Pruning", errorSuffix));
}
if (isGoQuorumCompatibilityMode) {
throw new ParameterException(
commandLine, String.format("%s %s", "GoQuorum mode", errorSuffix));
}
if (isPrivacyMultiTenancyEnabled
&& !jsonRpcConfiguration.isAuthenticationEnabled()
&& !webSocketConfiguration.isAuthenticationEnabled()) {
throw new ParameterException(
commandLine,
"Privacy multi-tenancy requires either http authentication to be enabled or WebSocket authentication to be enabled");
}
privacyParametersBuilder.setEnabled(true);
privacyParametersBuilder.setEnclaveUrl(privacyUrl);
privacyParametersBuilder.setMultiTenancyEnabled(isPrivacyMultiTenancyEnabled);
privacyParametersBuilder.setOnchainPrivacyGroupsEnabled(isFlexiblePrivacyGroupsEnabled);
final boolean hasPrivacyPublicKey = privacyPublicKeyFile != null;
if (hasPrivacyPublicKey && !isPrivacyMultiTenancyEnabled) {
try {
privacyParametersBuilder.setEnclavePublicKeyUsingFile(privacyPublicKeyFile);
} catch (final IOException e) {
throw new ParameterException(
commandLine, "Problem with privacy-public-key-file: " + e.getMessage(), e);
} catch (final IllegalArgumentException e) {
throw new ParameterException(
commandLine, "Contents of privacy-public-key-file invalid: " + e.getMessage(), e);
}
} else if (hasPrivacyPublicKey) {
throw new ParameterException(
commandLine, "Privacy multi-tenancy and privacy public key cannot be used together");
} else if (!isPrivacyMultiTenancyEnabled) {
throw new ParameterException(
commandLine, "Please specify Enclave public key file path to enable privacy");
}
if (Wei.ZERO.compareTo(minTransactionGasPrice) < 0) {
// if gas is required, cannot use random keys to sign private tx
// ie --privacy-marker-transaction-signing-key-file must be set
if (privacyMarkerTransactionSigningKeyPath == null) {
throw new ParameterException(
commandLine,
"Not a free gas network. --privacy-marker-transaction-signing-key-file must be specified and must be a funded account. Private transactions cannot be signed by random (non-funded) accounts in paid gas networks");
}
}
if (!Address.PRIVACY.equals(privacyPrecompiledAddress)) {
logger.warn(
"--privacy-precompiled-address option is deprecated. This address is derived, based on --privacy-onchain-groups-enabled.");
}
privacyParametersBuilder.setPrivateKeyPath(privacyMarkerTransactionSigningKeyPath);
privacyParametersBuilder.setStorageProvider(
privacyKeyStorageProvider(keyValueStorageName + "-privacy"));
if (isPrivacyTlsEnabled) {
privacyParametersBuilder.setPrivacyKeyStoreFile(privacyKeyStoreFile);
privacyParametersBuilder.setPrivacyKeyStorePasswordFile(privacyKeyStorePasswordFile);
privacyParametersBuilder.setPrivacyTlsKnownEnclaveFile(privacyTlsKnownEnclaveFile);
}
privacyParametersBuilder.setEnclaveFactory(new EnclaveFactory(vertx));
} else if (isGoQuorumCompatibilityMode) {
privacyParametersBuilder.setGoQuorumPrivacyParameters(
Optional.of(configureGoQuorumPrivacy(storageProvider)));
}
if (!isPrivacyEnabled && anyPrivacyApiEnabled()) {
logger.warn("Privacy is disabled. Cannot use EEA/PRIV API methods when not using Privacy.");
}
if (!isGoQuorumCompatibilityMode
&& (rpcHttpApis.contains(RpcApis.GOQUORUM) || rpcWsApis.contains(RpcApis.GOQUORUM))) {
logger.warn("Cannot use GOQUORUM API methods when not in GoQuorum mode.");
}
final PrivacyParameters privacyParameters = privacyParametersBuilder.build();
if (isPrivacyEnabled) {
preSynchronizationTaskRunner.addTask(
new PrivateDatabaseMigrationPreSyncTask(privacyParameters, migratePrivateDatabase));
}
return privacyParameters;
}
public WorldStateArchive createPrivateWorldStateArchive(final StorageProvider storageProvider) {
final WorldStateStorage privateWorldStateStorage =
storageProvider.createPrivateWorldStateStorage();
final WorldStatePreimageStorage preimageStorage =
storageProvider.createPrivateWorldStatePreimageStorage();
return new DefaultWorldStateArchive(privateWorldStateStorage, preimageStorage);
}
private boolean anyPrivacyApiEnabled() {
return rpcHttpApis.contains(RpcApis.EEA)
|| rpcWsApis.contains(RpcApis.EEA)
|| rpcHttpApis.contains(RpcApis.PRIV)
|| rpcWsApis.contains(RpcApis.PRIV);
}
private PrivacyKeyValueStorageProvider privacyKeyStorageProvider(final String name) {
return new PrivacyKeyValueStorageProviderBuilder()
.withStorageFactory(privacyKeyValueStorageFactory(name))
.withCommonConfiguration(pluginCommonConfiguration)
.withMetricsSystem(getMetricsSystem())
.build();
}
private PrivacyKeyValueStorageFactory privacyKeyValueStorageFactory(final String name) {
return (PrivacyKeyValueStorageFactory)
storageService
.getByName(name)
.orElseThrow(
() -> new StorageException("No KeyValueStorageFactory found for key: " + name));
}
private KeyValueStorageProvider keyValueStorageProvider(final String name) {
if (this.keyValueStorageProvider == null) {
this.keyValueStorageProvider =
new KeyValueStorageProviderBuilder()
.withStorageFactory(
storageService
.getByName(name)
.orElseThrow(
() ->
new StorageException(
"No KeyValueStorageFactory found for key: " + name)))
.withCommonConfiguration(pluginCommonConfiguration)
.withMetricsSystem(getMetricsSystem())
.build();
}
return this.keyValueStorageProvider;
}
private SynchronizerConfiguration buildSyncConfig() {
return unstableSynchronizerOptions
.toDomainObject()
.syncMode(syncMode)
.fastSyncMinimumPeerCount(fastSyncMinPeerCount)
.build();
}
private TransactionPoolConfiguration buildTransactionPoolConfiguration() {
return unstableTransactionPoolOptions
.toDomainObject()
.txPoolMaxSize(txPoolMaxSize)
.pooledTransactionHashesSize(pooledTransactionHashesSize)
.pendingTxRetentionPeriod(pendingTxRetentionPeriod)
.priceBump(Percentage.fromInt(priceBump))
.txFeeCap(txFeeCap)
.build();
}
private boolean isPruningEnabled() {
return pruningEnabled;
}
// Blockchain synchronisation from peers.
private void synchronize(
final BesuController controller,
final boolean p2pEnabled,
final boolean peerDiscoveryEnabled,
final EthNetworkConfig ethNetworkConfig,
final int maxPeers,
final String p2pAdvertisedHost,
final String p2pListenInterface,
final int p2pListenPort,
final GraphQLConfiguration graphQLConfiguration,
final JsonRpcConfiguration jsonRpcConfiguration,
final WebSocketConfiguration webSocketConfiguration,
final ApiConfiguration apiConfiguration,
final MetricsConfiguration metricsConfiguration,
final Optional<PermissioningConfiguration> permissioningConfiguration,
final Collection<EnodeURL> staticNodes,
final Path pidPath) {
checkNotNull(runnerBuilder);
permissioningConfiguration.ifPresent(runnerBuilder::permissioningConfiguration);
final ObservableMetricsSystem metricsSystem = this.metricsSystem.get();
final Runner runner =
runnerBuilder
.vertx(vertx)
.besuController(controller)
.p2pEnabled(p2pEnabled)
.natMethod(natMethod)
.natManagerServiceName(unstableNatOptions.getNatManagerServiceName())
.natMethodFallbackEnabled(unstableNatOptions.getNatMethodFallbackEnabled())
.discovery(peerDiscoveryEnabled)
.ethNetworkConfig(ethNetworkConfig)
.p2pAdvertisedHost(p2pAdvertisedHost)
.p2pListenInterface(p2pListenInterface)
.p2pListenPort(p2pListenPort)
.maxPeers(maxPeers)
.limitRemoteWireConnectionsEnabled(isLimitRemoteWireConnectionsEnabled)
.fractionRemoteConnectionsAllowed(
Fraction.fromPercentage(maxRemoteConnectionsPercentage).getValue())
.randomPeerPriority(randomPeerPriority)
.networkingConfiguration(unstableNetworkingOptions.toDomainObject())
.graphQLConfiguration(graphQLConfiguration)
.jsonRpcConfiguration(jsonRpcConfiguration)
.webSocketConfiguration(webSocketConfiguration)
.apiConfiguration(apiConfiguration)
.pidPath(pidPath)
.dataDir(dataDir())
.bannedNodeIds(bannedNodeIds)
.metricsSystem(metricsSystem)
.metricsConfiguration(metricsConfiguration)
.staticNodes(staticNodes)
.identityString(identityString)
.besuPluginContext(besuPluginContext)
.autoLogBloomCaching(autoLogBloomCachingEnabled)
.ethstatsUrl(unstableEthstatsOptions.getEthstatsUrl())
.ethstatsContact(unstableEthstatsOptions.getEthstatsContact())
.storageProvider(keyValueStorageProvider(keyValueStorageName))
.forkIdSupplier(() -> besuController.getProtocolManager().getForkIdAsBytesList())
.build();
addShutdownHook(runner);
runner.start();
runner.awaitStop();
}
protected Vertx createVertx(final VertxOptions vertxOptions) {
return Vertx.vertx(vertxOptions);
}
private VertxOptions createVertxOptions(final MetricsSystem metricsSystem) {
return new VertxOptions()
.setMetricsOptions(
new MetricsOptions()
.setEnabled(true)
.setFactory(new VertxMetricsAdapterFactory(metricsSystem)));
}
private void addShutdownHook(final Runner runner) {
Runtime.getRuntime()
.addShutdownHook(
new Thread(
() -> {
try {
besuPluginContext.stopPlugins();
runner.close();
LogManager.shutdown();
} catch (final Exception e) {
logger.error("Failed to stop Besu");
}
}));
}
// Used to discover the default IP of the client.
// Loopback IP is used by default as this is how smokeTests require it to be
// and it's probably a good security behaviour to default only on the localhost.
private InetAddress autoDiscoverDefaultIP() {
if (autoDiscoveredDefaultIP != null) {
return autoDiscoveredDefaultIP;
}
autoDiscoveredDefaultIP = InetAddress.getLoopbackAddress();
return autoDiscoveredDefaultIP;
}
private EthNetworkConfig updateNetworkConfig(final NetworkName network) {
final EthNetworkConfig.Builder builder =
new EthNetworkConfig.Builder(EthNetworkConfig.getNetworkConfig(network));
// custom genesis file use comes with specific default values for the genesis
// file itself
// but also for the network id and the bootnodes list.
if (genesisFile != null) {
// noinspection ConstantConditions network is not always null but injected by
// PicoCLI if used
if (this.network != null) {
// We check if network option was really provided by user and not only looking
// at the
// default value.
// if user provided it and provided the genesis file option at the same time, it
// raises a
// conflict error
throw new ParameterException(
this.commandLine,
"--network option and --genesis-file option can't be used at the same time. Please "
+ "refer to CLI reference for more details about this constraint.");
}
builder.setGenesisConfig(genesisConfig());
if (networkId == null) {
// if no network id option is defined on the CLI we have to set a default value
// from the
// genesis file.
// We do the genesis parsing only in this case as we already have network id
// constants
// for known networks to speed up the process.
// Also we have to parse the genesis as we don't already have a parsed version
// at this
// stage.
// If no chain id is found in the genesis as it's an optional, we use mainnet
// network id.
try {
builder.setNetworkId(
getGenesisConfigFile()
.getConfigOptions(genesisConfigOverrides)
.getChainId()
.orElse(EthNetworkConfig.getNetworkConfig(MAINNET).getNetworkId()));
} catch (final DecodeException e) {
throw new ParameterException(
this.commandLine, String.format("Unable to parse genesis file %s.", genesisFile), e);
} catch (final ArithmeticException e) {
throw new ParameterException(
this.commandLine,
"No networkId specified and chainId in "
+ "genesis file is too large to be used as a networkId");
}
}
if (bootNodes == null) {
// We default to an empty bootnodes list if the option is not provided on CLI
// because
// mainnet bootnodes won't work as the default value for a custom genesis,
// so it's better to have an empty list as default value that forces to create a
// custom one
// than a useless one that may make user think that it can work when it can't.
builder.setBootNodes(new ArrayList<>());
}
builder.setDnsDiscoveryUrl(null);
}
if (networkId != null) {
builder.setNetworkId(networkId);
}
if (bootNodes != null) {
if (!peerDiscoveryEnabled) {
logger.warn("Discovery disabled: bootnodes will be ignored.");
}
try {
final List<EnodeURL> listBootNodes =
bootNodes.stream()
.filter(value -> !value.isEmpty())
.map(url -> EnodeURL.fromString(url, getEnodeDnsConfiguration()))
.collect(Collectors.toList());
DiscoveryConfiguration.assertValidBootnodes(listBootNodes);
builder.setBootNodes(listBootNodes);
} catch (final IllegalArgumentException e) {
throw new ParameterException(commandLine, e.getMessage());
}
}
return builder.build();
}
private GenesisConfigFile getGenesisConfigFile() {
return GenesisConfigFile.fromConfig(genesisConfig());
}
private String genesisConfig() {
try {
return Resources.toString(genesisFile.toURI().toURL(), UTF_8);
} catch (final IOException e) {
throw new ParameterException(
this.commandLine, String.format("Unable to load genesis file %s.", genesisFile), e);
}
}
// dataDir() is public because it is accessed by subcommands
public Path dataDir() {
return dataPath.toAbsolutePath();
}
private Path pluginsDir() {
final String pluginsDir = System.getProperty("besu.plugins.dir");
if (pluginsDir == null) {
return new File(System.getProperty("besu.home", "."), "plugins").toPath();
} else {
return new File(pluginsDir).toPath();
}
}
@VisibleForTesting
NodeKey buildNodeKey() {
return new NodeKey(securityModule());
}
private SecurityModule securityModule() {
return securityModuleService
.getByName(securityModuleName)
.orElseThrow(() -> new RuntimeException("Security Module not found: " + securityModuleName))
.get();
}
private File nodePrivateKeyFile() {
return Optional.ofNullable(nodePrivateKeyFile)
.orElseGet(() -> KeyPairUtil.getDefaultKeyFile(dataDir()));
}
private String rpcHttpAuthenticationCredentialsFile() {
final String filename = rpcHttpAuthenticationCredentialsFile;
if (filename != null) {
RpcAuthFileValidator.validate(commandLine, filename, "HTTP");
}
return filename;
}
private String rpcWsAuthenticationCredentialsFile() {
final String filename = rpcWsAuthenticationCredentialsFile;
if (filename != null) {
RpcAuthFileValidator.validate(commandLine, filename, "WS");
}
return filename;
}
private String getDefaultPermissioningFilePath() {
return dataDir()
+ System.getProperty("file.separator")
+ DefaultCommandValues.PERMISSIONING_CONFIG_LOCATION;
}
public MetricsSystem getMetricsSystem() {
return metricsSystem.get();
}
private Set<EnodeURL> loadStaticNodes() throws IOException {
final Path staticNodesPath;
if (staticNodesFile != null) {
staticNodesPath = staticNodesFile.toAbsolutePath();
if (!staticNodesPath.toFile().exists()) {
throw new ParameterException(
commandLine, String.format("Static nodes file %s does not exist", staticNodesPath));
}
} else {
final String staticNodesFilename = "static-nodes.json";
staticNodesPath = dataDir().resolve(staticNodesFilename);
}
logger.info("Static Nodes file = {}", staticNodesPath);
return StaticNodesParser.fromPath(staticNodesPath, getEnodeDnsConfiguration());
}
public BesuExceptionHandler exceptionHandler() {
return new BesuExceptionHandler(this::getLogLevel);
}
public EnodeDnsConfiguration getEnodeDnsConfiguration() {
if (enodeDnsConfiguration == null) {
enodeDnsConfiguration = unstableDnsOptions.toDomainObject();
}
return enodeDnsConfiguration;
}
private void checkPortClash() {
getEffectivePorts().stream()
.filter(Objects::nonNull)
.filter(port -> port > 0)
.forEach(
port -> {
if (!allocatedPorts.add(port)) {
throw new ParameterException(
commandLine,
"Port number '"
+ port
+ "' has been specified multiple times. Please review the supplied configuration.");
}
});
}
/**
* * Gets the list of effective ports (ports that are enabled).
*
* @return The list of effective ports
*/
private List<Integer> getEffectivePorts() {
final List<Integer> effectivePorts = new ArrayList<>();
addPortIfEnabled(effectivePorts, p2pPort, p2pEnabled);
addPortIfEnabled(effectivePorts, graphQLHttpPort, isGraphQLHttpEnabled);
addPortIfEnabled(effectivePorts, rpcHttpPort, isRpcHttpEnabled);
addPortIfEnabled(effectivePorts, rpcWsPort, isRpcWsEnabled);
addPortIfEnabled(effectivePorts, metricsPort, isMetricsEnabled);
addPortIfEnabled(effectivePorts, metricsPushPort, isMetricsPushEnabled);
addPortIfEnabled(effectivePorts, stratumPort, iStratumMiningEnabled);
return effectivePorts;
}
/**
* Adds port in the passed list only if enabled.
*
* @param ports The list of ports
* @param port The port value
* @param enabled true if enabled, false otherwise
*/
private void addPortIfEnabled(
final List<Integer> ports, final Integer port, final boolean enabled) {
if (enabled) {
ports.add(port);
}
}
private void checkGoQuorumCompatibilityConfig(final EthNetworkConfig ethNetworkConfig) {
if (isGoQuorumCompatibilityMode) {
final GenesisConfigOptions genesisConfigOptions = readGenesisConfigOptions();
// this static flag is read by the RLP decoder
GoQuorumOptions.goQuorumCompatibilityMode = true;
if (!genesisConfigOptions.isQuorum()) {
throw new IllegalStateException(
"GoQuorum compatibility mode (enabled) can only be used if genesis file has 'isQuorum' flag set to true.");
}
genesisConfigOptions
.getChainId()
.ifPresent(
chainId ->
ensureGoQuorumCompatibilityModeNotUsedOnMainnet(
chainId, isGoQuorumCompatibilityMode));
if (genesisFile != null
&& getGenesisConfigFile().getConfigOptions().isQuorum()
&& !minTransactionGasPrice.isZero()) {
throw new ParameterException(
this.commandLine,
"--min-gas-price must be set to zero if GoQuorum compatibility is enabled in the genesis config.");
}
if (ethNetworkConfig.getNetworkId().equals(EthNetworkConfig.MAINNET_NETWORK_ID)) {
throw new ParameterException(
this.commandLine, "GoQuorum compatibility mode (enabled) cannot be used on Mainnet.");
}
}
}
private void ensureGoQuorumCompatibilityModeNotUsedOnMainnet(
final BigInteger chainId, final boolean isGoQuorumCompatibilityMode) {
if (isGoQuorumCompatibilityMode && chainId.equals(EthNetworkConfig.MAINNET_NETWORK_ID)) {
throw new IllegalStateException(
"GoQuorum compatibility mode (enabled) cannot be used on Mainnet.");
}
}
@VisibleForTesting
Level getLogLevel() {
return logLevel;
}
private class BesuCommandConfigurationService implements BesuConfiguration {
@Override
public Path getStoragePath() {
return dataDir().resolve(DATABASE_PATH);
}
@Override
public Path getDataPath() {
return dataDir();
}
@Override
public int getDatabaseVersion() {
return unstableDataStorageOptions
.toDomainObject()
.getDataStorageFormat()
.getDatabaseVersion();
}
}
}
| 1 | 24,753 | This looks inverted to me: if `--rpc-require-chainid-in-txs=true`, then shouldn't `unprotectedTransactionsAllowed == false`? Suggest changing this variable to `requireTxReplayProtection` or similar. I think the flag name could be clearer as well, maybe: `--require-tx-replay-protection`. | hyperledger-besu | java |